summaryrefslogtreecommitdiff
path: root/src/mongo
diff options
context:
space:
mode:
authorIan Boros <ian.boros@mongodb.com>2020-01-30 13:10:55 -0500
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-02-28 22:16:41 +0000
commitcfa5c05fa1855fb1a04cb3a6e2eb10a7e82bf726 (patch)
tree7ab1e1ce8e2edd6837952c131fe14d43a0633235 /src/mongo
parent793ae32c597f197b6445750aa9bfdaabc206132d (diff)
downloadmongo-cfa5c05fa1855fb1a04cb3a6e2eb10a7e82bf726.tar.gz
SERVER-45406 Plumb ExpressionContext through PlanStage
This patch also moves ownership of the collator to the ExpressionContext.
Diffstat (limited to 'src/mongo')
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_mock.cpp6
-rw-r--r--src/mongo/db/catalog/collection.h49
-rw-r--r--src/mongo/db/catalog/collection_impl.cpp94
-rw-r--r--src/mongo/db/catalog/collection_impl.h26
-rw-r--r--src/mongo/db/catalog/collection_mock.h11
-rw-r--r--src/mongo/db/catalog/database_impl.cpp2
-rw-r--r--src/mongo/db/catalog/index_catalog_entry_impl.cpp7
-rw-r--r--src/mongo/db/catalog/index_catalog_entry_impl.h3
-rw-r--r--src/mongo/db/catalog/index_catalog_impl.cpp12
-rw-r--r--src/mongo/db/commands/count_cmd.cpp12
-rw-r--r--src/mongo/db/commands/distinct.cpp4
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp13
-rw-r--r--src/mongo/db/commands/list_collections.cpp11
-rw-r--r--src/mongo/db/commands/list_databases.cpp5
-rw-r--r--src/mongo/db/commands/list_indexes.cpp6
-rw-r--r--src/mongo/db/commands/run_aggregate.cpp4
-rw-r--r--src/mongo/db/commands/write_commands/write_commands.cpp14
-rw-r--r--src/mongo/db/exec/and_hash.cpp8
-rw-r--r--src/mongo/db/exec/and_hash.h4
-rw-r--r--src/mongo/db/exec/and_sorted.cpp4
-rw-r--r--src/mongo/db/exec/and_sorted.h2
-rw-r--r--src/mongo/db/exec/cached_plan.cpp10
-rw-r--r--src/mongo/db/exec/cached_plan.h2
-rw-r--r--src/mongo/db/exec/change_stream_proxy.cpp4
-rw-r--r--src/mongo/db/exec/change_stream_proxy.h2
-rw-r--r--src/mongo/db/exec/collection_scan.cpp17
-rw-r--r--src/mongo/db/exec/collection_scan.h2
-rw-r--r--src/mongo/db/exec/count.cpp4
-rw-r--r--src/mongo/db/exec/count.h2
-rw-r--r--src/mongo/db/exec/count_scan.cpp8
-rw-r--r--src/mongo/db/exec/count_scan.h2
-rw-r--r--src/mongo/db/exec/delete.cpp14
-rw-r--r--src/mongo/db/exec/delete.h2
-rw-r--r--src/mongo/db/exec/distinct_scan.cpp10
-rw-r--r--src/mongo/db/exec/distinct_scan.h2
-rw-r--r--src/mongo/db/exec/ensure_sorted.cpp4
-rw-r--r--src/mongo/db/exec/ensure_sorted.h2
-rw-r--r--src/mongo/db/exec/eof.cpp2
-rw-r--r--src/mongo/db/exec/eof.h2
-rw-r--r--src/mongo/db/exec/fetch.cpp10
-rw-r--r--src/mongo/db/exec/fetch.h2
-rw-r--r--src/mongo/db/exec/geo_near.cpp44
-rw-r--r--src/mongo/db/exec/geo_near.h12
-rw-r--r--src/mongo/db/exec/idhack.cpp17
-rw-r--r--src/mongo/db/exec/idhack.h4
-rw-r--r--src/mongo/db/exec/index_scan.cpp10
-rw-r--r--src/mongo/db/exec/index_scan.h2
-rw-r--r--src/mongo/db/exec/limit.cpp4
-rw-r--r--src/mongo/db/exec/limit.h2
-rw-r--r--src/mongo/db/exec/merge_sort.cpp4
-rw-r--r--src/mongo/db/exec/merge_sort.h2
-rw-r--r--src/mongo/db/exec/multi_iterator.cpp9
-rw-r--r--src/mongo/db/exec/multi_iterator.h2
-rw-r--r--src/mongo/db/exec/multi_plan.cpp8
-rw-r--r--src/mongo/db/exec/multi_plan.h2
-rw-r--r--src/mongo/db/exec/near.cpp8
-rw-r--r--src/mongo/db/exec/near.h2
-rw-r--r--src/mongo/db/exec/or.cpp7
-rw-r--r--src/mongo/db/exec/or.h2
-rw-r--r--src/mongo/db/exec/pipeline_proxy.cpp12
-rw-r--r--src/mongo/db/exec/pipeline_proxy.h4
-rw-r--r--src/mongo/db/exec/plan_stage.h25
-rw-r--r--src/mongo/db/exec/projection.cpp10
-rw-r--r--src/mongo/db/exec/projection.h6
-rw-r--r--src/mongo/db/exec/projection_executor_builder_test.cpp5
-rw-r--r--src/mongo/db/exec/queued_data_stage.cpp4
-rw-r--r--src/mongo/db/exec/queued_data_stage.h2
-rw-r--r--src/mongo/db/exec/queued_data_stage_test.cpp11
-rw-r--r--src/mongo/db/exec/record_store_fast_count.cpp6
-rw-r--r--src/mongo/db/exec/record_store_fast_count.h2
-rw-r--r--src/mongo/db/exec/requires_all_indices_stage.h6
-rw-r--r--src/mongo/db/exec/requires_collection_stage.cpp8
-rw-r--r--src/mongo/db/exec/requires_collection_stage.h8
-rw-r--r--src/mongo/db/exec/requires_index_stage.cpp4
-rw-r--r--src/mongo/db/exec/requires_index_stage.h2
-rw-r--r--src/mongo/db/exec/return_key.h4
-rw-r--r--src/mongo/db/exec/shard_filter.cpp4
-rw-r--r--src/mongo/db/exec/shard_filter.h2
-rw-r--r--src/mongo/db/exec/skip.cpp4
-rw-r--r--src/mongo/db/exec/skip.h2
-rw-r--r--src/mongo/db/exec/sort.cpp2
-rw-r--r--src/mongo/db/exec/sort_key_generator.cpp6
-rw-r--r--src/mongo/db/exec/sort_key_generator.h2
-rw-r--r--src/mongo/db/exec/sort_test.cpp18
-rw-r--r--src/mongo/db/exec/stagedebug_cmd.cpp58
-rw-r--r--src/mongo/db/exec/subplan.cpp19
-rw-r--r--src/mongo/db/exec/subplan.h2
-rw-r--r--src/mongo/db/exec/text.cpp18
-rw-r--r--src/mongo/db/exec/text.h2
-rw-r--r--src/mongo/db/exec/text_match.cpp4
-rw-r--r--src/mongo/db/exec/text_match.h2
-rw-r--r--src/mongo/db/exec/text_or.cpp11
-rw-r--r--src/mongo/db/exec/text_or.h2
-rw-r--r--src/mongo/db/exec/trial_stage.cpp17
-rw-r--r--src/mongo/db/exec/trial_stage.h2
-rw-r--r--src/mongo/db/exec/update_stage.cpp45
-rw-r--r--src/mongo/db/exec/update_stage.h4
-rw-r--r--src/mongo/db/exec/upsert_stage.cpp24
-rw-r--r--src/mongo/db/exec/upsert_stage.h2
-rw-r--r--src/mongo/db/index/sort_key_generator_test.cpp2
-rw-r--r--src/mongo/db/matcher/expression_algo_test.cpp7
-rw-r--r--src/mongo/db/matcher/expression_expr.cpp8
-rw-r--r--src/mongo/db/matcher/expression_expr_test.cpp8
-rw-r--r--src/mongo/db/matcher/expression_parser_array_test.cpp7
-rw-r--r--src/mongo/db/matcher/expression_parser_leaf_test.cpp80
-rw-r--r--src/mongo/db/matcher/expression_with_placeholder_test.cpp5
-rw-r--r--src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp5
-rw-r--r--src/mongo/db/ops/delete.cpp7
-rw-r--r--src/mongo/db/ops/parsed_delete.cpp19
-rw-r--r--src/mongo/db/ops/parsed_delete.h17
-rw-r--r--src/mongo/db/ops/parsed_update.cpp32
-rw-r--r--src/mongo/db/ops/parsed_update.h24
-rw-r--r--src/mongo/db/ops/update.cpp8
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp14
-rw-r--r--src/mongo/db/pipeline/accumulator_test.cpp15
-rw-r--r--src/mongo/db/pipeline/document_source.h2
-rw-r--r--src/mongo/db/pipeline/document_source_check_resume_token_test.cpp2
-rw-r--r--src/mongo/db/pipeline/expression_context.cpp51
-rw-r--r--src/mongo/db/pipeline/expression_context.h48
-rw-r--r--src/mongo/db/pipeline/expression_trim_test.cpp2
-rw-r--r--src/mongo/db/pipeline/pipeline_d.cpp29
-rw-r--r--src/mongo/db/query/canonical_query.cpp25
-rw-r--r--src/mongo/db/query/canonical_query.h5
-rw-r--r--src/mongo/db/query/get_executor.cpp108
-rw-r--r--src/mongo/db/query/get_executor.h20
-rw-r--r--src/mongo/db/query/internal_plans.cpp74
-rw-r--r--src/mongo/db/query/internal_plans.h28
-rw-r--r--src/mongo/db/query/plan_cache_indexability_test.cpp37
-rw-r--r--src/mongo/db/query/projection_test.cpp6
-rw-r--r--src/mongo/db/query/query_planner_partialidx_test.cpp24
-rw-r--r--src/mongo/db/query/query_planner_test_fixture.cpp18
-rw-r--r--src/mongo/db/query/query_planner_test_fixture.h13
-rw-r--r--src/mongo/db/query/query_planner_test_lib.cpp5
-rw-r--r--src/mongo/db/query/query_solution_test.cpp5
-rw-r--r--src/mongo/db/query/stage_builder.cpp39
-rw-r--r--src/mongo/db/repl/apply_ops.cpp3
-rw-r--r--src/mongo/db/repl/storage_interface_impl.cpp4
-rw-r--r--src/mongo/db/update/addtoset_node_test.cpp17
-rw-r--r--src/mongo/db/update/compare_node_test.cpp13
-rw-r--r--src/mongo/db/update/pull_node_test.cpp30
-rw-r--r--src/mongo/db/update/pullall_node_test.cpp5
-rw-r--r--src/mongo/db/update/push_node_test.cpp5
-rw-r--r--src/mongo/db/update/update_driver.cpp2
-rw-r--r--src/mongo/db/update/update_object_node_test.cpp5
-rw-r--r--src/mongo/dbtests/cursor_manager_test.cpp5
-rw-r--r--src/mongo/dbtests/documentsourcetests.cpp28
-rw-r--r--src/mongo/dbtests/matchertests.cpp10
-rw-r--r--src/mongo/dbtests/plan_executor_invalidation_test.cpp7
-rw-r--r--src/mongo/dbtests/plan_ranking.cpp5
-rw-r--r--src/mongo/dbtests/query_plan_executor.cpp17
-rw-r--r--src/mongo/dbtests/query_stage_and.cpp143
-rw-r--r--src/mongo/dbtests/query_stage_cached_plan.cpp44
-rw-r--r--src/mongo/dbtests/query_stage_collscan.cpp24
-rw-r--r--src/mongo/dbtests/query_stage_count.cpp16
-rw-r--r--src/mongo/dbtests/query_stage_count_scan.cpp27
-rw-r--r--src/mongo/dbtests/query_stage_delete.cpp20
-rw-r--r--src/mongo/dbtests/query_stage_distinct.cpp10
-rw-r--r--src/mongo/dbtests/query_stage_ensure_sorted.cpp29
-rw-r--r--src/mongo/dbtests/query_stage_fetch.cpp16
-rw-r--r--src/mongo/dbtests/query_stage_ixscan.cpp9
-rw-r--r--src/mongo/dbtests/query_stage_limit_skip.cpp12
-rw-r--r--src/mongo/dbtests/query_stage_merge_sort.cpp80
-rw-r--r--src/mongo/dbtests/query_stage_multiplan.cpp90
-rw-r--r--src/mongo/dbtests/query_stage_near.cpp20
-rw-r--r--src/mongo/dbtests/query_stage_sort.cpp14
-rw-r--r--src/mongo/dbtests/query_stage_sort_key_generator.cpp53
-rw-r--r--src/mongo/dbtests/query_stage_subplan.cpp30
-rw-r--r--src/mongo/dbtests/query_stage_tests.cpp10
-rw-r--r--src/mongo/dbtests/query_stage_trial.cpp33
-rw-r--r--src/mongo/dbtests/query_stage_update.cpp39
-rw-r--r--src/mongo/embedded/stitch_support/stitch_support.cpp28
171 files changed, 1436 insertions, 1178 deletions
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index 521ba897fda..2967e62ff0f 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -186,9 +186,8 @@ Status AuthzManagerExternalStateMock::updateOne(OperationContext* opCtx,
bool upsert,
const BSONObj& writeConcern) {
namespace mmb = mutablebson;
- const CollatorInterface* collator = nullptr;
boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator, collectionName));
+ new ExpressionContext(opCtx, std::unique_ptr<CollatorInterface>(nullptr), collectionName));
UpdateDriver driver(std::move(expCtx));
std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> arrayFilters;
driver.parse(updatePattern, arrayFilters);
@@ -299,9 +298,8 @@ Status AuthzManagerExternalStateMock::_queryVector(
const NamespaceString& collectionName,
const BSONObj& query,
std::vector<BSONObjCollection::iterator>* result) {
- const CollatorInterface* collator = nullptr;
boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator, collectionName));
+ new ExpressionContext(opCtx, std::unique_ptr<CollatorInterface>(nullptr), collectionName));
StatusWithMatchExpression parseResult = MatchExpressionParser::parse(query, std::move(expCtx));
if (!parseResult.isOK()) {
return parseResult.getStatus();
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index fdfb119719b..86deaf95b0a 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -185,6 +185,53 @@ public:
};
/**
+ * A Collection::Validator represents a filter that is applied to all documents that are
+ * inserted. Enforcement of Validators being well formed is done lazily, so the 'Validator'
+ * class may represent a validator which is not well formed.
+ */
+ struct Validator {
+
+ /**
+ * Returns whether the validator's filter is well formed.
+ */
+ bool isOK() const {
+ return filter.isOK();
+ }
+
+ /**
+     * Returns OK or the error encountered when parsing the validator.
+ */
+ Status getStatus() const {
+ return filter.getStatus();
+ }
+
+ /**
+ * Empty means no validator. This must outlive 'filter'.
+ */
+ BSONObj validatorDoc;
+
+ /**
+ * A special ExpressionContext used to evaluate the filter match expression. This should
+ * outlive 'filter'.
+ */
+ boost::intrusive_ptr<ExpressionContext> expCtxForFilter;
+
+ /**
+ * The collection validator MatchExpression. This is stored as a StatusWith, as we lazily
+ * enforce that collection validators are well formed.
+ *
+ * -A non-OK Status indicates that the validator is not well formed, and any attempts to
+ * enforce the validator should error.
+ *
+ * -A value of Status::OK/nullptr indicates that there is no validator.
+ *
+ * -Anything else indicates a well formed validator. The MatchExpression will maintain
+ * pointers into _validatorDoc.
+ */
+ StatusWithMatchExpression filter = {nullptr};
+ };
+
+ /**
* Callback function for callers of insertDocumentForBulkLoader().
*/
using OnRecordInsertedFn = std::function<Status(const RecordId& loc)>;
@@ -354,7 +401,7 @@ public:
/**
* Returns a non-ok Status if validator is not legal for this collection.
*/
- virtual StatusWithMatchExpression parseValidator(
+ virtual Validator parseValidator(
OperationContext* opCtx,
const BSONObj& validator,
MatchExpressionParser::AllowedFeatureSet allowedFeatures,
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index d45b3e27d23..221d042bf80 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -256,7 +256,6 @@ CollectionImpl::CollectionImpl(OperationContext* opCtx,
_needCappedLock(supportsDocLocking() && _recordStore && _recordStore->isCapped() &&
_ns.db() != "local"),
_indexCatalog(std::make_unique<IndexCatalogImpl>(this)),
- _swValidator{nullptr},
_cappedNotifier(_recordStore && _recordStore->isCapped()
? std::make_unique<CappedInsertNotifier>()
: nullptr) {
@@ -288,22 +287,23 @@ void CollectionImpl::init(OperationContext* opCtx) {
auto collectionOptions =
DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, getCatalogId());
_collator = parseCollation(opCtx, _ns, collectionOptions.collation);
- _validatorDoc = collectionOptions.validator.getOwned();
+ auto validatorDoc = collectionOptions.validator.getOwned();
// Enforce that the validator can be used on this namespace.
- uassertStatusOK(checkValidatorCanBeUsedOnNs(_validatorDoc, ns(), _uuid));
+ uassertStatusOK(checkValidatorCanBeUsedOnNs(validatorDoc, ns(), _uuid));
// Store the result (OK / error) of parsing the validator, but do not enforce that the result is
// OK. This is intentional, as users may have validators on disk which were considered well
// formed in older versions but not in newer versions.
- _swValidator =
- parseValidator(opCtx, _validatorDoc, MatchExpressionParser::kAllowAllSpecialFeatures);
- if (!_swValidator.isOK()) {
+ _validator =
+ parseValidator(opCtx, validatorDoc, MatchExpressionParser::kAllowAllSpecialFeatures);
+ if (!_validator.isOK()) {
// Log an error and startup warning if the collection validator is malformed.
LOGV2_WARNING_OPTIONS(20293,
{logv2::LogTag::kStartupWarnings},
- "Collection {ns} has malformed validator: {swValidator_getStatus}",
+ "Collection {ns} has malformed validator: {validatorStatus}",
+ "Collection has malformed validator",
"ns"_attr = _ns,
- "swValidator_getStatus"_attr = _swValidator.getStatus());
+ "validatorStatus"_attr = _validator.getStatus());
}
_validationAction = uassertStatusOK(_parseValidationAction(collectionOptions.validationAction));
_validationLevel = uassertStatusOK(_parseValidationLevel(collectionOptions.validationLevel));
@@ -366,11 +366,11 @@ bool CollectionImpl::findDoc(OperationContext* opCtx,
}
Status CollectionImpl::checkValidation(OperationContext* opCtx, const BSONObj& document) const {
- if (!_swValidator.isOK()) {
- return _swValidator.getStatus();
+ if (!_validator.isOK()) {
+ return _validator.getStatus();
}
- const auto* const validatorMatchExpr = _swValidator.getValue().get();
+ const auto* const validatorMatchExpr = _validator.filter.getValue().get();
if (!validatorMatchExpr)
return Status::OK();
@@ -394,26 +394,26 @@ Status CollectionImpl::checkValidation(OperationContext* opCtx, const BSONObj& d
return {ErrorCodes::DocumentValidationFailure, "Document failed validation"};
}
-StatusWithMatchExpression CollectionImpl::parseValidator(
+Collection::Validator CollectionImpl::parseValidator(
OperationContext* opCtx,
const BSONObj& validator,
MatchExpressionParser::AllowedFeatureSet allowedFeatures,
boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
maxFeatureCompatibilityVersion) const {
if (MONGO_unlikely(allowSettingMalformedCollectionValidators.shouldFail())) {
- return {nullptr};
+ return {validator, nullptr, nullptr};
}
if (validator.isEmpty())
- return {nullptr};
+ return {validator, nullptr, nullptr};
Status canUseValidatorInThisContext = checkValidatorCanBeUsedOnNs(validator, ns(), _uuid);
if (!canUseValidatorInThisContext.isOK()) {
- return canUseValidatorInThisContext;
+ return {validator, nullptr, canUseValidatorInThisContext};
}
- boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, _collator.get(), ns()));
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, CollatorInterface::cloneCollator(_collator.get()), ns());
// The MatchExpression and contained ExpressionContext created as part of the validator are
// owned by the Collection and will outlive the OperationContext they were created under.
@@ -430,11 +430,14 @@ StatusWithMatchExpression CollectionImpl::parseValidator(
MatchExpressionParser::parse(validator, expCtx, ExtensionsCallbackNoop(), allowedFeatures);
if (!statusWithMatcher.isOK()) {
- return StatusWithMatchExpression{
+ return {
+ validator,
+ boost::intrusive_ptr<ExpressionContext>(nullptr),
statusWithMatcher.getStatus().withContext("Parsing of collection validator failed")};
}
- return statusWithMatcher;
+ return Collection::Validator{
+ validator, std::move(expCtx), std::move(statusWithMatcher.getValue())};
}
Status CollectionImpl::insertDocumentsForOplog(OperationContext* opCtx,
@@ -443,8 +446,8 @@ Status CollectionImpl::insertDocumentsForOplog(OperationContext* opCtx,
dassert(opCtx->lockState()->isWriteLocked());
// Since this is only for the OpLog, we can assume these for simplicity.
- invariant(_swValidator.isOK());
- invariant(_swValidator.getValue() == nullptr);
+ invariant(_validator.isOK());
+ invariant(_validator.filter.getValue() == nullptr);
invariant(!_indexCatalog->haveAnyIndexes());
Status status = _recordStore->insertRecords(opCtx, records, timestamps);
@@ -806,7 +809,7 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
}
bool CollectionImpl::updateWithDamagesSupported() const {
- if (!_swValidator.isOK() || _swValidator.getValue() != nullptr)
+ if (!_validator.isOK() || _validator.filter.getValue() != nullptr)
return false;
return _recordStore->updateWithDamagesSupported();
@@ -986,22 +989,18 @@ Status CollectionImpl::setValidator(OperationContext* opCtx, BSONObj validatorDo
// Note that, by the time we reach this, we should have already done a pre-parse that checks for
// banned features, so we don't need to include that check again.
- auto statusWithMatcher =
+ auto newValidator =
parseValidator(opCtx, validatorDoc, MatchExpressionParser::kAllowAllSpecialFeatures);
- if (!statusWithMatcher.isOK())
- return statusWithMatcher.getStatus();
+ if (!newValidator.isOK())
+ return newValidator.getStatus();
DurableCatalog::get(opCtx)->updateValidator(
opCtx, getCatalogId(), validatorDoc, getValidationLevel(), getValidationAction());
- opCtx->recoveryUnit()->onRollback([this,
- oldValidator = std::move(_swValidator),
- oldValidatorDoc = std::move(_validatorDoc)]() mutable {
- this->_swValidator = std::move(oldValidator);
- this->_validatorDoc = std::move(oldValidatorDoc);
+ opCtx->recoveryUnit()->onRollback([this, oldValidator = std::move(_validator)]() mutable {
+ this->_validator = std::move(oldValidator);
});
- _swValidator = std::move(statusWithMatcher);
- _validatorDoc = std::move(validatorDoc);
+ _validator = std::move(newValidator);
return Status::OK();
}
@@ -1038,8 +1037,11 @@ Status CollectionImpl::setValidationLevel(OperationContext* opCtx, StringData ne
auto oldValidationLevel = _validationLevel;
_validationLevel = levelSW.getValue();
- DurableCatalog::get(opCtx)->updateValidator(
- opCtx, getCatalogId(), _validatorDoc, getValidationLevel(), getValidationAction());
+ DurableCatalog::get(opCtx)->updateValidator(opCtx,
+ getCatalogId(),
+ _validator.validatorDoc,
+ getValidationLevel(),
+ getValidationAction());
opCtx->recoveryUnit()->onRollback(
[this, oldValidationLevel]() { this->_validationLevel = oldValidationLevel; });
@@ -1058,8 +1060,11 @@ Status CollectionImpl::setValidationAction(OperationContext* opCtx, StringData n
_validationAction = actionSW.getValue();
- DurableCatalog::get(opCtx)->updateValidator(
- opCtx, getCatalogId(), _validatorDoc, getValidationLevel(), getValidationAction());
+ DurableCatalog::get(opCtx)->updateValidator(opCtx,
+ getCatalogId(),
+ _validator.validatorDoc,
+ getValidationLevel(),
+ getValidationAction());
opCtx->recoveryUnit()->onRollback(
[this, oldValidationAction]() { this->_validationAction = oldValidationAction; });
@@ -1073,26 +1078,23 @@ Status CollectionImpl::updateValidator(OperationContext* opCtx,
invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
opCtx->recoveryUnit()->onRollback([this,
- oldValidator = std::move(_swValidator),
- oldValidatorDoc = std::move(_validatorDoc),
+ oldValidator = std::move(_validator),
oldValidationLevel = _validationLevel,
oldValidationAction = _validationAction]() mutable {
- this->_swValidator = std::move(oldValidator);
- this->_validatorDoc = std::move(oldValidatorDoc);
+ this->_validator = std::move(oldValidator);
this->_validationLevel = oldValidationLevel;
this->_validationAction = oldValidationAction;
});
DurableCatalog::get(opCtx)->updateValidator(
opCtx, getCatalogId(), newValidator, newLevel, newAction);
- _validatorDoc = std::move(newValidator);
- auto validatorSW =
- parseValidator(opCtx, _validatorDoc, MatchExpressionParser::kAllowAllSpecialFeatures);
- if (!validatorSW.isOK()) {
- return validatorSW.getStatus();
+ auto validator =
+ parseValidator(opCtx, newValidator, MatchExpressionParser::kAllowAllSpecialFeatures);
+ if (!validator.isOK()) {
+ return validator.getStatus();
}
- _swValidator = std::move(validatorSW.getValue());
+ _validator = std::move(validator);
auto levelSW = _parseValidationLevel(newLevel);
if (!levelSW.isOK()) {
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index faa0f05100a..e3828f9471c 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -90,7 +90,7 @@ public:
}
const BSONObj getValidatorDoc() const final {
- return _validatorDoc.getOwned();
+ return _validator.validatorDoc.getOwned();
}
bool requiresIdIndex() const final;
@@ -235,12 +235,11 @@ public:
/**
* Returns a non-ok Status if validator is not legal for this collection.
*/
- StatusWithMatchExpression parseValidator(
- OperationContext* opCtx,
- const BSONObj& validator,
- MatchExpressionParser::AllowedFeatureSet allowedFeatures,
- boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
- maxFeatureCompatibilityVersion = boost::none) const final;
+ Validator parseValidator(OperationContext* opCtx,
+ const BSONObj& validator,
+ MatchExpressionParser::AllowedFeatureSet allowedFeatures,
+ boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
+ maxFeatureCompatibilityVersion = boost::none) const final;
/**
* Sets the validator for this collection.
@@ -394,17 +393,8 @@ private:
// If null, the default collation is simple binary compare.
std::unique_ptr<CollatorInterface> _collator;
- // Empty means no validator.
- BSONObj _validatorDoc;
-
- // The collection validator MatchExpression. This is stored as a StatusWith, as we lazily
- // enforce that collection validators are well formed.
- // -A non-OK Status indicates that the validator is not well formed, and any attempts to enforce
- // the validator (inserts) should error.
- // -A value of {nullptr} indicates that there is no validator.
- // -Anything else indicates a well formed validator. The MatchExpression will maintain
- // pointers into _validatorDoc.
- StatusWithMatchExpression _swValidator;
+
+ Validator _validator;
ValidationAction _validationAction;
ValidationLevel _validationLevel;
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index 001bee4db5d..ecab3451f6d 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -170,12 +170,11 @@ public:
std::abort();
}
- StatusWithMatchExpression parseValidator(
- OperationContext* opCtx,
- const BSONObj& validator,
- MatchExpressionParser::AllowedFeatureSet allowedFeatures,
- boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
- maxFeatureCompatibilityVersion) const {
+ Validator parseValidator(OperationContext* opCtx,
+ const BSONObj& validator,
+ MatchExpressionParser::AllowedFeatureSet allowedFeatures,
+ boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
+ maxFeatureCompatibilityVersion) const {
std::abort();
}
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 685ae79ef5c..f8c08086602 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -878,7 +878,7 @@ Status DatabaseImpl::userCreateNS(OperationContext* opCtx,
if (!collectionOptions.validator.isEmpty()) {
boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator.get(), nss));
+ new ExpressionContext(opCtx, std::move(collator), nss));
// Save this to a variable to avoid reading the atomic variable multiple times.
const auto currentFCV = serverGlobalParams.featureCompatibility.getVersion();
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index aca5e622a02..44630f8772a 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -47,6 +47,7 @@
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/multi_key_path_tracker.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/pipeline/expression_context.h"
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/query/collection_query_info.h"
#include "mongo/db/service_context.h"
@@ -97,14 +98,14 @@ IndexCatalogEntryImpl::IndexCatalogEntryImpl(OperationContext* const opCtx,
if (_descriptor->isPartial()) {
const BSONObj& filter = _descriptor->partialFilterExpression();
- boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, _collator.get(), ns()));
+ _expCtxForFilter = make_intrusive<ExpressionContext>(
+ opCtx, CollatorInterface::cloneCollator(_collator.get()), ns());
// Parsing the partial filter expression is not expected to fail here since the
// expression would have been successfully parsed upstream during index creation.
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(filter,
- std::move(expCtx),
+ _expCtxForFilter,
ExtensionsCallbackNoop(),
MatchExpressionParser::kBanAllSpecialFeatures);
invariant(statusWithMatcher.getStatus());
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.h b/src/mongo/db/catalog/index_catalog_entry_impl.h
index 7cb8d5c98c6..4f4691359aa 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.h
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.h
@@ -51,6 +51,7 @@ class IndexAccessMethod;
class IndexDescriptor;
class MatchExpression;
class OperationContext;
+class ExpressionContext;
class IndexCatalogEntryImpl : public IndexCatalogEntry {
IndexCatalogEntryImpl(const IndexCatalogEntryImpl&) = delete;
@@ -224,6 +225,8 @@ private:
std::unique_ptr<CollatorInterface> _collator;
std::unique_ptr<MatchExpression> _filterExpression;
+ // Special ExpressionContext used to evaluate the partial filter expression.
+ boost::intrusive_ptr<ExpressionContext> _expCtxForFilter;
// cached stuff
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 0c637730bf6..e8079ede4b9 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -667,6 +667,11 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
}
}
+ // Create an ExpressionContext, used to parse the match expression and to house the collator for
+ // the remaining checks.
+ boost::intrusive_ptr<ExpressionContext> expCtx(
+ new ExpressionContext(opCtx, std::move(collator), nss));
+
// Ensure if there is a filter, its valid.
BSONElement filterElement = spec.getField("partialFilterExpression");
if (filterElement) {
@@ -680,10 +685,6 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
"\"partialFilterExpression\" for an index must be a document");
}
- // The collator must outlive the constructed MatchExpression.
- boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator.get(), nss));
-
// Parsing the partial filter expression is not expected to fail here since the
// expression would have been successfully parsed upstream during index creation.
StatusWithMatchExpression statusWithMatcher =
@@ -717,7 +718,8 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
}
if (collationElement &&
- !CollatorInterface::collatorsMatch(collator.get(), _collection->getDefaultCollator())) {
+ !CollatorInterface::collatorsMatch(expCtx->getCollator(),
+ _collection->getDefaultCollator())) {
return Status(ErrorCodes::CannotCreateIndex,
"_id index must have the collection default collation");
}
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 2f2a7c4b09d..876a6f8847d 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -170,8 +170,11 @@ public:
// version on initial entry into count.
auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getCurrentMetadata();
+ auto expCtx = makeExpressionContextForGetExecutor(
+ opCtx, request.getCollation().value_or(BSONObj()), nss);
+
auto statusWithPlanExecutor =
- getExecutorCount(opCtx, collection, request, true /*explain*/, nss);
+ getExecutorCount(expCtx, collection, request, true /*explain*/, nss);
if (!statusWithPlanExecutor.isOK()) {
return statusWithPlanExecutor.getStatus();
}
@@ -228,7 +231,12 @@ public:
auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getCurrentMetadata();
auto statusWithPlanExecutor =
- getExecutorCount(opCtx, collection, request, false /*explain*/, nss);
+ getExecutorCount(makeExpressionContextForGetExecutor(
+ opCtx, request.getCollation().value_or(BSONObj()), nss),
+ collection,
+ request,
+ false /*explain*/,
+ nss);
uassertStatusOK(statusWithPlanExecutor.getStatus());
auto exec = std::move(statusWithPlanExecutor.getValue());
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index cbbaa62d243..135ab17773d 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -169,7 +169,7 @@ public:
Collection* const collection = ctx->getCollection();
auto executor = uassertStatusOK(
- getExecutorDistinct(opCtx, collection, QueryPlannerParams::DEFAULT, &parsedDistinct));
+ getExecutorDistinct(collection, QueryPlannerParams::DEFAULT, &parsedDistinct));
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(executor.get(), collection, verbosity, BSONObj(), &bodyBuilder);
@@ -225,7 +225,7 @@ public:
Collection* const collection = ctx->getCollection();
auto executor =
- getExecutorDistinct(opCtx, collection, QueryPlannerParams::DEFAULT, &parsedDistinct);
+ getExecutorDistinct(collection, QueryPlannerParams::DEFAULT, &parsedDistinct);
uassertStatusOK(executor.getStatus());
{
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 5d995f1c70e..923d44eb10c 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -275,8 +275,9 @@ public:
css->checkShardVersionOrThrow(opCtx);
Collection* const collection = autoColl.getCollection();
- const auto exec = uassertStatusOK(
- getExecutorDelete(opCtx, opDebug, collection, &parsedDelete, verbosity));
+
+ const auto exec =
+ uassertStatusOK(getExecutorDelete(opDebug, collection, &parsedDelete, verbosity));
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(exec.get(), collection, verbosity, BSONObj(), &bodyBuilder);
@@ -300,8 +301,8 @@ public:
css->checkShardVersionOrThrow(opCtx);
Collection* const collection = autoColl.getCollection();
- const auto exec = uassertStatusOK(
- getExecutorUpdate(opCtx, opDebug, collection, &parsedUpdate, verbosity));
+ const auto exec =
+ uassertStatusOK(getExecutorUpdate(opDebug, collection, &parsedUpdate, verbosity));
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(exec.get(), collection, verbosity, BSONObj(), &bodyBuilder);
@@ -390,7 +391,7 @@ public:
checkIfTransactionOnCappedColl(collection, inTransaction);
const auto exec = uassertStatusOK(getExecutorDelete(
- opCtx, opDebug, collection, &parsedDelete, boost::none /* verbosity */));
+ opDebug, collection, &parsedDelete, boost::none /* verbosity */));
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -496,7 +497,7 @@ public:
checkIfTransactionOnCappedColl(collection, inTransaction);
const auto exec = uassertStatusOK(getExecutorUpdate(
- opCtx, opDebug, collection, &parsedUpdate, boost::none /* verbosity */));
+ opDebug, collection, &parsedUpdate, boost::none /* verbosity */));
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 7ef36d2e1cf..fae03f2159a 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -255,16 +255,17 @@ public:
const bool nameOnly = jsobj["nameOnly"].trueValue();
const bool authorizedCollections = jsobj["authorizedCollections"].trueValue();
+ // The collator is null because collection objects are compared using binary comparison.
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), NamespaceString(dbname));
+
// Check for 'filter' argument.
BSONElement filterElt = jsobj["filter"];
if (!filterElt.eoo()) {
if (filterElt.type() != mongo::Object) {
uasserted(ErrorCodes::BadValue, "\"filter\" must be an object");
}
- // The collator is null because collection objects are compared using binary comparison.
- const CollatorInterface* collator = nullptr;
- boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator, NamespaceString(StringData(dbname))));
+
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(filterElt.Obj(), std::move(expCtx));
uassertStatusOK(statusWithMatcher.getStatus());
@@ -299,7 +300,7 @@ public:
cursorNss);
auto ws = std::make_unique<WorkingSet>();
- auto root = std::make_unique<QueuedDataStage>(opCtx, ws.get());
+ auto root = std::make_unique<QueuedDataStage>(expCtx.get(), ws.get());
if (db) {
if (auto collNames = _getExactNameMatches(matcher.get())) {
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 9a383f85522..c656355ec06 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -126,9 +126,8 @@ public:
if (auto filterObj = cmd.getFilter()) {
// The collator is null because database metadata objects are compared using simple
// binary comparison.
- const CollatorInterface* collator = nullptr;
- boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator, NamespaceString(dbname)));
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), NamespaceString(dbname));
auto matcher =
uassertStatusOK(MatchExpressionParser::parse(filterObj.get(), std::move(expCtx)));
filter = std::move(matcher);
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 02dc4b7ead2..0c3cf1c3053 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -155,9 +155,13 @@ public:
str::stream() << "ns does not exist: " << ctx.getNss().ns(),
collection);
nss = ctx.getNss();
+
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), nss);
+
auto indexList = listIndexesInLock(opCtx, collection, nss, includeBuildUUIDs);
auto ws = std::make_unique<WorkingSet>();
- auto root = std::make_unique<QueuedDataStage>(opCtx, ws.get());
+ auto root = std::make_unique<QueuedDataStage>(expCtx.get(), ws.get());
for (auto&& indexSpec : indexList) {
WorkingSetID id = ws->allocate();
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index ad65b9d9a48..104c5da8cba 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -467,8 +467,8 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> createOuterPipelineProxyExe
// Transfer ownership of the Pipeline to the PipelineProxyStage.
auto ws = std::make_unique<WorkingSet>();
auto proxy = hasChangeStream
- ? std::make_unique<ChangeStreamProxyStage>(opCtx, std::move(pipeline), ws.get())
- : std::make_unique<PipelineProxyStage>(opCtx, std::move(pipeline), ws.get());
+ ? std::make_unique<ChangeStreamProxyStage>(expCtx.get(), std::move(pipeline), ws.get())
+ : std::make_unique<PipelineProxyStage>(expCtx.get(), std::move(pipeline), ws.get());
// This PlanExecutor will simply forward requests to the Pipeline, so does not need
// to yield or to be registered with any collection's CursorManager to receive
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 172cf11d2f7..3435008fc6c 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -410,11 +410,8 @@ private:
// info is more accurate.
AutoGetCollection collection(opCtx, _batch.getNamespace(), MODE_IX);
- auto exec = uassertStatusOK(getExecutorUpdate(opCtx,
- &CurOp::get(opCtx)->debug(),
- collection.getCollection(),
- &parsedUpdate,
- verbosity));
+ auto exec = uassertStatusOK(getExecutorUpdate(
+ &CurOp::get(opCtx)->debug(), collection.getCollection(), &parsedUpdate, verbosity));
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(
exec.get(), collection.getCollection(), verbosity, BSONObj(), &bodyBuilder);
@@ -487,11 +484,8 @@ private:
AutoGetCollection collection(opCtx, _batch.getNamespace(), MODE_IX);
// Explain the plan tree.
- auto exec = uassertStatusOK(getExecutorDelete(opCtx,
- &CurOp::get(opCtx)->debug(),
- collection.getCollection(),
- &parsedDelete,
- verbosity));
+ auto exec = uassertStatusOK(getExecutorDelete(
+ &CurOp::get(opCtx)->debug(), collection.getCollection(), &parsedDelete, verbosity));
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(
exec.get(), collection.getCollection(), verbosity, BSONObj(), &bodyBuilder);
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 6c913377c4f..d2666c905e6 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -55,16 +55,16 @@ const size_t AndHashStage::kLookAheadWorks = 10;
// static
const char* AndHashStage::kStageType = "AND_HASH";
-AndHashStage::AndHashStage(OperationContext* opCtx, WorkingSet* ws)
- : PlanStage(kStageType, opCtx),
+AndHashStage::AndHashStage(ExpressionContext* expCtx, WorkingSet* ws)
+ : PlanStage(kStageType, expCtx),
_ws(ws),
_hashingChildren(true),
_currentChild(0),
_memUsage(0),
_maxMemUsage(kDefaultMaxMemUsageBytes) {}
-AndHashStage::AndHashStage(OperationContext* opCtx, WorkingSet* ws, size_t maxMemUsage)
- : PlanStage(kStageType, opCtx),
+AndHashStage::AndHashStage(ExpressionContext* expCtx, WorkingSet* ws, size_t maxMemUsage)
+ : PlanStage(kStageType, expCtx),
_ws(ws),
_hashingChildren(true),
_currentChild(0),
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index 3659504486d..4bd591952f5 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -48,12 +48,12 @@ namespace mongo {
*/
class AndHashStage final : public PlanStage {
public:
- AndHashStage(OperationContext* opCtx, WorkingSet* ws);
+ AndHashStage(ExpressionContext* expCtx, WorkingSet* ws);
/**
* For testing only. Allows tests to set memory usage threshold.
*/
- AndHashStage(OperationContext* opCtx, WorkingSet* ws, size_t maxMemUsage);
+ AndHashStage(ExpressionContext* expCtx, WorkingSet* ws, size_t maxMemUsage);
void addChild(std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 6cda1ad2bf1..57d2eb08b52 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -45,8 +45,8 @@ using std::vector;
// static
const char* AndSortedStage::kStageType = "AND_SORTED";
-AndSortedStage::AndSortedStage(OperationContext* opCtx, WorkingSet* ws)
- : PlanStage(kStageType, opCtx),
+AndSortedStage::AndSortedStage(ExpressionContext* expCtx, WorkingSet* ws)
+ : PlanStage(kStageType, expCtx),
_ws(ws),
_targetNode(numeric_limits<size_t>::max()),
_targetId(WorkingSet::INVALID_ID),
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index 3d72d15c1f9..0236d4a294a 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -47,7 +47,7 @@ namespace mongo {
*/
class AndSortedStage final : public PlanStage {
public:
- AndSortedStage(OperationContext* opCtx, WorkingSet* ws);
+ AndSortedStage(ExpressionContext* expCtx, WorkingSet* ws);
void addChild(std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index f3032d54da9..a5571725990 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -57,14 +57,14 @@ namespace mongo {
// static
const char* CachedPlanStage::kStageType = "CACHED_PLAN";
-CachedPlanStage::CachedPlanStage(OperationContext* opCtx,
+CachedPlanStage::CachedPlanStage(ExpressionContext* expCtx,
Collection* collection,
WorkingSet* ws,
CanonicalQuery* cq,
const QueryPlannerParams& params,
size_t decisionWorks,
std::unique_ptr<PlanStage> root)
- : RequiresAllIndicesStage(kStageType, opCtx, collection),
+ : RequiresAllIndicesStage(kStageType, expCtx, collection),
_ws(ws),
_canonicalQuery(cq),
_plannerParams(params),
@@ -227,7 +227,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, s
if (1 == solutions.size()) {
// Only one possible plan. Build the stages from the solution.
auto newRoot =
- StageBuilder::build(getOpCtx(), collection(), *_canonicalQuery, *solutions[0], _ws);
+ StageBuilder::build(opCtx(), collection(), *_canonicalQuery, *solutions[0], _ws);
_children.emplace_back(std::move(newRoot));
_replannedQs = std::move(solutions.back());
solutions.pop_back();
@@ -249,7 +249,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, s
auto cachingMode = shouldCache ? MultiPlanStage::CachingMode::AlwaysCache
: MultiPlanStage::CachingMode::NeverCache;
_children.emplace_back(
- new MultiPlanStage(getOpCtx(), collection(), _canonicalQuery, cachingMode));
+ new MultiPlanStage(expCtx(), collection(), _canonicalQuery, cachingMode));
MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(child().get());
for (size_t ix = 0; ix < solutions.size(); ++ix) {
@@ -258,7 +258,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, s
}
auto nextPlanRoot =
- StageBuilder::build(getOpCtx(), collection(), *_canonicalQuery, *solutions[ix], _ws);
+ StageBuilder::build(opCtx(), collection(), *_canonicalQuery, *solutions[ix], _ws);
multiPlanStage->addPlan(std::move(solutions[ix]), std::move(nextPlanRoot), _ws);
}
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 8a5fe5871d3..c57dda3e210 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -56,7 +56,7 @@ class PlanYieldPolicy;
*/
class CachedPlanStage final : public RequiresAllIndicesStage {
public:
- CachedPlanStage(OperationContext* opCtx,
+ CachedPlanStage(ExpressionContext* expCtx,
Collection* collection,
WorkingSet* ws,
CanonicalQuery* cq,
diff --git a/src/mongo/db/exec/change_stream_proxy.cpp b/src/mongo/db/exec/change_stream_proxy.cpp
index c10bccf4cd4..c16255a897b 100644
--- a/src/mongo/db/exec/change_stream_proxy.cpp
+++ b/src/mongo/db/exec/change_stream_proxy.cpp
@@ -39,10 +39,10 @@ namespace mongo {
const char* ChangeStreamProxyStage::kStageType = "CHANGE_STREAM_PROXY";
-ChangeStreamProxyStage::ChangeStreamProxyStage(OperationContext* opCtx,
+ChangeStreamProxyStage::ChangeStreamProxyStage(ExpressionContext* expCtx,
std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
WorkingSet* ws)
- : PipelineProxyStage(opCtx, std::move(pipeline), ws, kStageType) {
+ : PipelineProxyStage(expCtx, std::move(pipeline), ws, kStageType) {
// Set _postBatchResumeToken to the initial PBRT that was added to the expression context during
// pipeline construction, and use it to obtain the starting time for _latestOplogTimestamp.
invariant(!_pipeline->getContext()->initialPostBatchResumeToken.isEmpty());
diff --git a/src/mongo/db/exec/change_stream_proxy.h b/src/mongo/db/exec/change_stream_proxy.h
index 0cfc9d8d825..6d115b78885 100644
--- a/src/mongo/db/exec/change_stream_proxy.h
+++ b/src/mongo/db/exec/change_stream_proxy.h
@@ -48,7 +48,7 @@ public:
* The 'pipeline' argument must be a $changeStream pipeline. Passing a non-$changeStream into
* the constructor will cause an invariant() to fail.
*/
- ChangeStreamProxyStage(OperationContext* opCtx,
+ ChangeStreamProxyStage(ExpressionContext* expCtx,
std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
WorkingSet* ws);
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index 5acfcec50fe..d9bf24824b3 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -56,12 +56,12 @@ using std::vector;
// static
const char* CollectionScan::kStageType = "COLLSCAN";
-CollectionScan::CollectionScan(OperationContext* opCtx,
+CollectionScan::CollectionScan(ExpressionContext* expCtx,
const Collection* collection,
const CollectionScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter)
- : RequiresCollectionStage(kStageType, opCtx, collection),
+ : RequiresCollectionStage(kStageType, expCtx, collection),
_workingSet(workingSet),
_filter((filter && !filter->isTriviallyTrue()) ? filter : nullptr),
_params(params) {
@@ -117,11 +117,11 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
// snapshot where the oplog entries are not yet visible even after the wait.
invariant(!_params.tailable && collection()->ns().isOplog());
- getOpCtx()->recoveryUnit()->abandonSnapshot();
- collection()->getRecordStore()->waitForAllEarlierOplogWritesToBeVisible(getOpCtx());
+ opCtx()->recoveryUnit()->abandonSnapshot();
+ collection()->getRecordStore()->waitForAllEarlierOplogWritesToBeVisible(opCtx());
}
- _cursor = collection()->getCursor(getOpCtx(), forward);
+ _cursor = collection()->getCursor(opCtx(), forward);
if (!_lastSeenId.isNull()) {
invariant(_params.tailable);
@@ -171,7 +171,7 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
StatusWith<RecordId> goal = oploghack::keyForOptime(*_params.minTs);
if (goal.isOK()) {
boost::optional<RecordId> startLoc =
- collection()->getRecordStore()->oplogStartHack(getOpCtx(), goal.getValue());
+ collection()->getRecordStore()->oplogStartHack(opCtx(), goal.getValue());
if (startLoc && !startLoc->isNull()) {
LOGV2_DEBUG(20584, 3, "Using direct oplog seek");
record = _cursor->seekExact(*startLoc);
@@ -215,8 +215,7 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->recordId = record->id;
- member->resetDocument(getOpCtx()->recoveryUnit()->getSnapshotId(),
- record->data.releaseToBson());
+ member->resetDocument(opCtx()->recoveryUnit()->getSnapshotId(), record->data.releaseToBson());
_workingSet->transitionToRecordIdAndObj(id);
return returnIfMatches(member, id, out);
@@ -283,7 +282,7 @@ void CollectionScan::doDetachFromOperationContext() {
void CollectionScan::doReattachToOperationContext() {
if (_cursor)
- _cursor->reattachToOperationContext(getOpCtx());
+ _cursor->reattachToOperationContext(opCtx());
}
unique_ptr<PlanStageStats> CollectionScan::getStats() {
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index 1c8d815e141..b19915bb2c5 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -53,7 +53,7 @@ class CollectionScan final : public RequiresCollectionStage {
public:
static const char* kStageType;
- CollectionScan(OperationContext* opCtx,
+ CollectionScan(ExpressionContext* expCtx,
const Collection* collection,
const CollectionScanParams& params,
WorkingSet* workingSet,
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index 73620f851df..c646e172f29 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -45,13 +45,13 @@ using std::vector;
// static
const char* CountStage::kStageType = "COUNT";
-CountStage::CountStage(OperationContext* opCtx,
+CountStage::CountStage(ExpressionContext* expCtx,
Collection* collection,
long long limit,
long long skip,
WorkingSet* ws,
PlanStage* child)
- : PlanStage(kStageType, opCtx), _limit(limit), _skip(skip), _leftToSkip(_skip), _ws(ws) {
+ : PlanStage(kStageType, expCtx), _limit(limit), _skip(skip), _leftToSkip(_skip), _ws(ws) {
invariant(_skip >= 0);
invariant(_limit >= 0);
invariant(child);
diff --git a/src/mongo/db/exec/count.h b/src/mongo/db/exec/count.h
index 3eb0683baf0..2eeaf5266e5 100644
--- a/src/mongo/db/exec/count.h
+++ b/src/mongo/db/exec/count.h
@@ -46,7 +46,7 @@ namespace mongo {
*/
class CountStage final : public PlanStage {
public:
- CountStage(OperationContext* opCtx,
+ CountStage(ExpressionContext* expCtx,
Collection* collection,
long long limit,
long long skip,
diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp
index be7914299d9..9e9ec9ab490 100644
--- a/src/mongo/db/exec/count_scan.cpp
+++ b/src/mongo/db/exec/count_scan.cpp
@@ -73,8 +73,8 @@ const char* CountScan::kStageType = "COUNT_SCAN";
// When building the CountScan stage we take the keyPattern, index name, and multikey details from
// the CountScanParams rather than resolving them via the IndexDescriptor, since these may differ
// from the descriptor's contents.
-CountScan::CountScan(OperationContext* opCtx, CountScanParams params, WorkingSet* workingSet)
- : RequiresIndexStage(kStageType, opCtx, params.indexDescriptor, workingSet),
+CountScan::CountScan(ExpressionContext* expCtx, CountScanParams params, WorkingSet* workingSet)
+ : RequiresIndexStage(kStageType, expCtx, params.indexDescriptor, workingSet),
_workingSet(workingSet),
_keyPattern(std::move(params.keyPattern)),
_shouldDedup(params.isMultiKey),
@@ -112,7 +112,7 @@ PlanStage::StageState CountScan::doWork(WorkingSetID* out) {
if (needInit) {
// First call to work(). Perform cursor init.
- _cursor = indexAccessMethod()->newCursor(getOpCtx());
+ _cursor = indexAccessMethod()->newCursor(opCtx());
_cursor->setEndPosition(_endKey, _endKeyInclusive);
auto keyStringForSeek = IndexEntryComparison::makeKeyStringFromBSONKeyForSeek(
@@ -174,7 +174,7 @@ void CountScan::doDetachFromOperationContext() {
void CountScan::doReattachToOperationContext() {
if (_cursor)
- _cursor->reattachToOperationContext(getOpCtx());
+ _cursor->reattachToOperationContext(opCtx());
}
unique_ptr<PlanStageStats> CountScan::getStats() {
diff --git a/src/mongo/db/exec/count_scan.h b/src/mongo/db/exec/count_scan.h
index 5ebe00b0a7a..c8105c713ff 100644
--- a/src/mongo/db/exec/count_scan.h
+++ b/src/mongo/db/exec/count_scan.h
@@ -87,7 +87,7 @@ struct CountScanParams {
*/
class CountScan final : public RequiresIndexStage {
public:
- CountScan(OperationContext* opCtx, CountScanParams params, WorkingSet* workingSet);
+ CountScan(ExpressionContext* expCtx, CountScanParams params, WorkingSet* workingSet);
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 4e5678defcc..4d9504a5b16 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -71,12 +71,12 @@ bool shouldRestartDeleteIfNoLongerMatches(const DeleteStageParams* params) {
// static
const char* DeleteStage::kStageType = "DELETE";
-DeleteStage::DeleteStage(OperationContext* opCtx,
+DeleteStage::DeleteStage(ExpressionContext* expCtx,
std::unique_ptr<DeleteStageParams> params,
WorkingSet* ws,
Collection* collection,
PlanStage* child)
- : RequiresMutableCollectionStage(kStageType, opCtx, collection),
+ : RequiresMutableCollectionStage(kStageType, expCtx, collection),
_params(std::move(params)),
_ws(ws),
_idRetrying(WorkingSet::INVALID_ID),
@@ -161,7 +161,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
bool docStillMatches;
try {
docStillMatches = write_stage_common::ensureStillMatches(
- collection(), getOpCtx(), _ws, id, _params->canonicalQuery);
+ collection(), opCtx(), _ws, id, _params->canonicalQuery);
} catch (const WriteConflictException&) {
// There was a problem trying to detect if the document still exists, so retry.
memberFreer.dismiss();
@@ -201,8 +201,8 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
// Do the write, unless this is an explain.
if (!_params->isExplain) {
try {
- WriteUnitOfWork wunit(getOpCtx());
- collection()->deleteDocument(getOpCtx(),
+ WriteUnitOfWork wunit(opCtx());
+ collection()->deleteDocument(opCtx(),
_params->stmtId,
recordId,
_params->opDebug,
@@ -261,8 +261,8 @@ void DeleteStage::doRestoreStateRequiresCollection() {
const NamespaceString& ns = collection()->ns();
uassert(ErrorCodes::PrimarySteppedDown,
str::stream() << "Demoted from primary while removing from " << ns.ns(),
- !getOpCtx()->writesAreReplicated() ||
- repl::ReplicationCoordinator::get(getOpCtx())->canAcceptWritesFor(getOpCtx(), ns));
+ !opCtx()->writesAreReplicated() ||
+ repl::ReplicationCoordinator::get(opCtx())->canAcceptWritesFor(opCtx(), ns));
}
unique_ptr<PlanStageStats> DeleteStage::getStats() {
diff --git a/src/mongo/db/exec/delete.h b/src/mongo/db/exec/delete.h
index 139239d9de0..a0f9f056d92 100644
--- a/src/mongo/db/exec/delete.h
+++ b/src/mongo/db/exec/delete.h
@@ -99,7 +99,7 @@ class DeleteStage final : public RequiresMutableCollectionStage {
DeleteStage& operator=(const DeleteStage&) = delete;
public:
- DeleteStage(OperationContext* opCtx,
+ DeleteStage(ExpressionContext* expCtx,
std::unique_ptr<DeleteStageParams> params,
WorkingSet* ws,
Collection* collection,
diff --git a/src/mongo/db/exec/distinct_scan.cpp b/src/mongo/db/exec/distinct_scan.cpp
index 860e6323a40..6df30d00432 100644
--- a/src/mongo/db/exec/distinct_scan.cpp
+++ b/src/mongo/db/exec/distinct_scan.cpp
@@ -46,8 +46,8 @@ using std::vector;
// static
const char* DistinctScan::kStageType = "DISTINCT_SCAN";
-DistinctScan::DistinctScan(OperationContext* opCtx, DistinctParams params, WorkingSet* workingSet)
- : RequiresIndexStage(kStageType, opCtx, params.indexDescriptor, workingSet),
+DistinctScan::DistinctScan(ExpressionContext* expCtx, DistinctParams params, WorkingSet* workingSet)
+ : RequiresIndexStage(kStageType, expCtx, params.indexDescriptor, workingSet),
_workingSet(workingSet),
_keyPattern(std::move(params.keyPattern)),
_scanDirection(params.scanDirection),
@@ -78,7 +78,7 @@ PlanStage::StageState DistinctScan::doWork(WorkingSetID* out) {
boost::optional<IndexKeyEntry> kv;
try {
if (!_cursor)
- _cursor = indexAccessMethod()->newCursor(getOpCtx(), _scanDirection == 1);
+ _cursor = indexAccessMethod()->newCursor(opCtx(), _scanDirection == 1);
kv = _cursor->seek(IndexEntryComparison::makeKeyStringFromSeekPointForSeek(
_seekPoint,
indexAccessMethod()->getSortedDataInterface()->getKeyStringVersion(),
@@ -125,7 +125,7 @@ PlanStage::StageState DistinctScan::doWork(WorkingSetID* out) {
member->keyData.push_back(IndexKeyDatum(_keyPattern,
kv->key,
workingSetIndexId(),
- getOpCtx()->recoveryUnit()->getSnapshotId()));
+ opCtx()->recoveryUnit()->getSnapshotId()));
_workingSet->transitionToRecordIdAndIdx(id);
*out = id;
@@ -156,7 +156,7 @@ void DistinctScan::doDetachFromOperationContext() {
void DistinctScan::doReattachToOperationContext() {
if (_cursor)
- _cursor->reattachToOperationContext(getOpCtx());
+ _cursor->reattachToOperationContext(opCtx());
}
unique_ptr<PlanStageStats> DistinctScan::getStats() {
diff --git a/src/mongo/db/exec/distinct_scan.h b/src/mongo/db/exec/distinct_scan.h
index 4596d92d378..e9c5c5c3852 100644
--- a/src/mongo/db/exec/distinct_scan.h
+++ b/src/mongo/db/exec/distinct_scan.h
@@ -95,7 +95,7 @@ struct DistinctParams {
*/
class DistinctScan final : public RequiresIndexStage {
public:
- DistinctScan(OperationContext* opCtx, DistinctParams params, WorkingSet* workingSet);
+ DistinctScan(ExpressionContext* expCtx, DistinctParams params, WorkingSet* workingSet);
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
diff --git a/src/mongo/db/exec/ensure_sorted.cpp b/src/mongo/db/exec/ensure_sorted.cpp
index aaec72bf377..f713a2eed3a 100644
--- a/src/mongo/db/exec/ensure_sorted.cpp
+++ b/src/mongo/db/exec/ensure_sorted.cpp
@@ -42,11 +42,11 @@ using std::unique_ptr;
const char* EnsureSortedStage::kStageType = "ENSURE_SORTED";
-EnsureSortedStage::EnsureSortedStage(OperationContext* opCtx,
+EnsureSortedStage::EnsureSortedStage(ExpressionContext* expCtx,
BSONObj pattern,
WorkingSet* ws,
std::unique_ptr<PlanStage> child)
- : PlanStage(kStageType, opCtx), _ws(ws), _sortKeyComparator(pattern) {
+ : PlanStage(kStageType, expCtx), _ws(ws), _sortKeyComparator(pattern) {
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/ensure_sorted.h b/src/mongo/db/exec/ensure_sorted.h
index 423647eeacf..9d917b93744 100644
--- a/src/mongo/db/exec/ensure_sorted.h
+++ b/src/mongo/db/exec/ensure_sorted.h
@@ -43,7 +43,7 @@ namespace mongo {
*/
class EnsureSortedStage final : public PlanStage {
public:
- EnsureSortedStage(OperationContext* opCtx,
+ EnsureSortedStage(ExpressionContext* expCtx,
BSONObj pattern,
WorkingSet* ws,
std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/eof.cpp b/src/mongo/db/exec/eof.cpp
index acd290f0f1a..25c7e44ac92 100644
--- a/src/mongo/db/exec/eof.cpp
+++ b/src/mongo/db/exec/eof.cpp
@@ -43,7 +43,7 @@ using std::vector;
// static
const char* EOFStage::kStageType = "EOF";
-EOFStage::EOFStage(OperationContext* opCtx) : PlanStage(kStageType, opCtx) {}
+EOFStage::EOFStage(ExpressionContext* expCtx) : PlanStage(kStageType, expCtx) {}
EOFStage::~EOFStage() {}
diff --git a/src/mongo/db/exec/eof.h b/src/mongo/db/exec/eof.h
index 630bee72f9d..e60d6b4319b 100644
--- a/src/mongo/db/exec/eof.h
+++ b/src/mongo/db/exec/eof.h
@@ -39,7 +39,7 @@ namespace mongo {
*/
class EOFStage final : public PlanStage {
public:
- EOFStage(OperationContext* opCtx);
+ EOFStage(ExpressionContext* expCtx);
~EOFStage();
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index ce3e60f122b..914158d3191 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -49,12 +49,12 @@ using std::vector;
// static
const char* FetchStage::kStageType = "FETCH";
-FetchStage::FetchStage(OperationContext* opCtx,
+FetchStage::FetchStage(ExpressionContext* expCtx,
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
const MatchExpression* filter,
const Collection* collection)
- : RequiresCollectionStage(kStageType, opCtx, collection),
+ : RequiresCollectionStage(kStageType, expCtx, collection),
_ws(ws),
_filter((filter && !filter->isTriviallyTrue()) ? filter : nullptr),
_idRetrying(WorkingSet::INVALID_ID) {
@@ -101,9 +101,9 @@ PlanStage::StageState FetchStage::doWork(WorkingSetID* out) {
try {
if (!_cursor)
- _cursor = collection()->getCursor(getOpCtx());
+ _cursor = collection()->getCursor(opCtx());
- if (!WorkingSetCommon::fetch(getOpCtx(), _ws, id, _cursor, collection()->ns())) {
+ if (!WorkingSetCommon::fetch(opCtx(), _ws, id, _cursor, collection()->ns())) {
_ws->free(id);
return NEED_TIME;
}
@@ -151,7 +151,7 @@ void FetchStage::doDetachFromOperationContext() {
void FetchStage::doReattachToOperationContext() {
if (_cursor)
- _cursor->reattachToOperationContext(getOpCtx());
+ _cursor->reattachToOperationContext(opCtx());
}
PlanStage::StageState FetchStage::returnIfMatches(WorkingSetMember* member,
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index 074bd63b4d2..10dddc50a4f 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -50,7 +50,7 @@ class SeekableRecordCursor;
*/
class FetchStage : public RequiresCollectionStage {
public:
- FetchStage(OperationContext* opCtx,
+ FetchStage(ExpressionContext* expCtx,
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
const MatchExpression* filter,
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index 6e2b86df096..295b1d1e875 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -287,12 +287,12 @@ GeoNear2DStage::DensityEstimator::DensityEstimator(PlanStage::Children* children
}
// Initialize the internal states
-void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* opCtx,
+void GeoNear2DStage::DensityEstimator::buildIndexScan(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex) {
// Scan bounds on 2D indexes are only over the 2D field - other bounds aren't applicable.
// This is handled in query planning.
- IndexScanParams scanParams(opCtx, twoDIndex);
+ IndexScanParams scanParams(expCtx->opCtx, twoDIndex);
scanParams.bounds = _nearParams->baseBounds;
// The "2d" field is always the first in the index
@@ -323,20 +323,20 @@ void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* opCtx,
IndexBoundsBuilder::intersectize(oil, &scanParams.bounds.fields[twoDFieldPosition]);
invariant(!_indexScan);
- _indexScan = new IndexScan(opCtx, scanParams, workingSet, nullptr);
+ _indexScan = new IndexScan(expCtx, scanParams, workingSet, nullptr);
_children->emplace_back(_indexScan);
}
// Return IS_EOF if we find a document in its ancestor cells and set estimated distance
// from the nearest document.
-PlanStage::StageState GeoNear2DStage::DensityEstimator::work(OperationContext* opCtx,
+PlanStage::StageState GeoNear2DStage::DensityEstimator::work(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex,
WorkingSetID* out,
double* estimatedDistance) {
if (!_indexScan) {
// Setup index scan stage for current level.
- buildIndexScan(opCtx, workingSet, twoDIndex);
+ buildIndexScan(expCtx, workingSet, twoDIndex);
}
WorkingSetID workingSetID;
@@ -414,7 +414,7 @@ PlanStage::StageState GeoNear2DStage::initialize(OperationContext* opCtx,
double estimatedDistance;
PlanStage::StageState state =
- _densityEstimator->work(opCtx, workingSet, indexDescriptor(), out, &estimatedDistance);
+ _densityEstimator->work(expCtx(), workingSet, indexDescriptor(), out, &estimatedDistance);
if (state == PlanStage::IS_EOF) {
// 2d index only works with legacy points as centroid. $nearSphere will project
@@ -450,10 +450,10 @@ PlanStage::StageState GeoNear2DStage::initialize(OperationContext* opCtx,
static const string kTwoDIndexNearStage("GEO_NEAR_2D");
GeoNear2DStage::GeoNear2DStage(const GeoNearParams& nearParams,
- OperationContext* opCtx,
+ ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex)
- : NearStage(opCtx, kTwoDIndexNearStage.c_str(), STAGE_GEO_NEAR_2D, workingSet, twoDIndex),
+ : NearStage(expCtx, kTwoDIndexNearStage.c_str(), STAGE_GEO_NEAR_2D, workingSet, twoDIndex),
_nearParams(nearParams),
_fullBounds(twoDDistanceBounds(nearParams, twoDIndex)),
_currBounds(_fullBounds.center(), -1, _fullBounds.getInner()),
@@ -524,12 +524,12 @@ private:
// Helper class to maintain ownership of a match expression alongside an index scan
class FetchStageWithMatch final : public FetchStage {
public:
- FetchStageWithMatch(OperationContext* opCtx,
+ FetchStageWithMatch(ExpressionContext* expCtx,
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
MatchExpression* filter,
const Collection* collection)
- : FetchStage(opCtx, ws, std::move(child), filter, collection), _matcher(filter) {}
+ : FetchStage(expCtx, ws, std::move(child), filter, collection), _matcher(filter) {}
private:
// Owns matcher
@@ -702,7 +702,7 @@ GeoNear2DStage::nextInterval(OperationContext* opCtx,
.transitional_ignore();
// 2D indexes support covered search over additional fields they contain
- auto scan = std::make_unique<IndexScan>(opCtx, scanParams, workingSet, _nearParams.filter);
+ auto scan = std::make_unique<IndexScan>(expCtx(), scanParams, workingSet, _nearParams.filter);
MatchExpression* docMatcher = nullptr;
@@ -714,7 +714,7 @@ GeoNear2DStage::nextInterval(OperationContext* opCtx,
// FetchStage owns index scan
_children.emplace_back(std::make_unique<FetchStageWithMatch>(
- opCtx, workingSet, std::move(scan), docMatcher, collection));
+ expCtx(), workingSet, std::move(scan), docMatcher, collection));
return StatusWith<CoveredInterval*>(new CoveredInterval(
_children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
@@ -748,10 +748,10 @@ static int getFieldPosition(const IndexDescriptor* index, const string& fieldNam
static const string kS2IndexNearStage("GEO_NEAR_2DSPHERE");
GeoNear2DSphereStage::GeoNear2DSphereStage(const GeoNearParams& nearParams,
- OperationContext* opCtx,
+ ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* s2Index)
- : NearStage(opCtx, kS2IndexNearStage.c_str(), STAGE_GEO_NEAR_2DSPHERE, workingSet, s2Index),
+ : NearStage(expCtx, kS2IndexNearStage.c_str(), STAGE_GEO_NEAR_2DSPHERE, workingSet, s2Index),
_nearParams(nearParams),
_fullBounds(geoNearDistanceBounds(*nearParams.nearQuery)),
_currBounds(_fullBounds.center(), -1, _fullBounds.getInner()),
@@ -827,10 +827,10 @@ GeoNear2DSphereStage::DensityEstimator::DensityEstimator(PlanStage::Children* ch
}
// Setup the index scan stage for neighbors at this level.
-void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* opCtx,
+void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* s2Index) {
- IndexScanParams scanParams(opCtx, s2Index);
+ IndexScanParams scanParams(expCtx->opCtx, s2Index);
scanParams.bounds = _nearParams->baseBounds;
// Because the planner doesn't yet set up 2D index bounds, do it ourselves here
@@ -853,18 +853,18 @@ void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* op
// Index scan
invariant(!_indexScan);
- _indexScan = new IndexScan(opCtx, scanParams, workingSet, nullptr);
+ _indexScan = new IndexScan(expCtx, scanParams, workingSet, nullptr);
_children->emplace_back(_indexScan);
}
-PlanStage::StageState GeoNear2DSphereStage::DensityEstimator::work(OperationContext* opCtx,
+PlanStage::StageState GeoNear2DSphereStage::DensityEstimator::work(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* s2Index,
WorkingSetID* out,
double* estimatedDistance) {
if (!_indexScan) {
// Setup index scan stage for current level.
- buildIndexScan(opCtx, workingSet, s2Index);
+ buildIndexScan(expCtx, workingSet, s2Index);
}
WorkingSetID workingSetID;
@@ -945,7 +945,7 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* opCtx,
double estimatedDistance;
PlanStage::StageState state =
- _densityEstimator->work(opCtx, workingSet, indexDescriptor(), out, &estimatedDistance);
+ _densityEstimator->work(expCtx(), workingSet, indexDescriptor(), out, &estimatedDistance);
if (state == IS_EOF) {
// We find a document in 4 neighbors at current level, but didn't at previous level.
@@ -1033,11 +1033,11 @@ GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
OrderedIntervalList* coveredIntervals = &scanParams.bounds.fields[s2FieldPosition];
ExpressionMapping::S2CellIdsToIntervalsWithParents(cover, _indexParams, coveredIntervals);
- auto scan = std::make_unique<IndexScan>(opCtx, scanParams, workingSet, nullptr);
+ auto scan = std::make_unique<IndexScan>(expCtx(), scanParams, workingSet, nullptr);
// FetchStage owns index scan
_children.emplace_back(std::make_unique<FetchStage>(
- opCtx, workingSet, std::move(scan), _nearParams.filter, collection));
+ expCtx(), workingSet, std::move(scan), _nearParams.filter, collection));
return StatusWith<CoveredInterval*>(new CoveredInterval(
_children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h
index ce23ccd6e38..eb096064d53 100644
--- a/src/mongo/db/exec/geo_near.h
+++ b/src/mongo/db/exec/geo_near.h
@@ -69,7 +69,7 @@ struct GeoNearParams {
class GeoNear2DStage final : public NearStage {
public:
GeoNear2DStage(const GeoNearParams& nearParams,
- OperationContext* opCtx,
+ ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex);
@@ -92,14 +92,14 @@ private:
const GeoNearParams* nearParams,
const R2Annulus& fullBounds);
- PlanStage::StageState work(OperationContext* opCtx,
+ PlanStage::StageState work(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex,
WorkingSetID* out,
double* estimatedDistance);
private:
- void buildIndexScan(OperationContext* opCtx,
+ void buildIndexScan(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex);
@@ -135,7 +135,7 @@ private:
class GeoNear2DSphereStage final : public NearStage {
public:
GeoNear2DSphereStage(const GeoNearParams& nearParams,
- OperationContext* opCtx,
+ ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* s2Index);
@@ -163,14 +163,14 @@ private:
// Search for a document in neighbors at current level.
// Return IS_EOF if such a document exists and set the estimated distance to the nearest doc.
- PlanStage::StageState work(OperationContext* opCtx,
+ PlanStage::StageState work(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* s2Index,
WorkingSetID* out,
double* estimatedDistance);
private:
- void buildIndexScan(OperationContext* opCtx,
+ void buildIndexScan(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* s2Index);
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index c3378824a58..d8b9400daba 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -49,22 +49,22 @@ using std::vector;
// static
const char* IDHackStage::kStageType = "IDHACK";
-IDHackStage::IDHackStage(OperationContext* opCtx,
+IDHackStage::IDHackStage(ExpressionContext* expCtx,
CanonicalQuery* query,
WorkingSet* ws,
const IndexDescriptor* descriptor)
- : RequiresIndexStage(kStageType, opCtx, descriptor, ws),
+ : RequiresIndexStage(kStageType, expCtx, descriptor, ws),
_workingSet(ws),
_key(query->getQueryObj()["_id"].wrap()) {
_specificStats.indexName = descriptor->indexName();
_addKeyMetadata = query->getQueryRequest().returnKey();
}
-IDHackStage::IDHackStage(OperationContext* opCtx,
+IDHackStage::IDHackStage(ExpressionContext* expCtx,
const BSONObj& key,
WorkingSet* ws,
const IndexDescriptor* descriptor)
- : RequiresIndexStage(kStageType, opCtx, descriptor, ws), _workingSet(ws), _key(key) {
+ : RequiresIndexStage(kStageType, expCtx, descriptor, ws), _workingSet(ws), _key(key) {
_specificStats.indexName = descriptor->indexName();
}
@@ -82,7 +82,7 @@ PlanStage::StageState IDHackStage::doWork(WorkingSetID* out) {
WorkingSetID id = WorkingSet::INVALID_ID;
try {
// Look up the key by going directly to the index.
- RecordId recordId = indexAccessMethod()->findSingle(getOpCtx(), _key);
+ RecordId recordId = indexAccessMethod()->findSingle(opCtx(), _key);
// Key not found.
if (recordId.isNull()) {
@@ -100,11 +100,10 @@ PlanStage::StageState IDHackStage::doWork(WorkingSetID* out) {
_workingSet->transitionToRecordIdAndIdx(id);
if (!_recordCursor)
- _recordCursor = collection()->getCursor(getOpCtx());
+ _recordCursor = collection()->getCursor(opCtx());
// Find the document associated with 'id' in the collection's record store.
- if (!WorkingSetCommon::fetch(
- getOpCtx(), _workingSet, id, _recordCursor, collection()->ns())) {
+ if (!WorkingSetCommon::fetch(opCtx(), _workingSet, id, _recordCursor, collection()->ns())) {
// We didn't find a document with RecordId 'id'.
_workingSet->free(id);
_commonStats.isEOF = true;
@@ -156,7 +155,7 @@ void IDHackStage::doDetachFromOperationContext() {
void IDHackStage::doReattachToOperationContext() {
if (_recordCursor)
- _recordCursor->reattachToOperationContext(getOpCtx());
+ _recordCursor->reattachToOperationContext(opCtx());
}
// static
diff --git a/src/mongo/db/exec/idhack.h b/src/mongo/db/exec/idhack.h
index 5a56f67b328..5c2ac9f894a 100644
--- a/src/mongo/db/exec/idhack.h
+++ b/src/mongo/db/exec/idhack.h
@@ -48,12 +48,12 @@ class RecordCursor;
class IDHackStage final : public RequiresIndexStage {
public:
/** Takes ownership of all the arguments, except the collection. */
- IDHackStage(OperationContext* opCtx,
+ IDHackStage(ExpressionContext* expCtx,
CanonicalQuery* query,
WorkingSet* ws,
const IndexDescriptor* descriptor);
- IDHackStage(OperationContext* opCtx,
+ IDHackStage(ExpressionContext* expCtx,
const BSONObj& key,
WorkingSet* ws,
const IndexDescriptor* descriptor);
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index f50d6d15fca..dc01957ef8b 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -59,11 +59,11 @@ namespace mongo {
// static
const char* IndexScan::kStageType = "IXSCAN";
-IndexScan::IndexScan(OperationContext* opCtx,
+IndexScan::IndexScan(ExpressionContext* expCtx,
IndexScanParams params,
WorkingSet* workingSet,
const MatchExpression* filter)
- : RequiresIndexStage(kStageType, opCtx, params.indexDescriptor, workingSet),
+ : RequiresIndexStage(kStageType, expCtx, params.indexDescriptor, workingSet),
_workingSet(workingSet),
_keyPattern(params.keyPattern.getOwned()),
_bounds(std::move(params.bounds)),
@@ -89,7 +89,7 @@ IndexScan::IndexScan(OperationContext* opCtx,
boost::optional<IndexKeyEntry> IndexScan::initIndexScan() {
// Perform the possibly heavy-duty initialization of the underlying index cursor.
- _indexCursor = indexAccessMethod()->newCursor(getOpCtx(), _forward);
+ _indexCursor = indexAccessMethod()->newCursor(opCtx(), _forward);
// We always seek once to establish the cursor position.
++_specificStats.seeks;
@@ -231,7 +231,7 @@ PlanStage::StageState IndexScan::doWork(WorkingSetID* out) {
WorkingSetMember* member = _workingSet->get(id);
member->recordId = kv->loc;
member->keyData.push_back(IndexKeyDatum(
- _keyPattern, kv->key, workingSetIndexId(), getOpCtx()->recoveryUnit()->getSnapshotId()));
+ _keyPattern, kv->key, workingSetIndexId(), opCtx()->recoveryUnit()->getSnapshotId()));
_workingSet->transitionToRecordIdAndIdx(id);
if (_addKeyMetadata) {
@@ -270,7 +270,7 @@ void IndexScan::doDetachFromOperationContext() {
void IndexScan::doReattachToOperationContext() {
if (_indexCursor)
- _indexCursor->reattachToOperationContext(getOpCtx());
+ _indexCursor->reattachToOperationContext(opCtx());
}
std::unique_ptr<PlanStageStats> IndexScan::getStats() {
diff --git a/src/mongo/db/exec/index_scan.h b/src/mongo/db/exec/index_scan.h
index 5ee7d807282..d36f99d9f9b 100644
--- a/src/mongo/db/exec/index_scan.h
+++ b/src/mongo/db/exec/index_scan.h
@@ -108,7 +108,7 @@ public:
HIT_END
};
- IndexScan(OperationContext* opCtx,
+ IndexScan(ExpressionContext* expCtx,
IndexScanParams params,
WorkingSet* workingSet,
const MatchExpression* filter);
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index e800d614039..41505be622f 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -43,11 +43,11 @@ using std::vector;
// static
const char* LimitStage::kStageType = "LIMIT";
-LimitStage::LimitStage(OperationContext* opCtx,
+LimitStage::LimitStage(ExpressionContext* expCtx,
long long limit,
WorkingSet* ws,
std::unique_ptr<PlanStage> child)
- : PlanStage(kStageType, opCtx), _ws(ws), _numToReturn(limit) {
+ : PlanStage(kStageType, expCtx), _ws(ws), _numToReturn(limit) {
_specificStats.limit = _numToReturn;
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h
index f807838b540..ffc2f6a509c 100644
--- a/src/mongo/db/exec/limit.h
+++ b/src/mongo/db/exec/limit.h
@@ -45,7 +45,7 @@ namespace mongo {
*/
class LimitStage final : public PlanStage {
public:
- LimitStage(OperationContext* opCtx,
+ LimitStage(ExpressionContext* expCtx,
long long limit,
WorkingSet* ws,
std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index cc7d40b073e..58a3e33f241 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -47,10 +47,10 @@ using std::vector;
// static
const char* MergeSortStage::kStageType = "SORT_MERGE";
-MergeSortStage::MergeSortStage(OperationContext* opCtx,
+MergeSortStage::MergeSortStage(ExpressionContext* expCtx,
const MergeSortStageParams& params,
WorkingSet* ws)
- : PlanStage(kStageType, opCtx),
+ : PlanStage(kStageType, expCtx),
_ws(ws),
_pattern(params.pattern),
_collator(params.collator),
diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h
index 714f6e0c68a..efb2833b8fb 100644
--- a/src/mongo/db/exec/merge_sort.h
+++ b/src/mongo/db/exec/merge_sort.h
@@ -57,7 +57,7 @@ class MergeSortStageParams;
*/
class MergeSortStage final : public PlanStage {
public:
- MergeSortStage(OperationContext* opCtx, const MergeSortStageParams& params, WorkingSet* ws);
+ MergeSortStage(ExpressionContext* expCtx, const MergeSortStageParams& params, WorkingSet* ws);
void addChild(std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp
index 761d6b21ed0..8a33f98386e 100644
--- a/src/mongo/db/exec/multi_iterator.cpp
+++ b/src/mongo/db/exec/multi_iterator.cpp
@@ -43,10 +43,10 @@ using std::vector;
const char* MultiIteratorStage::kStageType = "MULTI_ITERATOR";
-MultiIteratorStage::MultiIteratorStage(OperationContext* opCtx,
+MultiIteratorStage::MultiIteratorStage(ExpressionContext* expCtx,
WorkingSet* ws,
Collection* collection)
- : RequiresCollectionStage(kStageType, opCtx, collection), _ws(ws) {}
+ : RequiresCollectionStage(kStageType, expCtx, collection), _ws(ws) {}
void MultiIteratorStage::addIterator(unique_ptr<RecordCursor> it) {
_iterators.push_back(std::move(it));
@@ -74,8 +74,7 @@ PlanStage::StageState MultiIteratorStage::doWork(WorkingSetID* out) {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
member->recordId = record->id;
- member->resetDocument(getOpCtx()->recoveryUnit()->getSnapshotId(),
- record->data.releaseToBson());
+ member->resetDocument(opCtx()->recoveryUnit()->getSnapshotId(), record->data.releaseToBson());
_ws->transitionToRecordIdAndObj(*out);
return PlanStage::ADVANCED;
}
@@ -105,7 +104,7 @@ void MultiIteratorStage::doDetachFromOperationContext() {
void MultiIteratorStage::doReattachToOperationContext() {
for (auto&& iterator : _iterators) {
- iterator->reattachToOperationContext(getOpCtx());
+ iterator->reattachToOperationContext(opCtx());
}
}
diff --git a/src/mongo/db/exec/multi_iterator.h b/src/mongo/db/exec/multi_iterator.h
index 6abfa760e7a..accc0e4b1a6 100644
--- a/src/mongo/db/exec/multi_iterator.h
+++ b/src/mongo/db/exec/multi_iterator.h
@@ -47,7 +47,7 @@ namespace mongo {
*/
class MultiIteratorStage final : public RequiresCollectionStage {
public:
- MultiIteratorStage(OperationContext* opCtx, WorkingSet* ws, Collection* collection);
+ MultiIteratorStage(ExpressionContext* expCtx, WorkingSet* ws, Collection* collection);
void addIterator(std::unique_ptr<RecordCursor> it);
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 96d691fdbb7..4fa9b8766a2 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -61,11 +61,11 @@ using std::vector;
// static
const char* MultiPlanStage::kStageType = "MULTI_PLAN";
-MultiPlanStage::MultiPlanStage(OperationContext* opCtx,
+MultiPlanStage::MultiPlanStage(ExpressionContext* expCtx,
const Collection* collection,
CanonicalQuery* cq,
CachingMode cachingMode)
- : RequiresCollectionStage(kStageType, opCtx, collection),
+ : RequiresCollectionStage(kStageType, expCtx, collection),
_cachingMode(cachingMode),
_query(cq),
_bestPlanIdx(kNoSuchPlan),
@@ -203,7 +203,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
// make sense.
ScopedTimer timer(getClock(), &_commonStats.executionTimeMillis);
- size_t numWorks = getTrialPeriodWorks(getOpCtx(), collection());
+ size_t numWorks = getTrialPeriodWorks(opCtx(), collection());
size_t numResults = getTrialPeriodNumToReturn(*_query);
try {
@@ -365,7 +365,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
->set(*_query,
solutions,
std::move(ranking),
- getOpCtx()->getServiceContext()->getPreciseClockSource()->now())
+ opCtx()->getServiceContext()->getPreciseClockSource()->now())
.transitional_ignore();
}
}
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index f70b2800e63..1c33885d245 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -76,7 +76,7 @@ public:
* If 'shouldCache' is true, writes a cache entry for the winning plan to the plan cache
* when possible. If 'shouldCache' is false, the plan cache will never be written.
*/
- MultiPlanStage(OperationContext* opCtx,
+ MultiPlanStage(ExpressionContext* expCtx,
const Collection* collection,
CanonicalQuery* cq,
CachingMode cachingMode = CachingMode::AlwaysCache);
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index ac16083b0a5..30bbb894881 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -42,12 +42,12 @@ namespace mongo {
using std::unique_ptr;
using std::vector;
-NearStage::NearStage(OperationContext* opCtx,
+NearStage::NearStage(ExpressionContext* expCtx,
const char* typeName,
StageType type,
WorkingSet* workingSet,
const IndexDescriptor* indexDescriptor)
- : RequiresIndexStage(typeName, opCtx, indexDescriptor, workingSet),
+ : RequiresIndexStage(typeName, expCtx, indexDescriptor, workingSet),
_workingSet(workingSet),
_searchState(SearchState_Initializing),
_nextIntervalStats(nullptr),
@@ -67,7 +67,7 @@ NearStage::CoveredInterval::CoveredInterval(PlanStage* covering,
PlanStage::StageState NearStage::initNext(WorkingSetID* out) {
- PlanStage::StageState state = initialize(getOpCtx(), _workingSet, out);
+ PlanStage::StageState state = initialize(opCtx(), _workingSet, out);
if (state == PlanStage::IS_EOF) {
_searchState = SearchState_Buffering;
return PlanStage::NEED_TIME;
@@ -139,7 +139,7 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
if (!_nextInterval) {
StatusWith<CoveredInterval*> intervalStatus =
- nextInterval(getOpCtx(), _workingSet, collection());
+ nextInterval(opCtx(), _workingSet, collection());
if (!intervalStatus.isOK()) {
_searchState = SearchState_Finished;
*error = intervalStatus.getStatus();
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index 94d9639b312..8f55c777494 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -104,7 +104,7 @@ protected:
/**
* Subclasses of NearStage must provide basics + a stats object which gets owned here.
*/
- NearStage(OperationContext* opCtx,
+ NearStage(ExpressionContext* expCtx,
const char* typeName,
StageType type,
WorkingSet* workingSet,
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index 3800536b62c..c50a4981e94 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -44,8 +44,11 @@ using std::vector;
// static
const char* OrStage::kStageType = "OR";
-OrStage::OrStage(OperationContext* opCtx, WorkingSet* ws, bool dedup, const MatchExpression* filter)
- : PlanStage(kStageType, opCtx), _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup) {}
+OrStage::OrStage(ExpressionContext* expCtx,
+ WorkingSet* ws,
+ bool dedup,
+ const MatchExpression* filter)
+ : PlanStage(kStageType, expCtx), _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup) {}
void OrStage::addChild(std::unique_ptr<PlanStage> child) {
_children.emplace_back(std::move(child));
diff --git a/src/mongo/db/exec/or.h b/src/mongo/db/exec/or.h
index 8d2c043ee46..e4ddcbcb2c0 100644
--- a/src/mongo/db/exec/or.h
+++ b/src/mongo/db/exec/or.h
@@ -44,7 +44,7 @@ namespace mongo {
*/
class OrStage final : public PlanStage {
public:
- OrStage(OperationContext* opCtx, WorkingSet* ws, bool dedup, const MatchExpression* filter);
+ OrStage(ExpressionContext* expCtx, WorkingSet* ws, bool dedup, const MatchExpression* filter);
void addChild(std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/pipeline_proxy.cpp b/src/mongo/db/exec/pipeline_proxy.cpp
index c0c1f2c23d2..bdfaaa746b8 100644
--- a/src/mongo/db/exec/pipeline_proxy.cpp
+++ b/src/mongo/db/exec/pipeline_proxy.cpp
@@ -46,16 +46,16 @@ using std::vector;
const char* PipelineProxyStage::kStageType = "PIPELINE_PROXY";
-PipelineProxyStage::PipelineProxyStage(OperationContext* opCtx,
+PipelineProxyStage::PipelineProxyStage(ExpressionContext* expCtx,
std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
WorkingSet* ws)
- : PipelineProxyStage(opCtx, std::move(pipeline), ws, kStageType) {}
+ : PipelineProxyStage(expCtx, std::move(pipeline), ws, kStageType) {}
-PipelineProxyStage::PipelineProxyStage(OperationContext* opCtx,
+PipelineProxyStage::PipelineProxyStage(ExpressionContext* expCtx,
std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
WorkingSet* ws,
const char* stageTypeName)
- : PlanStage(stageTypeName, opCtx),
+ : PlanStage(stageTypeName, expCtx),
_pipeline(std::move(pipeline)),
_includeMetaData(_pipeline->getContext()->needsMerge), // send metadata to merger
_ws(ws) {
@@ -112,11 +112,11 @@ void PipelineProxyStage::doDetachFromOperationContext() {
}
void PipelineProxyStage::doReattachToOperationContext() {
- _pipeline->reattachToOperationContext(getOpCtx());
+ _pipeline->reattachToOperationContext(opCtx());
}
void PipelineProxyStage::doDispose() {
- _pipeline->dispose(getOpCtx());
+ _pipeline->dispose(opCtx());
}
unique_ptr<PlanStageStats> PipelineProxyStage::getStats() {
diff --git a/src/mongo/db/exec/pipeline_proxy.h b/src/mongo/db/exec/pipeline_proxy.h
index 0bdbbd69a9f..99f8583f3c0 100644
--- a/src/mongo/db/exec/pipeline_proxy.h
+++ b/src/mongo/db/exec/pipeline_proxy.h
@@ -46,7 +46,7 @@ namespace mongo {
*/
class PipelineProxyStage : public PlanStage {
public:
- PipelineProxyStage(OperationContext* opCtx,
+ PipelineProxyStage(ExpressionContext* expCtx,
std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
WorkingSet* ws);
@@ -86,7 +86,7 @@ public:
static const char* kStageType;
protected:
- PipelineProxyStage(OperationContext* opCtx,
+ PipelineProxyStage(ExpressionContext* expCtx,
std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
WorkingSet* ws,
const char* stageTypeName);
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index a24e6af266a..3a9475a5ca4 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -34,6 +34,7 @@
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/exec/working_set.h"
+#include "mongo/db/pipeline/expression_context.h"
namespace mongo {
@@ -105,16 +106,18 @@ class RecordId;
*/
class PlanStage {
public:
- PlanStage(const char* typeName, OperationContext* opCtx)
- : _commonStats(typeName), _opCtx(opCtx) {}
+ PlanStage(const char* typeName, ExpressionContext* expCtx)
+ : _commonStats(typeName), _opCtx(expCtx->opCtx), _expCtx(expCtx) {
+ invariant(expCtx);
+ }
protected:
/**
* Obtain a PlanStage given a child stage. Called during the construction of derived
* PlanStage types with a single direct descendant.
*/
- PlanStage(OperationContext* opCtx, std::unique_ptr<PlanStage> child, const char* typeName)
- : PlanStage(typeName, opCtx) {
+ PlanStage(ExpressionContext* expCtx, std::unique_ptr<PlanStage> child, const char* typeName)
+ : PlanStage(typeName, expCtx) {
_children.push_back(std::move(child));
}
@@ -358,14 +361,14 @@ protected:
/**
* Does stage-specific detaching.
*
- * Implementations of this method cannot use the pointer returned from getOpCtx().
+ * Implementations of this method cannot use the pointer returned from opCtx().
*/
virtual void doDetachFromOperationContext() {}
/**
* Does stage-specific attaching.
*
- * If an OperationContext* is needed, use getOpCtx(), which will return a valid
+ * If an OperationContext* is needed, use opCtx(), which will return a valid
* OperationContext* (the one to which the stage is reattaching).
*/
virtual void doReattachToOperationContext() {}
@@ -377,15 +380,23 @@ protected:
ClockSource* getClock() const;
- OperationContext* getOpCtx() const {
+ OperationContext* opCtx() const {
return _opCtx;
}
+ ExpressionContext* expCtx() const {
+ return _expCtx;
+ }
+
Children _children;
CommonStats _commonStats;
private:
OperationContext* _opCtx;
+
+ // The PlanExecutor holds a strong reference to this which ensures that this pointer remains
+ // valid for the entire lifetime of the PlanStage.
+ ExpressionContext* _expCtx;
};
} // namespace mongo
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 6051f738c84..ea1d1623a75 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -117,12 +117,12 @@ auto rehydrateIndexKey(const BSONObj& keyPattern, const BSONObj& dehydratedKey)
}
} // namespace
-ProjectionStage::ProjectionStage(boost::intrusive_ptr<ExpressionContext> expCtx,
+ProjectionStage::ProjectionStage(ExpressionContext* expCtx,
const BSONObj& projObj,
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
const char* stageType)
- : PlanStage{expCtx->opCtx, std::move(child), stageType},
+ : PlanStage{expCtx, std::move(child), stageType},
_projObj{expCtx->explain ? boost::make_optional(projObj.getOwned()) : boost::none},
_ws{*ws} {}
@@ -178,7 +178,7 @@ ProjectionStageDefault::ProjectionStageDefault(boost::intrusive_ptr<ExpressionCo
const projection_ast::Projection* projection,
WorkingSet* ws,
std::unique_ptr<PlanStage> child)
- : ProjectionStage{expCtx, projObj, ws, std::move(child), "PROJECTION_DEFAULT"},
+ : ProjectionStage{expCtx.get(), projObj, ws, std::move(child), "PROJECTION_DEFAULT"},
_requestedMetadata{projection->metadataDeps()},
_projectType{projection->type()},
_executor{projection_executor::buildProjectionExecutor(
@@ -230,7 +230,7 @@ Status ProjectionStageDefault::transform(WorkingSetMember* member) const {
return Status::OK();
}
-ProjectionStageCovered::ProjectionStageCovered(boost::intrusive_ptr<ExpressionContext> expCtx,
+ProjectionStageCovered::ProjectionStageCovered(ExpressionContext* expCtx,
const BSONObj& projObj,
const projection_ast::Projection* projection,
WorkingSet* ws,
@@ -287,7 +287,7 @@ Status ProjectionStageCovered::transform(WorkingSetMember* member) const {
return Status::OK();
}
-ProjectionStageSimple::ProjectionStageSimple(boost::intrusive_ptr<ExpressionContext> expCtx,
+ProjectionStageSimple::ProjectionStageSimple(ExpressionContext* expCtx,
const BSONObj& projObj,
const projection_ast::Projection* projection,
WorkingSet* ws,
diff --git a/src/mongo/db/exec/projection.h b/src/mongo/db/exec/projection.h
index d84ba1168e0..5a5d525cc0a 100644
--- a/src/mongo/db/exec/projection.h
+++ b/src/mongo/db/exec/projection.h
@@ -43,7 +43,7 @@ namespace mongo {
*/
class ProjectionStage : public PlanStage {
protected:
- ProjectionStage(boost::intrusive_ptr<ExpressionContext> expCtx,
+ ProjectionStage(ExpressionContext* expCtx,
const BSONObj& projObj,
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
@@ -117,7 +117,7 @@ public:
/**
* ProjectionNodeCovered should obtain a fast-path object through this constructor.
*/
- ProjectionStageCovered(boost::intrusive_ptr<ExpressionContext> expCtx,
+ ProjectionStageCovered(ExpressionContext* expCtx,
const BSONObj& projObj,
const projection_ast::Projection* projection,
WorkingSet* ws,
@@ -156,7 +156,7 @@ public:
/**
* ProjectionNodeSimple should obtain a fast-path object through this constructor.
*/
- ProjectionStageSimple(boost::intrusive_ptr<ExpressionContext> expCtx,
+ ProjectionStageSimple(ExpressionContext* expCtx,
const BSONObj& projObj,
const projection_ast::Projection* projection,
WorkingSet* ws,
diff --git a/src/mongo/db/exec/projection_executor_builder_test.cpp b/src/mongo/db/exec/projection_executor_builder_test.cpp
index c5c4f1240bc..58543a23d54 100644
--- a/src/mongo/db/exec/projection_executor_builder_test.cpp
+++ b/src/mongo/db/exec/projection_executor_builder_test.cpp
@@ -231,8 +231,9 @@ TEST_F(ProjectionExecutorTestWithFallBackToDefault, CanProjectFindElemMatch) {
}
TEST_F(ProjectionExecutorTestWithFallBackToDefault, ElemMatchRespectsCollator) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- getExpCtx()->setCollator(&collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
+ getExpCtx()->setCollator(std::move(collator));
auto proj = parseWithFindFeaturesEnabled(fromjson("{a: {$elemMatch: {$gte: 'abc'}}}"));
auto executor = createProjectionExecutor(proj);
diff --git a/src/mongo/db/exec/queued_data_stage.cpp b/src/mongo/db/exec/queued_data_stage.cpp
index 2eee4232245..c5f6339dfaa 100644
--- a/src/mongo/db/exec/queued_data_stage.cpp
+++ b/src/mongo/db/exec/queued_data_stage.cpp
@@ -41,8 +41,8 @@ using std::vector;
const char* QueuedDataStage::kStageType = "QUEUED_DATA";
-QueuedDataStage::QueuedDataStage(OperationContext* opCtx, WorkingSet* ws)
- : PlanStage(kStageType, opCtx), _ws(ws) {}
+QueuedDataStage::QueuedDataStage(ExpressionContext* expCtx, WorkingSet* ws)
+ : PlanStage(kStageType, expCtx), _ws(ws) {}
PlanStage::StageState QueuedDataStage::doWork(WorkingSetID* out) {
if (isEOF()) {
diff --git a/src/mongo/db/exec/queued_data_stage.h b/src/mongo/db/exec/queued_data_stage.h
index abf91132bef..b952062803e 100644
--- a/src/mongo/db/exec/queued_data_stage.h
+++ b/src/mongo/db/exec/queued_data_stage.h
@@ -48,7 +48,7 @@ class RecordId;
*/
class QueuedDataStage final : public PlanStage {
public:
- QueuedDataStage(OperationContext* opCtx, WorkingSet* ws);
+ QueuedDataStage(ExpressionContext* expCtx, WorkingSet* ws);
StageState doWork(WorkingSetID* out) final;
diff --git a/src/mongo/db/exec/queued_data_stage_test.cpp b/src/mongo/db/exec/queued_data_stage_test.cpp
index 7e07bf0ff44..46ef8d371e2 100644
--- a/src/mongo/db/exec/queued_data_stage_test.cpp
+++ b/src/mongo/db/exec/queued_data_stage_test.cpp
@@ -37,6 +37,7 @@
#include <memory>
#include "mongo/db/exec/working_set.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/unittest/unittest.h"
@@ -48,6 +49,8 @@ namespace {
using std::unique_ptr;
+const static NamespaceString kNss("db.dummy");
+
class QueuedDataStageTest : public ServiceContextMongoDTest {
public:
QueuedDataStageTest() {
@@ -56,7 +59,7 @@ public:
}
protected:
- OperationContext* getOpCtx() {
+ OperationContext* opCtx() {
return _opCtx.get();
}
@@ -69,7 +72,8 @@ private:
//
TEST_F(QueuedDataStageTest, getValidStats) {
WorkingSet ws;
- auto mock = std::make_unique<QueuedDataStage>(getOpCtx(), &ws);
+ auto expCtx = make_intrusive<ExpressionContext>(opCtx(), nullptr, kNss);
+ auto mock = std::make_unique<QueuedDataStage>(expCtx.get(), &ws);
const CommonStats* commonStats = mock->getCommonStats();
ASSERT_EQUALS(commonStats->works, static_cast<size_t>(0));
const SpecificStats* specificStats = mock->getSpecificStats();
@@ -84,7 +88,8 @@ TEST_F(QueuedDataStageTest, getValidStats) {
TEST_F(QueuedDataStageTest, validateStats) {
WorkingSet ws;
WorkingSetID wsID;
- auto mock = std::make_unique<QueuedDataStage>(getOpCtx(), &ws);
+ auto expCtx = make_intrusive<ExpressionContext>(opCtx(), nullptr, kNss);
+ auto mock = std::make_unique<QueuedDataStage>(expCtx.get(), &ws);
// make sure that we're at all zero
const CommonStats* stats = mock->getCommonStats();
diff --git a/src/mongo/db/exec/record_store_fast_count.cpp b/src/mongo/db/exec/record_store_fast_count.cpp
index 392a8e09033..e7142458cb3 100644
--- a/src/mongo/db/exec/record_store_fast_count.cpp
+++ b/src/mongo/db/exec/record_store_fast_count.cpp
@@ -35,11 +35,11 @@ namespace mongo {
const char* RecordStoreFastCountStage::kStageType = "RECORD_STORE_FAST_COUNT";
-RecordStoreFastCountStage::RecordStoreFastCountStage(OperationContext* opCtx,
+RecordStoreFastCountStage::RecordStoreFastCountStage(ExpressionContext* expCtx,
Collection* collection,
long long skip,
long long limit)
- : RequiresCollectionStage(kStageType, opCtx, collection), _skip(skip), _limit(limit) {
+ : RequiresCollectionStage(kStageType, expCtx, collection), _skip(skip), _limit(limit) {
invariant(_skip >= 0);
invariant(_limit >= 0);
}
@@ -54,7 +54,7 @@ PlanStage::StageState RecordStoreFastCountStage::doWork(WorkingSetID* out) {
// This stage never returns a working set member.
*out = WorkingSet::INVALID_ID;
- long long nCounted = collection()->numRecords(getOpCtx());
+ long long nCounted = collection()->numRecords(opCtx());
if (_skip) {
nCounted -= _skip;
diff --git a/src/mongo/db/exec/record_store_fast_count.h b/src/mongo/db/exec/record_store_fast_count.h
index 973165969be..1986b333bb1 100644
--- a/src/mongo/db/exec/record_store_fast_count.h
+++ b/src/mongo/db/exec/record_store_fast_count.h
@@ -42,7 +42,7 @@ class RecordStoreFastCountStage final : public RequiresCollectionStage {
public:
static const char* kStageType;
- RecordStoreFastCountStage(OperationContext* opCtx,
+ RecordStoreFastCountStage(ExpressionContext* expCtx,
Collection* collection,
long long skip,
long long limit);
diff --git a/src/mongo/db/exec/requires_all_indices_stage.h b/src/mongo/db/exec/requires_all_indices_stage.h
index 68516960f3f..15beac52ebf 100644
--- a/src/mongo/db/exec/requires_all_indices_stage.h
+++ b/src/mongo/db/exec/requires_all_indices_stage.h
@@ -41,8 +41,10 @@ namespace mongo {
*/
class RequiresAllIndicesStage : public RequiresCollectionStage {
public:
- RequiresAllIndicesStage(const char* stageType, OperationContext* opCtx, const Collection* coll)
- : RequiresCollectionStage(stageType, opCtx, coll) {
+ RequiresAllIndicesStage(const char* stageType,
+ ExpressionContext* expCtx,
+ const Collection* coll)
+ : RequiresCollectionStage(stageType, expCtx, coll) {
auto allEntriesShared = coll->getIndexCatalog()->getAllReadyEntriesShared();
_indexCatalogEntries.reserve(allEntriesShared.size());
_indexNames.reserve(allEntriesShared.size());
diff --git a/src/mongo/db/exec/requires_collection_stage.cpp b/src/mongo/db/exec/requires_collection_stage.cpp
index 31ea9a4fa45..0d120e413cc 100644
--- a/src/mongo/db/exec/requires_collection_stage.cpp
+++ b/src/mongo/db/exec/requires_collection_stage.cpp
@@ -49,10 +49,10 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() {
// We should be holding a lock associated with the name of the collection prior to yielding,
// even if the collection was renamed during yield.
- dassert(getOpCtx()->lockState()->isCollectionLockedForMode(_nss, MODE_IS));
+ dassert(opCtx()->lockState()->isCollectionLockedForMode(_nss, MODE_IS));
- const CollectionCatalog& catalog = CollectionCatalog::get(getOpCtx());
- auto newNss = catalog.lookupNSSByUUID(getOpCtx(), _collectionUUID);
+ const CollectionCatalog& catalog = CollectionCatalog::get(opCtx());
+ auto newNss = catalog.lookupNSSByUUID(opCtx(), _collectionUUID);
uassert(ErrorCodes::QueryPlanKilled,
str::stream() << "collection dropped. UUID " << _collectionUUID,
newNss);
@@ -68,7 +68,7 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() {
// restored locks on the correct name. It is now safe to restore the Collection pointer. The
// collection must exist, since we already successfully looked up the namespace string by UUID
// under the correct lock manager locks.
- _collection = catalog.lookupCollectionByUUID(getOpCtx(), _collectionUUID);
+ _collection = catalog.lookupCollectionByUUID(opCtx(), _collectionUUID);
invariant(_collection);
uassert(ErrorCodes::QueryPlanKilled,
diff --git a/src/mongo/db/exec/requires_collection_stage.h b/src/mongo/db/exec/requires_collection_stage.h
index 896d9eb5181..3b0520281e1 100644
--- a/src/mongo/db/exec/requires_collection_stage.h
+++ b/src/mongo/db/exec/requires_collection_stage.h
@@ -55,8 +55,8 @@ namespace mongo {
template <typename CollectionT>
class RequiresCollectionStageBase : public PlanStage {
public:
- RequiresCollectionStageBase(const char* stageType, OperationContext* opCtx, CollectionT coll)
- : PlanStage(stageType, opCtx),
+ RequiresCollectionStageBase(const char* stageType, ExpressionContext* expCtx, CollectionT coll)
+ : PlanStage(stageType, expCtx),
_collection(coll),
_collectionUUID(_collection->uuid()),
_databaseEpoch(getDatabaseEpoch(_collection)),
@@ -94,8 +94,8 @@ private:
// collection pointer 'coll' must be non-null and must point to a valid collection.
uint64_t getDatabaseEpoch(CollectionT coll) const {
invariant(coll);
- auto databaseHolder = DatabaseHolder::get(getOpCtx());
- auto db = databaseHolder->getDb(getOpCtx(), coll->ns().ns());
+ auto databaseHolder = DatabaseHolder::get(opCtx());
+ auto db = databaseHolder->getDb(opCtx(), coll->ns().ns());
invariant(db);
return db->epoch();
}
diff --git a/src/mongo/db/exec/requires_index_stage.cpp b/src/mongo/db/exec/requires_index_stage.cpp
index 8e3e2382c9d..1073330fcf0 100644
--- a/src/mongo/db/exec/requires_index_stage.cpp
+++ b/src/mongo/db/exec/requires_index_stage.cpp
@@ -34,10 +34,10 @@
namespace mongo {
RequiresIndexStage::RequiresIndexStage(const char* stageType,
- OperationContext* opCtx,
+ ExpressionContext* expCtx,
const IndexDescriptor* indexDescriptor,
WorkingSet* workingSet)
- : RequiresCollectionStage(stageType, opCtx, indexDescriptor->getCollection()),
+ : RequiresCollectionStage(stageType, expCtx, indexDescriptor->getCollection()),
_weakIndexCatalogEntry(collection()->getIndexCatalog()->getEntryShared(indexDescriptor)) {
auto indexCatalogEntry = _weakIndexCatalogEntry.lock();
_indexDescriptor = indexCatalogEntry->descriptor();
diff --git a/src/mongo/db/exec/requires_index_stage.h b/src/mongo/db/exec/requires_index_stage.h
index 81d5649970e..374dc5f0615 100644
--- a/src/mongo/db/exec/requires_index_stage.h
+++ b/src/mongo/db/exec/requires_index_stage.h
@@ -48,7 +48,7 @@ namespace mongo {
class RequiresIndexStage : public RequiresCollectionStage {
public:
RequiresIndexStage(const char* stageType,
- OperationContext* opCtx,
+ ExpressionContext* expCtx,
const IndexDescriptor* indexDescriptor,
WorkingSet* workingSet);
diff --git a/src/mongo/db/exec/return_key.h b/src/mongo/db/exec/return_key.h
index 687b949d182..c9981dced1c 100644
--- a/src/mongo/db/exec/return_key.h
+++ b/src/mongo/db/exec/return_key.h
@@ -46,12 +46,12 @@ class ReturnKeyStage : public PlanStage {
public:
static constexpr StringData kStageName = "RETURN_KEY"_sd;
- ReturnKeyStage(OperationContext* opCtx,
+ ReturnKeyStage(ExpressionContext* expCtx,
std::vector<FieldPath> sortKeyMetaFields,
WorkingSet* ws,
SortKeyFormat sortKeyFormat,
std::unique_ptr<PlanStage> child)
- : PlanStage(opCtx, std::move(child), kStageName.rawData()),
+ : PlanStage(expCtx, std::move(child), kStageName.rawData()),
_ws(*ws),
_sortKeyMetaFields(std::move(sortKeyMetaFields)),
_sortKeyFormat(sortKeyFormat) {}
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index 51c67dfe697..dad89927d60 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -50,11 +50,11 @@ using std::vector;
// static
const char* ShardFilterStage::kStageType = "SHARDING_FILTER";
-ShardFilterStage::ShardFilterStage(OperationContext* opCtx,
+ShardFilterStage::ShardFilterStage(ExpressionContext* expCtx,
ScopedCollectionFilter collectionFilter,
WorkingSet* ws,
std::unique_ptr<PlanStage> child)
- : PlanStage(kStageType, opCtx), _ws(ws), _shardFilterer(std::move(collectionFilter)) {
+ : PlanStage(kStageType, expCtx), _ws(ws), _shardFilterer(std::move(collectionFilter)) {
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/shard_filter.h b/src/mongo/db/exec/shard_filter.h
index b902e3f54ee..5f0d8df7ea9 100644
--- a/src/mongo/db/exec/shard_filter.h
+++ b/src/mongo/db/exec/shard_filter.h
@@ -71,7 +71,7 @@ namespace mongo {
*/
class ShardFilterStage final : public PlanStage {
public:
- ShardFilterStage(OperationContext* opCtx,
+ ShardFilterStage(ExpressionContext* expCtx,
ScopedCollectionFilter collectionFilter,
WorkingSet* ws,
std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index bc488c1b410..94bb81153e4 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -43,11 +43,11 @@ using std::vector;
// static
const char* SkipStage::kStageType = "SKIP";
-SkipStage::SkipStage(OperationContext* opCtx,
+SkipStage::SkipStage(ExpressionContext* expCtx,
long long toSkip,
WorkingSet* ws,
std::unique_ptr<PlanStage> child)
- : PlanStage(kStageType, opCtx), _ws(ws), _toSkip(toSkip) {
+ : PlanStage(kStageType, expCtx), _ws(ws), _toSkip(toSkip) {
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/skip.h b/src/mongo/db/exec/skip.h
index 8751cb22471..24937662d02 100644
--- a/src/mongo/db/exec/skip.h
+++ b/src/mongo/db/exec/skip.h
@@ -44,7 +44,7 @@ namespace mongo {
*/
class SkipStage final : public PlanStage {
public:
- SkipStage(OperationContext* opCtx,
+ SkipStage(ExpressionContext* expCtx,
long long toSkip,
WorkingSet* ws,
std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 2fa2e6dfe3e..6b03db5b26f 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -40,7 +40,7 @@ SortStage::SortStage(boost::intrusive_ptr<ExpressionContext> expCtx,
SortPattern sortPattern,
bool addSortKeyMetadata,
std::unique_ptr<PlanStage> child)
- : PlanStage(kStageType.rawData(), expCtx->opCtx),
+ : PlanStage(kStageType.rawData(), expCtx.get()),
_ws(ws),
_sortKeyGen(sortPattern, expCtx->getCollator()),
_addSortKeyMetadata(addSortKeyMetadata) {
diff --git a/src/mongo/db/exec/sort_key_generator.cpp b/src/mongo/db/exec/sort_key_generator.cpp
index 98e2774a747..d01d2fdcc63 100644
--- a/src/mongo/db/exec/sort_key_generator.cpp
+++ b/src/mongo/db/exec/sort_key_generator.cpp
@@ -48,13 +48,13 @@ namespace mongo {
const char* SortKeyGeneratorStage::kStageType = "SORT_KEY_GENERATOR";
-SortKeyGeneratorStage::SortKeyGeneratorStage(const boost::intrusive_ptr<ExpressionContext>& pExpCtx,
+SortKeyGeneratorStage::SortKeyGeneratorStage(const boost::intrusive_ptr<ExpressionContext>& expCtx,
std::unique_ptr<PlanStage> child,
WorkingSet* ws,
const BSONObj& sortSpecObj)
- : PlanStage(kStageType, pExpCtx->opCtx),
+ : PlanStage(kStageType, expCtx.get()),
_ws(ws),
- _sortKeyGen({{sortSpecObj, pExpCtx}, pExpCtx->getCollator()}) {
+ _sortKeyGen({{sortSpecObj, expCtx}, expCtx->getCollator()}) {
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/sort_key_generator.h b/src/mongo/db/exec/sort_key_generator.h
index 5732f2008f6..2679902dd2e 100644
--- a/src/mongo/db/exec/sort_key_generator.h
+++ b/src/mongo/db/exec/sort_key_generator.h
@@ -50,7 +50,7 @@ class WorkingSetMember;
*/
class SortKeyGeneratorStage final : public PlanStage {
public:
- SortKeyGeneratorStage(const boost::intrusive_ptr<ExpressionContext>& pExpCtx,
+ SortKeyGeneratorStage(const boost::intrusive_ptr<ExpressionContext>& expCtx,
std::unique_ptr<PlanStage> child,
WorkingSet* ws,
const BSONObj& sortSpecObj);
diff --git a/src/mongo/db/exec/sort_test.cpp b/src/mongo/db/exec/sort_test.cpp
index 455b038dc3a..6c9bc2d9379 100644
--- a/src/mongo/db/exec/sort_test.cpp
+++ b/src/mongo/db/exec/sort_test.cpp
@@ -49,6 +49,8 @@ using namespace mongo;
namespace {
+static const NamespaceString kNss("db.dummy");
+
class SortStageDefaultTest : public ServiceContextMongoDTest {
public:
static constexpr uint64_t kMaxMemoryUsageBytes = 1024u * 1024u;
@@ -59,7 +61,7 @@ public:
CollatorFactoryInterface::set(getServiceContext(), std::make_unique<CollatorFactoryMock>());
}
- OperationContext* getOpCtx() {
+ OperationContext* opCtx() {
return _opCtx.get();
}
@@ -82,8 +84,11 @@ public:
// so it's fine to declare
WorkingSet ws;
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx(), CollatorInterface::cloneCollator(collator), kNss);
+
// QueuedDataStage will be owned by SortStageDefault.
- auto queuedDataStage = std::make_unique<QueuedDataStage>(getOpCtx(), &ws);
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(expCtx.get(), &ws);
BSONObj inputObj = fromjson(inputStr);
BSONElement inputElt = inputObj.getField("input");
ASSERT(inputElt.isABSONObj());
@@ -103,10 +108,6 @@ public:
auto sortPattern = fromjson(patternStr);
- // Create an ExpressionContext for the SortKeyGeneratorStage.
- auto expCtx =
- make_intrusive<ExpressionContext>(getOpCtx(), collator, NamespaceString("foo"));
-
auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>(
expCtx, std::move(queuedDataStage), &ws, sortPattern);
@@ -168,11 +169,10 @@ private:
TEST_F(SortStageDefaultTest, SortEmptyWorkingSet) {
WorkingSet ws;
- // Create an ExpressionContext for the SortKeyGeneratorStage.
- auto expCtx = make_intrusive<ExpressionContext>(getOpCtx(), nullptr, NamespaceString("foo"));
+ auto expCtx = make_intrusive<ExpressionContext>(opCtx(), nullptr, kNss);
// QueuedDataStage will be owned by SortStageDefault.
- auto queuedDataStage = std::make_unique<QueuedDataStage>(getOpCtx(), &ws);
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(expCtx.get(), &ws);
auto sortKeyGen =
std::make_unique<SortKeyGeneratorStage>(expCtx, std::move(queuedDataStage), &ws, BSONObj());
auto sortPattern = BSON("a" << 1);
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 34190125c51..7baabe4a011 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -148,6 +148,9 @@ public:
str::stream() << nss.toString() << " is not a valid namespace",
nss.isValid());
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), nss);
+
// Need a context to get the actual Collection*
// TODO A write lock is currently taken here to accommodate stages that perform writes
// (e.g. DeleteStage). This should be changed to use a read lock for read-only
@@ -172,12 +175,12 @@ public:
unique_ptr<WorkingSet> ws(new WorkingSet());
std::unique_ptr<PlanStage> userRoot{
- parseQuery(opCtx, collection, planObj, ws.get(), nss, &exprs)};
+ parseQuery(expCtx, collection, planObj, ws.get(), nss, &exprs)};
uassert(16911, "Couldn't parse plan from " + cmdObj.toString(), nullptr != userRoot);
// Add a fetch at the top for the user so we can get obj back for sure.
- unique_ptr<PlanStage> rootFetch =
- std::make_unique<FetchStage>(opCtx, ws.get(), std::move(userRoot), nullptr, collection);
+ unique_ptr<PlanStage> rootFetch = std::make_unique<FetchStage>(
+ expCtx.get(), ws.get(), std::move(userRoot), nullptr, collection);
auto statusWithPlanExecutor = PlanExecutor::make(
opCtx, std::move(ws), std::move(rootFetch), collection, PlanExecutor::YIELD_AUTO);
@@ -208,12 +211,14 @@ public:
return true;
}
- PlanStage* parseQuery(OperationContext* opCtx,
+ PlanStage* parseQuery(const boost::intrusive_ptr<ExpressionContext>& expCtx,
Collection* collection,
BSONObj obj,
WorkingSet* workingSet,
const NamespaceString& nss,
std::vector<std::unique_ptr<MatchExpression>>* exprs) {
+ OperationContext* opCtx = expCtx->opCtx;
+
BSONElement firstElt = obj.firstElement();
if (!firstElt.isABSONObj()) {
return nullptr;
@@ -235,9 +240,6 @@ public:
}
BSONObj argObj = e.Obj();
if (filterTag == e.fieldName()) {
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator, nss));
auto statusWithMatcher =
MatchExpressionParser::parse(argObj,
expCtx,
@@ -299,12 +301,12 @@ public:
params.direction = nodeArgs["direction"].numberInt();
params.shouldDedup = desc->isMultikey();
- return new IndexScan(opCtx, params, workingSet, matcher);
+ return new IndexScan(expCtx.get(), params, workingSet, matcher);
} else if ("andHash" == nodeName) {
uassert(
16921, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
- auto andStage = std::make_unique<AndHashStage>(opCtx, workingSet);
+ auto andStage = std::make_unique<AndHashStage>(expCtx.get(), workingSet);
int nodesAdded = 0;
BSONObjIterator it(nodeArgs["nodes"].Obj());
@@ -313,7 +315,7 @@ public:
uassert(16922, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj());
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, e.Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, e.Obj(), workingSet, nss, exprs)};
uassert(16923,
"Can't parse sub-node of AND: " + e.Obj().toString(),
nullptr != subNode);
@@ -328,7 +330,7 @@ public:
uassert(
16924, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
- auto andStage = std::make_unique<AndSortedStage>(opCtx, workingSet);
+ auto andStage = std::make_unique<AndSortedStage>(expCtx.get(), workingSet);
int nodesAdded = 0;
BSONObjIterator it(nodeArgs["nodes"].Obj());
@@ -337,7 +339,7 @@ public:
uassert(16925, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj());
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, e.Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, e.Obj(), workingSet, nss, exprs)};
uassert(16926,
"Can't parse sub-node of AND: " + e.Obj().toString(),
nullptr != subNode);
@@ -353,15 +355,15 @@ public:
16934, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
uassert(16935, "Dedup argument must be provided to OR", !nodeArgs["dedup"].eoo());
BSONObjIterator it(nodeArgs["nodes"].Obj());
- auto orStage =
- std::make_unique<OrStage>(opCtx, workingSet, nodeArgs["dedup"].Bool(), matcher);
+ auto orStage = std::make_unique<OrStage>(
+ expCtx.get(), workingSet, nodeArgs["dedup"].Bool(), matcher);
while (it.more()) {
BSONElement e = it.next();
if (!e.isABSONObj()) {
return nullptr;
}
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, e.Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, e.Obj(), workingSet, nss, exprs)};
uassert(
16936, "Can't parse sub-node of OR: " + e.Obj().toString(), nullptr != subNode);
orStage->addChild(std::move(subNode));
@@ -372,11 +374,12 @@ public:
uassert(
16929, "Node argument must be provided to fetch", nodeArgs["node"].isABSONObj());
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs)};
uassert(28731,
"Can't parse sub-node of FETCH: " + nodeArgs["node"].Obj().toString(),
nullptr != subNode);
- return new FetchStage(opCtx, workingSet, std::move(subNode), matcher, collection);
+ return new FetchStage(
+ expCtx.get(), workingSet, std::move(subNode), matcher, collection);
} else if ("limit" == nodeName) {
uassert(16937,
"Limit stage doesn't have a filter (put it on the child)",
@@ -385,12 +388,12 @@ public:
16930, "Node argument must be provided to limit", nodeArgs["node"].isABSONObj());
uassert(16931, "Num argument must be provided to limit", nodeArgs["num"].isNumber());
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs)};
uassert(28732,
"Can't parse sub-node of LIMIT: " + nodeArgs["node"].Obj().toString(),
nullptr != subNode);
return new LimitStage(
- opCtx, nodeArgs["num"].numberInt(), workingSet, std ::move(subNode));
+                    expCtx.get(), nodeArgs["num"].numberInt(), workingSet, std::move(subNode));
} else if ("skip" == nodeName) {
uassert(16938,
"Skip stage doesn't have a filter (put it on the child)",
@@ -398,12 +401,12 @@ public:
uassert(16932, "Node argument must be provided to skip", nodeArgs["node"].isABSONObj());
uassert(16933, "Num argument must be provided to skip", nodeArgs["num"].isNumber());
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs)};
uassert(28733,
"Can't parse sub-node of SKIP: " + nodeArgs["node"].Obj().toString(),
nullptr != subNode);
return new SkipStage(
- opCtx, nodeArgs["num"].numberInt(), workingSet, std::move(subNode));
+ expCtx.get(), nodeArgs["num"].numberInt(), workingSet, std::move(subNode));
} else if ("cscan" == nodeName) {
CollectionScanParams params;
@@ -417,7 +420,7 @@ public:
params.direction = CollectionScanParams::BACKWARD;
}
- return new CollectionScan(opCtx, collection, params, workingSet, matcher);
+ return new CollectionScan(expCtx.get(), collection, params, workingSet, matcher);
} else if ("mergeSort" == nodeName) {
uassert(
16971, "Nodes argument must be provided to sort", nodeArgs["nodes"].isABSONObj());
@@ -429,7 +432,7 @@ public:
params.pattern = nodeArgs["pattern"].Obj();
// Dedup is true by default.
- auto mergeStage = std::make_unique<MergeSortStage>(opCtx, params, workingSet);
+ auto mergeStage = std::make_unique<MergeSortStage>(expCtx.get(), params, workingSet);
BSONObjIterator it(nodeArgs["nodes"].Obj());
while (it.more()) {
@@ -437,7 +440,7 @@ public:
uassert(16973, "node of mergeSort isn't an obj?: " + e.toString(), e.isABSONObj());
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, e.Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, e.Obj(), workingSet, nss, exprs)};
uassert(16974,
"Can't parse sub-node of mergeSort: " + e.Obj().toString(),
nullptr != subNode);
@@ -475,7 +478,7 @@ public:
return nullptr;
}
- return new TextStage(opCtx, params, workingSet, matcher);
+ return new TextStage(expCtx.get(), params, workingSet, matcher);
} else if ("delete" == nodeName) {
uassert(18636,
"Delete stage doesn't have a filter (put it on the child)",
@@ -486,13 +489,14 @@ public:
"isMulti argument must be provided to delete",
nodeArgs["isMulti"].type() == Bool);
PlanStage* subNode =
- parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs);
+ parseQuery(expCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs);
uassert(28734,
"Can't parse sub-node of DELETE: " + nodeArgs["node"].Obj().toString(),
nullptr != subNode);
auto params = std::make_unique<DeleteStageParams>();
params->isMulti = nodeArgs["isMulti"].Bool();
- return new DeleteStage(opCtx, std::move(params), workingSet, collection, subNode);
+ return new DeleteStage(
+ expCtx.get(), std::move(params), workingSet, collection, subNode);
} else {
return nullptr;
}
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 32c4cfa0bb8..054f48e3a2b 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -59,12 +59,12 @@ using std::vector;
const char* SubplanStage::kStageType = "SUBPLAN";
-SubplanStage::SubplanStage(OperationContext* opCtx,
+SubplanStage::SubplanStage(ExpressionContext* expCtx,
const Collection* collection,
WorkingSet* ws,
const QueryPlannerParams& params,
CanonicalQuery* cq)
- : RequiresAllIndicesStage(kStageType, opCtx, collection),
+ : RequiresAllIndicesStage(kStageType, expCtx, collection),
_ws(ws),
_plannerParams(params),
_query(cq) {
@@ -122,7 +122,7 @@ Status SubplanStage::planSubqueries() {
MatchExpression* orChild = _orExpression->getChild(i);
// Turn the i-th child into its own query.
- auto statusWithCQ = CanonicalQuery::canonicalize(getOpCtx(), *_query, orChild);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), *_query, orChild);
if (!statusWithCQ.isOK()) {
str::stream ss;
ss << "Can't canonicalize subchild " << orChild->debugString() << " "
@@ -263,7 +263,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
// messages that can be generated if pickBestPlan yields.
invariant(_children.empty());
_children.emplace_back(
- std::make_unique<MultiPlanStage>(getOpCtx(),
+ std::make_unique<MultiPlanStage>(expCtx(),
collection(),
branchResult->canonicalQuery.get(),
MultiPlanStage::CachingMode::SometimesCache));
@@ -275,7 +275,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
// Dump all the solutions into the MPS.
for (size_t ix = 0; ix < branchResult->solutions.size(); ++ix) {
- auto nextPlanRoot = StageBuilder::build(getOpCtx(),
+ auto nextPlanRoot = StageBuilder::build(opCtx(),
collection(),
*branchResult->canonicalQuery,
*branchResult->solutions[ix],
@@ -362,8 +362,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
// Use the index tags from planning each branch to construct the composite solution,
// and set that solution as our child stage.
_ws->clear();
- auto root =
- StageBuilder::build(getOpCtx(), collection(), *_query, *_compositeSolution.get(), _ws);
+ auto root = StageBuilder::build(opCtx(), collection(), *_query, *_compositeSolution.get(), _ws);
invariant(_children.empty());
_children.emplace_back(std::move(root));
@@ -385,7 +384,7 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
if (1 == solutions.size()) {
// Only one possible plan. Run it. Build the stages from the solution.
- auto root = StageBuilder::build(getOpCtx(), collection(), *_query, *solutions[0], _ws);
+ auto root = StageBuilder::build(opCtx(), collection(), *_query, *solutions[0], _ws);
invariant(_children.empty());
_children.emplace_back(std::move(root));
@@ -398,7 +397,7 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
// Many solutions. Create a MultiPlanStage to pick the best, update the cache,
// and so on. The working set will be shared by all candidate plans.
invariant(_children.empty());
- _children.emplace_back(new MultiPlanStage(getOpCtx(), collection(), _query));
+ _children.emplace_back(new MultiPlanStage(expCtx(), collection(), _query));
MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(child().get());
for (size_t ix = 0; ix < solutions.size(); ++ix) {
@@ -407,7 +406,7 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
}
auto nextPlanRoot =
- StageBuilder::build(getOpCtx(), collection(), *_query, *solutions[ix], _ws);
+ StageBuilder::build(opCtx(), collection(), *_query, *solutions[ix], _ws);
multiPlanStage->addPlan(std::move(solutions[ix]), std::move(nextPlanRoot), _ws);
}
diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h
index f027d0412d3..07d8f956ca8 100644
--- a/src/mongo/db/exec/subplan.h
+++ b/src/mongo/db/exec/subplan.h
@@ -69,7 +69,7 @@ class OperationContext;
*/
class SubplanStage final : public RequiresAllIndicesStage {
public:
- SubplanStage(OperationContext* opCtx,
+ SubplanStage(ExpressionContext* expCtx,
const Collection* collection,
WorkingSet* ws,
const QueryPlannerParams& params,
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index 9efb4915c61..7b7e4598e43 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -56,12 +56,12 @@ using fts::MAX_WEIGHT;
const char* TextStage::kStageType = "TEXT";
-TextStage::TextStage(OperationContext* opCtx,
+TextStage::TextStage(ExpressionContext* expCtx,
const TextStageParams& params,
WorkingSet* ws,
const MatchExpression* filter)
- : PlanStage(kStageType, opCtx), _params(params) {
- _children.emplace_back(buildTextTree(opCtx, ws, filter, params.wantTextScore));
+ : PlanStage(kStageType, expCtx), _params(params) {
+ _children.emplace_back(buildTextTree(expCtx->opCtx, ws, filter, params.wantTextScore));
_specificStats.indexPrefix = _params.indexPrefix;
_specificStats.indexName = _params.index->indexName();
_specificStats.parsedTextQuery = _params.query.toBSON();
@@ -112,7 +112,7 @@ unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* opCtx,
ixparams.direction = -1;
ixparams.shouldDedup = _params.index->isMultikey();
- indexScanList.push_back(std::make_unique<IndexScan>(opCtx, ixparams, ws, nullptr));
+ indexScanList.push_back(std::make_unique<IndexScan>(expCtx(), ixparams, ws, nullptr));
}
// Build the union of the index scans as a TEXT_OR or an OR stage, depending on whether the
@@ -122,16 +122,16 @@ unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* opCtx,
// We use a TEXT_OR stage to get the union of the results from the index scans and then
// compute their text scores. This is a blocking operation.
auto textScorer =
- std::make_unique<TextOrStage>(opCtx, _params.spec, ws, filter, collection);
+ std::make_unique<TextOrStage>(expCtx(), _params.spec, ws, filter, collection);
textScorer->addChildren(std::move(indexScanList));
textMatchStage = std::make_unique<TextMatchStage>(
- opCtx, std::move(textScorer), _params.query, _params.spec, ws);
+ expCtx(), std::move(textScorer), _params.query, _params.spec, ws);
} else {
// Because we don't need the text score, we can use a non-blocking OR stage to get the union
// of the index scans.
- auto textSearcher = std::make_unique<OrStage>(opCtx, ws, true, filter);
+ auto textSearcher = std::make_unique<OrStage>(expCtx(), ws, true, filter);
textSearcher->addChildren(std::move(indexScanList));
@@ -140,10 +140,10 @@ unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* opCtx,
// WorkingSetMember inputs have fetched data.
const MatchExpression* emptyFilter = nullptr;
auto fetchStage = std::make_unique<FetchStage>(
- opCtx, ws, std::move(textSearcher), emptyFilter, collection);
+ expCtx(), ws, std::move(textSearcher), emptyFilter, collection);
textMatchStage = std::make_unique<TextMatchStage>(
- opCtx, std::move(fetchStage), _params.query, _params.spec, ws);
+ expCtx(), std::move(fetchStage), _params.query, _params.spec, ws);
}
return textMatchStage;
diff --git a/src/mongo/db/exec/text.h b/src/mongo/db/exec/text.h
index e96fbd5b602..1e601d769de 100644
--- a/src/mongo/db/exec/text.h
+++ b/src/mongo/db/exec/text.h
@@ -73,7 +73,7 @@ struct TextStageParams {
*/
class TextStage final : public PlanStage {
public:
- TextStage(OperationContext* opCtx,
+ TextStage(ExpressionContext* expCtx,
const TextStageParams& params,
WorkingSet* ws,
const MatchExpression* filter);
diff --git a/src/mongo/db/exec/text_match.cpp b/src/mongo/db/exec/text_match.cpp
index b405608c134..c0d15c0e2fb 100644
--- a/src/mongo/db/exec/text_match.cpp
+++ b/src/mongo/db/exec/text_match.cpp
@@ -45,12 +45,12 @@ using std::vector;
const char* TextMatchStage::kStageType = "TEXT_MATCH";
-TextMatchStage::TextMatchStage(OperationContext* opCtx,
+TextMatchStage::TextMatchStage(ExpressionContext* expCtx,
unique_ptr<PlanStage> child,
const FTSQueryImpl& query,
const FTSSpec& spec,
WorkingSet* ws)
- : PlanStage(kStageType, opCtx), _ftsMatcher(query, spec), _ws(ws) {
+ : PlanStage(kStageType, expCtx), _ftsMatcher(query, spec), _ws(ws) {
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/text_match.h b/src/mongo/db/exec/text_match.h
index 6155de1d48e..9ba74a5ac19 100644
--- a/src/mongo/db/exec/text_match.h
+++ b/src/mongo/db/exec/text_match.h
@@ -56,7 +56,7 @@ class RecordID;
*/
class TextMatchStage final : public PlanStage {
public:
- TextMatchStage(OperationContext* opCtx,
+ TextMatchStage(ExpressionContext* expCtx,
std::unique_ptr<PlanStage> child,
const FTSQueryImpl& query,
const FTSSpec& spec,
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index 8202ad623c7..4f92025575a 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -52,12 +52,12 @@ using fts::FTSSpec;
const char* TextOrStage::kStageType = "TEXT_OR";
-TextOrStage::TextOrStage(OperationContext* opCtx,
+TextOrStage::TextOrStage(ExpressionContext* expCtx,
const FTSSpec& ftsSpec,
WorkingSet* ws,
const MatchExpression* filter,
const Collection* collection)
- : RequiresCollectionStage(kStageType, opCtx, collection),
+ : RequiresCollectionStage(kStageType, expCtx, collection),
_ftsSpec(ftsSpec),
_ws(ws),
_scoreIterator(_scores.end()),
@@ -97,7 +97,7 @@ void TextOrStage::doDetachFromOperationContext() {
void TextOrStage::doReattachToOperationContext() {
if (_recordCursor)
- _recordCursor->reattachToOperationContext(getOpCtx());
+ _recordCursor->reattachToOperationContext(opCtx());
}
std::unique_ptr<PlanStageStats> TextOrStage::getStats() {
@@ -152,7 +152,7 @@ PlanStage::StageState TextOrStage::doWork(WorkingSetID* out) {
PlanStage::StageState TextOrStage::initStage(WorkingSetID* out) {
*out = WorkingSet::INVALID_ID;
try {
- _recordCursor = collection()->getCursor(getOpCtx());
+ _recordCursor = collection()->getCursor(opCtx());
_internalState = State::kReadingTerms;
return PlanStage::NEED_TIME;
} catch (const WriteConflictException&) {
@@ -268,8 +268,7 @@ PlanStage::StageState TextOrStage::addTerm(WorkingSetID wsid, WorkingSetID* out)
// Our parent expects RID_AND_OBJ members, so we fetch the document here if we haven't
// already.
try {
- if (!WorkingSetCommon::fetch(
- getOpCtx(), _ws, wsid, _recordCursor, collection()->ns())) {
+ if (!WorkingSetCommon::fetch(opCtx(), _ws, wsid, _recordCursor, collection()->ns())) {
_ws->free(wsid);
textRecordData->score = -1;
return NEED_TIME;
diff --git a/src/mongo/db/exec/text_or.h b/src/mongo/db/exec/text_or.h
index 724ecba61a3..8b57b2f07e7 100644
--- a/src/mongo/db/exec/text_or.h
+++ b/src/mongo/db/exec/text_or.h
@@ -67,7 +67,7 @@ public:
kDone,
};
- TextOrStage(OperationContext* opCtx,
+ TextOrStage(ExpressionContext* expCtx,
const FTSSpec& ftsSpec,
WorkingSet* ws,
const MatchExpression* filter,
diff --git a/src/mongo/db/exec/trial_stage.cpp b/src/mongo/db/exec/trial_stage.cpp
index a9e6c678000..908640e71ec 100644
--- a/src/mongo/db/exec/trial_stage.cpp
+++ b/src/mongo/db/exec/trial_stage.cpp
@@ -46,13 +46,13 @@ namespace mongo {
const char* TrialStage::kStageType = "TRIAL";
-TrialStage::TrialStage(OperationContext* opCtx,
+TrialStage::TrialStage(ExpressionContext* expCtx,
WorkingSet* ws,
std::unique_ptr<PlanStage> trialPlan,
std::unique_ptr<PlanStage> backupPlan,
size_t maxTrialWorks,
double minWorkAdvancedRatio)
- : PlanStage(kStageType, opCtx), _ws(ws) {
+ : PlanStage(kStageType, expCtx), _ws(ws) {
invariant(minWorkAdvancedRatio > 0);
invariant(minWorkAdvancedRatio <= 1);
invariant(maxTrialWorks > 0);
@@ -64,7 +64,7 @@ TrialStage::TrialStage(OperationContext* opCtx,
_backupPlan = std::move(backupPlan);
// We need to cache results during the trial phase in case it succeeds.
- _queuedData = std::make_unique<QueuedDataStage>(opCtx, _ws);
+ _queuedData = std::make_unique<QueuedDataStage>(expCtx, _ws);
// Set up stats tracking specific to this stage.
_specificStats.successThreshold = minWorkAdvancedRatio;
@@ -175,8 +175,7 @@ void TrialStage::_assessTrialAndBuildFinalPlan() {
// The trial plan succeeded, but we need to build a plan that includes the queued data. Create a
// final plan which UNIONs across the QueuedDataStage and the trial plan.
- std::unique_ptr<PlanStage> unionPlan =
- std::make_unique<OrStage>(getOpCtx(), _ws, false, nullptr);
+ std::unique_ptr<PlanStage> unionPlan = std::make_unique<OrStage>(expCtx(), _ws, false, nullptr);
static_cast<OrStage*>(unionPlan.get())->addChild(std::move(_queuedData));
static_cast<OrStage*>(unionPlan.get())->addChild(std::move(_children.front()));
_replaceCurrentPlan(unionPlan);
@@ -208,19 +207,19 @@ void TrialStage::doDetachFromOperationContext() {
void TrialStage::doReattachToOperationContext() {
if (_backupPlan) {
- _backupPlan->reattachToOperationContext(getOpCtx());
+ _backupPlan->reattachToOperationContext(opCtx());
}
if (_queuedData) {
- _queuedData->reattachToOperationContext(getOpCtx());
+ _queuedData->reattachToOperationContext(opCtx());
}
}
void TrialStage::doDispose() {
if (_backupPlan) {
- _backupPlan->dispose(getOpCtx());
+ _backupPlan->dispose(opCtx());
}
if (_queuedData) {
- _queuedData->dispose(getOpCtx());
+ _queuedData->dispose(opCtx());
}
}
diff --git a/src/mongo/db/exec/trial_stage.h b/src/mongo/db/exec/trial_stage.h
index efa3c9c5fa5..9da1d04beab 100644
--- a/src/mongo/db/exec/trial_stage.h
+++ b/src/mongo/db/exec/trial_stage.h
@@ -58,7 +58,7 @@ public:
* Constructor. Both 'trialPlan' and 'backupPlan' must be non-nullptr; 'maxTrialEWorks' must be
* greater than 0, and 'minWorkAdvancedRatio' must be in the range (0,1].
*/
- TrialStage(OperationContext* opCtx,
+ TrialStage(ExpressionContext* expCtx,
WorkingSet* ws,
std::unique_ptr<PlanStage> trialPlan,
std::unique_ptr<PlanStage> backupPlan,
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 829ed4fd95f..6561aefea06 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -110,23 +110,23 @@ const char* UpdateStage::kStageType = "UPDATE";
const UpdateStats UpdateStage::kEmptyUpdateStats;
// Public constructor.
-UpdateStage::UpdateStage(OperationContext* opCtx,
+UpdateStage::UpdateStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection,
PlanStage* child)
- : UpdateStage(opCtx, params, ws, collection) {
+ : UpdateStage(expCtx, params, ws, collection) {
// We should never reach here if the request is an upsert.
invariant(!_params.request->isUpsert());
_children.emplace_back(child);
}
// Protected constructor.
-UpdateStage::UpdateStage(OperationContext* opCtx,
+UpdateStage::UpdateStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection)
- : RequiresMutableCollectionStage(kStageType, opCtx, collection),
+ : RequiresMutableCollectionStage(kStageType, expCtx, collection),
_params(params),
_ws(ws),
_doc(params.driver->getDocument()),
@@ -147,7 +147,7 @@ UpdateStage::UpdateStage(OperationContext* opCtx,
_shouldCheckForShardKeyUpdate =
!(request->isFromOplogApplication() || request->getNamespaceString().isConfigDB() ||
request->isFromMigration()) &&
- OperationShardingState::isOperationVersioned(opCtx);
+ OperationShardingState::isOperationVersioned(expCtx->opCtx);
_specificStats.isModUpdate = params.driver->type() == UpdateDriver::UpdateType::kOperator;
}
@@ -175,14 +175,14 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
bool docWasModified = false;
- auto* const css = CollectionShardingState::get(getOpCtx(), collection()->ns());
+ auto* const css = CollectionShardingState::get(opCtx(), collection()->ns());
auto metadata = css->getCurrentMetadata();
Status status = Status::OK();
- const bool validateForStorage = getOpCtx()->writesAreReplicated() && _enforceOkForStorage;
+ const bool validateForStorage = opCtx()->writesAreReplicated() && _enforceOkForStorage;
const bool isInsert = false;
FieldRefSet immutablePaths;
- if (getOpCtx()->writesAreReplicated() && !request->isFromMigration()) {
- if (metadata->isSharded() && !OperationShardingState::isOperationVersioned(getOpCtx())) {
+ if (opCtx()->writesAreReplicated() && !request->isFromMigration()) {
+ if (metadata->isSharded() && !OperationShardingState::isOperationVersioned(opCtx())) {
immutablePaths.fillFrom(metadata->getKeyPatternFields());
}
immutablePaths.keepShortest(&idFieldRef);
@@ -278,10 +278,10 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
}
}
- WriteUnitOfWork wunit(getOpCtx());
+ WriteUnitOfWork wunit(opCtx());
StatusWith<RecordData> newRecStatus = collection()->updateDocumentWithDamages(
- getOpCtx(), recordId, std::move(snap), source, _damages, &args);
- invariant(oldObj.snapshotId() == getOpCtx()->recoveryUnit()->getSnapshotId());
+ opCtx(), recordId, std::move(snap), source, _damages, &args);
+ invariant(oldObj.snapshotId() == opCtx()->recoveryUnit()->getSnapshotId());
wunit.commit();
newObj = uassertStatusOK(std::move(newRecStatus)).releaseToBson();
@@ -306,15 +306,15 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
}
}
- WriteUnitOfWork wunit(getOpCtx());
- newRecordId = collection()->updateDocument(getOpCtx(),
+ WriteUnitOfWork wunit(opCtx());
+ newRecordId = collection()->updateDocument(opCtx(),
recordId,
oldObj,
newObj,
driver->modsAffectIndices(),
_params.opDebug,
&args);
- invariant(oldObj.snapshotId() == getOpCtx()->recoveryUnit()->getSnapshotId());
+ invariant(oldObj.snapshotId() == opCtx()->recoveryUnit()->getSnapshotId());
wunit.commit();
}
}
@@ -498,7 +498,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
bool docStillMatches;
try {
docStillMatches = write_stage_common::ensureStillMatches(
- collection(), getOpCtx(), _ws, id, _params.canonicalQuery);
+ collection(), opCtx(), _ws, id, _params.canonicalQuery);
} catch (const WriteConflictException&) {
// There was a problem trying to detect if the document still exists, so retry.
memberFreer.dismiss();
@@ -544,8 +544,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// Set member's obj to be the doc we want to return.
if (_params.request->shouldReturnAnyDocs()) {
if (_params.request->shouldReturnNewDocs()) {
- member->resetDocument(getOpCtx()->recoveryUnit()->getSnapshotId(),
- newObj.getOwned());
+ member->resetDocument(opCtx()->recoveryUnit()->getSnapshotId(), newObj.getOwned());
} else {
invariant(_params.request->shouldReturnOldDocs());
member->resetDocument(oldSnapshot, oldObj);
@@ -628,8 +627,8 @@ void UpdateStage::doRestoreStateRequiresCollection() {
const NamespaceString& nsString(request.getNamespaceString());
// We may have stepped down during the yield.
- bool userInitiatedWritesAndNotPrimary = getOpCtx()->writesAreReplicated() &&
- !repl::ReplicationCoordinator::get(getOpCtx())->canAcceptWritesFor(getOpCtx(), nsString);
+ bool userInitiatedWritesAndNotPrimary = opCtx()->writesAreReplicated() &&
+ !repl::ReplicationCoordinator::get(opCtx())->canAcceptWritesFor(opCtx(), nsString);
if (userInitiatedWritesAndNotPrimary) {
uasserted(ErrorCodes::PrimarySteppedDown,
@@ -639,7 +638,7 @@ void UpdateStage::doRestoreStateRequiresCollection() {
// The set of indices may have changed during yield. Make sure that the update driver has up to
// date index information.
- const auto& updateIndexData = CollectionQueryInfo::get(collection()).getIndexKeys(getOpCtx());
+ const auto& updateIndexData = CollectionQueryInfo::get(collection()).getIndexKeys(opCtx());
_params.driver->refreshIndexKeys(&updateIndexData);
}
@@ -731,12 +730,12 @@ bool UpdateStage::checkUpdateChangesShardKeyFields(ScopedCollectionMetadata meta
uassert(ErrorCodes::IllegalOperation,
"Must run update to shard key field in a multi-statement transaction or with "
"retryWrites: true.",
- getOpCtx()->getTxnNumber() || !getOpCtx()->writesAreReplicated());
+ opCtx()->getTxnNumber() || !opCtx()->writesAreReplicated());
if (!metadata->keyBelongsToMe(newShardKey)) {
if (MONGO_unlikely(hangBeforeThrowWouldChangeOwningShard.shouldFail())) {
LOGV2(20605, "Hit hangBeforeThrowWouldChangeOwningShard failpoint");
- hangBeforeThrowWouldChangeOwningShard.pauseWhileSet(getOpCtx());
+ hangBeforeThrowWouldChangeOwningShard.pauseWhileSet(opCtx());
}
uasserted(WouldChangeOwningShardInfo(oldObj.value(), newObj, false /* upsert */),
diff --git a/src/mongo/db/exec/update_stage.h b/src/mongo/db/exec/update_stage.h
index cf167d603c0..a24cb2f50f9 100644
--- a/src/mongo/db/exec/update_stage.h
+++ b/src/mongo/db/exec/update_stage.h
@@ -81,7 +81,7 @@ class UpdateStage : public RequiresMutableCollectionStage {
UpdateStage& operator=(const UpdateStage&) = delete;
public:
- UpdateStage(OperationContext* opCtx,
+ UpdateStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection,
@@ -126,7 +126,7 @@ public:
const DuplicateKeyErrorInfo& errorInfo);
protected:
- UpdateStage(OperationContext* opCtx,
+ UpdateStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection);
diff --git a/src/mongo/db/exec/upsert_stage.cpp b/src/mongo/db/exec/upsert_stage.cpp
index 387613df82f..c3613e5242b 100644
--- a/src/mongo/db/exec/upsert_stage.cpp
+++ b/src/mongo/db/exec/upsert_stage.cpp
@@ -73,12 +73,12 @@ void getShardKeyAndImmutablePaths(OperationContext* opCtx,
}
} // namespace
-UpsertStage::UpsertStage(OperationContext* opCtx,
+UpsertStage::UpsertStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection,
PlanStage* child)
- : UpdateStage(opCtx, params, ws, collection) {
+ : UpdateStage(expCtx, params, ws, collection) {
// We should never create this stage for a non-upsert request.
invariant(_params.request->isUpsert());
_children.emplace_back(child);
@@ -114,7 +114,7 @@ PlanStage::StageState UpsertStage::doWork(WorkingSetID* out) {
// Determine whether this is a user-initiated or internal request.
const bool isInternalRequest =
- !getOpCtx()->writesAreReplicated() || _params.request->isFromMigration();
+ !opCtx()->writesAreReplicated() || _params.request->isFromMigration();
// Generate the new document to be inserted.
_specificStats.objInserted = _produceNewDocumentForInsert(isInternalRequest);
@@ -132,7 +132,7 @@ PlanStage::StageState UpsertStage::doWork(WorkingSetID* out) {
BSONObj newObj = _specificStats.objInserted;
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
- member->resetDocument(getOpCtx()->recoveryUnit()->getSnapshotId(), newObj.getOwned());
+ member->resetDocument(opCtx()->recoveryUnit()->getSnapshotId(), newObj.getOwned());
member->transitionToOwnedObj();
return PlanStage::ADVANCED;
}
@@ -147,7 +147,7 @@ void UpsertStage::_performInsert(BSONObj newDocument) {
// 'q' field belong to this shard, but those in the 'u' field do not. In this case we need to
// throw so that MongoS can target the insert to the correct shard.
if (_shouldCheckForShardKeyUpdate) {
- auto* const css = CollectionShardingState::get(getOpCtx(), collection()->ns());
+ auto* const css = CollectionShardingState::get(opCtx(), collection()->ns());
const auto& metadata = css->getCurrentMetadata();
if (metadata->isSharded()) {
@@ -162,7 +162,7 @@ void UpsertStage::_performInsert(BSONObj newDocument) {
"query, since its shard key belongs on a different shard. Cross-shard "
"upserts are only allowed when running in a transaction or with "
"retryWrites: true.",
- getOpCtx()->getTxnNumber());
+ opCtx()->getTxnNumber());
uasserted(WouldChangeOwningShardInfo(
_params.request->getQuery(), newDocument, true /* upsert */),
"The document we are inserting belongs on a different shard");
@@ -172,13 +172,13 @@ void UpsertStage::_performInsert(BSONObj newDocument) {
if (MONGO_unlikely(hangBeforeUpsertPerformsInsert.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
- &hangBeforeUpsertPerformsInsert, getOpCtx(), "hangBeforeUpsertPerformsInsert");
+ &hangBeforeUpsertPerformsInsert, opCtx(), "hangBeforeUpsertPerformsInsert");
}
- writeConflictRetry(getOpCtx(), "upsert", collection()->ns().ns(), [&] {
- WriteUnitOfWork wunit(getOpCtx());
+ writeConflictRetry(opCtx(), "upsert", collection()->ns().ns(), [&] {
+ WriteUnitOfWork wunit(opCtx());
uassertStatusOK(
- collection()->insertDocument(getOpCtx(),
+ collection()->insertDocument(opCtx(),
InsertStatement(_params.request->getStmtId(), newDocument),
_params.opDebug,
_params.request->isFromMigration()));
@@ -192,13 +192,13 @@ void UpsertStage::_performInsert(BSONObj newDocument) {
BSONObj UpsertStage::_produceNewDocumentForInsert(bool isInternalRequest) {
// Obtain the sharding metadata. This will be needed to compute the shardKey paths. The metadata
// must remain in scope since it owns the pointers used by 'shardKeyPaths' and 'immutablePaths'.
- auto* css = CollectionShardingState::get(getOpCtx(), _params.request->getNamespaceString());
+ auto* css = CollectionShardingState::get(opCtx(), _params.request->getNamespaceString());
auto metadata = css->getCurrentMetadata();
// Compute the set of shard key paths and the set of immutable paths. Either may be empty.
FieldRefSet shardKeyPaths, immutablePaths;
getShardKeyAndImmutablePaths(
- getOpCtx(), metadata, isInternalRequest, &shardKeyPaths, &immutablePaths);
+ opCtx(), metadata, isInternalRequest, &shardKeyPaths, &immutablePaths);
// Reset the document into which we will be writing.
_doc.reset();
diff --git a/src/mongo/db/exec/upsert_stage.h b/src/mongo/db/exec/upsert_stage.h
index 092439bcf3d..8b149c26c7d 100644
--- a/src/mongo/db/exec/upsert_stage.h
+++ b/src/mongo/db/exec/upsert_stage.h
@@ -51,7 +51,7 @@ class UpsertStage final : public UpdateStage {
UpsertStage& operator=(const UpsertStage&) = delete;
public:
- UpsertStage(OperationContext* opCtx,
+ UpsertStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection,
diff --git a/src/mongo/db/index/sort_key_generator_test.cpp b/src/mongo/db/index/sort_key_generator_test.cpp
index 5f4834bbdce..17c66fe7d59 100644
--- a/src/mongo/db/index/sort_key_generator_test.cpp
+++ b/src/mongo/db/index/sort_key_generator_test.cpp
@@ -46,7 +46,7 @@ namespace {
std::unique_ptr<SortKeyGenerator> makeSortKeyGen(const BSONObj& sortSpec,
const CollatorInterface* collator) {
boost::intrusive_ptr<ExpressionContext> pExpCtx(new ExpressionContextForTest());
- pExpCtx->setCollator(collator);
+ pExpCtx->setCollator(CollatorInterface::cloneCollator(collator));
SortPattern sortPattern{sortSpec, pExpCtx};
return std::make_unique<SortKeyGenerator>(std::move(sortPattern), collator);
}
diff --git a/src/mongo/db/matcher/expression_algo_test.cpp b/src/mongo/db/matcher/expression_algo_test.cpp
index 68075fbb396..49a72946651 100644
--- a/src/mongo/db/matcher/expression_algo_test.cpp
+++ b/src/mongo/db/matcher/expression_algo_test.cpp
@@ -52,9 +52,9 @@ class ParsedMatchExpression {
public:
ParsedMatchExpression(const std::string& str, const CollatorInterface* collator = nullptr)
: _obj(fromjson(str)) {
- boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(collator);
- StatusWithMatchExpression result = MatchExpressionParser::parse(_obj, std::move(expCtx));
+ _expCtx = make_intrusive<ExpressionContextForTest>();
+ _expCtx->setCollator(CollatorInterface::cloneCollator(collator));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(_obj, _expCtx);
ASSERT_OK(result.getStatus());
_expr = std::move(result.getValue());
}
@@ -66,6 +66,7 @@ public:
private:
const BSONObj _obj;
std::unique_ptr<MatchExpression> _expr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
};
TEST(ExpressionAlgoIsSubsetOf, NullAndOmittedField) {
diff --git a/src/mongo/db/matcher/expression_expr.cpp b/src/mongo/db/matcher/expression_expr.cpp
index 138ce2d376a..01f3affa84c 100644
--- a/src/mongo/db/matcher/expression_expr.cpp
+++ b/src/mongo/db/matcher/expression_expr.cpp
@@ -92,8 +92,12 @@ bool ExprMatchExpression::equivalent(const MatchExpression* other) const {
}
void ExprMatchExpression::_doSetCollator(const CollatorInterface* collator) {
- _expCtx->setCollator(collator);
-
+ // This function is used to give match expression nodes which don't keep a pointer to the
+ // ExpressionContext access to the ExpressionContext's collator. Since the operation only ever
+ // has a single CollatorInterface, and since that collator is kept on the ExpressionContext,
+ // the collator pointer that we're propagating throughout the MatchExpression tree must match
+ // the one inside the ExpressionContext.
+ invariant(collator == _expCtx->getCollator());
if (_rewriteResult && _rewriteResult->matchExpression()) {
_rewriteResult->matchExpression()->setCollator(collator);
}
diff --git a/src/mongo/db/matcher/expression_expr_test.cpp b/src/mongo/db/matcher/expression_expr_test.cpp
index 87b9025510c..e9e2e6eab4b 100644
--- a/src/mongo/db/matcher/expression_expr_test.cpp
+++ b/src/mongo/db/matcher/expression_expr_test.cpp
@@ -59,8 +59,8 @@ public:
_matchExpression = MatchExpression::optimize(std::move(_matchExpression));
}
- void setCollator(CollatorInterface* collator) {
- _expCtx->setCollator(collator);
+ void setCollator(std::unique_ptr<CollatorInterface> collator) {
+ _expCtx->setCollator(std::move(collator));
if (_matchExpression) {
_matchExpression->setCollator(_expCtx->getCollator());
}
@@ -548,7 +548,7 @@ TEST_F(ExprMatchTest,
TEST_F(ExprMatchTest, InitialCollationUsedForComparisons) {
auto collator =
std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
- setCollator(collator.get());
+ setCollator(std::move(collator));
createMatcher(fromjson("{$expr: {$eq: ['$x', 'abc']}}"));
ASSERT_TRUE(matches(BSON("x"
@@ -563,7 +563,7 @@ TEST_F(ExprMatchTest, SetCollatorChangesCollationUsedForComparisons) {
auto collator =
std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
- setCollator(collator.get());
+ setCollator(std::move(collator));
ASSERT_TRUE(matches(BSON("x"
<< "AbC")));
diff --git a/src/mongo/db/matcher/expression_parser_array_test.cpp b/src/mongo/db/matcher/expression_parser_array_test.cpp
index 8ead6ff5d2b..c519ee2b374 100644
--- a/src/mongo/db/matcher/expression_parser_array_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_array_test.cpp
@@ -766,8 +766,9 @@ TEST(MatchExpressionParserArrayTest, AllStringNullCollation) {
TEST(MatchExpressionParserArrayTest, AllStringCollation) {
BSONObj query = BSON("x" << BSON("$all" << BSON_ARRAY("string")));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
- expCtx->setCollator(&collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_TRUE(result.isOK());
ASSERT_EQUALS(MatchExpression::AND, result.getValue()->matchType());
@@ -775,6 +776,6 @@ TEST(MatchExpressionParserArrayTest, AllStringCollation) {
MatchExpression* child = result.getValue()->getChild(0);
ASSERT_EQUALS(MatchExpression::EQ, child->matchType());
EqualityMatchExpression* eqMatch = static_cast<EqualityMatchExpression*>(child);
- ASSERT_TRUE(eqMatch->getCollator() == &collator);
+ ASSERT_TRUE(eqMatch->getCollator() == expCtx->getCollator());
}
} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_parser_leaf_test.cpp b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
index b14ba97988d..3c1ac33dc3e 100644
--- a/src/mongo/db/matcher/expression_parser_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
@@ -61,14 +61,15 @@ TEST(MatchExpressionParserLeafTest, NullCollation) {
TEST(MatchExpressionParserLeafTest, Collation) {
BSONObj query = BSON("x"
<< "string");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::EQ, result.getValue()->matchType());
EqualityMatchExpression* match = static_cast<EqualityMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleEQ2) {
@@ -104,14 +105,15 @@ TEST(MatchExpressionParserLeafTest, EQNullCollation) {
TEST(MatchExpressionParserLeafTest, EQCollation) {
BSONObj query = BSON("x" << BSON("$eq"
<< "string"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::EQ, result.getValue()->matchType());
EqualityMatchExpression* match = static_cast<EqualityMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleGT1) {
@@ -139,14 +141,15 @@ TEST(MatchExpressionParserLeafTest, GTNullCollation) {
TEST(MatchExpressionParserLeafTest, GTCollation) {
BSONObj query = BSON("x" << BSON("$gt"
<< "abc"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::GT, result.getValue()->matchType());
GTMatchExpression* match = static_cast<GTMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleLT1) {
@@ -175,14 +178,15 @@ TEST(MatchExpressionParserLeafTest, LTNullCollation) {
TEST(MatchExpressionParserLeafTest, LTCollation) {
BSONObj query = BSON("x" << BSON("$lt"
<< "abc"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::LT, result.getValue()->matchType());
LTMatchExpression* match = static_cast<LTMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleGTE1) {
@@ -211,14 +215,15 @@ TEST(MatchExpressionParserLeafTest, GTENullCollation) {
TEST(MatchExpressionParserLeafTest, GTECollation) {
BSONObj query = BSON("x" << BSON("$gte"
<< "abc"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::GTE, result.getValue()->matchType());
GTEMatchExpression* match = static_cast<GTEMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleLTE1) {
@@ -247,14 +252,15 @@ TEST(MatchExpressionParserLeafTest, LTENullCollation) {
TEST(MatchExpressionParserLeafTest, LTECollation) {
BSONObj query = BSON("x" << BSON("$lte"
<< "abc"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::LTE, result.getValue()->matchType());
LTEMatchExpression* match = static_cast<LTEMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleNE1) {
@@ -285,16 +291,17 @@ TEST(MatchExpressionParserLeafTest, NENullCollation) {
TEST(MatchExpressionParserLeafTest, NECollation) {
BSONObj query = BSON("x" << BSON("$ne"
<< "string"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::NOT, result.getValue()->matchType());
MatchExpression* child = result.getValue()->getChild(0);
ASSERT_EQUALS(MatchExpression::EQ, child->matchType());
EqualityMatchExpression* eqMatch = static_cast<EqualityMatchExpression*>(child);
- ASSERT_TRUE(eqMatch->getCollator() == &collator);
+ ASSERT_TRUE(eqMatch->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleModBad1) {
@@ -357,22 +364,24 @@ TEST(MatchExpressionParserLeafTest, IdCollation) {
TEST(MatchExpressionParserLeafTest, IdNullCollation) {
BSONObj query = BSON("$id"
<< "string");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::EQ, result.getValue()->matchType());
EqualityMatchExpression* match = static_cast<EqualityMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, RefCollation) {
BSONObj query = BSON("$ref"
<< "coll");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::EQ, result.getValue()->matchType());
@@ -383,9 +392,10 @@ TEST(MatchExpressionParserLeafTest, RefCollation) {
TEST(MatchExpressionParserLeafTest, DbCollation) {
BSONObj query = BSON("$db"
<< "db");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::EQ, result.getValue()->matchType());
@@ -416,14 +426,15 @@ TEST(MatchExpressionParserLeafTest, INNullCollation) {
TEST(MatchExpressionParserLeafTest, INCollation) {
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY("string")));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::MATCH_IN, result.getValue()->matchType());
InMatchExpression* match = static_cast<InMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
@@ -760,16 +771,17 @@ TEST(MatchExpressionParserLeafTest, NINNullCollation) {
TEST(MatchExpressionParserLeafTest, NINCollation) {
BSONObj query = BSON("x" << BSON("$nin" << BSON_ARRAY("string")));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::NOT, result.getValue()->matchType());
MatchExpression* child = result.getValue()->getChild(0);
ASSERT_EQUALS(MatchExpression::MATCH_IN, child->matchType());
InMatchExpression* inMatch = static_cast<InMatchExpression*>(child);
- ASSERT_TRUE(inMatch->getCollator() == &collator);
+ ASSERT_TRUE(inMatch->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, Regex1) {
diff --git a/src/mongo/db/matcher/expression_with_placeholder_test.cpp b/src/mongo/db/matcher/expression_with_placeholder_test.cpp
index a56264ad1df..fcf6d86e5d7 100644
--- a/src/mongo/db/matcher/expression_with_placeholder_test.cpp
+++ b/src/mongo/db/matcher/expression_with_placeholder_test.cpp
@@ -88,9 +88,10 @@ TEST(ExpressionWithPlaceholderTest, ParseElemMatch) {
}
TEST(ExpressionWithPlaceholderTest, ParseCollation) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
auto rawFilter = fromjson("{i: 'abc'}");
auto parsedFilter = assertGet(MatchExpressionParser::parse(rawFilter, expCtx));
auto filter = assertGet(ExpressionWithPlaceholder::make(std::move(parsedFilter)));
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
index 6c8dee32e26..f161bbd5eb2 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
@@ -176,9 +176,10 @@ TEST(InternalSchemaObjectMatchExpression, EquivalentReturnsCorrectResults) {
}
TEST(InternalSchemaObjectMatchExpression, SubExpressionRespectsCollator) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kToLowerString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
auto query = fromjson(
"{a: {$_internalSchemaObjectMatch: {"
" b: {$eq: 'FOO'}"
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index 85b06c4e7db..21a7a04ed19 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -56,11 +56,8 @@ long long deleteObjects(OperationContext* opCtx,
ParsedDelete parsedDelete(opCtx, &request);
uassertStatusOK(parsedDelete.parseRequest());
- auto exec = uassertStatusOK(getExecutorDelete(opCtx,
- &CurOp::get(opCtx)->debug(),
- collection,
- &parsedDelete,
- boost::none /* verbosity */));
+ auto exec = uassertStatusOK(getExecutorDelete(
+ &CurOp::get(opCtx)->debug(), collection, &parsedDelete, boost::none /* verbosity */));
uassertStatusOK(exec->executePlan());
diff --git a/src/mongo/db/ops/parsed_delete.cpp b/src/mongo/db/ops/parsed_delete.cpp
index cc7039a10b9..be01c8d2863 100644
--- a/src/mongo/db/ops/parsed_delete.cpp
+++ b/src/mongo/db/ops/parsed_delete.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/ops/delete_request.h"
#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/util/assert_util.h"
@@ -59,6 +60,21 @@ Status ParsedDelete::parseRequest() {
// DeleteStage would not return the deleted document.
invariant(_request->getProj().isEmpty() || _request->shouldReturnDeleted());
+ std::unique_ptr<CollatorInterface> collator(nullptr);
+ if (!_request->getCollation().isEmpty()) {
+ auto statusWithCollator = CollatorFactoryInterface::get(_opCtx->getServiceContext())
+ ->makeFromBSON(_request->getCollation());
+
+ if (!statusWithCollator.isOK()) {
+ return statusWithCollator.getStatus();
+ }
+ collator = uassertStatusOK(std::move(statusWithCollator));
+ }
+ _expCtx = make_intrusive<ExpressionContext>(_opCtx,
+ std::move(collator),
+ _request->getNamespaceString(),
+ _request->getRuntimeConstants());
+
if (CanonicalQuery::isSimpleIdQuery(_request->getQuery())) {
return Status::OK();
}
@@ -94,11 +110,10 @@ Status ParsedDelete::parseQueryToCQ() {
qr->setRuntimeConstants(*runtimeConstants);
}
- const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
CanonicalQuery::canonicalize(_opCtx,
std::move(qr),
- std::move(expCtx),
+ _expCtx,
extensionsCallback,
MatchExpressionParser::kAllowAllSpecialFeatures);
diff --git a/src/mongo/db/ops/parsed_delete.h b/src/mongo/db/ops/parsed_delete.h
index 73f4bef19e4..09033065604 100644
--- a/src/mongo/db/ops/parsed_delete.h
+++ b/src/mongo/db/ops/parsed_delete.h
@@ -100,6 +100,21 @@ public:
*/
std::unique_ptr<CanonicalQuery> releaseParsedQuery();
+ /**
+ * This may return nullptr, specifically in cases where the query is IDHACK eligible.
+ */
+ const CanonicalQuery* parsedQuery() const {
+ return _canonicalQuery.get();
+ }
+
+ /**
+ * Always guaranteed to return a valid expression context.
+ */
+ boost::intrusive_ptr<ExpressionContext> expCtx() {
+ invariant(_expCtx.get());
+ return _expCtx;
+ }
+
private:
// Transactional context. Not owned by us.
OperationContext* _opCtx;
@@ -109,6 +124,8 @@ private:
// Parsed query object, or NULL if the query proves to be an id hack query.
std::unique_ptr<CanonicalQuery> _canonicalQuery;
+
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
};
} // namespace mongo
diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp
index 6ea2c5ddfa6..b495d48df9c 100644
--- a/src/mongo/db/ops/parsed_update.cpp
+++ b/src/mongo/db/ops/parsed_update.cpp
@@ -45,8 +45,9 @@ ParsedUpdate::ParsedUpdate(OperationContext* opCtx,
const ExtensionsCallback& extensionsCallback)
: _opCtx(opCtx),
_request(request),
- _driver(new ExpressionContext(
+ _expCtx(make_intrusive<ExpressionContext>(
opCtx, nullptr, _request->getNamespaceString(), _request->getRuntimeConstants())),
+ _driver(_expCtx),
_canonicalQuery(),
_extensionsCallback(extensionsCallback) {}
@@ -81,11 +82,11 @@ Status ParsedUpdate::parseRequest() {
if (!collator.isOK()) {
return collator.getStatus();
}
- _collator = std::move(collator.getValue());
+ _expCtx->setCollator(std::move(collator.getValue()));
}
- auto statusWithArrayFilters = parseArrayFilters(
- _request->getArrayFilters(), _opCtx, _collator.get(), _request->getNamespaceString());
+ auto statusWithArrayFilters =
+ parseArrayFilters(_expCtx, _request->getArrayFilters(), _request->getNamespaceString());
if (!statusWithArrayFilters.isOK()) {
return statusWithArrayFilters.getStatus();
}
@@ -147,9 +148,8 @@ Status ParsedUpdate::parseQueryToCQ() {
qr->setRuntimeConstants(*runtimeConstants);
}
- boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ = CanonicalQuery::canonicalize(
- _opCtx, std::move(qr), std::move(expCtx), _extensionsCallback, allowedMatcherFeatures);
+ _opCtx, std::move(qr), _expCtx, _extensionsCallback, allowedMatcherFeatures);
if (statusWithCQ.isOK()) {
_canonicalQuery = std::move(statusWithCQ.getValue());
}
@@ -165,7 +165,7 @@ Status ParsedUpdate::parseQueryToCQ() {
}
void ParsedUpdate::parseUpdate() {
- _driver.setCollator(_collator.get());
+ _driver.setCollator(_expCtx->getCollator());
_driver.setLogOp(true);
_driver.setFromOplogApplication(_request->isFromOplogApplication());
@@ -176,13 +176,11 @@ void ParsedUpdate::parseUpdate() {
}
StatusWith<std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>>>
-ParsedUpdate::parseArrayFilters(const std::vector<BSONObj>& rawArrayFiltersIn,
- OperationContext* opCtx,
- CollatorInterface* collator,
+ParsedUpdate::parseArrayFilters(const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ const std::vector<BSONObj>& rawArrayFiltersIn,
const NamespaceString& nss) {
std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> arrayFiltersOut;
for (auto rawArrayFilter : rawArrayFiltersIn) {
- boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, collator, nss));
auto parsedArrayFilter =
MatchExpressionParser::parse(rawArrayFilter,
std::move(expCtx),
@@ -240,12 +238,18 @@ UpdateDriver* ParsedUpdate::getDriver() {
}
void ParsedUpdate::setCollator(std::unique_ptr<CollatorInterface> collator) {
- _collator = std::move(collator);
+ auto* rawCollator = collator.get();
- _driver.setCollator(_collator.get());
+ if (_canonicalQuery) {
+ _canonicalQuery->setCollator(std::move(collator));
+ } else {
+ _expCtx->setCollator(std::move(collator));
+ }
+
+ _driver.setCollator(rawCollator);
for (auto&& arrayFilter : _arrayFilters) {
- arrayFilter.second->getFilter()->setCollator(_collator.get());
+ arrayFilter.second->getFilter()->setCollator(rawCollator);
}
}
diff --git a/src/mongo/db/ops/parsed_update.h b/src/mongo/db/ops/parsed_update.h
index 06176336b17..e9be9312389 100644
--- a/src/mongo/db/ops/parsed_update.h
+++ b/src/mongo/db/ops/parsed_update.h
@@ -64,9 +64,8 @@ public:
* Parses the array filters portion of the update request.
*/
static StatusWith<std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>>>
- parseArrayFilters(const std::vector<BSONObj>& rawArrayFiltersIn,
- OperationContext* opCtx,
- CollatorInterface* collator,
+ parseArrayFilters(const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ const std::vector<BSONObj>& rawArrayFiltersIn,
const NamespaceString& nss);
/**
@@ -128,13 +127,6 @@ public:
std::unique_ptr<CanonicalQuery> releaseParsedQuery();
/**
- * Get the collator of the parsed update.
- */
- const CollatorInterface* getCollator() const {
- return _collator.get();
- }
-
- /**
* Sets this ParsedUpdate's collator.
*
* This setter can be used to override the collator that was created from the update request
@@ -142,6 +134,13 @@ public:
*/
void setCollator(std::unique_ptr<CollatorInterface> collator);
+ /**
+ * Never returns nullptr.
+ */
+ boost::intrusive_ptr<ExpressionContext> expCtx() const {
+ return _expCtx;
+ }
+
private:
/**
* Parses the query portion of the update request.
@@ -159,12 +158,11 @@ private:
// Unowned pointer to the request object to process.
const UpdateRequest* const _request;
- // The collator for the parsed update. Owned here.
- std::unique_ptr<CollatorInterface> _collator;
-
// The array filters for the parsed update. Owned here.
std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> _arrayFilters;
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
+
// Driver for processing updates on matched documents.
UpdateDriver _driver;
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 00b57ba533c..81683541275 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -97,8 +97,8 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
uassertStatusOK(parsedUpdate.parseRequest());
OpDebug* const nullOpDebug = nullptr;
- auto exec = uassertStatusOK(getExecutorUpdate(
- opCtx, nullOpDebug, collection, &parsedUpdate, boost::none /* verbosity */));
+ auto exec = uassertStatusOK(
+ getExecutorUpdate(nullOpDebug, collection, &parsedUpdate, boost::none /* verbosity */));
uassertStatusOK(exec->executePlan());
@@ -111,8 +111,8 @@ BSONObj applyUpdateOperators(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& from,
const BSONObj& operators) {
- const CollatorInterface* collator = nullptr;
- boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, collator, nss));
+ auto expCtx =
+ make_intrusive<ExpressionContext>(opCtx, std::unique_ptr<CollatorInterface>(nullptr), nss);
UpdateDriver driver(std::move(expCtx));
std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> arrayFilters;
driver.parse(operators, arrayFilters);
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 8560dff85af..149525ce366 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -669,11 +669,8 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
assertCanWrite_inlock(opCtx, ns);
- auto exec = uassertStatusOK(getExecutorUpdate(opCtx,
- &curOp.debug(),
- collection->getCollection(),
- &parsedUpdate,
- boost::none /* verbosity */));
+ auto exec = uassertStatusOK(getExecutorUpdate(
+ &curOp.debug(), collection->getCollection(), &parsedUpdate, boost::none /* verbosity */));
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -912,11 +909,8 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&hangWithLockDuringBatchRemove, opCtx, "hangWithLockDuringBatchRemove");
- auto exec = uassertStatusOK(getExecutorDelete(opCtx,
- &curOp.debug(),
- collection.getCollection(),
- &parsedDelete,
- boost::none /* verbosity */));
+ auto exec = uassertStatusOK(getExecutorDelete(
+ &curOp.debug(), collection.getCollection(), &parsedDelete, boost::none /* verbosity */));
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
diff --git a/src/mongo/db/pipeline/accumulator_test.cpp b/src/mongo/db/pipeline/accumulator_test.cpp
index 724e6a6838a..888887a2898 100644
--- a/src/mongo/db/pipeline/accumulator_test.cpp
+++ b/src/mongo/db/pipeline/accumulator_test.cpp
@@ -210,8 +210,9 @@ TEST(Accumulators, Min) {
TEST(Accumulators, MinRespectsCollation) {
intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- expCtx->setCollator(&collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
+ expCtx->setCollator(std::move(collator));
assertExpectedResults<AccumulatorMin>(expCtx,
{{{Value("abc"_sd), Value("cba"_sd)}, Value("cba"_sd)}});
}
@@ -236,8 +237,9 @@ TEST(Accumulators, Max) {
TEST(Accumulators, MaxRespectsCollation) {
intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- expCtx->setCollator(&collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
+ expCtx->setCollator(std::move(collator));
assertExpectedResults<AccumulatorMax>(expCtx,
{{{Value("abc"_sd), Value("cba"_sd)}, Value("abc"_sd)}});
}
@@ -333,8 +335,9 @@ TEST(Accumulators, Sum) {
TEST(Accumulators, AddToSetRespectsCollation) {
intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
- expCtx->setCollator(&collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ expCtx->setCollator(std::move(collator));
assertExpectedResults<AccumulatorAddToSet>(expCtx,
{{{Value("a"_sd), Value("b"_sd), Value("c"_sd)},
Value(std::vector<Value>{Value("a"_sd)})}});
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index 6710a1c78ce..3e04bb594bf 100644
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -255,7 +255,7 @@ public:
GetNextResult getNext() {
pExpCtx->checkForInterrupt();
- if (MONGO_likely(!pExpCtx->explain)) {
+ if (MONGO_likely(!pExpCtx->shouldCollectExecStats())) {
return doGetNext();
}
diff --git a/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp b/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp
index eace0a24f3d..1ef3ac840f5 100644
--- a/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp
+++ b/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp
@@ -261,7 +261,7 @@ TEST_F(CheckResumeTokenTest, ShouldFailIfTokenHasWrongNamespace) {
TEST_F(CheckResumeTokenTest, ShouldSucceedWithBinaryCollation) {
CollatorInterfaceMock collatorCompareLower(CollatorInterfaceMock::MockType::kToLowerString);
- getExpCtx()->setCollator(&collatorCompareLower);
+ getExpCtx()->setCollator(collatorCompareLower.clone());
Timestamp resumeTimestamp(100, 1);
diff --git a/src/mongo/db/pipeline/expression_context.cpp b/src/mongo/db/pipeline/expression_context.cpp
index f135b105c35..4e203f16d38 100644
--- a/src/mongo/db/pipeline/expression_context.cpp
+++ b/src/mongo/db/pipeline/expression_context.cpp
@@ -103,10 +103,9 @@ ExpressionContext::ExpressionContext(
? TimeZoneDatabase::get(opCtx->getServiceContext())
: nullptr),
variablesParseState(variables.useIdGenerator()),
- _ownedCollator(std::move(collator)),
- _unownedCollator(_ownedCollator.get()),
- _documentComparator(_unownedCollator),
- _valueComparator(_unownedCollator),
+ _collator(std::move(collator)),
+ _documentComparator(_collator.get()),
+ _valueComparator(_collator.get()),
_resolvedNamespaces(std::move(resolvedNamespaces)) {
if (runtimeConstants) {
@@ -127,7 +126,7 @@ ExpressionContext::ExpressionContext(
}
ExpressionContext::ExpressionContext(OperationContext* opCtx,
- const CollatorInterface* collator,
+ std::unique_ptr<CollatorInterface> collator,
const NamespaceString& nss,
const boost::optional<RuntimeConstants>& runtimeConstants)
: ns(nss),
@@ -137,9 +136,9 @@ ExpressionContext::ExpressionContext(OperationContext* opCtx,
? TimeZoneDatabase::get(opCtx->getServiceContext())
: nullptr),
variablesParseState(variables.useIdGenerator()),
- _unownedCollator(collator),
- _documentComparator(_unownedCollator),
- _valueComparator(_unownedCollator) {
+ _collator(std::move(collator)),
+ _documentComparator(_collator.get()),
+ _valueComparator(_collator.get()) {
if (runtimeConstants) {
variables.setRuntimeConstants(*runtimeConstants);
}
@@ -159,24 +158,12 @@ void ExpressionContext::checkForInterrupt() {
ExpressionContext::CollatorStash::CollatorStash(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
std::unique_ptr<CollatorInterface> newCollator)
- : _expCtx(expCtx),
- _originalCollatorOwned(std::move(_expCtx->_ownedCollator)),
- _originalCollatorUnowned(_expCtx->_unownedCollator) {
+ : _expCtx(expCtx), _originalCollator(std::move(_expCtx->_collator)) {
_expCtx->setCollator(std::move(newCollator));
}
ExpressionContext::CollatorStash::~CollatorStash() {
- if (_originalCollatorOwned) {
- _expCtx->setCollator(std::move(_originalCollatorOwned));
- } else {
- _expCtx->setCollator(_originalCollatorUnowned);
- if (!_originalCollatorUnowned && _expCtx->_ownedCollator) {
- // If the original collation was 'nullptr', we cannot distinguish whether it was owned
- // or not. We always set '_ownedCollator' with the stash, so should reset it to null
- // here.
- _expCtx->_ownedCollator = nullptr;
- }
- }
+ _expCtx->setCollator(std::move(_originalCollator));
}
std::unique_ptr<ExpressionContext::CollatorStash> ExpressionContext::temporarilyChangeCollator(
@@ -185,14 +172,6 @@ std::unique_ptr<ExpressionContext::CollatorStash> ExpressionContext::temporarily
return std::unique_ptr<CollatorStash>(new CollatorStash(this, std::move(newCollator)));
}
-void ExpressionContext::setCollator(const CollatorInterface* collator) {
- _unownedCollator = collator;
-
- // Document/Value comparisons must be aware of the collation.
- _documentComparator = DocumentComparator(_unownedCollator);
- _valueComparator = ValueComparator(_unownedCollator);
-}
-
intrusive_ptr<ExpressionContext> ExpressionContext::copyWith(
NamespaceString ns,
boost::optional<UUID> uuid,
@@ -200,7 +179,7 @@ intrusive_ptr<ExpressionContext> ExpressionContext::copyWith(
auto collator = updatedCollator
? std::move(*updatedCollator)
- : (_ownedCollator ? _ownedCollator->clone() : std::unique_ptr<CollatorInterface>{});
+ : (_collator ? _collator->clone() : std::unique_ptr<CollatorInterface>{});
auto expCtx = make_intrusive<ExpressionContext>(opCtx,
explain,
@@ -223,16 +202,6 @@ intrusive_ptr<ExpressionContext> ExpressionContext::copyWith(
expCtx->useNewUpsert = useNewUpsert;
expCtx->jsHeapLimitMB = jsHeapLimitMB;
- // ExpressionContext is used both universally in Agg and in Find within a $expr. In the case
- // that this context is for use in $expr, the collator will be unowned and we will pass nullptr
- // in the constructor call above. If this is the case we must manually update the unowned
- // collator argument in the new ExpressionContext to match the old one. SERVER-31294 tracks an
- // effort to divorce the ExpressionContext from general Agg resources by creating an
- // AggregationContext. If that effort comes to fruition, this special-case collator handling
- // will be made unnecessary.
- if (!updatedCollator && !collator && _unownedCollator)
- expCtx->setCollator(_unownedCollator);
-
expCtx->variables = variables;
expCtx->variablesParseState = variablesParseState.copyWith(expCtx->variables.useIdGenerator());
diff --git a/src/mongo/db/pipeline/expression_context.h b/src/mongo/db/pipeline/expression_context.h
index 26a1da21a70..0fba82ac610 100644
--- a/src/mongo/db/pipeline/expression_context.h
+++ b/src/mongo/db/pipeline/expression_context.h
@@ -95,8 +95,7 @@ public:
boost::intrusive_ptr<ExpressionContext> _expCtx;
- std::unique_ptr<CollatorInterface> _originalCollatorOwned;
- const CollatorInterface* _originalCollatorUnowned{nullptr};
+ std::unique_ptr<CollatorInterface> _originalCollator;
};
/**
@@ -136,7 +135,7 @@ public:
* If 'collator' is null, the simple collator will be used.
*/
ExpressionContext(OperationContext* opCtx,
- const CollatorInterface* collator,
+ std::unique_ptr<CollatorInterface> collator,
const NamespaceString& ns,
const boost::optional<RuntimeConstants>& runtimeConstants = boost::none);
@@ -168,7 +167,11 @@ public:
}
const CollatorInterface* getCollator() const {
- return _unownedCollator;
+ return _collator.get();
+ }
+
+ bool shouldCollectExecStats() const {
+ return static_cast<bool>(explain);
}
/**
@@ -181,10 +184,22 @@ public:
* the ExpressionContext.
*/
BSONObj getCollatorBSON() const {
- return _unownedCollator ? _unownedCollator->getSpec().toBSON() : CollationSpec::kSimpleSpec;
+ return _collator ? _collator->getSpec().toBSON() : CollationSpec::kSimpleSpec;
}
- void setCollator(const CollatorInterface* collator);
+ /**
+ * Sets '_collator' and resets 'documentComparator' and 'valueComparator'.
+ *
+ * Use with caution - '_collator' is used in the context of a Pipeline, and it is illegal
+ * to change the collation once a Pipeline has been parsed with this ExpressionContext.
+ */
+ void setCollator(std::unique_ptr<CollatorInterface> collator) {
+ _collator = std::move(collator);
+
+ // Document/Value comparisons must be aware of the collation.
+ _documentComparator = DocumentComparator(_collator.get());
+ _valueComparator = ValueComparator(_collator.get());
+ }
const DocumentComparator& getDocumentComparator() const {
return _documentComparator;
@@ -343,27 +358,10 @@ public:
protected:
static const int kInterruptCheckPeriod = 128;
- /**
- * Sets '_ownedCollator' and resets '_unownedCollator', 'documentComparator' and
- * 'valueComparator'.
- *
- * Use with caution - '_ownedCollator' is used in the context of a Pipeline, and it is illegal
- * to change the collation once a Pipeline has been parsed with this ExpressionContext.
- */
- void setCollator(std::unique_ptr<CollatorInterface> collator) {
- _ownedCollator = std::move(collator);
- setCollator(_ownedCollator.get());
- }
-
friend class CollatorStash;
- // Collator used for comparisons. This is owned in the context of a Pipeline.
- // TODO SERVER-31294: Move ownership of an aggregation's collator elsewhere.
- std::unique_ptr<CollatorInterface> _ownedCollator;
-
- // Collator used for comparisons. If '_ownedCollator' is non-null, then this must point to the
- // same collator object.
- const CollatorInterface* _unownedCollator = nullptr;
+ // Collator used for comparisons.
+ std::unique_ptr<CollatorInterface> _collator;
// Used for all comparisons of Document/Value during execution of the aggregation operation.
// Must not be changed after parsing a Pipeline with this ExpressionContext.
diff --git a/src/mongo/db/pipeline/expression_trim_test.cpp b/src/mongo/db/pipeline/expression_trim_test.cpp
index b21acb67ff4..a54ee025014 100644
--- a/src/mongo/db/pipeline/expression_trim_test.cpp
+++ b/src/mongo/db/pipeline/expression_trim_test.cpp
@@ -449,7 +449,7 @@ TEST(ExpressionTrimTest, TrimComparisonsShouldNotRespectCollation) {
intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
auto caseInsensitive =
std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
- expCtx->setCollator(caseInsensitive.get());
+ expCtx->setCollator(std::move(caseInsensitive));
auto trim = Expression::parseExpression(expCtx,
BSON("$trim" << BSON("input"
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index a93d355f549..0ce4c83f8eb 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -104,7 +104,12 @@ namespace {
* percentage of the collection.
*/
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorExecutor(
- Collection* coll, OperationContext* opCtx, long long sampleSize, long long numRecords) {
+ Collection* coll,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ long long sampleSize,
+ long long numRecords) {
+ OperationContext* opCtx = expCtx->opCtx;
+
// Verify that we are already under a collection lock. We avoid taking locks ourselves in this
// function because double-locking forces any PlanExecutor we create to adopt a NO_YIELD policy.
invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_IS));
@@ -123,7 +128,8 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
// Build a MultiIteratorStage and pass it the random-sampling RecordCursor.
auto ws = std::make_unique<WorkingSet>();
- std::unique_ptr<PlanStage> root = std::make_unique<MultiIteratorStage>(opCtx, ws.get(), coll);
+ std::unique_ptr<PlanStage> root =
+ std::make_unique<MultiIteratorStage>(expCtx.get(), ws.get(), coll);
static_cast<MultiIteratorStage*>(root.get())->addIterator(std::move(rsRandCursor));
// If the incoming operation is sharded, use the CSS to infer the filtering metadata for the
@@ -145,15 +151,15 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
const auto minWorkAdvancedRatio = std::max(
sampleSize / (numRecords * kMaxSampleRatioForRandCursor), kMaxSampleRatioForRandCursor);
// The trial plan is SHARDING_FILTER-MULTI_ITERATOR.
- auto randomCursorPlan =
- std::make_unique<ShardFilterStage>(opCtx, collectionFilter, ws.get(), std::move(root));
+ auto randomCursorPlan = std::make_unique<ShardFilterStage>(
+ expCtx.get(), collectionFilter, ws.get(), std::move(root));
// The backup plan is SHARDING_FILTER-COLLSCAN.
std::unique_ptr<PlanStage> collScanPlan = std::make_unique<CollectionScan>(
- opCtx, coll, CollectionScanParams{}, ws.get(), nullptr);
+ expCtx.get(), coll, CollectionScanParams{}, ws.get(), nullptr);
collScanPlan = std::make_unique<ShardFilterStage>(
- opCtx, collectionFilter, ws.get(), std::move(collScanPlan));
+ expCtx.get(), collectionFilter, ws.get(), std::move(collScanPlan));
// Place a TRIAL stage at the root of the plan tree, and pass it the trial and backup plans.
- root = std::make_unique<TrialStage>(opCtx,
+ root = std::make_unique<TrialStage>(expCtx.get(),
ws.get(),
std::move(randomCursorPlan),
std::move(collScanPlan),
@@ -229,11 +235,8 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> attemptToGetExe
// 2) We not want a plan that will return separate values for each array element. For
// example, if we have a document {a: [1,2]} and group by "a" a DISTINCT_SCAN on an "a"
// index would produce one result for '1' and another for '2', which would be incorrect.
- auto distinctExecutor =
- getExecutorDistinct(expCtx->opCtx,
- collection,
- plannerOpts | QueryPlannerParams::STRICT_DISTINCT_ONLY,
- &parsedDistinct);
+ auto distinctExecutor = getExecutorDistinct(
+ collection, plannerOpts | QueryPlannerParams::STRICT_DISTINCT_ONLY, &parsedDistinct);
if (!distinctExecutor.isOK()) {
return distinctExecutor.getStatus().withContext(
"Unable to use distinct scan to optimize $group stage");
@@ -319,7 +322,7 @@ PipelineD::buildInnerQueryExecutor(Collection* collection,
const long long sampleSize = sampleStage->getSampleSize();
const long long numRecords = collection->getRecordStore()->numRecords(expCtx->opCtx);
auto exec = uassertStatusOK(
- createRandomCursorExecutor(collection, expCtx->opCtx, sampleSize, numRecords));
+ createRandomCursorExecutor(collection, expCtx, sampleSize, numRecords));
if (exec) {
// For sharded collections, the root of the plan tree is a TrialStage that may have
// chosen either a random-sampling cursor trial plan or a COLLSCAN backup plan. We
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 5d977f17737..fdb8cc496ec 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -152,8 +152,8 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
// Make MatchExpression.
boost::intrusive_ptr<ExpressionContext> newExpCtx;
if (!expCtx.get()) {
- newExpCtx.reset(
- new ExpressionContext(opCtx, collator.get(), qr->nss(), qr->getRuntimeConstants()));
+ newExpCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::move(collator), qr->nss(), qr->getRuntimeConstants());
} else {
newExpCtx = expCtx;
invariant(CollatorInterface::collatorsMatch(collator.get(), expCtx->getCollator()));
@@ -175,7 +175,6 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
std::move(qr),
parsingCanProduceNoopMatchNodes(extensionsCallback, allowedFeatures),
std::move(me),
- std::move(collator),
projectionPolicies);
if (!initStatus.isOK()) {
@@ -200,11 +199,6 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
return qrStatus;
}
- std::unique_ptr<CollatorInterface> collator;
- if (baseQuery.getCollator()) {
- collator = baseQuery.getCollator()->clone();
- }
-
// Make the CQ we'll hopefully return.
std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
Status initStatus = cq->init(opCtx,
@@ -212,7 +206,6 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
std::move(qr),
baseQuery.canHaveNoopMatchNodes(),
root->shallowClone(),
- std::move(collator),
ProjectionPolicies::findProjectionPolicies());
if (!initStatus.isOK()) {
@@ -226,11 +219,9 @@ Status CanonicalQuery::init(OperationContext* opCtx,
std::unique_ptr<QueryRequest> qr,
bool canHaveNoopMatchNodes,
std::unique_ptr<MatchExpression> root,
- std::unique_ptr<CollatorInterface> collator,
const ProjectionPolicies& projectionPolicies) {
_expCtx = expCtx;
_qr = std::move(qr);
- _collator = std::move(collator);
_canHaveNoopMatchNodes = canHaveNoopMatchNodes;
@@ -305,15 +296,13 @@ void CanonicalQuery::initSortPattern(QueryMetadataBitSet unavailableMetadata) {
}
void CanonicalQuery::setCollator(std::unique_ptr<CollatorInterface> collator) {
- _collator = std::move(collator);
+ auto collatorRaw = collator.get();
+ // We must give the ExpressionContext the same collator.
+ _expCtx->setCollator(std::move(collator));
// The collator associated with the match expression tree is now invalid, since we have reset
- // the object owned by '_collator'. We must associate the match expression tree with the new
- // value of '_collator'.
- _root->setCollator(_collator.get());
-
- // In a similar vein, we must give the ExpressionContext the same collator.
- _expCtx->setCollator(_collator.get());
+ // the collator owned by the ExpressionContext.
+ _root->setCollator(collatorRaw);
}
// static
diff --git a/src/mongo/db/query/canonical_query.h b/src/mongo/db/query/canonical_query.h
index 2598ac3157c..e1261805e20 100644
--- a/src/mongo/db/query/canonical_query.h
+++ b/src/mongo/db/query/canonical_query.h
@@ -154,7 +154,7 @@ public:
}
const CollatorInterface* getCollator() const {
- return _collator.get();
+ return _expCtx->getCollator();
}
/**
@@ -227,7 +227,6 @@ private:
std::unique_ptr<QueryRequest> qr,
bool canHaveNoopMatchNodes,
std::unique_ptr<MatchExpression> root,
- std::unique_ptr<CollatorInterface> collator,
const ProjectionPolicies& projectionPolicies);
// Initializes '_sortPattern', adding any metadata dependencies implied by the sort.
@@ -250,8 +249,6 @@ private:
// Keeps track of what metadata has been explicitly requested.
QueryMetadataBitSet _metadataDeps;
- std::unique_ptr<CollatorInterface> _collator;
-
bool _canHaveNoopMatchNodes = false;
};
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index cba75f67e00..6388f5986b0 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -97,6 +97,19 @@ using std::string;
using std::unique_ptr;
using std::vector;
+boost::intrusive_ptr<ExpressionContext> makeExpressionContextForGetExecutor(
+ OperationContext* opCtx, const BSONObj& requestCollation, const NamespaceString& nss) {
+ invariant(opCtx);
+
+ auto expCtx = make_intrusive<ExpressionContext>(opCtx, nullptr, nss);
+ if (!requestCollation.isEmpty()) {
+ auto statusWithCollator = CollatorFactoryInterface::get(expCtx->opCtx->getServiceContext())
+ ->makeFromBSON(requestCollation);
+ expCtx->setCollator(uassertStatusOK(std::move(statusWithCollator)));
+ }
+ return expCtx;
+}
+
// static
void filterAllowedIndexEntries(const AllowedIndicesFilter& allowedIndicesFilter,
std::vector<IndexEntry>* indexEntries) {
@@ -368,7 +381,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
"Collection {ns} does not exist. Using EOF plan: {canonicalQuery_Short}",
"ns"_attr = ns,
"canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()));
- root = std::make_unique<EOFStage>(opCtx);
+ root = std::make_unique<EOFStage>(canonicalQuery->getExpCtx().get());
return PrepareExecutionResult(std::move(canonicalQuery), nullptr, std::move(root));
}
@@ -377,10 +390,10 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
plannerParams.options = plannerOptions;
fillOutPlannerParams(opCtx, collection, canonicalQuery.get(), &plannerParams);
- // If the canonical query does not have a user-specified collation, set it from the collection
- // default.
+ // If the canonical query does not have a user-specified collation and no one has given the
+ // CanonicalQuery a collation already, set it from the collection default.
if (canonicalQuery->getQueryRequest().getCollation().isEmpty() &&
- collection->getDefaultCollator()) {
+ canonicalQuery->getCollator() == nullptr && collection->getDefaultCollator()) {
canonicalQuery->setCollator(collection->getDefaultCollator()->clone());
}
@@ -393,12 +406,13 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
"Using idhack: {canonicalQuery_Short}",
"canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()));
- root = std::make_unique<IDHackStage>(opCtx, canonicalQuery.get(), ws, descriptor);
+ root = std::make_unique<IDHackStage>(
+ canonicalQuery->getExpCtx().get(), canonicalQuery.get(), ws, descriptor);
// Might have to filter out orphaned docs.
if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
root = std::make_unique<ShardFilterStage>(
- opCtx,
+ canonicalQuery->getExpCtx().get(),
CollectionShardingState::get(opCtx, canonicalQuery->nss())
->getOwnershipFilter(opCtx),
ws,
@@ -410,7 +424,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
// Add a SortKeyGeneratorStage if the query requested sortKey metadata.
if (canonicalQuery->metadataDeps()[DocumentMetadataFields::kSortKey]) {
root = std::make_unique<SortKeyGeneratorStage>(
- canonicalQuery->getExpCtx(),
+ canonicalQuery->getExpCtx().get(),
std::move(root),
ws,
canonicalQuery->getQueryRequest().getSort());
@@ -422,7 +436,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
// the exception the $meta sortKey projection, which can be used along with the
// returnKey.
root = std::make_unique<ReturnKeyStage>(
- opCtx,
+ canonicalQuery->getExpCtx().get(),
cqProjection
? QueryPlannerCommon::extractSortKeyMetaFieldsFromProjection(*cqProjection)
: std::vector<FieldPath>{},
@@ -443,7 +457,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
std::move(root));
} else {
root = std::make_unique<ProjectionStageSimple>(
- canonicalQuery->getExpCtx(),
+ canonicalQuery->getExpCtx().get(),
canonicalQuery->getQueryRequest().getProj(),
canonicalQuery->getProj(),
ws,
@@ -498,13 +512,14 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
//
// 'decisionWorks' is used to determine whether the existing cache entry should
// be evicted, and the query replanned.
- auto cachedPlanStage = std::make_unique<CachedPlanStage>(opCtx,
- collection,
- ws,
- canonicalQuery.get(),
- plannerParams,
- cs->decisionWorks,
- std::move(root));
+ auto cachedPlanStage =
+ std::make_unique<CachedPlanStage>(canonicalQuery->getExpCtx().get(),
+ collection,
+ ws,
+ canonicalQuery.get(),
+ plannerParams,
+ cs->decisionWorks,
+ std::move(root));
return PrepareExecutionResult(std::move(canonicalQuery),
std::move(querySolution),
std::move(cachedPlanStage));
@@ -520,7 +535,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
"canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()));
root = std::make_unique<SubplanStage>(
- opCtx, collection, ws, plannerParams, canonicalQuery.get());
+ canonicalQuery->getExpCtx().get(), collection, ws, plannerParams, canonicalQuery.get());
return PrepareExecutionResult(std::move(canonicalQuery), nullptr, std::move(root));
}
@@ -572,8 +587,8 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
} else {
// Many solutions. Create a MultiPlanStage to pick the best, update the cache,
// and so on. The working set will be shared by all candidate plans.
- auto multiPlanStage =
- std::make_unique<MultiPlanStage>(opCtx, collection, canonicalQuery.get());
+ auto multiPlanStage = std::make_unique<MultiPlanStage>(
+ canonicalQuery->getExpCtx().get(), collection, canonicalQuery.get());
for (size_t ix = 0; ix < solutions.size(); ++ix) {
if (solutions[ix]->cacheData.get()) {
@@ -715,11 +730,12 @@ StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* opCtx,
//
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
- OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedDelete* parsedDelete,
boost::optional<ExplainOptions::Verbosity> verbosity) {
+ auto expCtx = parsedDelete->expCtx();
+ OperationContext* opCtx = expCtx->opCtx;
const DeleteRequest* request = parsedDelete->getRequest();
const NamespaceString& nss(request->getNamespaceString());
@@ -763,7 +779,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
"nss_ns"_attr = nss.ns(),
"request_getQuery"_attr = redact(request->getQuery()));
return PlanExecutor::make(
- opCtx, std::move(ws), std::make_unique<EOFStage>(opCtx), nullptr, policy, nss);
+ opCtx, std::move(ws), std::make_unique<EOFStage>(expCtx.get()), nullptr, policy, nss);
}
if (!parsedDelete->hasParsedQuery()) {
@@ -794,9 +810,13 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
"unparsedQuery"_attr = redact(unparsedQuery));
auto idHackStage = std::make_unique<IDHackStage>(
- opCtx, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
- unique_ptr<DeleteStage> root = std::make_unique<DeleteStage>(
- opCtx, std::move(deleteStageParams), ws.get(), collection, idHackStage.release());
+ expCtx.get(), unparsedQuery["_id"].wrap(), ws.get(), descriptor);
+ unique_ptr<DeleteStage> root =
+ std::make_unique<DeleteStage>(expCtx.get(),
+ std::move(deleteStageParams),
+ ws.get(),
+ collection,
+ idHackStage.release());
return PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, policy);
}
@@ -831,7 +851,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
invariant(root);
root = std::make_unique<DeleteStage>(
- opCtx, std::move(deleteStageParams), ws.get(), collection, root.release());
+ cq->getExpCtx().get(), std::move(deleteStageParams), ws.get(), collection, root.release());
if (!request->getProj().isEmpty()) {
invariant(request->shouldReturnDeleted());
@@ -861,11 +881,13 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
//
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
- OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedUpdate* parsedUpdate,
boost::optional<ExplainOptions::Verbosity> verbosity) {
+ auto expCtx = parsedUpdate->expCtx();
+ OperationContext* opCtx = expCtx->opCtx;
+
const UpdateRequest* request = parsedUpdate->getRequest();
UpdateDriver* driver = parsedUpdate->getDriver();
@@ -918,7 +940,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
"nss_ns"_attr = nss.ns(),
"request_getQuery"_attr = redact(request->getQuery()));
return PlanExecutor::make(
- opCtx, std::move(ws), std::make_unique<EOFStage>(opCtx), nullptr, policy, nss);
+ opCtx, std::move(ws), std::make_unique<EOFStage>(expCtx.get()), nullptr, policy, nss);
}
// Pass index information to the update driver, so that it can determine for us whether the
@@ -937,7 +959,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(opCtx);
const bool hasCollectionDefaultCollation = CollatorInterface::collatorsMatch(
- parsedUpdate->getCollator(), collection->getDefaultCollator());
+ expCtx->getCollator(), collection->getDefaultCollator());
if (descriptor && CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
request->getProj().isEmpty() && hasCollectionDefaultCollation) {
@@ -988,10 +1010,12 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
updateStageParams.canonicalQuery = cq.get();
const bool isUpsert = updateStageParams.request->isUpsert();
- root = (isUpsert ? std::make_unique<UpsertStage>(
- opCtx, updateStageParams, ws.get(), collection, root.release())
- : std::make_unique<UpdateStage>(
- opCtx, updateStageParams, ws.get(), collection, root.release()));
+ root =
+ (isUpsert
+ ? std::make_unique<UpsertStage>(
+ cq->getExpCtx().get(), updateStageParams, ws.get(), collection, root.release())
+ : std::make_unique<UpdateStage>(
+ cq->getExpCtx().get(), updateStageParams, ws.get(), collection, root.release()));
if (!request->getProj().isEmpty()) {
invariant(request->shouldReturnAnyDocs());
@@ -1157,11 +1181,12 @@ bool getDistinctNodeIndex(const std::vector<IndexEntry>& indices,
} // namespace
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
- OperationContext* opCtx,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
Collection* collection,
const CountCommand& request,
bool explain,
const NamespaceString& nss) {
+ OperationContext* opCtx = expCtx->opCtx;
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
auto qr = std::make_unique<QueryRequest>(nss);
@@ -1171,7 +1196,6 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
qr->setHint(request.getHint());
qr->setExplain(explain);
- const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ = CanonicalQuery::canonicalize(
opCtx,
std::move(qr),
@@ -1197,7 +1221,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
// machinery always assumes that the root stage for a count operation is a CountStage, so in
// this case we put a CountStage on top of an EOFStage.
unique_ptr<PlanStage> root = std::make_unique<CountStage>(
- opCtx, collection, limit, skip, ws.get(), new EOFStage(opCtx));
+ expCtx.get(), collection, limit, skip, ws.get(), new EOFStage(expCtx.get()));
return PlanExecutor::make(opCtx, std::move(ws), std::move(root), nullptr, yieldPolicy, nss);
}
@@ -1212,7 +1236,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
if (useRecordStoreCount) {
unique_ptr<PlanStage> root =
- std::make_unique<RecordStoreFastCountStage>(opCtx, collection, skip, limit);
+ std::make_unique<RecordStoreFastCountStage>(expCtx.get(), collection, skip, limit);
return PlanExecutor::make(opCtx, std::move(ws), std::move(root), nullptr, yieldPolicy, nss);
}
@@ -1233,7 +1257,8 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
invariant(root);
// Make a CountStage to be the new root.
- root = std::make_unique<CountStage>(opCtx, collection, limit, skip, ws.get(), root.release());
+ root = std::make_unique<CountStage>(
+ expCtx.get(), collection, limit, skip, ws.get(), root.release());
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be NULL. Takes ownership of all args other than 'collection' and 'opCtx'
return PlanExecutor::make(std::move(cq),
@@ -1641,10 +1666,9 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorWithoutPr
} // namespace
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDistinct(
- OperationContext* opCtx,
- Collection* collection,
- size_t plannerOptions,
- ParsedDistinct* parsedDistinct) {
+ Collection* collection, size_t plannerOptions, ParsedDistinct* parsedDistinct) {
+ auto expCtx = parsedDistinct->getQuery()->getExpCtx();
+ OperationContext* opCtx = expCtx->opCtx;
const auto yieldPolicy = opCtx->inMultiDocumentTransaction() ? PlanExecutor::INTERRUPT_ONLY
: PlanExecutor::YIELD_AUTO;
@@ -1652,7 +1676,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDistinct(
// Treat collections that do not exist as empty collections.
return PlanExecutor::make(parsedDistinct->releaseQuery(),
std::make_unique<WorkingSet>(),
- std::make_unique<EOFStage>(opCtx),
+ std::make_unique<EOFStage>(expCtx.get()),
collection,
yieldPolicy);
}
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index 99e1657c6a7..4390557ef05 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -49,6 +49,17 @@ class Collection;
class CountRequest;
/**
+ * Make an ExpressionContext to be used for non-aggregate commands. The result of this can be passed
+ * into any of the getExecutor* functions.
+ *
+ * Note that the getExecutor* functions may change the collation on the returned ExpressionContext
+ * if the collection has a default collation and no collation was specifically requested
+ * ('requestCollation' is empty).
+ */
+boost::intrusive_ptr<ExpressionContext> makeExpressionContextForGetExecutor(
+ OperationContext* opCtx, const BSONObj& requestCollation, const NamespaceString& nss);
+
+/**
* Filter indexes retrieved from index catalog by
* allowed indices in query settings.
* Used by getExecutor().
@@ -192,10 +203,7 @@ bool turnIxscanIntoDistinctIxscan(QuerySolution* soln,
* distinct.
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDistinct(
- OperationContext* opCtx,
- Collection* collection,
- size_t plannerOptions,
- ParsedDistinct* parsedDistinct);
+ Collection* collection, size_t plannerOptions, ParsedDistinct* parsedDistinct);
/*
* Get a PlanExecutor for a query executing as part of a count command.
@@ -205,7 +213,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDist
* executing a count.
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
- OperationContext* opCtx,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
Collection* collection,
const CountCommand& request,
bool explain,
@@ -231,7 +239,6 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCoun
* If the query cannot be executed, returns a Status indicating why.
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
- OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedDelete* parsedDelete,
@@ -258,7 +265,6 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
* If the query cannot be executed, returns a Status indicating why.
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
- OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedUpdate* parsedUpdate,
diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp
index a9bc1b5021a..238f6636e0f 100644
--- a/src/mongo/db/query/internal_plans.cpp
+++ b/src/mongo/db/query/internal_plans.cpp
@@ -55,8 +55,11 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collection
const Direction direction) {
std::unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), NamespaceString(ns));
+
if (nullptr == collection) {
- auto eof = std::make_unique<EOFStage>(opCtx);
+ auto eof = std::make_unique<EOFStage>(expCtx.get());
// Takes ownership of 'ws' and 'eof'.
auto statusWithPlanExecutor = PlanExecutor::make(
opCtx, std::move(ws), std::move(eof), nullptr, yieldPolicy, NamespaceString(ns));
@@ -66,7 +69,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collection
invariant(ns == collection->ns().ns());
- auto cs = _collectionScan(opCtx, ws.get(), collection, direction);
+ auto cs = _collectionScan(expCtx, ws.get(), collection, direction);
// Takes ownership of 'ws' and 'cs'.
auto statusWithPlanExecutor =
@@ -84,10 +87,13 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
invariant(collection);
auto ws = std::make_unique<WorkingSet>();
- auto root = _collectionScan(opCtx, ws.get(), collection, direction);
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
+
+ auto root = _collectionScan(expCtx, ws.get(), collection, direction);
root = std::make_unique<DeleteStage>(
- opCtx, std::move(params), ws.get(), collection, root.release());
+ expCtx.get(), std::move(params), ws.get(), collection, root.release());
auto executor =
PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, yieldPolicy);
@@ -108,7 +114,10 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::indexScan(
int options) {
auto ws = std::make_unique<WorkingSet>();
- std::unique_ptr<PlanStage> root = _indexScan(opCtx,
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
+
+ std::unique_ptr<PlanStage> root = _indexScan(expCtx,
ws.get(),
collection,
descriptor,
@@ -137,7 +146,10 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
invariant(collection);
auto ws = std::make_unique<WorkingSet>();
- std::unique_ptr<PlanStage> root = _indexScan(opCtx,
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
+
+ std::unique_ptr<PlanStage> root = _indexScan(expCtx,
ws.get(),
collection,
descriptor,
@@ -148,7 +160,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
InternalPlanner::IXSCAN_FETCH);
root = std::make_unique<DeleteStage>(
- opCtx, std::move(params), ws.get(), collection, root.release());
+ expCtx.get(), std::move(params), ws.get(), collection, root.release());
auto executor =
PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, yieldPolicy);
@@ -166,13 +178,16 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::updateWith
invariant(collection);
auto ws = std::make_unique<WorkingSet>();
- auto idHackStage = std::make_unique<IDHackStage>(opCtx, key, ws.get(), descriptor);
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
+
+ auto idHackStage = std::make_unique<IDHackStage>(expCtx.get(), key, ws.get(), descriptor);
const bool isUpsert = params.request->isUpsert();
auto root = (isUpsert ? std::make_unique<UpsertStage>(
- opCtx, params, ws.get(), collection, idHackStage.release())
+ expCtx.get(), params, ws.get(), collection, idHackStage.release())
: std::make_unique<UpdateStage>(
- opCtx, params, ws.get(), collection, idHackStage.release()));
+ expCtx.get(), params, ws.get(), collection, idHackStage.release()));
auto executor =
PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, yieldPolicy);
@@ -180,14 +195,16 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::updateWith
return std::move(executor.getValue());
}
-std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* opCtx,
- WorkingSet* ws,
- const Collection* collection,
- Direction direction) {
+std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ WorkingSet* ws,
+ const Collection* collection,
+ Direction direction) {
invariant(collection);
CollectionScanParams params;
- params.shouldWaitForOplogVisibility = shouldWaitForOplogVisibility(opCtx, collection, false);
+ params.shouldWaitForOplogVisibility =
+ shouldWaitForOplogVisibility(expCtx->opCtx, collection, false);
if (FORWARD == direction) {
params.direction = CollectionScanParams::FORWARD;
@@ -195,22 +212,23 @@ std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* op
params.direction = CollectionScanParams::BACKWARD;
}
- return std::make_unique<CollectionScan>(opCtx, collection, params, ws, nullptr);
+ return std::make_unique<CollectionScan>(expCtx.get(), collection, params, ws, nullptr);
}
-std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* opCtx,
- WorkingSet* ws,
- const Collection* collection,
- const IndexDescriptor* descriptor,
- const BSONObj& startKey,
- const BSONObj& endKey,
- BoundInclusion boundInclusion,
- Direction direction,
- int options) {
+std::unique_ptr<PlanStage> InternalPlanner::_indexScan(
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ WorkingSet* ws,
+ const Collection* collection,
+ const IndexDescriptor* descriptor,
+ const BSONObj& startKey,
+ const BSONObj& endKey,
+ BoundInclusion boundInclusion,
+ Direction direction,
+ int options) {
invariant(collection);
invariant(descriptor);
- IndexScanParams params(opCtx, descriptor);
+ IndexScanParams params(expCtx->opCtx, descriptor);
params.direction = direction;
params.bounds.isSimpleRange = true;
params.bounds.startKey = startKey;
@@ -219,10 +237,10 @@ std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* opCtx,
params.shouldDedup = descriptor->isMultikey();
std::unique_ptr<PlanStage> root =
- std::make_unique<IndexScan>(opCtx, std::move(params), ws, nullptr);
+ std::make_unique<IndexScan>(expCtx.get(), std::move(params), ws, nullptr);
if (InternalPlanner::IXSCAN_FETCH & options) {
- root = std::make_unique<FetchStage>(opCtx, ws, std::move(root), nullptr, collection);
+ root = std::make_unique<FetchStage>(expCtx.get(), ws, std::move(root), nullptr, collection);
}
return root;
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index a846a55b60b..228f6f4fd97 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -130,25 +130,27 @@ private:
*
* Used as a helper for collectionScan() and deleteWithCollectionScan().
*/
- static std::unique_ptr<PlanStage> _collectionScan(OperationContext* opCtx,
- WorkingSet* ws,
- const Collection* collection,
- Direction direction);
+ static std::unique_ptr<PlanStage> _collectionScan(
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ WorkingSet* ws,
+ const Collection* collection,
+ Direction direction);
/**
* Returns a plan stage that is either an index scan or an index scan with a fetch stage.
*
* Used as a helper for indexScan() and deleteWithIndexScan().
*/
- static std::unique_ptr<PlanStage> _indexScan(OperationContext* opCtx,
- WorkingSet* ws,
- const Collection* collection,
- const IndexDescriptor* descriptor,
- const BSONObj& startKey,
- const BSONObj& endKey,
- BoundInclusion boundInclusion,
- Direction direction = FORWARD,
- int options = IXSCAN_DEFAULT);
+ static std::unique_ptr<PlanStage> _indexScan(
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ WorkingSet* ws,
+ const Collection* collection,
+ const IndexDescriptor* descriptor,
+ const BSONObj& startKey,
+ const BSONObj& endKey,
+ BoundInclusion boundInclusion,
+ Direction direction = FORWARD,
+ int options = IXSCAN_DEFAULT);
};
} // namespace mongo
diff --git a/src/mongo/db/query/plan_cache_indexability_test.cpp b/src/mongo/db/query/plan_cache_indexability_test.cpp
index 471e57bfafb..2748bfd85ac 100644
--- a/src/mongo/db/query/plan_cache_indexability_test.cpp
+++ b/src/mongo/db/query/plan_cache_indexability_test.cpp
@@ -41,10 +41,19 @@
namespace mongo {
namespace {
-std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj,
- const CollatorInterface* collator = nullptr) {
- boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(collator);
+/**
+ * Produce a MatchExpression from BSON.
+ *
+ * If the caller would like the MatchExpression to have a collation associated with it, they may
+ * pass in an ExpressionContext owning the collation. Otherwise the caller may pass nullptr and a
+ * default-constructed ExpressionContextForTest will be used.
+ */
+std::unique_ptr<MatchExpression> parseMatchExpression(
+ const BSONObj& obj, boost::intrusive_ptr<ExpressionContext> expCtx = nullptr) {
+ if (!expCtx) {
+ expCtx = make_intrusive<ExpressionContextForTest>();
+ }
+
StatusWithMatchExpression status = MatchExpressionParser::parse(obj, std::move(expCtx));
if (!status.isOK()) {
FAIL(str::stream() << "failed to parse query: " << obj.toString()
@@ -400,6 +409,9 @@ TEST(PlanCacheIndexabilityTest, DiscriminatorForCollationIndicatesWhenCollations
entry.collator = &collator;
state.updateDiscriminators({entry});
+ boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
+ expCtx->setCollator(collator.clone());
+
auto discriminators = state.getDiscriminators("a");
ASSERT_EQ(1U, discriminators.size());
ASSERT(discriminators.find("a_1") != discriminators.end());
@@ -409,14 +421,13 @@ TEST(PlanCacheIndexabilityTest, DiscriminatorForCollationIndicatesWhenCollations
// Index collator matches query collator.
ASSERT_EQ(true,
disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: 'abc'}"), &collator).get()));
+ parseMatchExpression(fromjson("{a: 'abc'}"), expCtx).get()));
ASSERT_EQ(true,
disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: {$in: ['abc', 'xyz']}}"), &collator).get()));
- ASSERT_EQ(
- true,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: {$_internalExprEq: 'abc'}}}"), &collator).get()));
+ parseMatchExpression(fromjson("{a: {$in: ['abc', 'xyz']}}"), expCtx).get()));
+ ASSERT_EQ(true,
+ disc.isMatchCompatibleWithIndex(
+ parseMatchExpression(fromjson("{a: {$_internalExprEq: 'abc'}}}"), expCtx).get()));
// Expression is not a ComparisonMatchExpression, InternalExprEqMatchExpression or
// InMatchExpression.
@@ -547,6 +558,10 @@ TEST(PlanCacheIndexabilityTest, WildcardWithCollationDiscriminator) {
auto entryProjExecPair = makeWildcardEntry(BSON("a.$**" << 1));
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
entryProjExecPair.first.collator = &collator;
+
+ boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
+ expCtx->setCollator(collator.clone());
+
state.updateDiscriminators({entryProjExecPair.first});
const auto unindexedPathDiscriminators = state.buildWildcardDiscriminators("notIndexed");
@@ -563,7 +578,7 @@ TEST(PlanCacheIndexabilityTest, WildcardWithCollationDiscriminator) {
parseMatchExpression(fromjson("{a: \"hello world\"}"), nullptr).get()));
// Match expression which uses the same collation as the index is.
ASSERT_TRUE(disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: \"hello world\"}"), &collator).get()));
+ parseMatchExpression(fromjson("{a: \"hello world\"}"), expCtx).get()));
}
TEST(PlanCacheIndexabilityTest, WildcardPartialIndexDiscriminator) {
diff --git a/src/mongo/db/query/projection_test.cpp b/src/mongo/db/query/projection_test.cpp
index b52d27bfb80..9050416deef 100644
--- a/src/mongo/db/query/projection_test.cpp
+++ b/src/mongo/db/query/projection_test.cpp
@@ -55,9 +55,8 @@ projection_ast::Projection createProjection(const BSONObj& query,
ProjectionPolicies policies = {}) {
QueryTestServiceContext serviceCtx;
auto opCtx = serviceCtx.makeOperationContext();
- const CollatorInterface* collator = nullptr;
const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx.get(), collator, kTestNss));
+ new ExpressionContext(opCtx.get(), std::unique_ptr<CollatorInterface>(nullptr), kTestNss));
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(query, std::move(expCtx));
ASSERT_OK(statusWithMatcher.getStatus());
@@ -85,9 +84,8 @@ void assertInvalidProjection(const char* queryStr, const char* projStr) {
BSONObj projObj = fromjson(projStr);
QueryTestServiceContext serviceCtx;
auto opCtx = serviceCtx.makeOperationContext();
- const CollatorInterface* collator = nullptr;
const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx.get(), collator, kTestNss));
+ new ExpressionContext(opCtx.get(), std::unique_ptr<CollatorInterface>(nullptr), kTestNss));
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(query, std::move(expCtx));
ASSERT_OK(statusWithMatcher.getStatus());
diff --git a/src/mongo/db/query/query_planner_partialidx_test.cpp b/src/mongo/db/query/query_planner_partialidx_test.cpp
index 124b75c7518..ecb574fa728 100644
--- a/src/mongo/db/query/query_planner_partialidx_test.cpp
+++ b/src/mongo/db/query/query_planner_partialidx_test.cpp
@@ -36,6 +36,8 @@
namespace mongo {
namespace {
+const static NamespaceString kNs("db.dummyNs");
+
TEST_F(QueryPlannerTest, PartialIndexEq) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
BSONObj filterObj(fromjson("{a: {$gt: 0}}"));
@@ -448,9 +450,14 @@ TEST_F(QueryPlannerTest, PartialIndexNor) {
TEST_F(QueryPlannerTest, PartialIndexStringComparisonMatchingCollators) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
BSONObj filterObj(fromjson("{a: {$gt: 'cba'}}"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- std::unique_ptr<MatchExpression> filterExpr = parseMatchExpression(filterObj, &collator);
- addIndex(fromjson("{a: 1}"), filterExpr.get(), &collator);
+
+ auto expCtxForPartialFilter = make_intrusive<ExpressionContext>(
+ opCtx.get(),
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString),
+ kNs);
+ std::unique_ptr<MatchExpression> filterExpr =
+ parseMatchExpression(filterObj, expCtxForPartialFilter);
+ addIndex(fromjson("{a: 1}"), filterExpr.get(), expCtxForPartialFilter->getCollator());
runQueryAsCommand(
fromjson("{find: 'testns', filter: {a: 'abc'}, collation: {locale: 'reverse'}}"));
@@ -468,9 +475,14 @@ TEST_F(QueryPlannerTest, PartialIndexStringComparisonMatchingCollators) {
TEST_F(QueryPlannerTest, PartialIndexNoStringComparisonNonMatchingCollators) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
BSONObj filterObj(fromjson("{a: {$gt: 0}}"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
- std::unique_ptr<MatchExpression> filterExpr = parseMatchExpression(filterObj, &collator);
- addIndex(fromjson("{a: 1}"), filterExpr.get(), &collator);
+
+ auto expCtxForPartialFilter = make_intrusive<ExpressionContext>(
+ opCtx.get(),
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString),
+ kNs);
+ std::unique_ptr<MatchExpression> filterExpr =
+ parseMatchExpression(filterObj, expCtxForPartialFilter);
+ addIndex(fromjson("{a: 1}"), filterExpr.get(), expCtxForPartialFilter->getCollator());
runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1}, collation: {locale: 'reverse'}}"));
assertNumSolutions(1U);
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index 0ea76a16783..805fccdbc1f 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -53,6 +53,8 @@ const NamespaceString QueryPlannerTest::nss("test.collection");
void QueryPlannerTest::setUp() {
opCtx = serviceContext.makeOperationContext();
+ expCtx = make_intrusive<ExpressionContext>(
+ opCtx.get(), std::unique_ptr<CollatorInterface>(nullptr), nss);
internalQueryPlannerEnableHashIntersection.store(true);
params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
addIndex(BSON("_id" << 1));
@@ -62,6 +64,7 @@ void QueryPlannerTest::clearState() {
plannerStatus = Status::OK();
solns.clear();
cq.reset();
+ expCtx.reset();
relaxBoundsCheck = false;
}
@@ -327,7 +330,6 @@ void QueryPlannerTest::runQueryFull(const BSONObj& query,
qr->setHint(hint);
qr->setMin(minObj);
qr->setMax(maxObj);
- const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
CanonicalQuery::canonicalize(opCtx.get(),
std::move(qr),
@@ -408,7 +410,6 @@ void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
qr->setHint(hint);
qr->setMin(minObj);
qr->setMax(maxObj);
- const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
CanonicalQuery::canonicalize(opCtx.get(),
std::move(qr),
@@ -432,7 +433,6 @@ void QueryPlannerTest::runQueryAsCommand(const BSONObj& cmdObj) {
std::unique_ptr<QueryRequest> qr(
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
- const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
CanonicalQuery::canonicalize(opCtx.get(),
std::move(qr),
@@ -456,7 +456,6 @@ void QueryPlannerTest::runInvalidQueryAsCommand(const BSONObj& cmdObj) {
std::unique_ptr<QueryRequest> qr(
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
- const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
CanonicalQuery::canonicalize(opCtx.get(),
std::move(qr),
@@ -550,10 +549,13 @@ void QueryPlannerTest::assertHasOnlyCollscan() const {
}
std::unique_ptr<MatchExpression> QueryPlannerTest::parseMatchExpression(
- const BSONObj& obj, const CollatorInterface* collator) {
- boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(collator);
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj, std::move(expCtx));
+ const BSONObj& obj, const boost::intrusive_ptr<ExpressionContext>& optionalExpCtx) {
+ auto expCtx = optionalExpCtx;
+ if (!expCtx.get()) {
+ expCtx = make_intrusive<ExpressionContextForTest>();
+ }
+
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj, expCtx);
if (!status.isOK()) {
FAIL(str::stream() << "failed to parse query: " << obj.toString()
<< ". Reason: " << status.getStatus().toString());
diff --git a/src/mongo/db/query/query_planner_test_fixture.h b/src/mongo/db/query/query_planner_test_fixture.h
index 4743f505fa2..d133a83002c 100644
--- a/src/mongo/db/query/query_planner_test_fixture.h
+++ b/src/mongo/db/query/query_planner_test_fixture.h
@@ -222,9 +222,16 @@ protected:
/**
* Helper function to parse a MatchExpression.
+ *
+ * If the caller wants a collator to be used with the match expression, pass an expression
+ * context owning that collator as the second argument. The expression context passed must
+ * outlive the returned match expression.
+ *
+ * If no ExpressionContext is passed a default-constructed ExpressionContextForTest will be
+ * used.
*/
- static std::unique_ptr<MatchExpression> parseMatchExpression(
- const BSONObj& obj, const CollatorInterface* collator = nullptr);
+ std::unique_ptr<MatchExpression> parseMatchExpression(
+ const BSONObj& obj, const boost::intrusive_ptr<ExpressionContext>& expCtx = nullptr);
//
// Data members.
@@ -234,6 +241,8 @@ protected:
QueryTestServiceContext serviceContext;
ServiceContext::UniqueOperationContext opCtx;
+ boost::intrusive_ptr<ExpressionContext> expCtx;
+
BSONObj queryObj;
std::unique_ptr<CanonicalQuery> cq;
QueryPlannerParams params;
diff --git a/src/mongo/db/query/query_planner_test_lib.cpp b/src/mongo/db/query/query_planner_test_lib.cpp
index dc1ee631607..2abe9e00aa0 100644
--- a/src/mongo/db/query/query_planner_test_lib.cpp
+++ b/src/mongo/db/query/query_planner_test_lib.cpp
@@ -76,9 +76,8 @@ bool filterMatches(const BSONObj& testFilter,
}
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(testCollator.get());
- StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(testFilter, std::move(expCtx));
+ expCtx->setCollator(std::move(testCollator));
+ StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(testFilter, expCtx);
if (!statusWithMatcher.isOK()) {
return false;
}
diff --git a/src/mongo/db/query/query_solution_test.cpp b/src/mongo/db/query/query_solution_test.cpp
index 142ed16fd2a..fb79d1e69b6 100644
--- a/src/mongo/db/query/query_solution_test.cpp
+++ b/src/mongo/db/query/query_solution_test.cpp
@@ -717,9 +717,8 @@ TEST(QuerySolutionTest, IndexScanNodeHasFieldExcludesSimpleBoundsStringFieldWhen
auto createMatchExprAndProjection(const BSONObj& query, const BSONObj& projObj) {
QueryTestServiceContext serviceCtx;
auto opCtx = serviceCtx.makeOperationContext();
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx.get(), collator, NamespaceString("test.dummy")));
+ const boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(
+ opCtx.get(), std::unique_ptr<CollatorInterface>(nullptr), NamespaceString("test.dummy")));
StatusWithMatchExpression queryMatchExpr =
MatchExpressionParser::parse(query, std::move(expCtx));
ASSERT(queryMatchExpr.isOK());
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 907f8018f27..01005defd04 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -74,6 +74,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
const QuerySolution& qsol,
const QuerySolutionNode* root,
WorkingSet* ws) {
+ auto* const expCtx = cq.getExpCtx().get();
switch (root->getType()) {
case STAGE_COLLSCAN: {
const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(root);
@@ -89,7 +90,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
params.resumeAfterRecordId = csn->resumeAfterRecordId;
params.stopApplyingFilterAfterFirstMatch = csn->stopApplyingFilterAfterFirstMatch;
return std::make_unique<CollectionScan>(
- opCtx, collection, params, ws, csn->filter.get());
+ expCtx, collection, params, ws, csn->filter.get());
}
case STAGE_IXSCAN: {
const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root);
@@ -113,13 +114,13 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
params.direction = ixn->direction;
params.addKeyMetadata = ixn->addKeyMetadata;
params.shouldDedup = ixn->shouldDedup;
- return std::make_unique<IndexScan>(opCtx, std::move(params), ws, ixn->filter.get());
+ return std::make_unique<IndexScan>(expCtx, std::move(params), ws, ixn->filter.get());
}
case STAGE_FETCH: {
const FetchNode* fn = static_cast<const FetchNode*>(root);
auto childStage = buildStages(opCtx, collection, cq, qsol, fn->children[0], ws);
return std::make_unique<FetchStage>(
- opCtx, ws, std::move(childStage), fn->filter.get(), collection);
+ expCtx, ws, std::move(childStage), fn->filter.get(), collection);
}
case STAGE_SORT_DEFAULT: {
auto snDefault = static_cast<const SortNodeDefault*>(root);
@@ -155,7 +156,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
auto returnKeyNode = static_cast<const ReturnKeyNode*>(root);
auto childStage =
buildStages(opCtx, collection, cq, qsol, returnKeyNode->children[0], ws);
- return std::make_unique<ReturnKeyStage>(opCtx,
+ return std::make_unique<ReturnKeyStage>(expCtx,
std::move(returnKeyNode->sortKeyMetaFields),
ws,
cq.getExpCtx()->sortKeyFormat,
@@ -173,7 +174,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
case STAGE_PROJECTION_COVERED: {
auto pn = static_cast<const ProjectionNodeCovered*>(root);
auto childStage = buildStages(opCtx, collection, cq, qsol, pn->children[0], ws);
- return std::make_unique<ProjectionStageCovered>(cq.getExpCtx(),
+ return std::make_unique<ProjectionStageCovered>(cq.getExpCtx().get(),
cq.getQueryRequest().getProj(),
cq.getProj(),
ws,
@@ -183,7 +184,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
case STAGE_PROJECTION_SIMPLE: {
auto pn = static_cast<const ProjectionNodeSimple*>(root);
auto childStage = buildStages(opCtx, collection, cq, qsol, pn->children[0], ws);
- return std::make_unique<ProjectionStageSimple>(cq.getExpCtx(),
+ return std::make_unique<ProjectionStageSimple>(cq.getExpCtx().get(),
cq.getQueryRequest().getProj(),
cq.getProj(),
ws,
@@ -192,16 +193,16 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
case STAGE_LIMIT: {
const LimitNode* ln = static_cast<const LimitNode*>(root);
auto childStage = buildStages(opCtx, collection, cq, qsol, ln->children[0], ws);
- return std::make_unique<LimitStage>(opCtx, ln->limit, ws, std::move(childStage));
+ return std::make_unique<LimitStage>(expCtx, ln->limit, ws, std::move(childStage));
}
case STAGE_SKIP: {
const SkipNode* sn = static_cast<const SkipNode*>(root);
auto childStage = buildStages(opCtx, collection, cq, qsol, sn->children[0], ws);
- return std::make_unique<SkipStage>(opCtx, sn->skip, ws, std::move(childStage));
+ return std::make_unique<SkipStage>(expCtx, sn->skip, ws, std::move(childStage));
}
case STAGE_AND_HASH: {
const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
- auto ret = std::make_unique<AndHashStage>(opCtx, ws);
+ auto ret = std::make_unique<AndHashStage>(expCtx, ws);
for (size_t i = 0; i < ahn->children.size(); ++i) {
auto childStage = buildStages(opCtx, collection, cq, qsol, ahn->children[i], ws);
ret->addChild(std::move(childStage));
@@ -210,7 +211,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
}
case STAGE_OR: {
const OrNode* orn = static_cast<const OrNode*>(root);
- auto ret = std::make_unique<OrStage>(opCtx, ws, orn->dedup, orn->filter.get());
+ auto ret = std::make_unique<OrStage>(expCtx, ws, orn->dedup, orn->filter.get());
for (size_t i = 0; i < orn->children.size(); ++i) {
auto childStage = buildStages(opCtx, collection, cq, qsol, orn->children[i], ws);
ret->addChild(std::move(childStage));
@@ -219,7 +220,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
}
case STAGE_AND_SORTED: {
const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
- auto ret = std::make_unique<AndSortedStage>(opCtx, ws);
+ auto ret = std::make_unique<AndSortedStage>(expCtx, ws);
for (size_t i = 0; i < asn->children.size(); ++i) {
auto childStage = buildStages(opCtx, collection, cq, qsol, asn->children[i], ws);
ret->addChild(std::move(childStage));
@@ -232,7 +233,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
params.dedup = msn->dedup;
params.pattern = msn->sort;
params.collator = cq.getCollator();
- auto ret = std::make_unique<MergeSortStage>(opCtx, params, ws);
+ auto ret = std::make_unique<MergeSortStage>(expCtx, params, ws);
for (size_t i = 0; i < msn->children.size(); ++i) {
auto childStage = buildStages(opCtx, collection, cq, qsol, msn->children[i], ws);
ret->addChild(std::move(childStage));
@@ -254,7 +255,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
opCtx, node->index.identifier.catalogName);
invariant(twoDIndex);
- return std::make_unique<GeoNear2DStage>(params, opCtx, ws, twoDIndex);
+ return std::make_unique<GeoNear2DStage>(params, expCtx, ws, twoDIndex);
}
case STAGE_GEO_NEAR_2DSPHERE: {
const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(root);
@@ -271,7 +272,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
opCtx, node->index.identifier.catalogName);
invariant(s2Index);
- return std::make_unique<GeoNear2DSphereStage>(params, opCtx, ws, s2Index);
+ return std::make_unique<GeoNear2DSphereStage>(params, expCtx, ws, s2Index);
}
case STAGE_TEXT: {
const TextNode* node = static_cast<const TextNode*>(root);
@@ -291,7 +292,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
// created by planning a query that contains "no-op" expressions.
params.query = static_cast<FTSQueryImpl&>(*node->ftsQuery);
params.wantTextScore = cq.metadataDeps()[DocumentMetadataFields::kTextScore];
- return std::make_unique<TextStage>(opCtx, params, ws, node->filter.get());
+ return std::make_unique<TextStage>(expCtx, params, ws, node->filter.get());
}
case STAGE_SHARDING_FILTER: {
const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(root);
@@ -299,7 +300,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
auto css = CollectionShardingState::get(opCtx, collection->ns());
return std::make_unique<ShardFilterStage>(
- opCtx, css->getOwnershipFilter(opCtx), ws, std::move(childStage));
+ expCtx, css->getOwnershipFilter(opCtx), ws, std::move(childStage));
}
case STAGE_DISTINCT_SCAN: {
const DistinctNode* dn = static_cast<const DistinctNode*>(root);
@@ -320,7 +321,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
params.scanDirection = dn->direction;
params.bounds = dn->bounds;
params.fieldNo = dn->fieldNo;
- return std::make_unique<DistinctScan>(opCtx, std::move(params), ws);
+ return std::make_unique<DistinctScan>(expCtx, std::move(params), ws);
}
case STAGE_COUNT_SCAN: {
const CountScanNode* csn = static_cast<const CountScanNode*>(root);
@@ -342,13 +343,13 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
params.startKeyInclusive = csn->startKeyInclusive;
params.endKey = csn->endKey;
params.endKeyInclusive = csn->endKeyInclusive;
- return std::make_unique<CountScan>(opCtx, std::move(params), ws);
+ return std::make_unique<CountScan>(expCtx, std::move(params), ws);
}
case STAGE_ENSURE_SORTED: {
const EnsureSortedNode* esn = static_cast<const EnsureSortedNode*>(root);
auto childStage = buildStages(opCtx, collection, cq, qsol, esn->children[0], ws);
return std::make_unique<EnsureSortedStage>(
- opCtx, esn->pattern, ws, std::move(childStage));
+ expCtx, esn->pattern, ws, std::move(childStage));
}
case STAGE_CACHED_PLAN:
case STAGE_CHANGE_STREAM_PROXY:
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 96582c70720..20129013287 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -321,7 +321,8 @@ Status _checkPrecondition(OperationContext* opCtx,
// applyOps does not allow any extensions, such as $text, $where, $geoNear, $near,
// $nearSphere, or $expr.
- boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, collator, nss));
+ boost::intrusive_ptr<ExpressionContext> expCtx(
+ new ExpressionContext(opCtx, CollatorInterface::cloneCollator(collator), nss));
Matcher matcher(preCondition["res"].Obj(), std::move(expCtx));
if (!matcher.matches(realres)) {
result->append("got", realres);
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index f62634da128..8344fc7d500 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -875,7 +875,7 @@ Status _updateWithQuery(OperationContext* opCtx,
}
auto planExecutorResult = mongo::getExecutorUpdate(
- opCtx, nullptr, collection, &parsedUpdate, boost::none /* verbosity */);
+ nullptr, collection, &parsedUpdate, boost::none /* verbosity */);
if (!planExecutorResult.isOK()) {
return planExecutorResult.getStatus();
}
@@ -1004,7 +1004,7 @@ Status StorageInterfaceImpl::deleteByFilter(OperationContext* opCtx,
auto collection = collectionResult.getValue();
auto planExecutorResult = mongo::getExecutorDelete(
- opCtx, nullptr, collection, &parsedDelete, boost::none /* verbosity */);
+ nullptr, collection, &parsedDelete, boost::none /* verbosity */);
if (!planExecutorResult.isOK()) {
return planExecutorResult.getStatus();
}
diff --git a/src/mongo/db/update/addtoset_node_test.cpp b/src/mongo/db/update/addtoset_node_test.cpp
index 426d84aa0ba..9c3cac056be 100644
--- a/src/mongo/db/update/addtoset_node_test.cpp
+++ b/src/mongo/db/update/addtoset_node_test.cpp
@@ -321,9 +321,10 @@ TEST_F(AddToSetNodeTest, ApplyCreateEmptyArrayIsNotNoop) {
TEST_F(AddToSetNodeTest, ApplyDeduplicationOfElementsToAddRespectsCollation) {
auto update = fromjson("{$addToSet: {a: {$each: ['abc', 'ABC', 'def', 'abc']}}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kToLowerString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
AddToSetNode node;
ASSERT_OK(node.init(update["$addToSet"]["a"], expCtx));
@@ -341,9 +342,10 @@ TEST_F(AddToSetNodeTest, ApplyDeduplicationOfElementsToAddRespectsCollation) {
TEST_F(AddToSetNodeTest, ApplyComparisonToExistingElementsRespectsCollation) {
auto update = fromjson("{$addToSet: {a: {$each: ['abc', 'def']}}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kToLowerString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
AddToSetNode node;
ASSERT_OK(node.init(update["$addToSet"]["a"], expCtx));
@@ -385,12 +387,13 @@ DEATH_TEST_REGEX(AddToSetNodeTest,
CannotSetCollatorIfCollatorIsNonNull,
"Invariant failure.*!_collator") {
auto update = fromjson("{$addToSet: {a: 1}}");
- CollatorInterfaceMock caseInsensitiveCollator(CollatorInterfaceMock::MockType::kToLowerString);
+ auto caseInsensitiveCollator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&caseInsensitiveCollator);
+ expCtx->setCollator(std::move(caseInsensitiveCollator));
AddToSetNode node;
ASSERT_OK(node.init(update["$addToSet"]["a"], expCtx));
- node.setCollator(&caseInsensitiveCollator);
+ node.setCollator(expCtx->getCollator());
}
DEATH_TEST_REGEX(AddToSetNodeTest, CannotSetCollatorTwice, "Invariant failure.*!_collator") {
diff --git a/src/mongo/db/update/compare_node_test.cpp b/src/mongo/db/update/compare_node_test.cpp
index 698842855f0..246b933f7f7 100644
--- a/src/mongo/db/update/compare_node_test.cpp
+++ b/src/mongo/db/update/compare_node_test.cpp
@@ -296,9 +296,10 @@ TEST_F(CompareNodeTest, ApplyExistingEmbeddedDocMaxNumber) {
TEST_F(CompareNodeTest, ApplyMinRespectsCollation) {
auto update = fromjson("{$min: {a: 'dba'}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
+ expCtx->setCollator(std::move(collator));
CompareNode node(CompareNode::CompareMode::kMin);
ASSERT_OK(node.init(update["$min"]["a"], expCtx));
@@ -359,12 +360,14 @@ DEATH_TEST_REGEX(CompareNodeTest,
CannotSetCollatorIfCollatorIsNonNull,
"Invariant failure.*!_collator") {
auto update = fromjson("{$max: {a: 1}}");
- CollatorInterfaceMock caseInsensitiveCollator(CollatorInterfaceMock::MockType::kToLowerString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
+
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&caseInsensitiveCollator);
+ expCtx->setCollator(std::move(collator));
CompareNode node(CompareNode::CompareMode::kMax);
ASSERT_OK(node.init(update["$max"]["a"], expCtx));
- node.setCollator(&caseInsensitiveCollator);
+ node.setCollator(expCtx->getCollator());
}
DEATH_TEST_REGEX(CompareNodeTest, CannotSetCollatorTwice, "Invariant failure.*!_collator") {
diff --git a/src/mongo/db/update/pull_node_test.cpp b/src/mongo/db/update/pull_node_test.cpp
index 39f41ba06f1..bca1b93a18f 100644
--- a/src/mongo/db/update/pull_node_test.cpp
+++ b/src/mongo/db/update/pull_node_test.cpp
@@ -319,9 +319,10 @@ TEST_F(PullNodeTest, ApplyWithCollation) {
// With the collation, this update will pull any string whose reverse is greater than the
// reverse of the "abc" string.
auto update = fromjson("{$pull : {a: {$gt: 'abc'}}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullNode node;
ASSERT_OK(node.init(update["$pull"]["a"], expCtx));
@@ -338,9 +339,10 @@ TEST_F(PullNodeTest, ApplyWithCollation) {
TEST_F(PullNodeTest, ApplyWithCollationDoesNotAffectNonStringMatches) {
auto update = fromjson("{$pull : {a: {$lt: 1}}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullNode node;
ASSERT_OK(node.init(update["$pull"]["a"], expCtx));
@@ -357,9 +359,10 @@ TEST_F(PullNodeTest, ApplyWithCollationDoesNotAffectNonStringMatches) {
TEST_F(PullNodeTest, ApplyWithCollationDoesNotAffectRegexMatches) {
auto update = fromjson("{$pull : {a: /a/}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullNode node;
ASSERT_OK(node.init(update["$pull"]["a"], expCtx));
@@ -376,9 +379,10 @@ TEST_F(PullNodeTest, ApplyWithCollationDoesNotAffectRegexMatches) {
TEST_F(PullNodeTest, ApplyStringLiteralMatchWithCollation) {
auto update = fromjson("{$pull : {a: 'c'}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullNode node;
ASSERT_OK(node.init(update["$pull"]["a"], expCtx));
@@ -395,9 +399,10 @@ TEST_F(PullNodeTest, ApplyStringLiteralMatchWithCollation) {
TEST_F(PullNodeTest, ApplyCollationDoesNotAffectNumberLiteralMatches) {
auto update = fromjson("{$pull : {a: 99}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullNode node;
ASSERT_OK(node.init(update["$pull"]["a"], expCtx));
@@ -584,9 +589,10 @@ TEST_F(PullNodeTest, ApplyComplexDocAndMatching3) {
TEST_F(PullNodeTest, ApplyFullPredicateWithCollation) {
auto update = fromjson("{$pull: {'a.b': {x: 'blah'}}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullNode node;
ASSERT_OK(node.init(update["$pull"]["a.b"], expCtx));
diff --git a/src/mongo/db/update/pullall_node_test.cpp b/src/mongo/db/update/pullall_node_test.cpp
index dd77b411dcf..6ca896d45b0 100644
--- a/src/mongo/db/update/pullall_node_test.cpp
+++ b/src/mongo/db/update/pullall_node_test.cpp
@@ -254,9 +254,10 @@ TEST_F(PullAllNodeTest, ApplyWithAllArrayElementsAndThenSome) {
TEST_F(PullAllNodeTest, ApplyWithCollator) {
auto update = fromjson("{$pullAll : {a: ['FOO', 'BAR']}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kToLowerString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullAllNode node;
ASSERT_OK(node.init(update["$pullAll"]["a"], expCtx));
diff --git a/src/mongo/db/update/push_node_test.cpp b/src/mongo/db/update/push_node_test.cpp
index 985ee81ca2c..b6fa5e59aaa 100644
--- a/src/mongo/db/update/push_node_test.cpp
+++ b/src/mongo/db/update/push_node_test.cpp
@@ -611,9 +611,10 @@ TEST_F(PushNodeTest, ApplyWithEmbeddedFieldSort) {
TEST_F(PushNodeTest, ApplySortWithCollator) {
auto update = fromjson("{$push: {a: {$each: ['ha'], $sort: 1}}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PushNode node;
ASSERT_OK(node.init(update["$push"]["a"], expCtx));
diff --git a/src/mongo/db/update/update_driver.cpp b/src/mongo/db/update/update_driver.cpp
index 8f7024f80c1..9899fd44366 100644
--- a/src/mongo/db/update/update_driver.cpp
+++ b/src/mongo/db/update/update_driver.cpp
@@ -304,8 +304,6 @@ Status UpdateDriver::update(StringData matchedField,
}
void UpdateDriver::setCollator(const CollatorInterface* collator) {
- _expCtx->setCollator(collator);
-
if (_updateExecutor) {
_updateExecutor->setCollator(collator);
}
diff --git a/src/mongo/db/update/update_object_node_test.cpp b/src/mongo/db/update/update_object_node_test.cpp
index 1025f93fb0e..e1c0282eaab 100644
--- a/src/mongo/db/update/update_object_node_test.cpp
+++ b/src/mongo/db/update/update_object_node_test.cpp
@@ -2017,9 +2017,10 @@ TEST_F(UpdateObjectNodeTest, ChildrenShouldBeAppliedInAlphabeticalOrder) {
TEST_F(UpdateObjectNodeTest, CollatorShouldNotAffectUpdateOrder) {
auto setUpdate = fromjson("{$set: {abc: 5, cba: 6}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> arrayFilters;
std::set<std::string> foundIdentifiers;
UpdateObjectNode root;
diff --git a/src/mongo/dbtests/cursor_manager_test.cpp b/src/mongo/dbtests/cursor_manager_test.cpp
index a4c3d835079..e6227d325e6 100644
--- a/src/mongo/dbtests/cursor_manager_test.cpp
+++ b/src/mongo/dbtests/cursor_manager_test.cpp
@@ -71,8 +71,11 @@ public:
std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> makeFakePlanExecutor(
OperationContext* opCtx) {
+ // Create a mock ExpressionContext.
+ auto expCtx = make_intrusive<ExpressionContext>(opCtx, nullptr, kTestNss);
+
auto workingSet = std::make_unique<WorkingSet>();
- auto queuedDataStage = std::make_unique<QueuedDataStage>(opCtx, workingSet.get());
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(expCtx.get(), workingSet.get());
return unittest::assertGet(PlanExecutor::make(opCtx,
std::move(workingSet),
std::move(queuedDataStage),
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index bf184315ecd..84738eb6e17 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -301,8 +301,11 @@ TEST_F(DocumentSourceCursorTest, TailableAwaitDataCursorShouldErrorAfterTimeout)
collScanParams.tailable = true;
auto filter = BSON("a" << 1);
auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx()));
- auto collectionScan = std::make_unique<CollectionScan>(
- opCtx(), readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get());
+ auto collectionScan = std::make_unique<CollectionScan>(ctx().get(),
+ readLock.getCollection(),
+ collScanParams,
+ workingSet.get(),
+ matchExpression.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(filter);
queryRequest->setTailableMode(TailableModeEnum::kTailableAndAwaitData);
@@ -340,8 +343,11 @@ TEST_F(DocumentSourceCursorTest, NonAwaitDataCursorShouldErrorAfterTimeout) {
CollectionScanParams collScanParams;
auto filter = BSON("a" << 1);
auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx()));
- auto collectionScan = std::make_unique<CollectionScan>(
- opCtx(), readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get());
+ auto collectionScan = std::make_unique<CollectionScan>(ctx().get(),
+ readLock.getCollection(),
+ collScanParams,
+ workingSet.get(),
+ matchExpression.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(filter);
auto canonicalQuery = unittest::assertGet(
@@ -386,8 +392,11 @@ TEST_F(DocumentSourceCursorTest, TailableAwaitDataCursorShouldErrorAfterBeingKil
collScanParams.tailable = true;
auto filter = BSON("a" << 1);
auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx()));
- auto collectionScan = std::make_unique<CollectionScan>(
- opCtx(), readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get());
+ auto collectionScan = std::make_unique<CollectionScan>(ctx().get(),
+ readLock.getCollection(),
+ collScanParams,
+ workingSet.get(),
+ matchExpression.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(filter);
queryRequest->setTailableMode(TailableModeEnum::kTailableAndAwaitData);
@@ -424,8 +433,11 @@ TEST_F(DocumentSourceCursorTest, NormalCursorShouldErrorAfterBeingKilled) {
CollectionScanParams collScanParams;
auto filter = BSON("a" << 1);
auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx()));
- auto collectionScan = std::make_unique<CollectionScan>(
- opCtx(), readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get());
+ auto collectionScan = std::make_unique<CollectionScan>(ctx().get(),
+ readLock.getCollection(),
+ collScanParams,
+ workingSet.get(),
+ matchExpression.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(filter);
auto canonicalQuery = unittest::assertGet(
diff --git a/src/mongo/dbtests/matchertests.cpp b/src/mongo/dbtests/matchertests.cpp
index bc3e329816c..1ea2ba9233e 100644
--- a/src/mongo/dbtests/matchertests.cpp
+++ b/src/mongo/dbtests/matchertests.cpp
@@ -229,9 +229,8 @@ public:
const NamespaceString nss("unittests.matchertests");
AutoGetCollectionForReadCommand ctx(&opCtx, nss);
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtxPtr.get(), collator, kTestNss));
+ const boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(
+ opCtxPtr.get(), std::unique_ptr<CollatorInterface>(nullptr), kTestNss));
M m(BSON("$where"
<< "function(){ return this.a == 1; }"),
expCtx,
@@ -291,9 +290,10 @@ template <typename M>
class Collator {
public:
void run() {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
M matcher(BSON("a"
<< "string"),
expCtx);
diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
index 06b33cc30d4..a7e949465bd 100644
--- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp
+++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
@@ -58,7 +58,8 @@ static const NamespaceString nss("unittests.PlanExecutorInvalidationTest");
*/
class PlanExecutorInvalidationTest : public unittest::Test {
public:
- PlanExecutorInvalidationTest() : _client(&_opCtx) {
+ PlanExecutorInvalidationTest()
+ : _client(&_opCtx), _expCtx(make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss)) {
_ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns()));
_client.dropCollection(nss.ns());
@@ -76,7 +77,7 @@ public:
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
unique_ptr<CollectionScan> scan(
- new CollectionScan(&_opCtx, collection(), params, ws.get(), nullptr));
+ new CollectionScan(_expCtx.get(), collection(), params, ws.get(), nullptr));
// Create a plan executor to hold it
auto qr = std::make_unique<QueryRequest>(nss);
@@ -131,6 +132,8 @@ public:
OperationContext& _opCtx = *_opCtxPtr;
unique_ptr<dbtests::WriteContextForTests> _ctx;
DBDirectClient _client;
+
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
};
TEST_F(PlanExecutorInvalidationTest, ExecutorToleratesDeletedDocumentsDuringYield) {
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index 95ff9f4da3d..8831ee951a0 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -126,7 +126,7 @@ public:
ASSERT_GREATER_THAN_OR_EQUALS(solutions.size(), 1U);
// Fill out the MPR.
- _mps.reset(new MultiPlanStage(&_opCtx, collection, cq));
+ _mps.reset(new MultiPlanStage(_expCtx.get(), collection, cq));
unique_ptr<WorkingSet> ws(new WorkingSet());
// Put each solution from the planner into the MPR.
for (size_t i = 0; i < solutions.size(); ++i) {
@@ -167,6 +167,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss);
+
private:
// Holds the value of global "internalQueryForceIntersectionPlans" setParameter flag.
// Restored at end of test invocation regardless of test result.
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 0529ec381bb..25182aa327b 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -116,7 +116,7 @@ public:
// Make the stage.
unique_ptr<PlanStage> root(
- new CollectionScan(&_opCtx, coll, csparams, ws.get(), cq.get()->root()));
+ new CollectionScan(cq->getExpCtx().get(), coll, csparams, ws.get(), cq.get()->root()));
// Hand the plan off to the executor.
auto statusWithPlanExecutor =
@@ -153,9 +153,9 @@ public:
unique_ptr<WorkingSet> ws(new WorkingSet());
- auto ixscan = std::make_unique<IndexScan>(&_opCtx, ixparams, ws.get(), nullptr);
+ auto ixscan = std::make_unique<IndexScan>(_expCtx.get(), ixparams, ws.get(), nullptr);
unique_ptr<PlanStage> root =
- std::make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ixscan), nullptr, coll);
+ std::make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ixscan), nullptr, coll);
auto qr = std::make_unique<QueryRequest>(nss);
auto statusWithCQ = CanonicalQuery::canonicalize(&_opCtx, std::move(qr));
@@ -174,6 +174,9 @@ protected:
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_opCtxPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss);
+
private:
const IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
Collection* collection =
@@ -203,8 +206,6 @@ TEST_F(PlanExecutorTest, DropIndexScanAgg) {
// Create the aggregation pipeline.
std::vector<BSONObj> rawPipeline = {fromjson("{$match: {a: {$gte: 7, $lte: 10}}}")};
- boost::intrusive_ptr<ExpressionContextForTest> expCtx =
- new ExpressionContextForTest(&_opCtx, AggregationRequest(nss, rawPipeline));
// Create an "inner" plan executor and register it with the cursor manager so that it can
// get notified when the collection is dropped.
@@ -215,12 +216,12 @@ TEST_F(PlanExecutorTest, DropIndexScanAgg) {
// in the pipeline.
innerExec->saveState();
auto cursorSource = DocumentSourceCursor::create(
- collection, std::move(innerExec), expCtx, DocumentSourceCursor::CursorType::kRegular);
- auto pipeline = Pipeline::create({cursorSource}, expCtx);
+ collection, std::move(innerExec), _expCtx, DocumentSourceCursor::CursorType::kRegular);
+ auto pipeline = Pipeline::create({cursorSource}, _expCtx);
// Create the output PlanExecutor that pulls results from the pipeline.
auto ws = std::make_unique<WorkingSet>();
- auto proxy = std::make_unique<PipelineProxyStage>(&_opCtx, std::move(pipeline), ws.get());
+ auto proxy = std::make_unique<PipelineProxyStage>(_expCtx.get(), std::move(pipeline), ws.get());
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(proxy), collection, PlanExecutor::NO_YIELD);
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index f06e8fb3589..9750b9af706 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -167,6 +167,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss());
+
private:
DBDirectClient _client;
};
@@ -199,18 +202,18 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20.
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// 'ah' reads the first child into its hash table: foo=20, foo=19, ..., foo=0
// in that order. Read half of them.
@@ -286,19 +289,19 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20 (descending).
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar <= 19 (descending).
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 19);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// First call to work reads the first result from the children. The first result for the
// first scan over foo is {foo: 20, bar: 20, baz: 20}. The first result for the second scan
@@ -366,19 +369,19 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
// foo == 10, 11, 12, 13, 14, 15. 16, 17, 18, 19, 20
@@ -415,19 +418,19 @@ public:
// before hashed AND is done reading the first child (stage has to
// hold 21 keys in buffer for Foo <= 20).
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws, 20 * big.size());
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws, 20 * big.size());
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1 << "big" << 1), coll));
params.bounds.startKey = BSON("" << 20 << "" << big);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Stage execution should fail.
ASSERT_EQUALS(-1, countResults(ah.get()));
@@ -462,19 +465,19 @@ public:
// keys in last child's index are not buffered. There are 6 keys
// that satisfy the criteria Foo <= 20 and Bar >= 10 and 5 <= baz <= 15.
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws, 5 * big.size());
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws, 5 * big.size());
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1 << "big" << 1), coll));
params.bounds.startKey = BSON("" << 10 << "" << big);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
// foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20.
@@ -504,24 +507,24 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// 5 <= baz <= 15
params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll));
params.bounds.startKey = BSON("" << 5);
params.bounds.endKey = BSON("" << 15);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are:
// foo == 10, 11, 12, 13, 14, 15.
@@ -562,24 +565,24 @@ public:
// before hashed AND is done reading the second child (stage has to
// hold 11 keys in buffer for Foo <= 20 and Bar >= 10).
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws, 10 * big.size());
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws, 10 * big.size());
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1 << "big" << 1), coll));
params.bounds.startKey = BSON("" << 10 << "" << big);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// 5 <= baz <= 15
params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll));
params.bounds.startKey = BSON("" << 5);
params.bounds.endKey = BSON("" << 15);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Stage execution should fail.
ASSERT_EQUALS(-1, countResults(ah.get()));
@@ -607,19 +610,19 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar == 5. Index scan should be eof.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 5);
params.bounds.endKey = BSON("" << 5);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
int count = 0;
int works = 0;
@@ -664,12 +667,12 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo >= 100
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 100);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar <= 100
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
@@ -681,7 +684,7 @@ public:
<< "");
params.bounds.boundInclusion = BoundInclusion::kIncludeStartKeyOnly;
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -711,24 +714,24 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- auto firstScan = std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr);
+ auto firstScan = std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr);
// First child of the AND_HASH stage is a Fetch. The NULL in the
// constructor means there is no filter.
auto fetch =
- std::make_unique<FetchStage>(&_opCtx, &ws, std::move(firstScan), nullptr, coll);
+ std::make_unique<FetchStage>(_expCtx.get(), &ws, std::move(firstScan), nullptr, coll);
ah->addChild(std::move(fetch));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Check that the AndHash stage returns docs {foo: 10, bar: 10}
// through {foo: 20, bar: 20}.
@@ -764,23 +767,23 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Foo <= 20
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.direction = -1;
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar >= 10
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 10);
- auto secondScan = std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr);
+ auto secondScan = std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr);
// Second child of the AND_HASH stage is a Fetch. The NULL in the
// constructor means there is no filter.
auto fetch =
- std::make_unique<FetchStage>(&_opCtx, &ws, std::move(secondScan), nullptr, coll);
+ std::make_unique<FetchStage>(_expCtx.get(), &ws, std::move(secondScan), nullptr, coll);
ah->addChild(std::move(fetch));
// Check that the AndHash stage returns docs {foo: 10, bar: 10}
@@ -813,9 +816,9 @@ public:
// Child2: NEED_TIME, FAILURE
{
WorkingSet ws;
- const auto andHashStage = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ const auto andHashStage = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
- auto childStage1 = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto childStage1 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -825,7 +828,7 @@ public:
childStage1->pushBack(id);
}
- auto childStage2 = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto childStage2 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
childStage2->pushBack(PlanStage::NEED_TIME);
childStage2->pushBack(PlanStage::FAILURE);
@@ -846,9 +849,9 @@ public:
// Child2: Data
{
WorkingSet ws;
- const auto andHashStage = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ const auto andHashStage = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
- auto childStage1 = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto childStage1 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
@@ -860,7 +863,7 @@ public:
}
childStage1->pushBack(PlanStage::FAILURE);
- auto childStage2 = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto childStage2 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -887,9 +890,9 @@ public:
// Child2: Data, FAILURE
{
WorkingSet ws;
- const auto andHashStage = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ const auto andHashStage = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
- auto childStage1 = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto childStage1 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -899,7 +902,7 @@ public:
childStage1->pushBack(id);
}
- auto childStage2 = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto childStage2 = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
{
WorkingSetID id = ws.allocate();
WorkingSetMember* wsm = ws.get(id);
@@ -951,19 +954,19 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndSortedStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndSortedStage>(_expCtx.get(), &ws);
// Scan over foo == 1.
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Scan over bar == 1.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Get the set of RecordIds in our collection to use later.
set<RecordId> data;
@@ -1068,25 +1071,25 @@ public:
addIndex(BSON("baz" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndSortedStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndSortedStage>(_expCtx.get(), &ws);
// Scan over foo == 1
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// bar == 1
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// baz == 1
params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
ASSERT_EQUALS(50, countResults(ah.get()));
}
@@ -1113,19 +1116,19 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndSortedStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndSortedStage>(_expCtx.get(), &ws);
// Foo == 7. Should be EOF.
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 7);
params.bounds.endKey = BSON("" << 7);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Bar == 20, not EOF.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.bounds.endKey = BSON("" << 20);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -1156,19 +1159,19 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndSortedStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndSortedStage>(_expCtx.get(), &ws);
// foo == 7.
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 7);
params.bounds.endKey = BSON("" << 7);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// bar == 20.
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 20);
params.bounds.endKey = BSON("" << 20);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -1195,19 +1198,19 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- auto ah = std::make_unique<AndHashStage>(&_opCtx, &ws);
+ auto ah = std::make_unique<AndHashStage>(_expCtx.get(), &ws);
// Scan over foo == 1
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// Intersect with 7 <= bar < 10000
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 7);
params.bounds.endKey = BSON("" << 10000);
- ah->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ah->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
WorkingSetID lastId = WorkingSet::INVALID_ID;
@@ -1257,25 +1260,25 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> as = std::make_unique<AndSortedStage>(&_opCtx, &ws);
+ unique_ptr<AndSortedStage> as = std::make_unique<AndSortedStage>(_expCtx.get(), &ws);
// Scan over foo == 1
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- auto firstScan = std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr);
+ auto firstScan = std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr);
// First child of the AND_SORTED stage is a Fetch. The NULL in the
// constructor means there is no filter.
auto fetch =
- std::make_unique<FetchStage>(&_opCtx, &ws, std::move(firstScan), nullptr, coll);
+ std::make_unique<FetchStage>(_expCtx.get(), &ws, std::move(firstScan), nullptr, coll);
as->addChild(std::move(fetch));
// bar == 1
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- as->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ as->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
for (int i = 0; i < 50; i++) {
BSONObj obj = getNext(as.get(), &ws);
@@ -1310,24 +1313,24 @@ public:
addIndex(BSON("bar" << 1));
WorkingSet ws;
- unique_ptr<AndSortedStage> as = std::make_unique<AndSortedStage>(&_opCtx, &ws);
+ unique_ptr<AndSortedStage> as = std::make_unique<AndSortedStage>(_expCtx.get(), &ws);
// Scan over foo == 1
auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- as->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ as->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
// bar == 1
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll));
params.bounds.startKey = BSON("" << 1);
params.bounds.endKey = BSON("" << 1);
- auto secondScan = std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr);
+ auto secondScan = std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr);
// Second child of the AND_SORTED stage is a Fetch. The NULL in the
// constructor means there is no filter.
auto fetch =
- std::make_unique<FetchStage>(&_opCtx, &ws, std::move(secondScan), nullptr, coll);
+ std::make_unique<FetchStage>(_expCtx.get(), &ws, std::move(secondScan), nullptr, coll);
as->addChild(std::move(fetch));
for (int i = 0; i < 50; i++) {
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index fd3b508fc37..3811a73e3d0 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -152,13 +152,18 @@ public:
const size_t decisionWorks = 10;
const size_t mockWorks =
1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks);
- auto mockChild = std::make_unique<QueuedDataStage>(&_opCtx, &_ws);
+ auto mockChild = std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws);
for (size_t i = 0; i < mockWorks; i++) {
mockChild->pushBack(PlanStage::NEED_TIME);
}
- CachedPlanStage cachedPlanStage(
- &_opCtx, collection, &_ws, cq, plannerParams, decisionWorks, std::move(mockChild));
+ CachedPlanStage cachedPlanStage(_expCtx.get(),
+ collection,
+ &_ws,
+ cq,
+ plannerParams,
+ decisionWorks,
+ std::move(mockChild));
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
@@ -171,6 +176,9 @@ protected:
OperationContext& _opCtx = *_opCtxPtr;
WorkingSet _ws;
DBDirectClient _client{&_opCtx};
+
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss);
};
/**
@@ -199,13 +207,18 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailure) {
fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
// Queued data stage will return a failure during the cached plan trial period.
- auto mockChild = std::make_unique<QueuedDataStage>(&_opCtx, &_ws);
+ auto mockChild = std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws);
mockChild->pushBack(PlanStage::FAILURE);
// High enough so that we shouldn't trigger a replan based on works.
const size_t decisionWorks = 50;
- CachedPlanStage cachedPlanStage(
- &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, std::move(mockChild));
+ CachedPlanStage cachedPlanStage(_expCtx.get(),
+ collection,
+ &_ws,
+ cq.get(),
+ plannerParams,
+ decisionWorks,
+ std::move(mockChild));
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
@@ -249,13 +262,18 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanHitMaxWorks) {
const size_t decisionWorks = 10;
const size_t mockWorks =
1U + static_cast<size_t>(internalQueryCacheEvictionRatio * decisionWorks);
- auto mockChild = std::make_unique<QueuedDataStage>(&_opCtx, &_ws);
+ auto mockChild = std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws);
for (size_t i = 0; i < mockWorks; i++) {
mockChild->pushBack(PlanStage::NEED_TIME);
}
- CachedPlanStage cachedPlanStage(
- &_opCtx, collection, &_ws, cq.get(), plannerParams, decisionWorks, std::move(mockChild));
+ CachedPlanStage cachedPlanStage(_expCtx.get(),
+ collection,
+ &_ws,
+ cq.get(),
+ plannerParams,
+ decisionWorks,
+ std::move(mockChild));
// This should succeed after triggering a replan.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
@@ -453,13 +471,13 @@ TEST_F(QueryStageCachedPlan, ThrowsOnYieldRecoveryWhenIndexIsDroppedBeforePlanSe
fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
const size_t decisionWorks = 10;
- CachedPlanStage cachedPlanStage(&_opCtx,
+ CachedPlanStage cachedPlanStage(_expCtx.get(),
collection,
&_ws,
cq.get(),
plannerParams,
decisionWorks,
- std::make_unique<QueuedDataStage>(&_opCtx, &_ws));
+ std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws));
// Drop an index while the CachedPlanStage is in a saved state. Restoring should fail, since we
// may still need the dropped index for plan selection.
@@ -495,13 +513,13 @@ TEST_F(QueryStageCachedPlan, DoesNotThrowOnYieldRecoveryWhenIndexIsDroppedAferPl
fillOutPlannerParams(&_opCtx, collection, cq.get(), &plannerParams);
const size_t decisionWorks = 10;
- CachedPlanStage cachedPlanStage(&_opCtx,
+ CachedPlanStage cachedPlanStage(_expCtx.get(),
collection,
&_ws,
cq.get(),
plannerParams,
decisionWorks,
- std::make_unique<QueuedDataStage>(&_opCtx, &_ws));
+ std::make_unique<QueuedDataStage>(_expCtx.get(), &_ws));
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL,
_opCtx.getServiceContext()->getFastClockSource());
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 5eb79bb6e80..9040569a58c 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -94,18 +94,15 @@ public:
params.tailable = false;
// Make the filter.
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(&_opCtx, collator, nss));
StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(filterObj, expCtx);
+ MatchExpressionParser::parse(filterObj, _expCtx);
verify(statusWithMatcher.isOK());
unique_ptr<MatchExpression> filterExpr = std::move(statusWithMatcher.getValue());
// Make a scan and have the runner own it.
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
- &_opCtx, collection, params, ws.get(), filterExpr.get());
+ _expCtx.get(), collection, params, ws.get(), filterExpr.get());
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(ps), collection, PlanExecutor::NO_YIELD);
@@ -132,7 +129,7 @@ public:
params.tailable = false;
unique_ptr<CollectionScan> scan(
- new CollectionScan(&_opCtx, collection, params, &ws, nullptr));
+ new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -152,6 +149,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss);
+
private:
DBDirectClient _client;
};
@@ -192,7 +192,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderForward) {
// Make a scan and have the runner own it.
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<PlanStage> ps =
- std::make_unique<CollectionScan>(&_opCtx, collection, params, ws.get(), nullptr);
+ std::make_unique<CollectionScan>(_expCtx.get(), collection, params, ws.get(), nullptr);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(ps), collection, PlanExecutor::NO_YIELD);
@@ -221,7 +221,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderBackward) {
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<PlanStage> ps =
- std::make_unique<CollectionScan>(&_opCtx, collection, params, ws.get(), nullptr);
+ std::make_unique<CollectionScan>(_expCtx.get(), collection, params, ws.get(), nullptr);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(ps), collection, PlanExecutor::NO_YIELD);
@@ -255,7 +255,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObject) {
params.tailable = false;
WorkingSet ws;
- unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, nullptr));
+ unique_ptr<PlanStage> scan(new CollectionScan(_expCtx.get(), coll, params, &ws, nullptr));
int count = 0;
while (count < 10) {
@@ -308,7 +308,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObjectBackw
params.tailable = false;
WorkingSet ws;
- unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, nullptr));
+ unique_ptr<PlanStage> scan(new CollectionScan(_expCtx.get(), coll, params, &ws, nullptr));
int count = 0;
while (count < 10) {
@@ -368,7 +368,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekSuc
// Create plan stage.
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<PlanStage> ps =
- std::make_unique<CollectionScan>(&_opCtx, collection, params, ws.get(), nullptr);
+ std::make_unique<CollectionScan>(_expCtx.get(), collection, params, ws.get(), nullptr);
WorkingSetID id = WorkingSet::INVALID_ID;
@@ -417,7 +417,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekFai
// Create plan stage.
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<PlanStage> ps =
- std::make_unique<CollectionScan>(&_opCtx, coll, params, ws.get(), nullptr);
+ std::make_unique<CollectionScan>(_expCtx.get(), coll, params, ws.get(), nullptr);
WorkingSetID id = WorkingSet::INVALID_ID;
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 0c62e6636f4..304297724be 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -60,6 +60,7 @@ public:
CountStageTest()
: _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X),
_ctx(&_opCtx, ns()),
+ _expCtx(make_intrusive<ExpressionContext>(&_opCtx, nullptr, kTestNss)),
_coll(nullptr) {}
virtual ~CountStageTest() {}
@@ -94,7 +95,8 @@ public:
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
- unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, _coll, params, &ws, nullptr));
+ unique_ptr<CollectionScan> scan(
+ new CollectionScan(_expCtx.get(), _coll, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -146,11 +148,8 @@ public:
unique_ptr<WorkingSet> ws(new WorkingSet);
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(&_opCtx, collator, kTestNss));
StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(request.getQuery(), expCtx);
+ MatchExpressionParser::parse(request.getQuery(), _expCtx);
ASSERT(statusWithMatcher.isOK());
unique_ptr<MatchExpression> expression = std::move(statusWithMatcher.getValue());
@@ -161,7 +160,7 @@ public:
scan = createCollScan(expression.get(), ws.get());
}
- CountStage countStage(&_opCtx,
+ CountStage countStage(_expCtx.get(),
_coll,
request.getLimit().value_or(0),
request.getSkip().value_or(0),
@@ -216,14 +215,14 @@ public:
params.direction = 1;
// This child stage gets owned and freed by its parent CountStage
- return new IndexScan(&_opCtx, params, ws, expr);
+ return new IndexScan(_expCtx.get(), params, ws, expr);
}
CollectionScan* createCollScan(MatchExpression* expr, WorkingSet* ws) {
CollectionScanParams params;
// This child stage gets owned and freed by its parent CountStage
- return new CollectionScan(&_opCtx, _coll, params, ws, expr);
+ return new CollectionScan(_expCtx.get(), _coll, params, ws, expr);
}
static const char* ns() {
@@ -240,6 +239,7 @@ protected:
OperationContext& _opCtx = *_opCtxPtr;
Lock::DBLock _dbLock;
OldClientContext _ctx;
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
Collection* _coll;
};
diff --git a/src/mongo/dbtests/query_stage_count_scan.cpp b/src/mongo/dbtests/query_stage_count_scan.cpp
index 913baf62ba9..d89316948fc 100644
--- a/src/mongo/dbtests/query_stage_count_scan.cpp
+++ b/src/mongo/dbtests/query_stage_count_scan.cpp
@@ -110,6 +110,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, NamespaceString(ns()));
+
private:
DBDirectClient _client;
};
@@ -138,7 +141,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(2, numCounted);
@@ -169,7 +172,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(5, numCounted);
@@ -200,7 +203,7 @@ public:
params.endKeyInclusive = false;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(3, numCounted);
@@ -227,7 +230,7 @@ public:
params.endKeyInclusive = false;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(0, numCounted);
@@ -255,7 +258,7 @@ public:
params.endKeyInclusive = false;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(0, numCounted);
@@ -284,7 +287,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(0, numCounted);
@@ -314,7 +317,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -366,7 +369,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -421,7 +424,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
WorkingSetID wsid;
int numCounted = 0;
@@ -483,7 +486,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(7, numCounted);
@@ -515,7 +518,7 @@ public:
params.endKeyInclusive = true; // yes?
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
int numCounted = runCount(&count);
ASSERT_EQUALS(9, numCounted);
@@ -544,7 +547,7 @@ public:
params.endKeyInclusive = true;
WorkingSet ws;
- CountScan count(&_opCtx, params, &ws);
+ CountScan count(_expCtx.get(), params, &ws);
WorkingSetID wsid;
int numCounted = 0;
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index db316f5a15f..6ff9419dd17 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -91,7 +91,7 @@ public:
params.tailable = false;
unique_ptr<CollectionScan> scan(
- new CollectionScan(&_opCtx, collection, params, &ws, nullptr));
+ new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -119,6 +119,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss);
+
private:
DBDirectClient _client;
};
@@ -147,11 +150,12 @@ public:
deleteStageParams->isMulti = true;
WorkingSet ws;
- DeleteStage deleteStage(&_opCtx,
- std::move(deleteStageParams),
- &ws,
- coll,
- new CollectionScan(&_opCtx, coll, collScanParams, &ws, nullptr));
+ DeleteStage deleteStage(
+ _expCtx.get(),
+ std::move(deleteStageParams),
+ &ws,
+ coll,
+ new CollectionScan(_expCtx.get(), coll, collScanParams, &ws, nullptr));
const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage.getSpecificStats());
@@ -203,7 +207,7 @@ public:
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
- auto qds = std::make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto qds = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
@@ -218,7 +222,7 @@ public:
deleteParams->canonicalQuery = cq.get();
const auto deleteStage = std::make_unique<DeleteStage>(
- &_opCtx, std::move(deleteParams), ws.get(), coll, qds.release());
+ _expCtx.get(), std::move(deleteParams), ws.get(), coll, qds.release());
const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage->getSpecificStats());
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index fdfa2c06469..2dd15cd8f6c 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -54,7 +54,8 @@ static const NamespaceString nss{"unittests.QueryStageDistinct"};
class DistinctBase {
public:
- DistinctBase() : _client(&_opCtx) {}
+ DistinctBase()
+ : _expCtx(make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss)), _client(&_opCtx) {}
virtual ~DistinctBase() {
_client.dropCollection(nss.ns());
@@ -98,6 +99,7 @@ public:
protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
private:
DBDirectClient _client;
@@ -142,7 +144,7 @@ public:
params.bounds.fields.push_back(oil);
WorkingSet ws;
- DistinctScan distinct(&_opCtx, std::move(params), &ws);
+ DistinctScan distinct(_expCtx.get(), std::move(params), &ws);
WorkingSetID wsid;
// Get our first result.
@@ -210,7 +212,7 @@ public:
params.bounds.fields.push_back(oil);
WorkingSet ws;
- DistinctScan distinct(&_opCtx, std::move(params), &ws);
+ DistinctScan distinct(_expCtx.get(), std::move(params), &ws);
// We should see each number in the range [1, 6] exactly once.
std::set<int> seen;
@@ -279,7 +281,7 @@ public:
params.bounds.fields.push_back(bOil);
WorkingSet ws;
- DistinctScan distinct(&_opCtx, std::move(params), &ws);
+ DistinctScan distinct(_expCtx.get(), std::move(params), &ws);
WorkingSetID wsid;
PlanStage::StageState state;
diff --git a/src/mongo/dbtests/query_stage_ensure_sorted.cpp b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
index e956472d40d..cccca5ac8d4 100644
--- a/src/mongo/dbtests/query_stage_ensure_sorted.cpp
+++ b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
@@ -59,11 +59,15 @@ public:
void testWork(const char* patternStr,
const char* inputStr,
const char* expectedStr,
- CollatorInterface* collator = nullptr) {
+ std::unique_ptr<CollatorInterface> collator = nullptr) {
auto opCtx = _serviceContext.makeOperationContext();
+ // Create a mock ExpressionContext.
+ boost::intrusive_ptr<ExpressionContext> expCtx(
+ make_intrusive<ExpressionContext>(opCtx.get(), std::move(collator), kTestNss));
+
WorkingSet ws;
- auto queuedDataStage = std::make_unique<QueuedDataStage>(opCtx.get(), &ws);
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(expCtx.get(), &ws);
BSONObj inputObj = fromjson(inputStr);
BSONElement inputElt = inputObj["input"];
ASSERT(inputElt.isABSONObj());
@@ -80,16 +84,11 @@ public:
queuedDataStage->pushBack(id);
}
- // Create a mock ExpressionContext.
- boost::intrusive_ptr<ExpressionContext> pExpCtx(
- new ExpressionContext(opCtx.get(), collator, kTestNss));
- pExpCtx->setCollator(collator);
-
// Initialization.
BSONObj pattern = fromjson(patternStr);
auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>(
- pExpCtx, std::move(queuedDataStage), &ws, pattern);
- EnsureSortedStage ess(opCtx.get(), pattern, &ws, std::move(sortKeyGen));
+ expCtx.get(), std::move(queuedDataStage), &ws, pattern);
+ EnsureSortedStage ess(expCtx.get(), pattern, &ws, std::move(sortKeyGen));
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
@@ -127,10 +126,10 @@ TEST_F(QueryStageEnsureSortedTest, EnsureSortedEmptyWorkingSet) {
new ExpressionContext(opCtx.get(), nullptr, kTestNss));
WorkingSet ws;
- auto queuedDataStage = std::make_unique<QueuedDataStage>(opCtx.get(), &ws);
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(pExpCtx.get(), &ws);
auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>(
pExpCtx, std::move(queuedDataStage), &ws, BSONObj());
- EnsureSortedStage ess(opCtx.get(), BSONObj(), &ws, std::move(sortKeyGen));
+ EnsureSortedStage ess(pExpCtx.get(), BSONObj(), &ws, std::move(sortKeyGen));
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
@@ -184,8 +183,12 @@ TEST_F(QueryStageEnsureSortedTest, EnsureSortedStringsNullCollator) {
}
TEST_F(QueryStageEnsureSortedTest, EnsureSortedStringsCollator) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- testWork("{a: 1}", "{input: [{a: 'abc'}, {a: 'cba'}]}", "{output: [{a: 'abc'}]}", &collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
+ testWork("{a: 1}",
+ "{input: [{a: 'abc'}, {a: 'cba'}]}",
+ "{output: [{a: 'abc'}]}",
+ std::move(collator));
}
} // namespace
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index 53b4e1e0646..163621ce89e 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -88,6 +88,9 @@ protected:
const ServiceContext::UniqueOperationContext _opCtxPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_opCtxPtr;
DBDirectClient _client;
+
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss());
};
@@ -116,7 +119,7 @@ public:
ASSERT_EQUALS(size_t(1), recordIds.size());
// Create a mock stage that returns the WSM.
- auto mockStage = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto mockStage = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
// Mock data.
{
@@ -140,7 +143,7 @@ public:
}
auto fetchStage =
- std::make_unique<FetchStage>(&_opCtx, &ws, std::move(mockStage), nullptr, coll);
+ std::make_unique<FetchStage>(_expCtx.get(), &ws, std::move(mockStage), nullptr, coll);
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state;
@@ -183,7 +186,7 @@ public:
ASSERT_EQUALS(size_t(1), recordIds.size());
// Create a mock stage that returns the WSM.
- auto mockStage = std::make_unique<QueuedDataStage>(&_opCtx, &ws);
+ auto mockStage = std::make_unique<QueuedDataStage>(_expCtx.get(), &ws);
// Mock data.
{
@@ -200,17 +203,14 @@ public:
// Make the filter.
BSONObj filterObj = BSON("foo" << 6);
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(&_opCtx, collator, nss()));
StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(filterObj, expCtx);
+ MatchExpressionParser::parse(filterObj, _expCtx);
verify(statusWithMatcher.isOK());
unique_ptr<MatchExpression> filterExpr = std::move(statusWithMatcher.getValue());
// Matcher requires that foo==6 but we only have data with foo==5.
auto fetchStage = std::make_unique<FetchStage>(
- &_opCtx, &ws, std::move(mockStage), filterExpr.get(), coll);
+ _expCtx.get(), &ws, std::move(mockStage), filterExpr.get(), coll);
// First call should return a fetch request as it's not in memory.
WorkingSetID id = WorkingSet::INVALID_ID;
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index e9ac3c744d8..c7f3d0aa5fd 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -49,7 +49,8 @@ public:
IndexScanTest()
: _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X),
_ctx(&_opCtx, ns()),
- _coll(nullptr) {}
+ _coll(nullptr),
+ _expCtx(make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss())) {}
virtual ~IndexScanTest() {}
@@ -110,7 +111,7 @@ public:
// This child stage gets owned and freed by the caller.
MatchExpression* filter = nullptr;
- return new IndexScan(&_opCtx, params, &_ws, filter);
+ return new IndexScan(_expCtx.get(), params, &_ws, filter);
}
IndexScan* createIndexScan(BSONObj startKey,
@@ -134,7 +135,7 @@ public:
params.bounds.fields.push_back(oil);
MatchExpression* filter = nullptr;
- return new IndexScan(&_opCtx, params, &_ws, filter);
+ return new IndexScan(_expCtx.get(), params, &_ws, filter);
}
static const char* ns() {
@@ -153,6 +154,8 @@ protected:
Collection* _coll;
WorkingSet _ws;
+
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
};
// SERVER-15958: Some IndexScanStats info must be initialized on construction of an IndexScan.
diff --git a/src/mongo/dbtests/query_stage_limit_skip.cpp b/src/mongo/dbtests/query_stage_limit_skip.cpp
index 7d5fba46ae1..41050b305dd 100644
--- a/src/mongo/dbtests/query_stage_limit_skip.cpp
+++ b/src/mongo/dbtests/query_stage_limit_skip.cpp
@@ -56,8 +56,9 @@ using std::unique_ptr;
static const int N = 50;
/* Populate a QueuedDataStage and return it. Caller owns it. */
-std::unique_ptr<QueuedDataStage> getMS(OperationContext* opCtx, WorkingSet* ws) {
- auto ms = std::make_unique<QueuedDataStage>(opCtx, ws);
+std::unique_ptr<QueuedDataStage> getMS(const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ WorkingSet* ws) {
+ auto ms = std::make_unique<QueuedDataStage>(expCtx.get(), ws);
// Put N ADVANCED results into the mock stage, and some other stalling results (YIELD/TIME).
for (int i = 0; i < N; ++i) {
@@ -94,15 +95,18 @@ int countResults(PlanStage* stage) {
class QueryStageLimitSkipBasicTest {
public:
void run() {
+ const boost::intrusive_ptr<ExpressionContext> expCtx(make_intrusive<ExpressionContext>(
+ _opCtx, std::unique_ptr<CollatorInterface>(nullptr), NamespaceString("test.dummyNS")));
+
for (int i = 0; i < 2 * N; ++i) {
WorkingSet ws;
unique_ptr<PlanStage> skip =
- std::make_unique<SkipStage>(_opCtx, i, &ws, getMS(_opCtx, &ws));
+ std::make_unique<SkipStage>(expCtx.get(), i, &ws, getMS(expCtx.get(), &ws));
ASSERT_EQUALS(max(0, N - i), countResults(skip.get()));
unique_ptr<PlanStage> limit =
- std::make_unique<LimitStage>(_opCtx, i, &ws, getMS(_opCtx, &ws));
+ std::make_unique<LimitStage>(expCtx.get(), i, &ws, getMS(expCtx.get(), &ws));
ASSERT_EQUALS(min(N, i), countResults(limit.get()));
}
}
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 78fdcfd0ead..b6b1fa9e87e 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -134,6 +134,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss());
+
private:
DBDirectClient _client;
};
@@ -170,18 +173,18 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -236,17 +239,17 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -301,17 +304,17 @@ public:
MergeSortStageParams msparams;
msparams.dedup = false;
msparams.pattern = BSON("c" << 1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -368,21 +371,21 @@ public:
// Sort by c:-1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << -1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
params.bounds.startKey = objWithMaxKey(1);
params.bounds.endKey = objWithMinKey(1);
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
params.bounds.startKey = objWithMaxKey(1);
params.bounds.endKey = objWithMinKey(1);
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -437,19 +440,19 @@ public:
// Sort by c:1
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:51 (EOF)
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
params.bounds.startKey = BSON("" << 51 << "" << MinKey);
params.bounds.endKey = BSON("" << 51 << "" << MaxKey);
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -488,7 +491,7 @@ public:
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
int numIndices = 20;
for (int i = 0; i < numIndices; ++i) {
@@ -499,10 +502,10 @@ public:
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
auto params = makeIndexScanParams(&_opCtx, getIndex(indexSpec, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
}
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -541,7 +544,7 @@ public:
// Sort by foo:1
MergeSortStageParams msparams;
msparams.pattern = BSON("foo" << 1);
- auto ms = make_unique<MergeSortStage>(&_opCtx, msparams, &ws);
+ auto ms = make_unique<MergeSortStage>(_expCtx.get(), msparams, &ws);
// Index 'a'+i has foo equal to 'i'.
@@ -554,7 +557,7 @@ public:
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
auto params = makeIndexScanParams(&_opCtx, getIndex(indexSpec, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr));
}
set<RecordId> recordIds;
@@ -672,7 +675,7 @@ public:
WorkingSetMember* member;
MergeSortStageParams msparams;
msparams.pattern = BSON("a" << 1);
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, &ws);
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, &ws);
// First child scans [5, 10].
{
@@ -680,9 +683,9 @@ public:
params.bounds.startKey = BSON("" << 5);
params.bounds.endKey = BSON("" << 10);
auto fetchStage = std::make_unique<FetchStage>(
- &_opCtx,
+ _expCtx.get(),
&ws,
- std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr),
+ std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr),
nullptr,
coll);
ms->addChild(std::move(fetchStage));
@@ -694,9 +697,9 @@ public:
params.bounds.startKey = BSON("" << 4);
params.bounds.endKey = BSON("" << 10);
auto fetchStage = std::make_unique<FetchStage>(
- &_opCtx,
+ _expCtx.get(),
&ws,
- std::make_unique<IndexScan>(&_opCtx, params, &ws, nullptr),
+ std::make_unique<IndexScan>(_expCtx.get(), params, &ws, nullptr),
nullptr,
coll);
ms->addChild(std::move(fetchStage));
@@ -780,17 +783,18 @@ public:
MergeSortStageParams msparams;
msparams.pattern = BSON("c" << 1 << "d" << 1);
msparams.collator = nullptr;
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
- auto fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ auto fetchStage =
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
@@ -848,18 +852,18 @@ public:
msparams.pattern = BSON("c" << 1 << "d" << 1);
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
msparams.collator = &collator;
- auto ms = std::make_unique<MergeSortStage>(&_opCtx, msparams, ws.get());
+ auto ms = std::make_unique<MergeSortStage>(_expCtx.get(), msparams, ws.get());
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(std::make_unique<IndexScan>(&_opCtx, params, ws.get(), nullptr));
+ ms->addChild(std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage =
- make_unique<FetchStage>(&_opCtx, ws.get(), std::move(ms), nullptr, coll);
+ make_unique<FetchStage>(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll);
// Must fetch if we want to easily pull out an obj.
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index a9b5a8375fe..38d44b1d8f2 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -118,6 +118,9 @@ protected:
const ServiceContext::UniqueOperationContext _opCtx = cc().makeOperationContext();
ClockSource* const _clock = _opCtx->getServiceContext()->getFastClockSource();
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(_opCtx.get(), nullptr, nss);
+
DBDirectClient _client;
};
@@ -133,30 +136,28 @@ std::unique_ptr<CanonicalQuery> makeCanonicalQuery(OperationContext* opCtx,
return cq;
}
-unique_ptr<PlanStage> getIxScanPlan(OperationContext* opCtx,
+unique_ptr<PlanStage> getIxScanPlan(ExpressionContext* expCtx,
const Collection* coll,
WorkingSet* sharedWs,
int desiredFooValue) {
std::vector<const IndexDescriptor*> indexes;
- coll->getIndexCatalog()->findIndexesByKeyPattern(opCtx, BSON("foo" << 1), false, &indexes);
+ coll->getIndexCatalog()->findIndexesByKeyPattern(
+ expCtx->opCtx, BSON("foo" << 1), false, &indexes);
ASSERT_EQ(indexes.size(), 1U);
- IndexScanParams ixparams(opCtx, indexes[0]);
+ IndexScanParams ixparams(expCtx->opCtx, indexes[0]);
ixparams.bounds.isSimpleRange = true;
ixparams.bounds.startKey = BSON("" << desiredFooValue);
ixparams.bounds.endKey = BSON("" << desiredFooValue);
ixparams.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys;
ixparams.direction = 1;
- auto ixscan = std::make_unique<IndexScan>(opCtx, ixparams, sharedWs, nullptr);
- return std::make_unique<FetchStage>(opCtx, sharedWs, std::move(ixscan), nullptr, coll);
+ auto ixscan = std::make_unique<IndexScan>(expCtx, ixparams, sharedWs, nullptr);
+ return std::make_unique<FetchStage>(expCtx, sharedWs, std::move(ixscan), nullptr, coll);
}
-unique_ptr<MatchExpression> makeMatchExpressionFromFilter(OperationContext* opCtx,
+unique_ptr<MatchExpression> makeMatchExpressionFromFilter(ExpressionContext* expCtx,
BSONObj filterObj) {
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator, nss));
StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(filterObj, expCtx);
ASSERT_OK(statusWithMatcher.getStatus());
unique_ptr<MatchExpression> filter = std::move(statusWithMatcher.getValue());
@@ -165,19 +166,19 @@ unique_ptr<MatchExpression> makeMatchExpressionFromFilter(OperationContext* opCt
}
-unique_ptr<PlanStage> getCollScanPlan(OperationContext* opCtx,
+unique_ptr<PlanStage> getCollScanPlan(ExpressionContext* expCtx,
const Collection* coll,
WorkingSet* sharedWs,
MatchExpression* matchExpr) {
CollectionScanParams csparams;
csparams.direction = CollectionScanParams::FORWARD;
- unique_ptr<PlanStage> root(new CollectionScan(opCtx, coll, csparams, sharedWs, matchExpr));
+ unique_ptr<PlanStage> root(new CollectionScan(expCtx, coll, csparams, sharedWs, matchExpr));
return root;
}
-std::unique_ptr<MultiPlanStage> runMultiPlanner(OperationContext* opCtx,
+std::unique_ptr<MultiPlanStage> runMultiPlanner(ExpressionContext* expCtx,
const NamespaceString& nss,
const Collection* coll,
int desiredFooValue) {
@@ -185,23 +186,24 @@ std::unique_ptr<MultiPlanStage> runMultiPlanner(OperationContext* opCtx,
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(opCtx, coll, sharedWs.get(), desiredFooValue);
+ unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(expCtx, coll, sharedWs.get(), desiredFooValue);
// Plan 1: CollScan.
BSONObj filterObj = BSON("foo" << desiredFooValue);
- unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(opCtx, filterObj);
- unique_ptr<PlanStage> collScanRoot = getCollScanPlan(opCtx, coll, sharedWs.get(), filter.get());
+ unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(expCtx, filterObj);
+ unique_ptr<PlanStage> collScanRoot =
+ getCollScanPlan(expCtx, coll, sharedWs.get(), filter.get());
// Hand the plans off to the MPS.
- auto cq = makeCanonicalQuery(opCtx, nss, BSON("foo" << desiredFooValue));
+ auto cq = makeCanonicalQuery(expCtx->opCtx, nss, BSON("foo" << desiredFooValue));
- unique_ptr<MultiPlanStage> mps = std::make_unique<MultiPlanStage>(opCtx, coll, cq.get());
+ unique_ptr<MultiPlanStage> mps = std::make_unique<MultiPlanStage>(expCtx, coll, cq.get());
mps->addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get());
mps->addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get());
// Plan 0 aka the first plan aka the index scan should be the best.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
- opCtx->getServiceContext()->getFastClockSource());
+ expCtx->opCtx->getServiceContext()->getFastClockSource());
ASSERT_OK(mps->pickBestPlan(&yieldPolicy));
ASSERT(mps->bestPlanChosen());
ASSERT_EQUALS(0, mps->bestPlanIdx());
@@ -231,19 +233,19 @@ TEST_F(QueryStageMultiPlanTest, MPSCollectionScanVsHighlySelectiveIXScan) {
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_opCtx.get(), coll, sharedWs.get(), 7);
+ unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_expCtx.get(), coll, sharedWs.get(), 7);
// Plan 1: CollScan with matcher.
BSONObj filterObj = BSON("foo" << 7);
- unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_opCtx.get(), filterObj);
+ unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_expCtx.get(), filterObj);
unique_ptr<PlanStage> collScanRoot =
- getCollScanPlan(_opCtx.get(), coll, sharedWs.get(), filter.get());
+ getCollScanPlan(_expCtx.get(), coll, sharedWs.get(), filter.get());
// Hand the plans off to the MPS.
auto cq = makeCanonicalQuery(_opCtx.get(), nss, filterObj);
unique_ptr<MultiPlanStage> mps =
- std::make_unique<MultiPlanStage>(_opCtx.get(), ctx.getCollection(), cq.get());
+ std::make_unique<MultiPlanStage>(_expCtx.get(), ctx.getCollection(), cq.get());
mps->addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get());
mps->addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get());
@@ -287,7 +289,7 @@ TEST_F(QueryStageMultiPlanTest, MPSDoesNotCreateActiveCacheEntryImmediately) {
const auto cq = makeCanonicalQuery(_opCtx.get(), nss, BSON("foo" << 7));
// Run an index scan and collection scan, searching for {foo: 7}.
- auto mps = runMultiPlanner(_opCtx.get(), nss, coll, 7);
+ auto mps = runMultiPlanner(_expCtx.get(), nss, coll, 7);
// Be sure that an inactive cache entry was added.
PlanCache* cache = CollectionQueryInfo::get(coll).getPlanCache();
@@ -299,7 +301,7 @@ TEST_F(QueryStageMultiPlanTest, MPSDoesNotCreateActiveCacheEntryImmediately) {
// Run the multi-planner again. The index scan will again win, but the number of works
// will be greater, since {foo: 5} appears more frequently in the collection.
- mps = runMultiPlanner(_opCtx.get(), nss, coll, 5);
+ mps = runMultiPlanner(_expCtx.get(), nss, coll, 5);
// The last plan run should have required far more works than the previous plan. This means
// that the 'works' in the cache entry should have doubled.
@@ -310,14 +312,14 @@ TEST_F(QueryStageMultiPlanTest, MPSDoesNotCreateActiveCacheEntryImmediately) {
// Run the exact same query again. This will still take more works than 'works', and
// should cause the cache entry's 'works' to be doubled again.
- mps = runMultiPlanner(_opCtx.get(), nss, coll, 5);
+ mps = runMultiPlanner(_expCtx.get(), nss, coll, 5);
ASSERT_EQ(cache->size(), 1U);
entry = assertGet(cache->getEntry(*cq));
ASSERT_FALSE(entry->isActive);
ASSERT_EQ(firstQueryWorks * 2 * 2, entry->works);
// Run the query yet again. This time, an active cache entry should be created.
- mps = runMultiPlanner(_opCtx.get(), nss, coll, 5);
+ mps = runMultiPlanner(_expCtx.get(), nss, coll, 5);
ASSERT_EQ(cache->size(), 1U);
entry = assertGet(cache->getEntry(*cq));
ASSERT_TRUE(entry->isActive);
@@ -342,14 +344,14 @@ TEST_F(QueryStageMultiPlanTest, MPSDoesCreatesActiveEntryWhenInactiveEntriesDisa
const auto cq = makeCanonicalQuery(_opCtx.get(), nss, BSON("foo" << 7));
// Run an index scan and collection scan, searching for {foo: 7}.
- auto mps = runMultiPlanner(_opCtx.get(), nss, coll, 7);
+ auto mps = runMultiPlanner(_expCtx.get(), nss, coll, 7);
// Be sure that an _active_ cache entry was added.
PlanCache* cache = CollectionQueryInfo::get(coll).getPlanCache();
ASSERT_EQ(cache->get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
// Run the multi-planner again. The entry should still be active.
- mps = runMultiPlanner(_opCtx.get(), nss, coll, 5);
+ mps = runMultiPlanner(_expCtx.get(), nss, coll, 5);
ASSERT_EQ(cache->get(*cq).state, PlanCache::CacheEntryState::kPresentActive);
}
@@ -394,7 +396,7 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) {
ASSERT_EQUALS(solutions.size(), 3U);
// Fill out the MultiPlanStage.
- unique_ptr<MultiPlanStage> mps(new MultiPlanStage(_opCtx.get(), collection, cq.get()));
+ unique_ptr<MultiPlanStage> mps(new MultiPlanStage(_expCtx.get(), collection, cq.get()));
unique_ptr<WorkingSet> ws(new WorkingSet());
// Put each solution from the planner into the MPR.
for (size_t i = 0; i < solutions.size(); ++i) {
@@ -465,8 +467,8 @@ TEST_F(QueryStageMultiPlanTest, MPSExplainAllPlans) {
const int nDocs = 500;
auto ws = std::make_unique<WorkingSet>();
- auto firstPlan = std::make_unique<QueuedDataStage>(_opCtx.get(), ws.get());
- auto secondPlan = std::make_unique<QueuedDataStage>(_opCtx.get(), ws.get());
+ auto firstPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
+ auto secondPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
for (int i = 0; i < nDocs; ++i) {
addMember(firstPlan.get(), ws.get(), BSON("x" << 1));
@@ -482,7 +484,7 @@ TEST_F(QueryStageMultiPlanTest, MPSExplainAllPlans) {
qr->setFilter(BSON("x" << 1));
auto cq = uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(qr)));
unique_ptr<MultiPlanStage> mps =
- std::make_unique<MultiPlanStage>(_opCtx.get(), ctx.getCollection(), cq.get());
+ std::make_unique<MultiPlanStage>(_expCtx.get(), ctx.getCollection(), cq.get());
// Put each plan into the MultiPlanStage. Takes ownership of 'firstPlan' and 'secondPlan'.
mps->addPlan(std::make_unique<QuerySolution>(), std::move(firstPlan), ws.get());
@@ -571,20 +573,20 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlannin
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_opCtx.get(), coll, sharedWs.get(), 7);
+ unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_expCtx.get(), coll, sharedWs.get(), 7);
// Make the filter.
BSONObj filterObj = BSON("foo" << 7);
- unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_opCtx.get(), filterObj);
+ unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_expCtx.get(), filterObj);
unique_ptr<PlanStage> collScanRoot =
- getCollScanPlan(_opCtx.get(), coll, sharedWs.get(), filter.get());
+ getCollScanPlan(_expCtx.get(), coll, sharedWs.get(), filter.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(filterObj);
auto canonicalQuery =
uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(queryRequest)));
- MultiPlanStage multiPlanStage(opCtx(),
+ MultiPlanStage multiPlanStage(_expCtx.get(),
ctx.getCollection(),
canonicalQuery.get(),
MultiPlanStage::CachingMode::NeverCache);
@@ -615,19 +617,19 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfKilledDuringPlanning) {
// Every call to work() returns something so this should clearly win (by current scoring
// at least).
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
- unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_opCtx.get(), coll, sharedWs.get(), 7);
+ unique_ptr<PlanStage> ixScanRoot = getIxScanPlan(_expCtx.get(), coll, sharedWs.get(), 7);
// Plan 1: CollScan.
BSONObj filterObj = BSON("foo" << 7);
- unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_opCtx.get(), filterObj);
+ unique_ptr<MatchExpression> filter = makeMatchExpressionFromFilter(_expCtx.get(), filterObj);
unique_ptr<PlanStage> collScanRoot =
- getCollScanPlan(_opCtx.get(), coll, sharedWs.get(), filter.get());
+ getCollScanPlan(_expCtx.get(), coll, sharedWs.get(), filter.get());
auto queryRequest = std::make_unique<QueryRequest>(nss);
queryRequest->setFilter(BSON("foo" << BSON("$gte" << 0)));
auto canonicalQuery =
uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(queryRequest)));
- MultiPlanStage multiPlanStage(opCtx(),
+ MultiPlanStage multiPlanStage(_expCtx.get(),
ctx.getCollection(),
canonicalQuery.get(),
MultiPlanStage::CachingMode::NeverCache);
@@ -649,7 +651,7 @@ protected:
}
public:
- ThrowyPlanStage(OperationContext* opCtx) : PlanStage("throwy", opCtx) {}
+ ThrowyPlanStage(ExpressionContext* expCtx) : PlanStage("throwy", expCtx) {}
bool isEOF() final {
return false;
}
@@ -673,15 +675,15 @@ TEST_F(QueryStageMultiPlanTest, AddsContextDuringException) {
<< "query"));
auto canonicalQuery =
uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(queryRequest)));
- MultiPlanStage multiPlanStage(opCtx(),
+ MultiPlanStage multiPlanStage(_expCtx.get(),
ctx.getCollection(),
canonicalQuery.get(),
MultiPlanStage::CachingMode::NeverCache);
unique_ptr<WorkingSet> sharedWs(new WorkingSet());
multiPlanStage.addPlan(
- createQuerySolution(), std::make_unique<ThrowyPlanStage>(opCtx()), sharedWs.get());
+ createQuerySolution(), std::make_unique<ThrowyPlanStage>(_expCtx.get()), sharedWs.get());
multiPlanStage.addPlan(
- createQuerySolution(), std::make_unique<ThrowyPlanStage>(opCtx()), sharedWs.get());
+ createQuerySolution(), std::make_unique<ThrowyPlanStage>(_expCtx.get()), sharedWs.get());
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_THROWS_WITH_CHECK(multiPlanStage.pickBestPlan(&yieldPolicy),
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index 77a9f0c81a1..938a98e2f88 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -60,6 +60,9 @@ const BSONObj kTestKeyPattern = BSON("testIndex" << 1);
class QueryStageNearTest : public unittest::Test {
public:
void setUp() override {
+ _expCtx =
+ make_intrusive<ExpressionContext>(_opCtx, nullptr, NamespaceString(kTestNamespace));
+
directClient.createCollection(kTestNamespace);
ASSERT_OK(dbtests::createIndex(_opCtx, kTestNamespace, kTestKeyPattern));
@@ -76,6 +79,8 @@ protected:
OperationContext* const _opCtx = _uniqOpCtx.get();
DBDirectClient directClient{_opCtx};
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
+
boost::optional<AutoGetCollectionForRead> _autoColl;
const IndexDescriptor* _mockGeoIndex;
};
@@ -95,11 +100,14 @@ public:
double max;
};
- MockNearStage(OperationContext* opCtx,
+ MockNearStage(const boost::intrusive_ptr<ExpressionContext>& expCtx,
WorkingSet* workingSet,
const IndexDescriptor* indexDescriptor)
- : NearStage(
- opCtx, "MOCK_DISTANCE_SEARCH_STAGE", STAGE_UNKNOWN, workingSet, indexDescriptor),
+ : NearStage(expCtx.get(),
+ "MOCK_DISTANCE_SEARCH_STAGE",
+ STAGE_UNKNOWN,
+ workingSet,
+ indexDescriptor),
_pos(0) {}
void addInterval(vector<BSONObj> data, double min, double max) {
@@ -116,7 +124,7 @@ public:
bool lastInterval = _pos == static_cast<int>(_intervals.size());
- auto queuedStage = std::make_unique<QueuedDataStage>(opCtx, workingSet);
+ auto queuedStage = std::make_unique<QueuedDataStage>(expCtx(), workingSet);
for (unsigned int i = 0; i < interval.data.size(); i++) {
// Add all documents from the lastInterval into the QueuedDataStage.
@@ -178,7 +186,7 @@ TEST_F(QueryStageNearTest, Basic) {
vector<BSONObj> mockData;
WorkingSet workingSet;
- MockNearStage nearStage(_opCtx, &workingSet, _mockGeoIndex);
+ MockNearStage nearStage(_expCtx.get(), &workingSet, _mockGeoIndex);
// First set of results
mockData.clear();
@@ -217,7 +225,7 @@ TEST_F(QueryStageNearTest, EmptyResults) {
auto* coll = autoColl.getCollection();
ASSERT(coll);
- MockNearStage nearStage(_opCtx, &workingSet, _mockGeoIndex);
+ MockNearStage nearStage(_expCtx.get(), &workingSet, _mockGeoIndex);
// Empty set of results
mockData.clear();
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 3e01a3c97f6..1d6833337bc 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -113,7 +113,7 @@ public:
Collection* coll) {
// Build the mock scan stage which feeds the data.
auto ws = std::make_unique<WorkingSet>();
- auto queuedDataStage = std::make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
insertVarietyOfObjects(ws.get(), queuedDataStage.get(), coll);
auto sortPattern = BSON("foo" << 1);
@@ -152,7 +152,7 @@ public:
*/
void sortAndCheck(int direction, Collection* coll) {
auto ws = std::make_unique<WorkingSet>();
- auto queuedDataStage = std::make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
// Insert a mix of the various types of data.
insertVarietyOfObjects(ws.get(), queuedDataStage.get(), coll);
@@ -169,8 +169,8 @@ public:
false, // addSortKeyMetadata
std::move(keyGenStage));
- auto fetchStage =
- std::make_unique<FetchStage>(&_opCtx, ws.get(), std::move(sortStage), nullptr, coll);
+ auto fetchStage = std::make_unique<FetchStage>(
+ _expCtx.get(), ws.get(), std::move(sortStage), nullptr, coll);
// Must fetch so we can look at the doc as a BSONObj.
auto statusWithPlanExecutor = PlanExecutor::make(
@@ -551,7 +551,7 @@ public:
}
auto ws = std::make_unique<WorkingSet>();
- auto queuedDataStage = std::make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
for (int i = 0; i < numObj(); ++i) {
{
@@ -585,8 +585,8 @@ public:
false, // addSortKeyMetadata
std::move(keyGenStage));
- auto fetchStage =
- std::make_unique<FetchStage>(&_opCtx, ws.get(), std::move(sortStage), nullptr, coll);
+ auto fetchStage = std::make_unique<FetchStage>(
+ _expCtx.get(), ws.get(), std::move(sortStage), nullptr, coll);
// We don't get results back since we're sorting some parallel arrays.
auto statusWithPlanExecutor = PlanExecutor::make(
diff --git a/src/mongo/dbtests/query_stage_sort_key_generator.cpp b/src/mongo/dbtests/query_stage_sort_key_generator.cpp
index 351ad1cca0c..b1a77f75be2 100644
--- a/src/mongo/dbtests/query_stage_sort_key_generator.cpp
+++ b/src/mongo/dbtests/query_stage_sort_key_generator.cpp
@@ -65,15 +65,17 @@ const NamespaceString kTestNss = NamespaceString("db.dummy");
* The 'collator' is used to specify the string comparison semantics that should be used when
* generating the sort key.
*/
-Value extractSortKey(const char* sortSpec, const char* doc, const CollatorInterface* collator) {
+Value extractSortKey(const char* sortSpec,
+ const char* doc,
+ std::unique_ptr<CollatorInterface> collator = nullptr) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
boost::intrusive_ptr<ExpressionContext> pExpCtx(
- new ExpressionContext(opCtx.get(), collator, kTestNss));
+ new ExpressionContext(opCtx.get(), std::move(collator), kTestNss));
WorkingSet workingSet;
- auto mockStage = std::make_unique<QueuedDataStage>(opCtx.get(), &workingSet);
+ auto mockStage = std::make_unique<QueuedDataStage>(pExpCtx.get(), &workingSet);
auto wsid = workingSet.allocate();
auto wsm = workingSet.get(wsid);
wsm->doc = {SnapshotId(), Document{fromjson(doc)}};
@@ -95,15 +97,15 @@ Value extractSortKey(const char* sortSpec, const char* doc, const CollatorInterf
*/
Value extractSortKeyCovered(const char* sortSpec,
const IndexKeyDatum& ikd,
- const CollatorInterface* collator) {
+ std::unique_ptr<CollatorInterface> collator = nullptr) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
boost::intrusive_ptr<ExpressionContext> pExpCtx(
- new ExpressionContext(opCtx.get(), collator, kTestNss));
+ new ExpressionContext(opCtx.get(), std::move(collator), kTestNss));
WorkingSet workingSet;
- auto mockStage = std::make_unique<QueuedDataStage>(opCtx.get(), &workingSet);
+ auto mockStage = std::make_unique<QueuedDataStage>(pExpCtx.get(), &workingSet);
auto wsid = workingSet.allocate();
auto wsm = workingSet.get(wsid);
wsm->keyData.push_back(ikd);
@@ -157,82 +159,76 @@ TEST(SortKeyGeneratorStageTest, SortKeyArray) {
}
TEST(SortKeyGeneratorStageTest, SortKeyCoveredNormal) {
- CollatorInterface* collator = nullptr;
Value actualOut = extractSortKeyCovered(
- "{a: 1}", IndexKeyDatum(BSON("a" << 1), BSON("" << 5), 0, SnapshotId{}), collator);
+ "{a: 1}", IndexKeyDatum(BSON("a" << 1), BSON("" << 5), 0, SnapshotId{}));
Value expectedOut({Value(5)});
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, SortKeyCoveredEmbedded) {
- CollatorInterface* collator = nullptr;
Value actualOut = extractSortKeyCovered(
"{'a.c': 1}",
- IndexKeyDatum(BSON("a.c" << 1 << "c" << 1), BSON("" << 5 << "" << 6), 0, SnapshotId{}),
- collator);
+ IndexKeyDatum(BSON("a.c" << 1 << "c" << 1), BSON("" << 5 << "" << 6), 0, SnapshotId{}));
Value expectedOut(5);
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, SortKeyCoveredCompound) {
- CollatorInterface* collator = nullptr;
Value actualOut = extractSortKeyCovered(
"{a: 1, c: 1}",
- IndexKeyDatum(BSON("a" << 1 << "c" << 1), BSON("" << 5 << "" << 6), 0, SnapshotId{}),
- collator);
+ IndexKeyDatum(BSON("a" << 1 << "c" << 1), BSON("" << 5 << "" << 6), 0, SnapshotId{}));
Value expectedOut(std::vector<Value>{Value(5), Value(6)});
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, SortKeyCoveredCompound2) {
- CollatorInterface* collator = nullptr;
Value actualOut = extractSortKeyCovered("{a: 1, b: 1}",
IndexKeyDatum(BSON("a" << 1 << "b" << 1 << "c" << 1),
BSON("" << 5 << "" << 6 << "" << 4),
0,
- SnapshotId{}),
- collator);
+ SnapshotId{}));
Value expectedOut(std::vector<Value>{Value(5), Value(6)});
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, SortKeyCoveredCompound3) {
- CollatorInterface* collator = nullptr;
Value actualOut =
extractSortKeyCovered("{b: 1, c: 1}",
IndexKeyDatum(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1),
BSON("" << 5 << "" << 6 << "" << 4 << "" << 9000),
0,
- SnapshotId{}),
- collator);
+ SnapshotId{}));
Value expectedOut(std::vector<Value>{Value(6), Value(4)});
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, ExtractStringSortKeyWithCollatorUsesComparisonKey) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
Value actualOut =
- extractSortKey("{a: 1}", "{_id: 0, z: 'thing1', a: 'thing2', b: 16}", &collator);
+ extractSortKey("{a: 1}", "{_id: 0, z: 'thing1', a: 'thing2', b: 16}", std::move(collator));
Value expectedOut = Value("2gniht"_sd);
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, CollatorHasNoEffectWhenExtractingNonStringSortKey) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- Value actualOut = extractSortKey("{a: 1}", "{_id: 0, z: 10, a: 6, b: 16}", &collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
+ Value actualOut = extractSortKey("{a: 1}", "{_id: 0, z: 10, a: 6, b: 16}", std::move(collator));
Value expectedOut = Value(6);
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorStageTest, CollatorAppliesWhenExtractingCoveredSortKeyString) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
Value actualOut = extractSortKeyCovered("{b: 1}",
IndexKeyDatum(BSON("a" << 1 << "b" << 1),
BSON("" << 4 << ""
<< "foo"),
0,
SnapshotId{}),
- &collator);
+ std::move(collator));
Value expectedOut = Value("oof"_sd);
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
@@ -244,9 +240,10 @@ TEST(SortKeyGeneratorStageTest, SortKeyGenerationForArraysChoosesCorrectKey) {
}
TEST(SortKeyGeneratorStageTest, EnsureSortKeyGenerationForArraysRespectsCollation) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
Value actualOut =
- extractSortKey("{a: 1}", "{_id: 0, a: ['aaz', 'zza', 'yya', 'zzb']}", &collator);
+ extractSortKey("{a: 1}", "{_id: 0, a: ['aaz', 'zza', 'yya', 'zzb']}", std::move(collator));
Value expectedOut("ayy"_sd);
ASSERT_VALUE_EQ(actualOut, expectedOut);
}
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index be533ba7142..28f657879a1 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -79,6 +79,10 @@ public:
return _opCtx.get();
}
+ ExpressionContext* expCtx() {
+ return _expCtx.get();
+ }
+
ServiceContext* serviceContext() {
return _opCtx->getServiceContext();
}
@@ -105,6 +109,8 @@ protected:
const ServiceContext::UniqueOperationContext _opCtx = cc().makeOperationContext();
ClockSource* _clock = _opCtx->getServiceContext()->getFastClockSource();
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(_opCtx.get(), nullptr, nss);
private:
DBDirectClient _client;
@@ -142,7 +148,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanGeo2dOr) {
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get()));
// Plan selection should succeed due to falling back on regular planning.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
@@ -181,7 +187,7 @@ void assertSubplanFromCache(QueryStageSubplanTest* test, const dbtests::WriteCon
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(test->opCtx(), collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(test->expCtx(), collection, &ws, plannerParams, cq.get()));
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
test->serviceContext()->getFastClockSource());
@@ -195,7 +201,7 @@ void assertSubplanFromCache(QueryStageSubplanTest* test, const dbtests::WriteCon
// If we repeat the same query, the plan for the first branch should have come from
// the cache.
ws.clear();
- subplan.reset(new SubplanStage(test->opCtx(), collection, &ws, plannerParams, cq.get()));
+ subplan.reset(new SubplanStage(test->expCtx(), collection, &ws, plannerParams, cq.get()));
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -262,7 +268,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheZeroResults) {
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get()));
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -276,7 +282,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheZeroResults) {
// from the cache (because the first call to pickBestPlan() refrained from creating any
// cache entries).
ws.clear();
- subplan.reset(new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));
+ subplan.reset(new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get()));
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -318,7 +324,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheTies) {
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get()));
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -332,7 +338,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheTies) {
// from the cache (because the first call to pickBestPlan() refrained from creating any
// cache entries).
ws.clear();
- subplan.reset(new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));
+ subplan.reset(new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get()));
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -490,7 +496,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanRootedOrNE) {
WorkingSet ws;
std::unique_ptr<SubplanStage> subplan(
- new SubplanStage(opCtx(), collection, &ws, plannerParams, cq.get()));
+ new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get()));
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
ASSERT_OK(subplan->pickBestPlan(&yieldPolicy));
@@ -536,7 +542,7 @@ TEST_F(QueryStageSubplanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlanning)
// Create the SubplanStage.
WorkingSet workingSet;
SubplanStage subplanStage(
- opCtx(), ctx.getCollection(), &workingSet, params, canonicalQuery.get());
+ _expCtx.get(), ctx.getCollection(), &workingSet, params, canonicalQuery.get());
AlwaysTimeOutYieldPolicy alwaysTimeOutPolicy(serviceContext()->getFastClockSource());
ASSERT_EQ(ErrorCodes::ExceededTimeLimit, subplanStage.pickBestPlan(&alwaysTimeOutPolicy));
@@ -561,7 +567,7 @@ TEST_F(QueryStageSubplanTest, ShouldReportErrorIfKilledDuringPlanning) {
// Create the SubplanStage.
WorkingSet workingSet;
SubplanStage subplanStage(
- opCtx(), ctx.getCollection(), &workingSet, params, canonicalQuery.get());
+ _expCtx.get(), ctx.getCollection(), &workingSet, params, canonicalQuery.get());
AlwaysPlanKilledYieldPolicy alwaysPlanKilledYieldPolicy(serviceContext()->getFastClockSource());
ASSERT_EQ(ErrorCodes::QueryPlanKilled, subplanStage.pickBestPlan(&alwaysPlanKilledYieldPolicy));
@@ -597,7 +603,7 @@ TEST_F(QueryStageSubplanTest, ShouldThrowOnRestoreIfIndexDroppedBeforePlanSelect
// Create the SubplanStage.
WorkingSet workingSet;
- SubplanStage subplanStage(opCtx(), collection, &workingSet, params, canonicalQuery.get());
+ SubplanStage subplanStage(_expCtx.get(), collection, &workingSet, params, canonicalQuery.get());
// Mimic a yield by saving the state of the subplan stage. Then, drop an index not being used
// while yielded.
@@ -641,7 +647,7 @@ TEST_F(QueryStageSubplanTest, ShouldNotThrowOnRestoreIfIndexDroppedAfterPlanSele
// Create the SubplanStage.
WorkingSet workingSet;
- SubplanStage subplanStage(opCtx(), collection, &workingSet, params, canonicalQuery.get());
+ SubplanStage subplanStage(_expCtx.get(), collection, &workingSet, params, canonicalQuery.get());
PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, serviceContext()->getFastClockSource());
ASSERT_OK(subplanStage.pickBestPlan(&yieldPolicy));
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index 43a6c482131..e86d851c7a4 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -83,17 +83,14 @@ public:
int countResults(const IndexScanParams& params, BSONObj filterObj = BSONObj()) {
AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(ns()));
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(&_opCtx, collator, NamespaceString(ns())));
StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(filterObj, expCtx);
+ MatchExpressionParser::parse(filterObj, _expCtx);
verify(statusWithMatcher.isOK());
unique_ptr<MatchExpression> filterExpr = std::move(statusWithMatcher.getValue());
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
unique_ptr<IndexScan> ix =
- std::make_unique<IndexScan>(&_opCtx, params, ws.get(), filterExpr.get());
+ std::make_unique<IndexScan>(_expCtx.get(), params, ws.get(), filterExpr.get());
auto statusWithPlanExecutor = PlanExecutor::make(
&_opCtx, std::move(ws), std::move(ix), ctx.getCollection(), PlanExecutor::NO_YIELD);
@@ -150,6 +147,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ new ExpressionContext(&_opCtx, nullptr, NamespaceString(ns()));
+
private:
DBDirectClient _client;
};
diff --git a/src/mongo/dbtests/query_stage_trial.cpp b/src/mongo/dbtests/query_stage_trial.cpp
index 9f3681f2c89..5b843c7d1ba 100644
--- a/src/mongo/dbtests/query_stage_trial.cpp
+++ b/src/mongo/dbtests/query_stage_trial.cpp
@@ -44,9 +44,13 @@ namespace mongo {
namespace {
+const NamespaceString kTestNss = NamespaceString("db.dummy");
+
class TrialStageTest : public unittest::Test {
public:
- TrialStageTest() : _opCtx(cc().makeOperationContext()) {}
+ TrialStageTest()
+ : _opCtx(cc().makeOperationContext()),
+ _expCtx(make_intrusive<ExpressionContext>(_opCtx.get(), nullptr, kTestNss)) {}
protected:
// Pushes BSONObjs from the given vector into the given QueuedDataStage. Each empty BSONObj in
@@ -98,11 +102,14 @@ protected:
private:
ServiceContext::UniqueOperationContext _opCtx;
WorkingSet _ws;
+
+protected:
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
};
TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialSucceeds) {
- auto trialPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
- auto backupPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
+ auto trialPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
+ auto backupPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
// Seed the trial plan with 20 results and no NEED_TIMEs.
std::vector<BSONObj> trialResults;
@@ -114,7 +121,7 @@ TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialSucceeds) {
// Set the minimum advanced-to-works ratio to 0.75. Because every work() will result in an
// ADVANCE, the trial plan will succeed.
auto trialStage = std::make_unique<TrialStage>(
- opCtx(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
+ _expCtx.get(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
ASSERT_OK(trialStage->pickBestPlan(yieldPolicy().get()));
@@ -131,8 +138,8 @@ TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialSucceeds) {
}
TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialPlanHitsEOF) {
- auto trialPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
- auto backupPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
+ auto trialPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
+ auto backupPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
// Seed the trial plan with 5 results and no NEED_TIMEs.
std::vector<BSONObj> trialResults;
@@ -144,7 +151,7 @@ TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialPlanHitsEOF) {
// We schedule the trial to run for 10 works. Because we hit EOF after 5 results, we will end
// the trial phase early and adopt the successful trial plan.
auto trialStage = std::make_unique<TrialStage>(
- opCtx(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
+ _expCtx.get(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
ASSERT_OK(trialStage->pickBestPlan(yieldPolicy().get()));
@@ -166,8 +173,8 @@ TEST_F(TrialStageTest, AdoptsTrialPlanIfTrialPlanHitsEOF) {
}
TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialDoesNotSucceed) {
- auto trialPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
- auto backupPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
+ auto trialPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
+ auto backupPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
// Seed the trial plan with 20 results. Every second result will produce a NEED_TIME.
std::vector<BSONObj> trialResults;
@@ -187,7 +194,7 @@ TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialDoesNotSucceed) {
// Set the minimum advanced-to-works ratio to 0.75. Because every second work() will result in a
// NEED_TIME and the actual ratio is thus 0.5, the trial plan will fail.
auto trialStage = std::make_unique<TrialStage>(
- opCtx(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
+ _expCtx.get(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
ASSERT_OK(trialStage->pickBestPlan(yieldPolicy().get()));
@@ -204,8 +211,8 @@ TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialDoesNotSucceed) {
}
TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialPlanDies) {
- auto trialPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
- auto backupPlan = std::make_unique<QueuedDataStage>(opCtx(), ws());
+ auto trialPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
+ auto backupPlan = std::make_unique<QueuedDataStage>(_expCtx.get(), ws());
// Seed the trial plan with 2 results followed by a PlanStage::FAILURE.
queueData({BSON("_id" << 0), BSON("_id" << 1)}, trialPlan.get());
@@ -222,7 +229,7 @@ TEST_F(TrialStageTest, AdoptsBackupPlanIfTrialPlanDies) {
// We schedule the trial to run for 10 works. Because we will encounter a PlanStage::FAILURE
// before this point, the trial will complete early and the backup plan will be adopted.
auto trialStage = std::make_unique<TrialStage>(
- opCtx(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
+ _expCtx.get(), ws(), std::move(trialPlan), std::move(backupPlan), 10, 0.75);
ASSERT_OK(trialStage->pickBestPlan(yieldPolicy().get()));
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index 98061f09523..a309f00db8d 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -133,7 +133,7 @@ public:
params.tailable = false;
unique_ptr<CollectionScan> scan(
- new CollectionScan(&_opCtx, collection, params, &ws, nullptr));
+ new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -155,7 +155,7 @@ public:
params.tailable = false;
unique_ptr<CollectionScan> scan(
- new CollectionScan(&_opCtx, collection, params, &ws, nullptr));
+ new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -185,6 +185,9 @@ protected:
const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext();
OperationContext& _opCtx = *_txnPtr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx =
+ make_intrusive<ExpressionContext>(&_opCtx, nullptr, nss);
+
private:
DBDirectClient _client;
};
@@ -200,8 +203,7 @@ public:
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
CurOp& curOp = *CurOp::get(_opCtx);
OpDebug* opDebug = &curOp.debug();
- const CollatorInterface* collator = nullptr;
- UpdateDriver driver(new ExpressionContext(&_opCtx, collator, nss));
+ UpdateDriver driver(_expCtx);
Collection* collection = ctx.getCollection();
ASSERT(collection);
@@ -230,10 +232,10 @@ public:
params.canonicalQuery = cq.get();
auto ws = make_unique<WorkingSet>();
- auto eofStage = make_unique<EOFStage>(&_opCtx);
+ auto eofStage = make_unique<EOFStage>(_expCtx.get());
- auto updateStage =
- make_unique<UpsertStage>(&_opCtx, params, ws.get(), collection, eofStage.release());
+ auto updateStage = make_unique<UpsertStage>(
+ _expCtx.get(), params, ws.get(), collection, eofStage.release());
runUpdate(updateStage.get());
}
@@ -271,8 +273,7 @@ public:
CurOp& curOp = *CurOp::get(_opCtx);
OpDebug* opDebug = &curOp.debug();
- const CollatorInterface* collator = nullptr;
- UpdateDriver driver(new ExpressionContext(&_opCtx, collator, nss));
+ UpdateDriver driver(_expCtx);
Collection* coll =
CollectionCatalog::get(&_opCtx).lookupCollectionByNamespace(&_opCtx, nss);
ASSERT(coll);
@@ -309,11 +310,11 @@ public:
updateParams.canonicalQuery = cq.get();
auto ws = make_unique<WorkingSet>();
- auto cs =
- make_unique<CollectionScan>(&_opCtx, coll, collScanParams, ws.get(), cq->root());
+ auto cs = make_unique<CollectionScan>(
+ _expCtx.get(), coll, collScanParams, ws.get(), cq->root());
auto updateStage =
- make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, cs.release());
+ make_unique<UpdateStage>(_expCtx.get(), updateParams, ws.get(), coll, cs.release());
const UpdateStats* stats =
static_cast<const UpdateStats*>(updateStage->getSpecificStats());
@@ -386,8 +387,7 @@ public:
Collection* coll = ctx.getCollection();
ASSERT(coll);
UpdateRequest request(nss);
- const CollatorInterface* collator = nullptr;
- UpdateDriver driver(new ExpressionContext(&_opCtx, collator, nss));
+ UpdateDriver driver(_expCtx);
const int targetDocIndex = 0; // We'll be working with the first doc in the collection.
const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex));
const auto ws = make_unique<WorkingSet>();
@@ -412,7 +412,7 @@ public:
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
- auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto qds = make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
@@ -426,7 +426,7 @@ public:
updateParams.canonicalQuery = cq.get();
const auto updateStage =
- make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, qds.release());
+ make_unique<UpdateStage>(_expCtx.get(), updateParams, ws.get(), coll, qds.release());
// Should return advanced.
id = WorkingSet::INVALID_ID;
@@ -478,8 +478,7 @@ public:
Collection* coll = ctx.getCollection();
ASSERT(coll);
UpdateRequest request(nss);
- const CollatorInterface* collator = nullptr;
- UpdateDriver driver(new ExpressionContext(&_opCtx, collator, nss));
+ UpdateDriver driver(_expCtx);
const int targetDocIndex = 10;
const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex));
const auto ws = make_unique<WorkingSet>();
@@ -504,7 +503,7 @@ public:
// Configure a QueuedDataStage to pass the first object in the collection back in a
// RID_AND_OBJ state.
- auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());
+ auto qds = make_unique<QueuedDataStage>(_expCtx.get(), ws.get());
WorkingSetID id = ws->allocate();
WorkingSetMember* member = ws->get(id);
member->recordId = recordIds[targetDocIndex];
@@ -518,7 +517,7 @@ public:
updateParams.canonicalQuery = cq.get();
auto updateStage =
- make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, qds.release());
+ make_unique<UpdateStage>(_expCtx.get(), updateParams, ws.get(), coll, qds.release());
// Should return advanced.
id = WorkingSet::INVALID_ID;
diff --git a/src/mongo/embedded/stitch_support/stitch_support.cpp b/src/mongo/embedded/stitch_support/stitch_support.cpp
index 53010105fca..934f9185735 100644
--- a/src/mongo/embedded/stitch_support/stitch_support.cpp
+++ b/src/mongo/embedded/stitch_support/stitch_support.cpp
@@ -172,13 +172,14 @@ struct stitch_support_v1_matcher {
stitch_support_v1_collator* collator)
: client(std::move(client)),
opCtx(this->client->makeOperationContext()),
- matcher(filterBSON.getOwned(),
- new mongo::ExpressionContext(opCtx.get(),
- collator ? collator->collator.get() : nullptr,
- mongo::kDummyNamespaceStr)){};
+ expCtx(new mongo::ExpressionContext(opCtx.get(),
+ collator ? collator->collator->clone() : nullptr,
+ mongo::kDummyNamespaceStr)),
+ matcher(filterBSON.getOwned(), expCtx){};
mongo::ServiceContext::UniqueClient client;
mongo::ServiceContext::UniqueOperationContext opCtx;
+ boost::intrusive_ptr<mongo::ExpressionContext> expCtx;
mongo::Matcher matcher;
};
@@ -190,7 +191,9 @@ struct stitch_support_v1_projection {
: client(std::move(client)), opCtx(this->client->makeOperationContext()), matcher(matcher) {
auto expCtx = mongo::make_intrusive<mongo::ExpressionContext>(
- opCtx.get(), collator ? collator->collator.get() : nullptr, mongo::kDummyNamespaceStr);
+ opCtx.get(),
+ collator ? collator->collator->clone() : nullptr,
+ mongo::kDummyNamespaceStr);
const auto policies = mongo::ProjectionPolicies::findProjectionPolicies();
auto proj =
mongo::projection_ast::parse(expCtx,
@@ -229,21 +232,19 @@ struct stitch_support_v1_update {
stitch_support_v1_collator* collator)
: client(std::move(client)),
opCtx(this->client->makeOperationContext()),
+ expCtx(new mongo::ExpressionContext(opCtx.get(),
+ collator ? collator->collator->clone() : nullptr,
+ mongo::kDummyNamespaceStr)),
updateExpr(updateExpr.getOwned()),
arrayFilters(arrayFilters.getOwned()),
matcher(matcher),
- updateDriver(new mongo::ExpressionContext(opCtx.get(),
- collator ? collator->collator.get() : nullptr,
- mongo::kDummyNamespaceStr)) {
+ updateDriver(expCtx) {
std::vector<mongo::BSONObj> arrayFilterVector;
for (auto&& filter : this->arrayFilters) {
arrayFilterVector.push_back(filter.embeddedObject());
}
- this->parsedFilters = uassertStatusOK(
- mongo::ParsedUpdate::parseArrayFilters(arrayFilterVector,
- this->opCtx.get(),
- collator ? collator->collator.get() : nullptr,
- mongo::kDummyNamespaceStr));
+ this->parsedFilters = uassertStatusOK(mongo::ParsedUpdate::parseArrayFilters(
+ expCtx, arrayFilterVector, mongo::kDummyNamespaceStr));
updateDriver.parse(this->updateExpr, parsedFilters);
@@ -254,6 +255,7 @@ struct stitch_support_v1_update {
mongo::ServiceContext::UniqueClient client;
mongo::ServiceContext::UniqueOperationContext opCtx;
+ boost::intrusive_ptr<mongo::ExpressionContext> expCtx;
mongo::BSONObj updateExpr;
mongo::BSONArray arrayFilters;