author    Ian Boros <ian.boros@mongodb.com>    2020-01-30 13:10:55 -0500
committer Evergreen Agent <no-reply@evergreen.mongodb.com>    2020-02-28 22:16:41 +0000
commit    cfa5c05fa1855fb1a04cb3a6e2eb10a7e82bf726 (patch)
tree      7ab1e1ce8e2edd6837952c131fe14d43a0633235 /src/mongo/db
parent    793ae32c597f197b6445750aa9bfdaabc206132d (diff)
SERVER-45406 Plumb ExpressionContext through PlanStage
This patch also moves ownership of the collator to the ExpressionContext.
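
In practical terms, every PlanStage constructor now takes an ExpressionContext* where it previously took an OperationContext*, and the ExpressionContext owns the collator (stages reach the OperationContext through their opCtx() accessor). Below is a minimal sketch of the new calling pattern, assuming the MongoDB headers named in the includes; the wrapper function scanSketch is hypothetical and exists only to illustrate the interface change:

    // Sketch only -- illustrates the plumbing, not production code.
    #include "mongo/db/exec/collection_scan.h"
    #include "mongo/db/exec/working_set.h"
    #include "mongo/db/pipeline/expression_context.h"
    #include "mongo/db/query/collation/collator_interface.h"

    namespace mongo {
    void scanSketch(OperationContext* opCtx, const Collection* collection) {
        // The ExpressionContext now owns the collator, so the collection's default
        // collator is cloned into it; the ExpressionContext must outlive any stage
        // that holds a raw pointer to it.
        auto expCtx = make_intrusive<ExpressionContext>(
            opCtx,
            CollatorInterface::cloneCollator(collection->getDefaultCollator()),
            collection->ns());

        WorkingSet ws;
        CollectionScanParams params;
        // Stages are now constructed from the ExpressionContext* rather than the
        // OperationContext*.
        CollectionScan scan(expCtx.get(), collection, params, &ws, /*filter*/ nullptr);
    }
    }  // namespace mongo
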
Diffstat (limited to 'src/mongo/db')
src/mongo/db/auth/authz_manager_external_state_mock.cpp | 6
src/mongo/db/catalog/collection.h | 49
src/mongo/db/catalog/collection_impl.cpp | 94
src/mongo/db/catalog/collection_impl.h | 26
src/mongo/db/catalog/collection_mock.h | 11
src/mongo/db/catalog/database_impl.cpp | 2
src/mongo/db/catalog/index_catalog_entry_impl.cpp | 7
src/mongo/db/catalog/index_catalog_entry_impl.h | 3
src/mongo/db/catalog/index_catalog_impl.cpp | 12
src/mongo/db/commands/count_cmd.cpp | 12
src/mongo/db/commands/distinct.cpp | 4
src/mongo/db/commands/find_and_modify.cpp | 13
src/mongo/db/commands/list_collections.cpp | 11
src/mongo/db/commands/list_databases.cpp | 5
src/mongo/db/commands/list_indexes.cpp | 6
src/mongo/db/commands/run_aggregate.cpp | 4
src/mongo/db/commands/write_commands/write_commands.cpp | 14
src/mongo/db/exec/and_hash.cpp | 8
src/mongo/db/exec/and_hash.h | 4
src/mongo/db/exec/and_sorted.cpp | 4
src/mongo/db/exec/and_sorted.h | 2
src/mongo/db/exec/cached_plan.cpp | 10
src/mongo/db/exec/cached_plan.h | 2
src/mongo/db/exec/change_stream_proxy.cpp | 4
src/mongo/db/exec/change_stream_proxy.h | 2
src/mongo/db/exec/collection_scan.cpp | 17
src/mongo/db/exec/collection_scan.h | 2
src/mongo/db/exec/count.cpp | 4
src/mongo/db/exec/count.h | 2
src/mongo/db/exec/count_scan.cpp | 8
src/mongo/db/exec/count_scan.h | 2
src/mongo/db/exec/delete.cpp | 14
src/mongo/db/exec/delete.h | 2
src/mongo/db/exec/distinct_scan.cpp | 10
src/mongo/db/exec/distinct_scan.h | 2
src/mongo/db/exec/ensure_sorted.cpp | 4
src/mongo/db/exec/ensure_sorted.h | 2
src/mongo/db/exec/eof.cpp | 2
src/mongo/db/exec/eof.h | 2
src/mongo/db/exec/fetch.cpp | 10
src/mongo/db/exec/fetch.h | 2
src/mongo/db/exec/geo_near.cpp | 44
src/mongo/db/exec/geo_near.h | 12
src/mongo/db/exec/idhack.cpp | 17
src/mongo/db/exec/idhack.h | 4
src/mongo/db/exec/index_scan.cpp | 10
src/mongo/db/exec/index_scan.h | 2
src/mongo/db/exec/limit.cpp | 4
src/mongo/db/exec/limit.h | 2
src/mongo/db/exec/merge_sort.cpp | 4
src/mongo/db/exec/merge_sort.h | 2
src/mongo/db/exec/multi_iterator.cpp | 9
src/mongo/db/exec/multi_iterator.h | 2
src/mongo/db/exec/multi_plan.cpp | 8
src/mongo/db/exec/multi_plan.h | 2
src/mongo/db/exec/near.cpp | 8
src/mongo/db/exec/near.h | 2
src/mongo/db/exec/or.cpp | 7
src/mongo/db/exec/or.h | 2
src/mongo/db/exec/pipeline_proxy.cpp | 12
src/mongo/db/exec/pipeline_proxy.h | 4
src/mongo/db/exec/plan_stage.h | 25
src/mongo/db/exec/projection.cpp | 10
src/mongo/db/exec/projection.h | 6
src/mongo/db/exec/projection_executor_builder_test.cpp | 5
src/mongo/db/exec/queued_data_stage.cpp | 4
src/mongo/db/exec/queued_data_stage.h | 2
src/mongo/db/exec/queued_data_stage_test.cpp | 11
src/mongo/db/exec/record_store_fast_count.cpp | 6
src/mongo/db/exec/record_store_fast_count.h | 2
src/mongo/db/exec/requires_all_indices_stage.h | 6
src/mongo/db/exec/requires_collection_stage.cpp | 8
src/mongo/db/exec/requires_collection_stage.h | 8
src/mongo/db/exec/requires_index_stage.cpp | 4
src/mongo/db/exec/requires_index_stage.h | 2
src/mongo/db/exec/return_key.h | 4
src/mongo/db/exec/shard_filter.cpp | 4
src/mongo/db/exec/shard_filter.h | 2
src/mongo/db/exec/skip.cpp | 4
src/mongo/db/exec/skip.h | 2
src/mongo/db/exec/sort.cpp | 2
src/mongo/db/exec/sort_key_generator.cpp | 6
src/mongo/db/exec/sort_key_generator.h | 2
src/mongo/db/exec/sort_test.cpp | 18
src/mongo/db/exec/stagedebug_cmd.cpp | 58
src/mongo/db/exec/subplan.cpp | 19
src/mongo/db/exec/subplan.h | 2
src/mongo/db/exec/text.cpp | 18
src/mongo/db/exec/text.h | 2
src/mongo/db/exec/text_match.cpp | 4
src/mongo/db/exec/text_match.h | 2
src/mongo/db/exec/text_or.cpp | 11
src/mongo/db/exec/text_or.h | 2
src/mongo/db/exec/trial_stage.cpp | 17
src/mongo/db/exec/trial_stage.h | 2
src/mongo/db/exec/update_stage.cpp | 45
src/mongo/db/exec/update_stage.h | 4
src/mongo/db/exec/upsert_stage.cpp | 24
src/mongo/db/exec/upsert_stage.h | 2
src/mongo/db/index/sort_key_generator_test.cpp | 2
src/mongo/db/matcher/expression_algo_test.cpp | 7
src/mongo/db/matcher/expression_expr.cpp | 8
src/mongo/db/matcher/expression_expr_test.cpp | 8
src/mongo/db/matcher/expression_parser_array_test.cpp | 7
src/mongo/db/matcher/expression_parser_leaf_test.cpp | 80
src/mongo/db/matcher/expression_with_placeholder_test.cpp | 5
src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp | 5
src/mongo/db/ops/delete.cpp | 7
src/mongo/db/ops/parsed_delete.cpp | 19
src/mongo/db/ops/parsed_delete.h | 17
src/mongo/db/ops/parsed_update.cpp | 32
src/mongo/db/ops/parsed_update.h | 24
src/mongo/db/ops/update.cpp | 8
src/mongo/db/ops/write_ops_exec.cpp | 14
src/mongo/db/pipeline/accumulator_test.cpp | 15
src/mongo/db/pipeline/document_source.h | 2
src/mongo/db/pipeline/document_source_check_resume_token_test.cpp | 2
src/mongo/db/pipeline/expression_context.cpp | 51
src/mongo/db/pipeline/expression_context.h | 48
src/mongo/db/pipeline/expression_trim_test.cpp | 2
src/mongo/db/pipeline/pipeline_d.cpp | 29
src/mongo/db/query/canonical_query.cpp | 25
src/mongo/db/query/canonical_query.h | 5
src/mongo/db/query/get_executor.cpp | 108
src/mongo/db/query/get_executor.h | 20
src/mongo/db/query/internal_plans.cpp | 74
src/mongo/db/query/internal_plans.h | 28
src/mongo/db/query/plan_cache_indexability_test.cpp | 37
src/mongo/db/query/projection_test.cpp | 6
src/mongo/db/query/query_planner_partialidx_test.cpp | 24
src/mongo/db/query/query_planner_test_fixture.cpp | 18
src/mongo/db/query/query_planner_test_fixture.h | 13
src/mongo/db/query/query_planner_test_lib.cpp | 5
src/mongo/db/query/query_solution_test.cpp | 5
src/mongo/db/query/stage_builder.cpp | 39
src/mongo/db/repl/apply_ops.cpp | 3
src/mongo/db/repl/storage_interface_impl.cpp | 4
src/mongo/db/update/addtoset_node_test.cpp | 17
src/mongo/db/update/compare_node_test.cpp | 13
src/mongo/db/update/pull_node_test.cpp | 30
src/mongo/db/update/pullall_node_test.cpp | 5
src/mongo/db/update/push_node_test.cpp | 5
src/mongo/db/update/update_driver.cpp | 2
src/mongo/db/update/update_object_node_test.cpp | 5
144 files changed, 983 insertions, 812 deletions
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index 521ba897fda..2967e62ff0f 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -186,9 +186,8 @@ Status AuthzManagerExternalStateMock::updateOne(OperationContext* opCtx,
bool upsert,
const BSONObj& writeConcern) {
namespace mmb = mutablebson;
- const CollatorInterface* collator = nullptr;
boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator, collectionName));
+ new ExpressionContext(opCtx, std::unique_ptr<CollatorInterface>(nullptr), collectionName));
UpdateDriver driver(std::move(expCtx));
std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> arrayFilters;
driver.parse(updatePattern, arrayFilters);
@@ -299,9 +298,8 @@ Status AuthzManagerExternalStateMock::_queryVector(
const NamespaceString& collectionName,
const BSONObj& query,
std::vector<BSONObjCollection::iterator>* result) {
- const CollatorInterface* collator = nullptr;
boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator, collectionName));
+ new ExpressionContext(opCtx, std::unique_ptr<CollatorInterface>(nullptr), collectionName));
StatusWithMatchExpression parseResult = MatchExpressionParser::parse(query, std::move(expCtx));
if (!parseResult.isOK()) {
return parseResult.getStatus();
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index fdfb119719b..86deaf95b0a 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -185,6 +185,53 @@ public:
};
/**
+ * A Collection::Validator represents a filter that is applied to all documents that are
+ * inserted. Enforcement of Validators being well formed is done lazily, so the 'Validator'
+ * class may represent a validator which is not well formed.
+ */
+ struct Validator {
+
+ /**
+ * Returns whether the validator's filter is well formed.
+ */
+ bool isOK() const {
+ return filter.isOK();
+ }
+
+ /**
+ * Returns OK or the error encountered when parsing the validator.
+ */
+ Status getStatus() const {
+ return filter.getStatus();
+ }
+
+ /**
+ * Empty means no validator. This must outlive 'filter'.
+ */
+ BSONObj validatorDoc;
+
+ /**
+ * A special ExpressionContext used to evaluate the filter match expression. This should
+ * outlive 'filter'.
+ */
+ boost::intrusive_ptr<ExpressionContext> expCtxForFilter;
+
+ /**
+ * The collection validator MatchExpression. This is stored as a StatusWith, as we lazily
+ * enforce that collection validators are well formed.
+ *
+ * -A non-OK Status indicates that the validator is not well formed, and any attempts to
+ * enforce the validator should error.
+ *
+ * -A value of Status::OK/nullptr indicates that there is no validator.
+ *
+ * -Anything else indicates a well formed validator. The MatchExpression will maintain
+ * pointers into _validatorDoc.
+ */
+ StatusWithMatchExpression filter = {nullptr};
+ };
+
+ /**
* Callback function for callers of insertDocumentForBulkLoader().
*/
using OnRecordInsertedFn = std::function<Status(const RecordId& loc)>;
@@ -354,7 +401,7 @@ public:
/**
* Returns a non-ok Status if validator is not legal for this collection.
*/
- virtual StatusWithMatchExpression parseValidator(
+ virtual Validator parseValidator(
OperationContext* opCtx,
const BSONObj& validator,
MatchExpressionParser::AllowedFeatureSet allowedFeatures,
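
The Validator struct added above bundles the original validator document, the ExpressionContext that owns its collator, and the lazily parsed filter. A short sketch of how a caller checks a document against it, mirroring CollectionImpl::checkValidation in the next file; the helper name checkDocument is hypothetical:

    // Sketch only; assumes the Collection::Validator definition from this patch.
    Status checkDocument(const Collection::Validator& validator, const BSONObj& doc) {
        if (!validator.isOK())
            return validator.getStatus();   // malformed validator: reject the write
        const MatchExpression* expr = validator.filter.getValue().get();
        if (!expr)
            return Status::OK();            // no validator configured
        return expr->matchesBSON(doc)
            ? Status::OK()
            : Status(ErrorCodes::DocumentValidationFailure, "Document failed validation");
    }
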
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index d45b3e27d23..221d042bf80 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -256,7 +256,6 @@ CollectionImpl::CollectionImpl(OperationContext* opCtx,
_needCappedLock(supportsDocLocking() && _recordStore && _recordStore->isCapped() &&
_ns.db() != "local"),
_indexCatalog(std::make_unique<IndexCatalogImpl>(this)),
- _swValidator{nullptr},
_cappedNotifier(_recordStore && _recordStore->isCapped()
? std::make_unique<CappedInsertNotifier>()
: nullptr) {
@@ -288,22 +287,23 @@ void CollectionImpl::init(OperationContext* opCtx) {
auto collectionOptions =
DurableCatalog::get(opCtx)->getCollectionOptions(opCtx, getCatalogId());
_collator = parseCollation(opCtx, _ns, collectionOptions.collation);
- _validatorDoc = collectionOptions.validator.getOwned();
+ auto validatorDoc = collectionOptions.validator.getOwned();
// Enforce that the validator can be used on this namespace.
- uassertStatusOK(checkValidatorCanBeUsedOnNs(_validatorDoc, ns(), _uuid));
+ uassertStatusOK(checkValidatorCanBeUsedOnNs(validatorDoc, ns(), _uuid));
// Store the result (OK / error) of parsing the validator, but do not enforce that the result is
// OK. This is intentional, as users may have validators on disk which were considered well
// formed in older versions but not in newer versions.
- _swValidator =
- parseValidator(opCtx, _validatorDoc, MatchExpressionParser::kAllowAllSpecialFeatures);
- if (!_swValidator.isOK()) {
+ _validator =
+ parseValidator(opCtx, validatorDoc, MatchExpressionParser::kAllowAllSpecialFeatures);
+ if (!_validator.isOK()) {
// Log an error and startup warning if the collection validator is malformed.
LOGV2_WARNING_OPTIONS(20293,
{logv2::LogTag::kStartupWarnings},
- "Collection {ns} has malformed validator: {swValidator_getStatus}",
+ "Collection {ns} has malformed validator: {validatorStatus}",
+ "Collection has malformed validator",
"ns"_attr = _ns,
- "swValidator_getStatus"_attr = _swValidator.getStatus());
+ "validatorStatus"_attr = _validator.getStatus());
}
_validationAction = uassertStatusOK(_parseValidationAction(collectionOptions.validationAction));
_validationLevel = uassertStatusOK(_parseValidationLevel(collectionOptions.validationLevel));
@@ -366,11 +366,11 @@ bool CollectionImpl::findDoc(OperationContext* opCtx,
}
Status CollectionImpl::checkValidation(OperationContext* opCtx, const BSONObj& document) const {
- if (!_swValidator.isOK()) {
- return _swValidator.getStatus();
+ if (!_validator.isOK()) {
+ return _validator.getStatus();
}
- const auto* const validatorMatchExpr = _swValidator.getValue().get();
+ const auto* const validatorMatchExpr = _validator.filter.getValue().get();
if (!validatorMatchExpr)
return Status::OK();
@@ -394,26 +394,26 @@ Status CollectionImpl::checkValidation(OperationContext* opCtx, const BSONObj& d
return {ErrorCodes::DocumentValidationFailure, "Document failed validation"};
}
-StatusWithMatchExpression CollectionImpl::parseValidator(
+Collection::Validator CollectionImpl::parseValidator(
OperationContext* opCtx,
const BSONObj& validator,
MatchExpressionParser::AllowedFeatureSet allowedFeatures,
boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
maxFeatureCompatibilityVersion) const {
if (MONGO_unlikely(allowSettingMalformedCollectionValidators.shouldFail())) {
- return {nullptr};
+ return {validator, nullptr, nullptr};
}
if (validator.isEmpty())
- return {nullptr};
+ return {validator, nullptr, nullptr};
Status canUseValidatorInThisContext = checkValidatorCanBeUsedOnNs(validator, ns(), _uuid);
if (!canUseValidatorInThisContext.isOK()) {
- return canUseValidatorInThisContext;
+ return {validator, nullptr, canUseValidatorInThisContext};
}
- boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, _collator.get(), ns()));
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, CollatorInterface::cloneCollator(_collator.get()), ns());
// The MatchExpression and contained ExpressionContext created as part of the validator are
// owned by the Collection and will outlive the OperationContext they were created under.
@@ -430,11 +430,14 @@ StatusWithMatchExpression CollectionImpl::parseValidator(
MatchExpressionParser::parse(validator, expCtx, ExtensionsCallbackNoop(), allowedFeatures);
if (!statusWithMatcher.isOK()) {
- return StatusWithMatchExpression{
+ return {
+ validator,
+ boost::intrusive_ptr<ExpressionContext>(nullptr),
statusWithMatcher.getStatus().withContext("Parsing of collection validator failed")};
}
- return statusWithMatcher;
+ return Collection::Validator{
+ validator, std::move(expCtx), std::move(statusWithMatcher.getValue())};
}
Status CollectionImpl::insertDocumentsForOplog(OperationContext* opCtx,
@@ -443,8 +446,8 @@ Status CollectionImpl::insertDocumentsForOplog(OperationContext* opCtx,
dassert(opCtx->lockState()->isWriteLocked());
// Since this is only for the OpLog, we can assume these for simplicity.
- invariant(_swValidator.isOK());
- invariant(_swValidator.getValue() == nullptr);
+ invariant(_validator.isOK());
+ invariant(_validator.filter.getValue() == nullptr);
invariant(!_indexCatalog->haveAnyIndexes());
Status status = _recordStore->insertRecords(opCtx, records, timestamps);
@@ -806,7 +809,7 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
}
bool CollectionImpl::updateWithDamagesSupported() const {
- if (!_swValidator.isOK() || _swValidator.getValue() != nullptr)
+ if (!_validator.isOK() || _validator.filter.getValue() != nullptr)
return false;
return _recordStore->updateWithDamagesSupported();
@@ -986,22 +989,18 @@ Status CollectionImpl::setValidator(OperationContext* opCtx, BSONObj validatorDo
// Note that, by the time we reach this, we should have already done a pre-parse that checks for
// banned features, so we don't need to include that check again.
- auto statusWithMatcher =
+ auto newValidator =
parseValidator(opCtx, validatorDoc, MatchExpressionParser::kAllowAllSpecialFeatures);
- if (!statusWithMatcher.isOK())
- return statusWithMatcher.getStatus();
+ if (!newValidator.isOK())
+ return newValidator.getStatus();
DurableCatalog::get(opCtx)->updateValidator(
opCtx, getCatalogId(), validatorDoc, getValidationLevel(), getValidationAction());
- opCtx->recoveryUnit()->onRollback([this,
- oldValidator = std::move(_swValidator),
- oldValidatorDoc = std::move(_validatorDoc)]() mutable {
- this->_swValidator = std::move(oldValidator);
- this->_validatorDoc = std::move(oldValidatorDoc);
+ opCtx->recoveryUnit()->onRollback([this, oldValidator = std::move(_validator)]() mutable {
+ this->_validator = std::move(oldValidator);
});
- _swValidator = std::move(statusWithMatcher);
- _validatorDoc = std::move(validatorDoc);
+ _validator = std::move(newValidator);
return Status::OK();
}
@@ -1038,8 +1037,11 @@ Status CollectionImpl::setValidationLevel(OperationContext* opCtx, StringData ne
auto oldValidationLevel = _validationLevel;
_validationLevel = levelSW.getValue();
- DurableCatalog::get(opCtx)->updateValidator(
- opCtx, getCatalogId(), _validatorDoc, getValidationLevel(), getValidationAction());
+ DurableCatalog::get(opCtx)->updateValidator(opCtx,
+ getCatalogId(),
+ _validator.validatorDoc,
+ getValidationLevel(),
+ getValidationAction());
opCtx->recoveryUnit()->onRollback(
[this, oldValidationLevel]() { this->_validationLevel = oldValidationLevel; });
@@ -1058,8 +1060,11 @@ Status CollectionImpl::setValidationAction(OperationContext* opCtx, StringData n
_validationAction = actionSW.getValue();
- DurableCatalog::get(opCtx)->updateValidator(
- opCtx, getCatalogId(), _validatorDoc, getValidationLevel(), getValidationAction());
+ DurableCatalog::get(opCtx)->updateValidator(opCtx,
+ getCatalogId(),
+ _validator.validatorDoc,
+ getValidationLevel(),
+ getValidationAction());
opCtx->recoveryUnit()->onRollback(
[this, oldValidationAction]() { this->_validationAction = oldValidationAction; });
@@ -1073,26 +1078,23 @@ Status CollectionImpl::updateValidator(OperationContext* opCtx,
invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
opCtx->recoveryUnit()->onRollback([this,
- oldValidator = std::move(_swValidator),
- oldValidatorDoc = std::move(_validatorDoc),
+ oldValidator = std::move(_validator),
oldValidationLevel = _validationLevel,
oldValidationAction = _validationAction]() mutable {
- this->_swValidator = std::move(oldValidator);
- this->_validatorDoc = std::move(oldValidatorDoc);
+ this->_validator = std::move(oldValidator);
this->_validationLevel = oldValidationLevel;
this->_validationAction = oldValidationAction;
});
DurableCatalog::get(opCtx)->updateValidator(
opCtx, getCatalogId(), newValidator, newLevel, newAction);
- _validatorDoc = std::move(newValidator);
- auto validatorSW =
- parseValidator(opCtx, _validatorDoc, MatchExpressionParser::kAllowAllSpecialFeatures);
- if (!validatorSW.isOK()) {
- return validatorSW.getStatus();
+ auto validator =
+ parseValidator(opCtx, newValidator, MatchExpressionParser::kAllowAllSpecialFeatures);
+ if (!validator.isOK()) {
+ return validator.getStatus();
}
- _swValidator = std::move(validatorSW.getValue());
+ _validator = std::move(validator);
auto levelSW = _parseValidationLevel(newLevel);
if (!levelSW.isOK()) {
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index faa0f05100a..e3828f9471c 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -90,7 +90,7 @@ public:
}
const BSONObj getValidatorDoc() const final {
- return _validatorDoc.getOwned();
+ return _validator.validatorDoc.getOwned();
}
bool requiresIdIndex() const final;
@@ -235,12 +235,11 @@ public:
/**
* Returns a non-ok Status if validator is not legal for this collection.
*/
- StatusWithMatchExpression parseValidator(
- OperationContext* opCtx,
- const BSONObj& validator,
- MatchExpressionParser::AllowedFeatureSet allowedFeatures,
- boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
- maxFeatureCompatibilityVersion = boost::none) const final;
+ Validator parseValidator(OperationContext* opCtx,
+ const BSONObj& validator,
+ MatchExpressionParser::AllowedFeatureSet allowedFeatures,
+ boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
+ maxFeatureCompatibilityVersion = boost::none) const final;
/**
* Sets the validator for this collection.
@@ -394,17 +393,8 @@ private:
// If null, the default collation is simple binary compare.
std::unique_ptr<CollatorInterface> _collator;
- // Empty means no validator.
- BSONObj _validatorDoc;
-
- // The collection validator MatchExpression. This is stored as a StatusWith, as we lazily
- // enforce that collection validators are well formed.
- // -A non-OK Status indicates that the validator is not well formed, and any attempts to enforce
- // the validator (inserts) should error.
- // -A value of {nullptr} indicates that there is no validator.
- // -Anything else indicates a well formed validator. The MatchExpression will maintain
- // pointers into _validatorDoc.
- StatusWithMatchExpression _swValidator;
+
+ Validator _validator;
ValidationAction _validationAction;
ValidationLevel _validationLevel;
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index 001bee4db5d..ecab3451f6d 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -170,12 +170,11 @@ public:
std::abort();
}
- StatusWithMatchExpression parseValidator(
- OperationContext* opCtx,
- const BSONObj& validator,
- MatchExpressionParser::AllowedFeatureSet allowedFeatures,
- boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
- maxFeatureCompatibilityVersion) const {
+ Validator parseValidator(OperationContext* opCtx,
+ const BSONObj& validator,
+ MatchExpressionParser::AllowedFeatureSet allowedFeatures,
+ boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
+ maxFeatureCompatibilityVersion) const {
std::abort();
}
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 685ae79ef5c..f8c08086602 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -878,7 +878,7 @@ Status DatabaseImpl::userCreateNS(OperationContext* opCtx,
if (!collectionOptions.validator.isEmpty()) {
boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator.get(), nss));
+ new ExpressionContext(opCtx, std::move(collator), nss));
// Save this to a variable to avoid reading the atomic variable multiple times.
const auto currentFCV = serverGlobalParams.featureCompatibility.getVersion();
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index aca5e622a02..44630f8772a 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -47,6 +47,7 @@
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/multi_key_path_tracker.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/pipeline/expression_context.h"
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/query/collection_query_info.h"
#include "mongo/db/service_context.h"
@@ -97,14 +98,14 @@ IndexCatalogEntryImpl::IndexCatalogEntryImpl(OperationContext* const opCtx,
if (_descriptor->isPartial()) {
const BSONObj& filter = _descriptor->partialFilterExpression();
- boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, _collator.get(), ns()));
+ _expCtxForFilter = make_intrusive<ExpressionContext>(
+ opCtx, CollatorInterface::cloneCollator(_collator.get()), ns());
// Parsing the partial filter expression is not expected to fail here since the
// expression would have been successfully parsed upstream during index creation.
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(filter,
- std::move(expCtx),
+ _expCtxForFilter,
ExtensionsCallbackNoop(),
MatchExpressionParser::kBanAllSpecialFeatures);
invariant(statusWithMatcher.getStatus());
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.h b/src/mongo/db/catalog/index_catalog_entry_impl.h
index 7cb8d5c98c6..4f4691359aa 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.h
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.h
@@ -51,6 +51,7 @@ class IndexAccessMethod;
class IndexDescriptor;
class MatchExpression;
class OperationContext;
+class ExpressionContext;
class IndexCatalogEntryImpl : public IndexCatalogEntry {
IndexCatalogEntryImpl(const IndexCatalogEntryImpl&) = delete;
@@ -224,6 +225,8 @@ private:
std::unique_ptr<CollatorInterface> _collator;
std::unique_ptr<MatchExpression> _filterExpression;
+ // Special ExpressionContext used to evaluate the partial filter expression.
+ boost::intrusive_ptr<ExpressionContext> _expCtxForFilter;
// cached stuff
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 0c637730bf6..e8079ede4b9 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -667,6 +667,11 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
}
}
+ // Create an ExpressionContext, used to parse the match expression and to house the collator for
+ // the remaining checks.
+ boost::intrusive_ptr<ExpressionContext> expCtx(
+ new ExpressionContext(opCtx, std::move(collator), nss));
+
// Ensure that if there is a filter, it's valid.
BSONElement filterElement = spec.getField("partialFilterExpression");
if (filterElement) {
@@ -680,10 +685,6 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
"\"partialFilterExpression\" for an index must be a document");
}
- // The collator must outlive the constructed MatchExpression.
- boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator.get(), nss));
-
// Parsing the partial filter expression is not expected to fail here since the
// expression would have been successfully parsed upstream during index creation.
StatusWithMatchExpression statusWithMatcher =
@@ -717,7 +718,8 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
}
if (collationElement &&
- !CollatorInterface::collatorsMatch(collator.get(), _collection->getDefaultCollator())) {
+ !CollatorInterface::collatorsMatch(expCtx->getCollator(),
+ _collection->getDefaultCollator())) {
return Status(ErrorCodes::CannotCreateIndex,
"_id index must have the collection default collation");
}
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 2f2a7c4b09d..876a6f8847d 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -170,8 +170,11 @@ public:
// version on initial entry into count.
auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getCurrentMetadata();
+ auto expCtx = makeExpressionContextForGetExecutor(
+ opCtx, request.getCollation().value_or(BSONObj()), nss);
+
auto statusWithPlanExecutor =
- getExecutorCount(opCtx, collection, request, true /*explain*/, nss);
+ getExecutorCount(expCtx, collection, request, true /*explain*/, nss);
if (!statusWithPlanExecutor.isOK()) {
return statusWithPlanExecutor.getStatus();
}
@@ -228,7 +231,12 @@ public:
auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getCurrentMetadata();
auto statusWithPlanExecutor =
- getExecutorCount(opCtx, collection, request, false /*explain*/, nss);
+ getExecutorCount(makeExpressionContextForGetExecutor(
+ opCtx, request.getCollation().value_or(BSONObj()), nss),
+ collection,
+ request,
+ false /*explain*/,
+ nss);
uassertStatusOK(statusWithPlanExecutor.getStatus());
auto exec = std::move(statusWithPlanExecutor.getValue());
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index cbbaa62d243..135ab17773d 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -169,7 +169,7 @@ public:
Collection* const collection = ctx->getCollection();
auto executor = uassertStatusOK(
- getExecutorDistinct(opCtx, collection, QueryPlannerParams::DEFAULT, &parsedDistinct));
+ getExecutorDistinct(collection, QueryPlannerParams::DEFAULT, &parsedDistinct));
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(executor.get(), collection, verbosity, BSONObj(), &bodyBuilder);
@@ -225,7 +225,7 @@ public:
Collection* const collection = ctx->getCollection();
auto executor =
- getExecutorDistinct(opCtx, collection, QueryPlannerParams::DEFAULT, &parsedDistinct);
+ getExecutorDistinct(collection, QueryPlannerParams::DEFAULT, &parsedDistinct);
uassertStatusOK(executor.getStatus());
{
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 5d995f1c70e..923d44eb10c 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -275,8 +275,9 @@ public:
css->checkShardVersionOrThrow(opCtx);
Collection* const collection = autoColl.getCollection();
- const auto exec = uassertStatusOK(
- getExecutorDelete(opCtx, opDebug, collection, &parsedDelete, verbosity));
+
+ const auto exec =
+ uassertStatusOK(getExecutorDelete(opDebug, collection, &parsedDelete, verbosity));
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(exec.get(), collection, verbosity, BSONObj(), &bodyBuilder);
@@ -300,8 +301,8 @@ public:
css->checkShardVersionOrThrow(opCtx);
Collection* const collection = autoColl.getCollection();
- const auto exec = uassertStatusOK(
- getExecutorUpdate(opCtx, opDebug, collection, &parsedUpdate, verbosity));
+ const auto exec =
+ uassertStatusOK(getExecutorUpdate(opDebug, collection, &parsedUpdate, verbosity));
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(exec.get(), collection, verbosity, BSONObj(), &bodyBuilder);
@@ -390,7 +391,7 @@ public:
checkIfTransactionOnCappedColl(collection, inTransaction);
const auto exec = uassertStatusOK(getExecutorDelete(
- opCtx, opDebug, collection, &parsedDelete, boost::none /* verbosity */));
+ opDebug, collection, &parsedDelete, boost::none /* verbosity */));
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -496,7 +497,7 @@ public:
checkIfTransactionOnCappedColl(collection, inTransaction);
const auto exec = uassertStatusOK(getExecutorUpdate(
- opCtx, opDebug, collection, &parsedUpdate, boost::none /* verbosity */));
+ opDebug, collection, &parsedUpdate, boost::none /* verbosity */));
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 7ef36d2e1cf..fae03f2159a 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -255,16 +255,17 @@ public:
const bool nameOnly = jsobj["nameOnly"].trueValue();
const bool authorizedCollections = jsobj["authorizedCollections"].trueValue();
+ // The collator is null because collection objects are compared using binary comparison.
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), NamespaceString(dbname));
+
// Check for 'filter' argument.
BSONElement filterElt = jsobj["filter"];
if (!filterElt.eoo()) {
if (filterElt.type() != mongo::Object) {
uasserted(ErrorCodes::BadValue, "\"filter\" must be an object");
}
- // The collator is null because collection objects are compared using binary comparison.
- const CollatorInterface* collator = nullptr;
- boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator, NamespaceString(StringData(dbname))));
+
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(filterElt.Obj(), std::move(expCtx));
uassertStatusOK(statusWithMatcher.getStatus());
@@ -299,7 +300,7 @@ public:
cursorNss);
auto ws = std::make_unique<WorkingSet>();
- auto root = std::make_unique<QueuedDataStage>(opCtx, ws.get());
+ auto root = std::make_unique<QueuedDataStage>(expCtx.get(), ws.get());
if (db) {
if (auto collNames = _getExactNameMatches(matcher.get())) {
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 9a383f85522..c656355ec06 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -126,9 +126,8 @@ public:
if (auto filterObj = cmd.getFilter()) {
// The collator is null because database metadata objects are compared using simple
// binary comparison.
- const CollatorInterface* collator = nullptr;
- boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator, NamespaceString(dbname)));
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), NamespaceString(dbname));
auto matcher =
uassertStatusOK(MatchExpressionParser::parse(filterObj.get(), std::move(expCtx)));
filter = std::move(matcher);
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 02dc4b7ead2..0c3cf1c3053 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -155,9 +155,13 @@ public:
str::stream() << "ns does not exist: " << ctx.getNss().ns(),
collection);
nss = ctx.getNss();
+
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), nss);
+
auto indexList = listIndexesInLock(opCtx, collection, nss, includeBuildUUIDs);
auto ws = std::make_unique<WorkingSet>();
- auto root = std::make_unique<QueuedDataStage>(opCtx, ws.get());
+ auto root = std::make_unique<QueuedDataStage>(expCtx.get(), ws.get());
for (auto&& indexSpec : indexList) {
WorkingSetID id = ws->allocate();
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index ad65b9d9a48..104c5da8cba 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -467,8 +467,8 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> createOuterPipelineProxyExe
// Transfer ownership of the Pipeline to the PipelineProxyStage.
auto ws = std::make_unique<WorkingSet>();
auto proxy = hasChangeStream
- ? std::make_unique<ChangeStreamProxyStage>(opCtx, std::move(pipeline), ws.get())
- : std::make_unique<PipelineProxyStage>(opCtx, std::move(pipeline), ws.get());
+ ? std::make_unique<ChangeStreamProxyStage>(expCtx.get(), std::move(pipeline), ws.get())
+ : std::make_unique<PipelineProxyStage>(expCtx.get(), std::move(pipeline), ws.get());
// This PlanExecutor will simply forward requests to the Pipeline, so does not need
// to yield or to be registered with any collection's CursorManager to receive
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 172cf11d2f7..3435008fc6c 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -410,11 +410,8 @@ private:
// info is more accurate.
AutoGetCollection collection(opCtx, _batch.getNamespace(), MODE_IX);
- auto exec = uassertStatusOK(getExecutorUpdate(opCtx,
- &CurOp::get(opCtx)->debug(),
- collection.getCollection(),
- &parsedUpdate,
- verbosity));
+ auto exec = uassertStatusOK(getExecutorUpdate(
+ &CurOp::get(opCtx)->debug(), collection.getCollection(), &parsedUpdate, verbosity));
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(
exec.get(), collection.getCollection(), verbosity, BSONObj(), &bodyBuilder);
@@ -487,11 +484,8 @@ private:
AutoGetCollection collection(opCtx, _batch.getNamespace(), MODE_IX);
// Explain the plan tree.
- auto exec = uassertStatusOK(getExecutorDelete(opCtx,
- &CurOp::get(opCtx)->debug(),
- collection.getCollection(),
- &parsedDelete,
- verbosity));
+ auto exec = uassertStatusOK(getExecutorDelete(
+ &CurOp::get(opCtx)->debug(), collection.getCollection(), &parsedDelete, verbosity));
auto bodyBuilder = result->getBodyBuilder();
Explain::explainStages(
exec.get(), collection.getCollection(), verbosity, BSONObj(), &bodyBuilder);
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 6c913377c4f..d2666c905e6 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -55,16 +55,16 @@ const size_t AndHashStage::kLookAheadWorks = 10;
// static
const char* AndHashStage::kStageType = "AND_HASH";
-AndHashStage::AndHashStage(OperationContext* opCtx, WorkingSet* ws)
- : PlanStage(kStageType, opCtx),
+AndHashStage::AndHashStage(ExpressionContext* expCtx, WorkingSet* ws)
+ : PlanStage(kStageType, expCtx),
_ws(ws),
_hashingChildren(true),
_currentChild(0),
_memUsage(0),
_maxMemUsage(kDefaultMaxMemUsageBytes) {}
-AndHashStage::AndHashStage(OperationContext* opCtx, WorkingSet* ws, size_t maxMemUsage)
- : PlanStage(kStageType, opCtx),
+AndHashStage::AndHashStage(ExpressionContext* expCtx, WorkingSet* ws, size_t maxMemUsage)
+ : PlanStage(kStageType, expCtx),
_ws(ws),
_hashingChildren(true),
_currentChild(0),
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index 3659504486d..4bd591952f5 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -48,12 +48,12 @@ namespace mongo {
*/
class AndHashStage final : public PlanStage {
public:
- AndHashStage(OperationContext* opCtx, WorkingSet* ws);
+ AndHashStage(ExpressionContext* expCtx, WorkingSet* ws);
/**
* For testing only. Allows tests to set memory usage threshold.
*/
- AndHashStage(OperationContext* opCtx, WorkingSet* ws, size_t maxMemUsage);
+ AndHashStage(ExpressionContext* expCtx, WorkingSet* ws, size_t maxMemUsage);
void addChild(std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index 6cda1ad2bf1..57d2eb08b52 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -45,8 +45,8 @@ using std::vector;
// static
const char* AndSortedStage::kStageType = "AND_SORTED";
-AndSortedStage::AndSortedStage(OperationContext* opCtx, WorkingSet* ws)
- : PlanStage(kStageType, opCtx),
+AndSortedStage::AndSortedStage(ExpressionContext* expCtx, WorkingSet* ws)
+ : PlanStage(kStageType, expCtx),
_ws(ws),
_targetNode(numeric_limits<size_t>::max()),
_targetId(WorkingSet::INVALID_ID),
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index 3d72d15c1f9..0236d4a294a 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -47,7 +47,7 @@ namespace mongo {
*/
class AndSortedStage final : public PlanStage {
public:
- AndSortedStage(OperationContext* opCtx, WorkingSet* ws);
+ AndSortedStage(ExpressionContext* expCtx, WorkingSet* ws);
void addChild(std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index f3032d54da9..a5571725990 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -57,14 +57,14 @@ namespace mongo {
// static
const char* CachedPlanStage::kStageType = "CACHED_PLAN";
-CachedPlanStage::CachedPlanStage(OperationContext* opCtx,
+CachedPlanStage::CachedPlanStage(ExpressionContext* expCtx,
Collection* collection,
WorkingSet* ws,
CanonicalQuery* cq,
const QueryPlannerParams& params,
size_t decisionWorks,
std::unique_ptr<PlanStage> root)
- : RequiresAllIndicesStage(kStageType, opCtx, collection),
+ : RequiresAllIndicesStage(kStageType, expCtx, collection),
_ws(ws),
_canonicalQuery(cq),
_plannerParams(params),
@@ -227,7 +227,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, s
if (1 == solutions.size()) {
// Only one possible plan. Build the stages from the solution.
auto newRoot =
- StageBuilder::build(getOpCtx(), collection(), *_canonicalQuery, *solutions[0], _ws);
+ StageBuilder::build(opCtx(), collection(), *_canonicalQuery, *solutions[0], _ws);
_children.emplace_back(std::move(newRoot));
_replannedQs = std::move(solutions.back());
solutions.pop_back();
@@ -249,7 +249,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, s
auto cachingMode = shouldCache ? MultiPlanStage::CachingMode::AlwaysCache
: MultiPlanStage::CachingMode::NeverCache;
_children.emplace_back(
- new MultiPlanStage(getOpCtx(), collection(), _canonicalQuery, cachingMode));
+ new MultiPlanStage(expCtx(), collection(), _canonicalQuery, cachingMode));
MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(child().get());
for (size_t ix = 0; ix < solutions.size(); ++ix) {
@@ -258,7 +258,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, s
}
auto nextPlanRoot =
- StageBuilder::build(getOpCtx(), collection(), *_canonicalQuery, *solutions[ix], _ws);
+ StageBuilder::build(opCtx(), collection(), *_canonicalQuery, *solutions[ix], _ws);
multiPlanStage->addPlan(std::move(solutions[ix]), std::move(nextPlanRoot), _ws);
}
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 8a5fe5871d3..c57dda3e210 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -56,7 +56,7 @@ class PlanYieldPolicy;
*/
class CachedPlanStage final : public RequiresAllIndicesStage {
public:
- CachedPlanStage(OperationContext* opCtx,
+ CachedPlanStage(ExpressionContext* expCtx,
Collection* collection,
WorkingSet* ws,
CanonicalQuery* cq,
diff --git a/src/mongo/db/exec/change_stream_proxy.cpp b/src/mongo/db/exec/change_stream_proxy.cpp
index c10bccf4cd4..c16255a897b 100644
--- a/src/mongo/db/exec/change_stream_proxy.cpp
+++ b/src/mongo/db/exec/change_stream_proxy.cpp
@@ -39,10 +39,10 @@ namespace mongo {
const char* ChangeStreamProxyStage::kStageType = "CHANGE_STREAM_PROXY";
-ChangeStreamProxyStage::ChangeStreamProxyStage(OperationContext* opCtx,
+ChangeStreamProxyStage::ChangeStreamProxyStage(ExpressionContext* expCtx,
std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
WorkingSet* ws)
- : PipelineProxyStage(opCtx, std::move(pipeline), ws, kStageType) {
+ : PipelineProxyStage(expCtx, std::move(pipeline), ws, kStageType) {
// Set _postBatchResumeToken to the initial PBRT that was added to the expression context during
// pipeline construction, and use it to obtain the starting time for _latestOplogTimestamp.
invariant(!_pipeline->getContext()->initialPostBatchResumeToken.isEmpty());
diff --git a/src/mongo/db/exec/change_stream_proxy.h b/src/mongo/db/exec/change_stream_proxy.h
index 0cfc9d8d825..6d115b78885 100644
--- a/src/mongo/db/exec/change_stream_proxy.h
+++ b/src/mongo/db/exec/change_stream_proxy.h
@@ -48,7 +48,7 @@ public:
* The 'pipeline' argument must be a $changeStream pipeline. Passing a non-$changeStream into
* the constructor will cause an invariant() to fail.
*/
- ChangeStreamProxyStage(OperationContext* opCtx,
+ ChangeStreamProxyStage(ExpressionContext* expCtx,
std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
WorkingSet* ws);
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index 5acfcec50fe..d9bf24824b3 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -56,12 +56,12 @@ using std::vector;
// static
const char* CollectionScan::kStageType = "COLLSCAN";
-CollectionScan::CollectionScan(OperationContext* opCtx,
+CollectionScan::CollectionScan(ExpressionContext* expCtx,
const Collection* collection,
const CollectionScanParams& params,
WorkingSet* workingSet,
const MatchExpression* filter)
- : RequiresCollectionStage(kStageType, opCtx, collection),
+ : RequiresCollectionStage(kStageType, expCtx, collection),
_workingSet(workingSet),
_filter((filter && !filter->isTriviallyTrue()) ? filter : nullptr),
_params(params) {
@@ -117,11 +117,11 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
// snapshot where the oplog entries are not yet visible even after the wait.
invariant(!_params.tailable && collection()->ns().isOplog());
- getOpCtx()->recoveryUnit()->abandonSnapshot();
- collection()->getRecordStore()->waitForAllEarlierOplogWritesToBeVisible(getOpCtx());
+ opCtx()->recoveryUnit()->abandonSnapshot();
+ collection()->getRecordStore()->waitForAllEarlierOplogWritesToBeVisible(opCtx());
}
- _cursor = collection()->getCursor(getOpCtx(), forward);
+ _cursor = collection()->getCursor(opCtx(), forward);
if (!_lastSeenId.isNull()) {
invariant(_params.tailable);
@@ -171,7 +171,7 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
StatusWith<RecordId> goal = oploghack::keyForOptime(*_params.minTs);
if (goal.isOK()) {
boost::optional<RecordId> startLoc =
- collection()->getRecordStore()->oplogStartHack(getOpCtx(), goal.getValue());
+ collection()->getRecordStore()->oplogStartHack(opCtx(), goal.getValue());
if (startLoc && !startLoc->isNull()) {
LOGV2_DEBUG(20584, 3, "Using direct oplog seek");
record = _cursor->seekExact(*startLoc);
@@ -215,8 +215,7 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
WorkingSetID id = _workingSet->allocate();
WorkingSetMember* member = _workingSet->get(id);
member->recordId = record->id;
- member->resetDocument(getOpCtx()->recoveryUnit()->getSnapshotId(),
- record->data.releaseToBson());
+ member->resetDocument(opCtx()->recoveryUnit()->getSnapshotId(), record->data.releaseToBson());
_workingSet->transitionToRecordIdAndObj(id);
return returnIfMatches(member, id, out);
@@ -283,7 +282,7 @@ void CollectionScan::doDetachFromOperationContext() {
void CollectionScan::doReattachToOperationContext() {
if (_cursor)
- _cursor->reattachToOperationContext(getOpCtx());
+ _cursor->reattachToOperationContext(opCtx());
}
unique_ptr<PlanStageStats> CollectionScan::getStats() {
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index 1c8d815e141..b19915bb2c5 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -53,7 +53,7 @@ class CollectionScan final : public RequiresCollectionStage {
public:
static const char* kStageType;
- CollectionScan(OperationContext* opCtx,
+ CollectionScan(ExpressionContext* expCtx,
const Collection* collection,
const CollectionScanParams& params,
WorkingSet* workingSet,
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index 73620f851df..c646e172f29 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -45,13 +45,13 @@ using std::vector;
// static
const char* CountStage::kStageType = "COUNT";
-CountStage::CountStage(OperationContext* opCtx,
+CountStage::CountStage(ExpressionContext* expCtx,
Collection* collection,
long long limit,
long long skip,
WorkingSet* ws,
PlanStage* child)
- : PlanStage(kStageType, opCtx), _limit(limit), _skip(skip), _leftToSkip(_skip), _ws(ws) {
+ : PlanStage(kStageType, expCtx), _limit(limit), _skip(skip), _leftToSkip(_skip), _ws(ws) {
invariant(_skip >= 0);
invariant(_limit >= 0);
invariant(child);
diff --git a/src/mongo/db/exec/count.h b/src/mongo/db/exec/count.h
index 3eb0683baf0..2eeaf5266e5 100644
--- a/src/mongo/db/exec/count.h
+++ b/src/mongo/db/exec/count.h
@@ -46,7 +46,7 @@ namespace mongo {
*/
class CountStage final : public PlanStage {
public:
- CountStage(OperationContext* opCtx,
+ CountStage(ExpressionContext* expCtx,
Collection* collection,
long long limit,
long long skip,
diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp
index be7914299d9..9e9ec9ab490 100644
--- a/src/mongo/db/exec/count_scan.cpp
+++ b/src/mongo/db/exec/count_scan.cpp
@@ -73,8 +73,8 @@ const char* CountScan::kStageType = "COUNT_SCAN";
// When building the CountScan stage we take the keyPattern, index name, and multikey details from
// the CountScanParams rather than resolving them via the IndexDescriptor, since these may differ
// from the descriptor's contents.
-CountScan::CountScan(OperationContext* opCtx, CountScanParams params, WorkingSet* workingSet)
- : RequiresIndexStage(kStageType, opCtx, params.indexDescriptor, workingSet),
+CountScan::CountScan(ExpressionContext* expCtx, CountScanParams params, WorkingSet* workingSet)
+ : RequiresIndexStage(kStageType, expCtx, params.indexDescriptor, workingSet),
_workingSet(workingSet),
_keyPattern(std::move(params.keyPattern)),
_shouldDedup(params.isMultiKey),
@@ -112,7 +112,7 @@ PlanStage::StageState CountScan::doWork(WorkingSetID* out) {
if (needInit) {
// First call to work(). Perform cursor init.
- _cursor = indexAccessMethod()->newCursor(getOpCtx());
+ _cursor = indexAccessMethod()->newCursor(opCtx());
_cursor->setEndPosition(_endKey, _endKeyInclusive);
auto keyStringForSeek = IndexEntryComparison::makeKeyStringFromBSONKeyForSeek(
@@ -174,7 +174,7 @@ void CountScan::doDetachFromOperationContext() {
void CountScan::doReattachToOperationContext() {
if (_cursor)
- _cursor->reattachToOperationContext(getOpCtx());
+ _cursor->reattachToOperationContext(opCtx());
}
unique_ptr<PlanStageStats> CountScan::getStats() {
diff --git a/src/mongo/db/exec/count_scan.h b/src/mongo/db/exec/count_scan.h
index 5ebe00b0a7a..c8105c713ff 100644
--- a/src/mongo/db/exec/count_scan.h
+++ b/src/mongo/db/exec/count_scan.h
@@ -87,7 +87,7 @@ struct CountScanParams {
*/
class CountScan final : public RequiresIndexStage {
public:
- CountScan(OperationContext* opCtx, CountScanParams params, WorkingSet* workingSet);
+ CountScan(ExpressionContext* expCtx, CountScanParams params, WorkingSet* workingSet);
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 4e5678defcc..4d9504a5b16 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -71,12 +71,12 @@ bool shouldRestartDeleteIfNoLongerMatches(const DeleteStageParams* params) {
// static
const char* DeleteStage::kStageType = "DELETE";
-DeleteStage::DeleteStage(OperationContext* opCtx,
+DeleteStage::DeleteStage(ExpressionContext* expCtx,
std::unique_ptr<DeleteStageParams> params,
WorkingSet* ws,
Collection* collection,
PlanStage* child)
- : RequiresMutableCollectionStage(kStageType, opCtx, collection),
+ : RequiresMutableCollectionStage(kStageType, expCtx, collection),
_params(std::move(params)),
_ws(ws),
_idRetrying(WorkingSet::INVALID_ID),
@@ -161,7 +161,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
bool docStillMatches;
try {
docStillMatches = write_stage_common::ensureStillMatches(
- collection(), getOpCtx(), _ws, id, _params->canonicalQuery);
+ collection(), opCtx(), _ws, id, _params->canonicalQuery);
} catch (const WriteConflictException&) {
// There was a problem trying to detect if the document still exists, so retry.
memberFreer.dismiss();
@@ -201,8 +201,8 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
// Do the write, unless this is an explain.
if (!_params->isExplain) {
try {
- WriteUnitOfWork wunit(getOpCtx());
- collection()->deleteDocument(getOpCtx(),
+ WriteUnitOfWork wunit(opCtx());
+ collection()->deleteDocument(opCtx(),
_params->stmtId,
recordId,
_params->opDebug,
@@ -261,8 +261,8 @@ void DeleteStage::doRestoreStateRequiresCollection() {
const NamespaceString& ns = collection()->ns();
uassert(ErrorCodes::PrimarySteppedDown,
str::stream() << "Demoted from primary while removing from " << ns.ns(),
- !getOpCtx()->writesAreReplicated() ||
- repl::ReplicationCoordinator::get(getOpCtx())->canAcceptWritesFor(getOpCtx(), ns));
+ !opCtx()->writesAreReplicated() ||
+ repl::ReplicationCoordinator::get(opCtx())->canAcceptWritesFor(opCtx(), ns));
}
unique_ptr<PlanStageStats> DeleteStage::getStats() {
diff --git a/src/mongo/db/exec/delete.h b/src/mongo/db/exec/delete.h
index 139239d9de0..a0f9f056d92 100644
--- a/src/mongo/db/exec/delete.h
+++ b/src/mongo/db/exec/delete.h
@@ -99,7 +99,7 @@ class DeleteStage final : public RequiresMutableCollectionStage {
DeleteStage& operator=(const DeleteStage&) = delete;
public:
- DeleteStage(OperationContext* opCtx,
+ DeleteStage(ExpressionContext* expCtx,
std::unique_ptr<DeleteStageParams> params,
WorkingSet* ws,
Collection* collection,
diff --git a/src/mongo/db/exec/distinct_scan.cpp b/src/mongo/db/exec/distinct_scan.cpp
index 860e6323a40..6df30d00432 100644
--- a/src/mongo/db/exec/distinct_scan.cpp
+++ b/src/mongo/db/exec/distinct_scan.cpp
@@ -46,8 +46,8 @@ using std::vector;
// static
const char* DistinctScan::kStageType = "DISTINCT_SCAN";
-DistinctScan::DistinctScan(OperationContext* opCtx, DistinctParams params, WorkingSet* workingSet)
- : RequiresIndexStage(kStageType, opCtx, params.indexDescriptor, workingSet),
+DistinctScan::DistinctScan(ExpressionContext* expCtx, DistinctParams params, WorkingSet* workingSet)
+ : RequiresIndexStage(kStageType, expCtx, params.indexDescriptor, workingSet),
_workingSet(workingSet),
_keyPattern(std::move(params.keyPattern)),
_scanDirection(params.scanDirection),
@@ -78,7 +78,7 @@ PlanStage::StageState DistinctScan::doWork(WorkingSetID* out) {
boost::optional<IndexKeyEntry> kv;
try {
if (!_cursor)
- _cursor = indexAccessMethod()->newCursor(getOpCtx(), _scanDirection == 1);
+ _cursor = indexAccessMethod()->newCursor(opCtx(), _scanDirection == 1);
kv = _cursor->seek(IndexEntryComparison::makeKeyStringFromSeekPointForSeek(
_seekPoint,
indexAccessMethod()->getSortedDataInterface()->getKeyStringVersion(),
@@ -125,7 +125,7 @@ PlanStage::StageState DistinctScan::doWork(WorkingSetID* out) {
member->keyData.push_back(IndexKeyDatum(_keyPattern,
kv->key,
workingSetIndexId(),
- getOpCtx()->recoveryUnit()->getSnapshotId()));
+ opCtx()->recoveryUnit()->getSnapshotId()));
_workingSet->transitionToRecordIdAndIdx(id);
*out = id;
@@ -156,7 +156,7 @@ void DistinctScan::doDetachFromOperationContext() {
void DistinctScan::doReattachToOperationContext() {
if (_cursor)
- _cursor->reattachToOperationContext(getOpCtx());
+ _cursor->reattachToOperationContext(opCtx());
}
unique_ptr<PlanStageStats> DistinctScan::getStats() {
diff --git a/src/mongo/db/exec/distinct_scan.h b/src/mongo/db/exec/distinct_scan.h
index 4596d92d378..e9c5c5c3852 100644
--- a/src/mongo/db/exec/distinct_scan.h
+++ b/src/mongo/db/exec/distinct_scan.h
@@ -95,7 +95,7 @@ struct DistinctParams {
*/
class DistinctScan final : public RequiresIndexStage {
public:
- DistinctScan(OperationContext* opCtx, DistinctParams params, WorkingSet* workingSet);
+ DistinctScan(ExpressionContext* expCtx, DistinctParams params, WorkingSet* workingSet);
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
diff --git a/src/mongo/db/exec/ensure_sorted.cpp b/src/mongo/db/exec/ensure_sorted.cpp
index aaec72bf377..f713a2eed3a 100644
--- a/src/mongo/db/exec/ensure_sorted.cpp
+++ b/src/mongo/db/exec/ensure_sorted.cpp
@@ -42,11 +42,11 @@ using std::unique_ptr;
const char* EnsureSortedStage::kStageType = "ENSURE_SORTED";
-EnsureSortedStage::EnsureSortedStage(OperationContext* opCtx,
+EnsureSortedStage::EnsureSortedStage(ExpressionContext* expCtx,
BSONObj pattern,
WorkingSet* ws,
std::unique_ptr<PlanStage> child)
- : PlanStage(kStageType, opCtx), _ws(ws), _sortKeyComparator(pattern) {
+ : PlanStage(kStageType, expCtx), _ws(ws), _sortKeyComparator(pattern) {
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/ensure_sorted.h b/src/mongo/db/exec/ensure_sorted.h
index 423647eeacf..9d917b93744 100644
--- a/src/mongo/db/exec/ensure_sorted.h
+++ b/src/mongo/db/exec/ensure_sorted.h
@@ -43,7 +43,7 @@ namespace mongo {
*/
class EnsureSortedStage final : public PlanStage {
public:
- EnsureSortedStage(OperationContext* opCtx,
+ EnsureSortedStage(ExpressionContext* expCtx,
BSONObj pattern,
WorkingSet* ws,
std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/eof.cpp b/src/mongo/db/exec/eof.cpp
index acd290f0f1a..25c7e44ac92 100644
--- a/src/mongo/db/exec/eof.cpp
+++ b/src/mongo/db/exec/eof.cpp
@@ -43,7 +43,7 @@ using std::vector;
// static
const char* EOFStage::kStageType = "EOF";
-EOFStage::EOFStage(OperationContext* opCtx) : PlanStage(kStageType, opCtx) {}
+EOFStage::EOFStage(ExpressionContext* expCtx) : PlanStage(kStageType, expCtx) {}
EOFStage::~EOFStage() {}
diff --git a/src/mongo/db/exec/eof.h b/src/mongo/db/exec/eof.h
index 630bee72f9d..e60d6b4319b 100644
--- a/src/mongo/db/exec/eof.h
+++ b/src/mongo/db/exec/eof.h
@@ -39,7 +39,7 @@ namespace mongo {
*/
class EOFStage final : public PlanStage {
public:
- EOFStage(OperationContext* opCtx);
+ EOFStage(ExpressionContext* expCtx);
~EOFStage();
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index ce3e60f122b..914158d3191 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -49,12 +49,12 @@ using std::vector;
// static
const char* FetchStage::kStageType = "FETCH";
-FetchStage::FetchStage(OperationContext* opCtx,
+FetchStage::FetchStage(ExpressionContext* expCtx,
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
const MatchExpression* filter,
const Collection* collection)
- : RequiresCollectionStage(kStageType, opCtx, collection),
+ : RequiresCollectionStage(kStageType, expCtx, collection),
_ws(ws),
_filter((filter && !filter->isTriviallyTrue()) ? filter : nullptr),
_idRetrying(WorkingSet::INVALID_ID) {
@@ -101,9 +101,9 @@ PlanStage::StageState FetchStage::doWork(WorkingSetID* out) {
try {
if (!_cursor)
- _cursor = collection()->getCursor(getOpCtx());
+ _cursor = collection()->getCursor(opCtx());
- if (!WorkingSetCommon::fetch(getOpCtx(), _ws, id, _cursor, collection()->ns())) {
+ if (!WorkingSetCommon::fetch(opCtx(), _ws, id, _cursor, collection()->ns())) {
_ws->free(id);
return NEED_TIME;
}
@@ -151,7 +151,7 @@ void FetchStage::doDetachFromOperationContext() {
void FetchStage::doReattachToOperationContext() {
if (_cursor)
- _cursor->reattachToOperationContext(getOpCtx());
+ _cursor->reattachToOperationContext(opCtx());
}
PlanStage::StageState FetchStage::returnIfMatches(WorkingSetMember* member,
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index 074bd63b4d2..10dddc50a4f 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -50,7 +50,7 @@ class SeekableRecordCursor;
*/
class FetchStage : public RequiresCollectionStage {
public:
- FetchStage(OperationContext* opCtx,
+ FetchStage(ExpressionContext* expCtx,
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
const MatchExpression* filter,
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index 6e2b86df096..295b1d1e875 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -287,12 +287,12 @@ GeoNear2DStage::DensityEstimator::DensityEstimator(PlanStage::Children* children
}
// Initialize the internal states
-void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* opCtx,
+void GeoNear2DStage::DensityEstimator::buildIndexScan(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex) {
// Scan bounds on 2D indexes are only over the 2D field - other bounds aren't applicable.
// This is handled in query planning.
- IndexScanParams scanParams(opCtx, twoDIndex);
+ IndexScanParams scanParams(expCtx->opCtx, twoDIndex);
scanParams.bounds = _nearParams->baseBounds;
// The "2d" field is always the first in the index
@@ -323,20 +323,20 @@ void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* opCtx,
IndexBoundsBuilder::intersectize(oil, &scanParams.bounds.fields[twoDFieldPosition]);
invariant(!_indexScan);
- _indexScan = new IndexScan(opCtx, scanParams, workingSet, nullptr);
+ _indexScan = new IndexScan(expCtx, scanParams, workingSet, nullptr);
_children->emplace_back(_indexScan);
}
// Return IS_EOF if we find a document in its ancestor cells and set the estimated distance
// from the nearest document.
-PlanStage::StageState GeoNear2DStage::DensityEstimator::work(OperationContext* opCtx,
+PlanStage::StageState GeoNear2DStage::DensityEstimator::work(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex,
WorkingSetID* out,
double* estimatedDistance) {
if (!_indexScan) {
// Setup index scan stage for current level.
- buildIndexScan(opCtx, workingSet, twoDIndex);
+ buildIndexScan(expCtx, workingSet, twoDIndex);
}
WorkingSetID workingSetID;
@@ -414,7 +414,7 @@ PlanStage::StageState GeoNear2DStage::initialize(OperationContext* opCtx,
double estimatedDistance;
PlanStage::StageState state =
- _densityEstimator->work(opCtx, workingSet, indexDescriptor(), out, &estimatedDistance);
+ _densityEstimator->work(expCtx(), workingSet, indexDescriptor(), out, &estimatedDistance);
if (state == PlanStage::IS_EOF) {
// 2d index only works with legacy points as centroid. $nearSphere will project
@@ -450,10 +450,10 @@ PlanStage::StageState GeoNear2DStage::initialize(OperationContext* opCtx,
static const string kTwoDIndexNearStage("GEO_NEAR_2D");
GeoNear2DStage::GeoNear2DStage(const GeoNearParams& nearParams,
- OperationContext* opCtx,
+ ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex)
- : NearStage(opCtx, kTwoDIndexNearStage.c_str(), STAGE_GEO_NEAR_2D, workingSet, twoDIndex),
+ : NearStage(expCtx, kTwoDIndexNearStage.c_str(), STAGE_GEO_NEAR_2D, workingSet, twoDIndex),
_nearParams(nearParams),
_fullBounds(twoDDistanceBounds(nearParams, twoDIndex)),
_currBounds(_fullBounds.center(), -1, _fullBounds.getInner()),
@@ -524,12 +524,12 @@ private:
// Helper class to maintain ownership of a match expression alongside an index scan
class FetchStageWithMatch final : public FetchStage {
public:
- FetchStageWithMatch(OperationContext* opCtx,
+ FetchStageWithMatch(ExpressionContext* expCtx,
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
MatchExpression* filter,
const Collection* collection)
- : FetchStage(opCtx, ws, std::move(child), filter, collection), _matcher(filter) {}
+ : FetchStage(expCtx, ws, std::move(child), filter, collection), _matcher(filter) {}
private:
// Owns matcher
@@ -702,7 +702,7 @@ GeoNear2DStage::nextInterval(OperationContext* opCtx,
.transitional_ignore();
// 2D indexes support covered search over additional fields they contain
- auto scan = std::make_unique<IndexScan>(opCtx, scanParams, workingSet, _nearParams.filter);
+ auto scan = std::make_unique<IndexScan>(expCtx(), scanParams, workingSet, _nearParams.filter);
MatchExpression* docMatcher = nullptr;
@@ -714,7 +714,7 @@ GeoNear2DStage::nextInterval(OperationContext* opCtx,
// FetchStage owns index scan
_children.emplace_back(std::make_unique<FetchStageWithMatch>(
- opCtx, workingSet, std::move(scan), docMatcher, collection));
+ expCtx(), workingSet, std::move(scan), docMatcher, collection));
return StatusWith<CoveredInterval*>(new CoveredInterval(
_children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
@@ -748,10 +748,10 @@ static int getFieldPosition(const IndexDescriptor* index, const string& fieldNam
static const string kS2IndexNearStage("GEO_NEAR_2DSPHERE");
GeoNear2DSphereStage::GeoNear2DSphereStage(const GeoNearParams& nearParams,
- OperationContext* opCtx,
+ ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* s2Index)
- : NearStage(opCtx, kS2IndexNearStage.c_str(), STAGE_GEO_NEAR_2DSPHERE, workingSet, s2Index),
+ : NearStage(expCtx, kS2IndexNearStage.c_str(), STAGE_GEO_NEAR_2DSPHERE, workingSet, s2Index),
_nearParams(nearParams),
_fullBounds(geoNearDistanceBounds(*nearParams.nearQuery)),
_currBounds(_fullBounds.center(), -1, _fullBounds.getInner()),
@@ -827,10 +827,10 @@ GeoNear2DSphereStage::DensityEstimator::DensityEstimator(PlanStage::Children* ch
}
// Setup the index scan stage for neighbors at this level.
-void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* opCtx,
+void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* s2Index) {
- IndexScanParams scanParams(opCtx, s2Index);
+ IndexScanParams scanParams(expCtx->opCtx, s2Index);
scanParams.bounds = _nearParams->baseBounds;
// Because the planner doesn't yet set up 2D index bounds, do it ourselves here
@@ -853,18 +853,18 @@ void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* op
// Index scan
invariant(!_indexScan);
- _indexScan = new IndexScan(opCtx, scanParams, workingSet, nullptr);
+ _indexScan = new IndexScan(expCtx, scanParams, workingSet, nullptr);
_children->emplace_back(_indexScan);
}
-PlanStage::StageState GeoNear2DSphereStage::DensityEstimator::work(OperationContext* opCtx,
+PlanStage::StageState GeoNear2DSphereStage::DensityEstimator::work(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* s2Index,
WorkingSetID* out,
double* estimatedDistance) {
if (!_indexScan) {
// Setup index scan stage for current level.
- buildIndexScan(opCtx, workingSet, s2Index);
+ buildIndexScan(expCtx, workingSet, s2Index);
}
WorkingSetID workingSetID;
@@ -945,7 +945,7 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* opCtx,
double estimatedDistance;
PlanStage::StageState state =
- _densityEstimator->work(opCtx, workingSet, indexDescriptor(), out, &estimatedDistance);
+ _densityEstimator->work(expCtx(), workingSet, indexDescriptor(), out, &estimatedDistance);
if (state == IS_EOF) {
// We find a document in 4 neighbors at current level, but didn't at previous level.
@@ -1033,11 +1033,11 @@ GeoNear2DSphereStage::nextInterval(OperationContext* opCtx,
OrderedIntervalList* coveredIntervals = &scanParams.bounds.fields[s2FieldPosition];
ExpressionMapping::S2CellIdsToIntervalsWithParents(cover, _indexParams, coveredIntervals);
- auto scan = std::make_unique<IndexScan>(opCtx, scanParams, workingSet, nullptr);
+ auto scan = std::make_unique<IndexScan>(expCtx(), scanParams, workingSet, nullptr);
// FetchStage owns index scan
_children.emplace_back(std::make_unique<FetchStage>(
- opCtx, workingSet, std::move(scan), _nearParams.filter, collection));
+ expCtx(), workingSet, std::move(scan), _nearParams.filter, collection));
return StatusWith<CoveredInterval*>(new CoveredInterval(
_children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
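The density estimators in both geo stages now take the ExpressionContext and hand it straight to the IndexScan children they build; only IndexScanParams, which still wants a plain OperationContext*, is fed the unwrapped expCtx->opCtx. A condensed sketch of that bridge pattern, reusing the names from the 2D hunk above:

    // Sketch only: how buildIndexScan() threads the two contexts after this patch.
    // IndexScanParams keeps its OperationContext* signature, so it receives expCtx->opCtx;
    // the IndexScan stage itself is constructed from the ExpressionContext.
    IndexScanParams scanParams(expCtx->opCtx, twoDIndex);
    scanParams.bounds = _nearParams->baseBounds;
    _indexScan = new IndexScan(expCtx, scanParams, workingSet, nullptr);
    _children->emplace_back(_indexScan);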
diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h
index ce23ccd6e38..eb096064d53 100644
--- a/src/mongo/db/exec/geo_near.h
+++ b/src/mongo/db/exec/geo_near.h
@@ -69,7 +69,7 @@ struct GeoNearParams {
class GeoNear2DStage final : public NearStage {
public:
GeoNear2DStage(const GeoNearParams& nearParams,
- OperationContext* opCtx,
+ ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex);
@@ -92,14 +92,14 @@ private:
const GeoNearParams* nearParams,
const R2Annulus& fullBounds);
- PlanStage::StageState work(OperationContext* opCtx,
+ PlanStage::StageState work(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex,
WorkingSetID* out,
double* estimatedDistance);
private:
- void buildIndexScan(OperationContext* opCtx,
+ void buildIndexScan(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* twoDIndex);
@@ -135,7 +135,7 @@ private:
class GeoNear2DSphereStage final : public NearStage {
public:
GeoNear2DSphereStage(const GeoNearParams& nearParams,
- OperationContext* opCtx,
+ ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* s2Index);
@@ -163,14 +163,14 @@ private:
// Search for a document in neighbors at the current level.
// Return IS_EOF if such a document exists and set the estimated distance to the nearest doc.
- PlanStage::StageState work(OperationContext* opCtx,
+ PlanStage::StageState work(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* s2Index,
WorkingSetID* out,
double* estimatedDistance);
private:
- void buildIndexScan(OperationContext* opCtx,
+ void buildIndexScan(ExpressionContext* expCtx,
WorkingSet* workingSet,
const IndexDescriptor* s2Index);
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index c3378824a58..d8b9400daba 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -49,22 +49,22 @@ using std::vector;
// static
const char* IDHackStage::kStageType = "IDHACK";
-IDHackStage::IDHackStage(OperationContext* opCtx,
+IDHackStage::IDHackStage(ExpressionContext* expCtx,
CanonicalQuery* query,
WorkingSet* ws,
const IndexDescriptor* descriptor)
- : RequiresIndexStage(kStageType, opCtx, descriptor, ws),
+ : RequiresIndexStage(kStageType, expCtx, descriptor, ws),
_workingSet(ws),
_key(query->getQueryObj()["_id"].wrap()) {
_specificStats.indexName = descriptor->indexName();
_addKeyMetadata = query->getQueryRequest().returnKey();
}
-IDHackStage::IDHackStage(OperationContext* opCtx,
+IDHackStage::IDHackStage(ExpressionContext* expCtx,
const BSONObj& key,
WorkingSet* ws,
const IndexDescriptor* descriptor)
- : RequiresIndexStage(kStageType, opCtx, descriptor, ws), _workingSet(ws), _key(key) {
+ : RequiresIndexStage(kStageType, expCtx, descriptor, ws), _workingSet(ws), _key(key) {
_specificStats.indexName = descriptor->indexName();
}
@@ -82,7 +82,7 @@ PlanStage::StageState IDHackStage::doWork(WorkingSetID* out) {
WorkingSetID id = WorkingSet::INVALID_ID;
try {
// Look up the key by going directly to the index.
- RecordId recordId = indexAccessMethod()->findSingle(getOpCtx(), _key);
+ RecordId recordId = indexAccessMethod()->findSingle(opCtx(), _key);
// Key not found.
if (recordId.isNull()) {
@@ -100,11 +100,10 @@ PlanStage::StageState IDHackStage::doWork(WorkingSetID* out) {
_workingSet->transitionToRecordIdAndIdx(id);
if (!_recordCursor)
- _recordCursor = collection()->getCursor(getOpCtx());
+ _recordCursor = collection()->getCursor(opCtx());
// Find the document associated with 'id' in the collection's record store.
- if (!WorkingSetCommon::fetch(
- getOpCtx(), _workingSet, id, _recordCursor, collection()->ns())) {
+ if (!WorkingSetCommon::fetch(opCtx(), _workingSet, id, _recordCursor, collection()->ns())) {
// We didn't find a document with RecordId 'id'.
_workingSet->free(id);
_commonStats.isEOF = true;
@@ -156,7 +155,7 @@ void IDHackStage::doDetachFromOperationContext() {
void IDHackStage::doReattachToOperationContext() {
if (_recordCursor)
- _recordCursor->reattachToOperationContext(getOpCtx());
+ _recordCursor->reattachToOperationContext(opCtx());
}
// static
diff --git a/src/mongo/db/exec/idhack.h b/src/mongo/db/exec/idhack.h
index 5a56f67b328..5c2ac9f894a 100644
--- a/src/mongo/db/exec/idhack.h
+++ b/src/mongo/db/exec/idhack.h
@@ -48,12 +48,12 @@ class RecordCursor;
class IDHackStage final : public RequiresIndexStage {
public:
/** Takes ownership of all the arguments except 'collection'. */
- IDHackStage(OperationContext* opCtx,
+ IDHackStage(ExpressionContext* expCtx,
CanonicalQuery* query,
WorkingSet* ws,
const IndexDescriptor* descriptor);
- IDHackStage(OperationContext* opCtx,
+ IDHackStage(ExpressionContext* expCtx,
const BSONObj& key,
WorkingSet* ws,
const IndexDescriptor* descriptor);
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index f50d6d15fca..dc01957ef8b 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -59,11 +59,11 @@ namespace mongo {
// static
const char* IndexScan::kStageType = "IXSCAN";
-IndexScan::IndexScan(OperationContext* opCtx,
+IndexScan::IndexScan(ExpressionContext* expCtx,
IndexScanParams params,
WorkingSet* workingSet,
const MatchExpression* filter)
- : RequiresIndexStage(kStageType, opCtx, params.indexDescriptor, workingSet),
+ : RequiresIndexStage(kStageType, expCtx, params.indexDescriptor, workingSet),
_workingSet(workingSet),
_keyPattern(params.keyPattern.getOwned()),
_bounds(std::move(params.bounds)),
@@ -89,7 +89,7 @@ IndexScan::IndexScan(OperationContext* opCtx,
boost::optional<IndexKeyEntry> IndexScan::initIndexScan() {
// Perform the possibly heavy-duty initialization of the underlying index cursor.
- _indexCursor = indexAccessMethod()->newCursor(getOpCtx(), _forward);
+ _indexCursor = indexAccessMethod()->newCursor(opCtx(), _forward);
// We always seek once to establish the cursor position.
++_specificStats.seeks;
@@ -231,7 +231,7 @@ PlanStage::StageState IndexScan::doWork(WorkingSetID* out) {
WorkingSetMember* member = _workingSet->get(id);
member->recordId = kv->loc;
member->keyData.push_back(IndexKeyDatum(
- _keyPattern, kv->key, workingSetIndexId(), getOpCtx()->recoveryUnit()->getSnapshotId()));
+ _keyPattern, kv->key, workingSetIndexId(), opCtx()->recoveryUnit()->getSnapshotId()));
_workingSet->transitionToRecordIdAndIdx(id);
if (_addKeyMetadata) {
@@ -270,7 +270,7 @@ void IndexScan::doDetachFromOperationContext() {
void IndexScan::doReattachToOperationContext() {
if (_indexCursor)
- _indexCursor->reattachToOperationContext(getOpCtx());
+ _indexCursor->reattachToOperationContext(opCtx());
}
std::unique_ptr<PlanStageStats> IndexScan::getStats() {
diff --git a/src/mongo/db/exec/index_scan.h b/src/mongo/db/exec/index_scan.h
index 5ee7d807282..d36f99d9f9b 100644
--- a/src/mongo/db/exec/index_scan.h
+++ b/src/mongo/db/exec/index_scan.h
@@ -108,7 +108,7 @@ public:
HIT_END
};
- IndexScan(OperationContext* opCtx,
+ IndexScan(ExpressionContext* expCtx,
IndexScanParams params,
WorkingSet* workingSet,
const MatchExpression* filter);
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index e800d614039..41505be622f 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -43,11 +43,11 @@ using std::vector;
// static
const char* LimitStage::kStageType = "LIMIT";
-LimitStage::LimitStage(OperationContext* opCtx,
+LimitStage::LimitStage(ExpressionContext* expCtx,
long long limit,
WorkingSet* ws,
std::unique_ptr<PlanStage> child)
- : PlanStage(kStageType, opCtx), _ws(ws), _numToReturn(limit) {
+ : PlanStage(kStageType, expCtx), _ws(ws), _numToReturn(limit) {
_specificStats.limit = _numToReturn;
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h
index f807838b540..ffc2f6a509c 100644
--- a/src/mongo/db/exec/limit.h
+++ b/src/mongo/db/exec/limit.h
@@ -45,7 +45,7 @@ namespace mongo {
*/
class LimitStage final : public PlanStage {
public:
- LimitStage(OperationContext* opCtx,
+ LimitStage(ExpressionContext* expCtx,
long long limit,
WorkingSet* ws,
std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index cc7d40b073e..58a3e33f241 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -47,10 +47,10 @@ using std::vector;
// static
const char* MergeSortStage::kStageType = "SORT_MERGE";
-MergeSortStage::MergeSortStage(OperationContext* opCtx,
+MergeSortStage::MergeSortStage(ExpressionContext* expCtx,
const MergeSortStageParams& params,
WorkingSet* ws)
- : PlanStage(kStageType, opCtx),
+ : PlanStage(kStageType, expCtx),
_ws(ws),
_pattern(params.pattern),
_collator(params.collator),
diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h
index 714f6e0c68a..efb2833b8fb 100644
--- a/src/mongo/db/exec/merge_sort.h
+++ b/src/mongo/db/exec/merge_sort.h
@@ -57,7 +57,7 @@ class MergeSortStageParams;
*/
class MergeSortStage final : public PlanStage {
public:
- MergeSortStage(OperationContext* opCtx, const MergeSortStageParams& params, WorkingSet* ws);
+ MergeSortStage(ExpressionContext* expCtx, const MergeSortStageParams& params, WorkingSet* ws);
void addChild(std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp
index 761d6b21ed0..8a33f98386e 100644
--- a/src/mongo/db/exec/multi_iterator.cpp
+++ b/src/mongo/db/exec/multi_iterator.cpp
@@ -43,10 +43,10 @@ using std::vector;
const char* MultiIteratorStage::kStageType = "MULTI_ITERATOR";
-MultiIteratorStage::MultiIteratorStage(OperationContext* opCtx,
+MultiIteratorStage::MultiIteratorStage(ExpressionContext* expCtx,
WorkingSet* ws,
Collection* collection)
- : RequiresCollectionStage(kStageType, opCtx, collection), _ws(ws) {}
+ : RequiresCollectionStage(kStageType, expCtx, collection), _ws(ws) {}
void MultiIteratorStage::addIterator(unique_ptr<RecordCursor> it) {
_iterators.push_back(std::move(it));
@@ -74,8 +74,7 @@ PlanStage::StageState MultiIteratorStage::doWork(WorkingSetID* out) {
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
member->recordId = record->id;
- member->resetDocument(getOpCtx()->recoveryUnit()->getSnapshotId(),
- record->data.releaseToBson());
+ member->resetDocument(opCtx()->recoveryUnit()->getSnapshotId(), record->data.releaseToBson());
_ws->transitionToRecordIdAndObj(*out);
return PlanStage::ADVANCED;
}
@@ -105,7 +104,7 @@ void MultiIteratorStage::doDetachFromOperationContext() {
void MultiIteratorStage::doReattachToOperationContext() {
for (auto&& iterator : _iterators) {
- iterator->reattachToOperationContext(getOpCtx());
+ iterator->reattachToOperationContext(opCtx());
}
}
diff --git a/src/mongo/db/exec/multi_iterator.h b/src/mongo/db/exec/multi_iterator.h
index 6abfa760e7a..accc0e4b1a6 100644
--- a/src/mongo/db/exec/multi_iterator.h
+++ b/src/mongo/db/exec/multi_iterator.h
@@ -47,7 +47,7 @@ namespace mongo {
*/
class MultiIteratorStage final : public RequiresCollectionStage {
public:
- MultiIteratorStage(OperationContext* opCtx, WorkingSet* ws, Collection* collection);
+ MultiIteratorStage(ExpressionContext* expCtx, WorkingSet* ws, Collection* collection);
void addIterator(std::unique_ptr<RecordCursor> it);
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 96d691fdbb7..4fa9b8766a2 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -61,11 +61,11 @@ using std::vector;
// static
const char* MultiPlanStage::kStageType = "MULTI_PLAN";
-MultiPlanStage::MultiPlanStage(OperationContext* opCtx,
+MultiPlanStage::MultiPlanStage(ExpressionContext* expCtx,
const Collection* collection,
CanonicalQuery* cq,
CachingMode cachingMode)
- : RequiresCollectionStage(kStageType, opCtx, collection),
+ : RequiresCollectionStage(kStageType, expCtx, collection),
_cachingMode(cachingMode),
_query(cq),
_bestPlanIdx(kNoSuchPlan),
@@ -203,7 +203,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
// make sense.
ScopedTimer timer(getClock(), &_commonStats.executionTimeMillis);
- size_t numWorks = getTrialPeriodWorks(getOpCtx(), collection());
+ size_t numWorks = getTrialPeriodWorks(opCtx(), collection());
size_t numResults = getTrialPeriodNumToReturn(*_query);
try {
@@ -365,7 +365,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
->set(*_query,
solutions,
std::move(ranking),
- getOpCtx()->getServiceContext()->getPreciseClockSource()->now())
+ opCtx()->getServiceContext()->getPreciseClockSource()->now())
.transitional_ignore();
}
}
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index f70b2800e63..1c33885d245 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -76,7 +76,7 @@ public:
* If 'shouldCache' is true, writes a cache entry for the winning plan to the plan cache
* when possible. If 'shouldCache' is false, the plan cache will never be written.
*/
- MultiPlanStage(OperationContext* opCtx,
+ MultiPlanStage(ExpressionContext* expCtx,
const Collection* collection,
CanonicalQuery* cq,
CachingMode cachingMode = CachingMode::AlwaysCache);
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index ac16083b0a5..30bbb894881 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -42,12 +42,12 @@ namespace mongo {
using std::unique_ptr;
using std::vector;
-NearStage::NearStage(OperationContext* opCtx,
+NearStage::NearStage(ExpressionContext* expCtx,
const char* typeName,
StageType type,
WorkingSet* workingSet,
const IndexDescriptor* indexDescriptor)
- : RequiresIndexStage(typeName, opCtx, indexDescriptor, workingSet),
+ : RequiresIndexStage(typeName, expCtx, indexDescriptor, workingSet),
_workingSet(workingSet),
_searchState(SearchState_Initializing),
_nextIntervalStats(nullptr),
@@ -67,7 +67,7 @@ NearStage::CoveredInterval::CoveredInterval(PlanStage* covering,
PlanStage::StageState NearStage::initNext(WorkingSetID* out) {
- PlanStage::StageState state = initialize(getOpCtx(), _workingSet, out);
+ PlanStage::StageState state = initialize(opCtx(), _workingSet, out);
if (state == PlanStage::IS_EOF) {
_searchState = SearchState_Buffering;
return PlanStage::NEED_TIME;
@@ -139,7 +139,7 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
if (!_nextInterval) {
StatusWith<CoveredInterval*> intervalStatus =
- nextInterval(getOpCtx(), _workingSet, collection());
+ nextInterval(opCtx(), _workingSet, collection());
if (!intervalStatus.isOK()) {
_searchState = SearchState_Finished;
*error = intervalStatus.getStatus();
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index 94d9639b312..8f55c777494 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -104,7 +104,7 @@ protected:
/**
* Subclasses of NearStage must provide basics + a stats object which gets owned here.
*/
- NearStage(OperationContext* opCtx,
+ NearStage(ExpressionContext* expCtx,
const char* typeName,
StageType type,
WorkingSet* workingSet,
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index 3800536b62c..c50a4981e94 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -44,8 +44,11 @@ using std::vector;
// static
const char* OrStage::kStageType = "OR";
-OrStage::OrStage(OperationContext* opCtx, WorkingSet* ws, bool dedup, const MatchExpression* filter)
- : PlanStage(kStageType, opCtx), _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup) {}
+OrStage::OrStage(ExpressionContext* expCtx,
+ WorkingSet* ws,
+ bool dedup,
+ const MatchExpression* filter)
+ : PlanStage(kStageType, expCtx), _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup) {}
void OrStage::addChild(std::unique_ptr<PlanStage> child) {
_children.emplace_back(std::move(child));
diff --git a/src/mongo/db/exec/or.h b/src/mongo/db/exec/or.h
index 8d2c043ee46..e4ddcbcb2c0 100644
--- a/src/mongo/db/exec/or.h
+++ b/src/mongo/db/exec/or.h
@@ -44,7 +44,7 @@ namespace mongo {
*/
class OrStage final : public PlanStage {
public:
- OrStage(OperationContext* opCtx, WorkingSet* ws, bool dedup, const MatchExpression* filter);
+ OrStage(ExpressionContext* expCtx, WorkingSet* ws, bool dedup, const MatchExpression* filter);
void addChild(std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/pipeline_proxy.cpp b/src/mongo/db/exec/pipeline_proxy.cpp
index c0c1f2c23d2..bdfaaa746b8 100644
--- a/src/mongo/db/exec/pipeline_proxy.cpp
+++ b/src/mongo/db/exec/pipeline_proxy.cpp
@@ -46,16 +46,16 @@ using std::vector;
const char* PipelineProxyStage::kStageType = "PIPELINE_PROXY";
-PipelineProxyStage::PipelineProxyStage(OperationContext* opCtx,
+PipelineProxyStage::PipelineProxyStage(ExpressionContext* expCtx,
std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
WorkingSet* ws)
- : PipelineProxyStage(opCtx, std::move(pipeline), ws, kStageType) {}
+ : PipelineProxyStage(expCtx, std::move(pipeline), ws, kStageType) {}
-PipelineProxyStage::PipelineProxyStage(OperationContext* opCtx,
+PipelineProxyStage::PipelineProxyStage(ExpressionContext* expCtx,
std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
WorkingSet* ws,
const char* stageTypeName)
- : PlanStage(stageTypeName, opCtx),
+ : PlanStage(stageTypeName, expCtx),
_pipeline(std::move(pipeline)),
_includeMetaData(_pipeline->getContext()->needsMerge), // send metadata to merger
_ws(ws) {
@@ -112,11 +112,11 @@ void PipelineProxyStage::doDetachFromOperationContext() {
}
void PipelineProxyStage::doReattachToOperationContext() {
- _pipeline->reattachToOperationContext(getOpCtx());
+ _pipeline->reattachToOperationContext(opCtx());
}
void PipelineProxyStage::doDispose() {
- _pipeline->dispose(getOpCtx());
+ _pipeline->dispose(opCtx());
}
unique_ptr<PlanStageStats> PipelineProxyStage::getStats() {
diff --git a/src/mongo/db/exec/pipeline_proxy.h b/src/mongo/db/exec/pipeline_proxy.h
index 0bdbbd69a9f..99f8583f3c0 100644
--- a/src/mongo/db/exec/pipeline_proxy.h
+++ b/src/mongo/db/exec/pipeline_proxy.h
@@ -46,7 +46,7 @@ namespace mongo {
*/
class PipelineProxyStage : public PlanStage {
public:
- PipelineProxyStage(OperationContext* opCtx,
+ PipelineProxyStage(ExpressionContext* expCtx,
std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
WorkingSet* ws);
@@ -86,7 +86,7 @@ public:
static const char* kStageType;
protected:
- PipelineProxyStage(OperationContext* opCtx,
+ PipelineProxyStage(ExpressionContext* expCtx,
std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
WorkingSet* ws,
const char* stageTypeName);
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index a24e6af266a..3a9475a5ca4 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -34,6 +34,7 @@
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/exec/working_set.h"
+#include "mongo/db/pipeline/expression_context.h"
namespace mongo {
@@ -105,16 +106,18 @@ class RecordId;
*/
class PlanStage {
public:
- PlanStage(const char* typeName, OperationContext* opCtx)
- : _commonStats(typeName), _opCtx(opCtx) {}
+ PlanStage(const char* typeName, ExpressionContext* expCtx)
+ : _commonStats(typeName), _opCtx(expCtx->opCtx), _expCtx(expCtx) {
+ invariant(expCtx);
+ }
protected:
/**
* Obtain a PlanStage given a child stage. Called during the construction of derived
* PlanStage types with a single direct descendant.
*/
- PlanStage(OperationContext* opCtx, std::unique_ptr<PlanStage> child, const char* typeName)
- : PlanStage(typeName, opCtx) {
+ PlanStage(ExpressionContext* expCtx, std::unique_ptr<PlanStage> child, const char* typeName)
+ : PlanStage(typeName, expCtx) {
_children.push_back(std::move(child));
}
@@ -358,14 +361,14 @@ protected:
/**
* Does stage-specific detaching.
*
- * Implementations of this method cannot use the pointer returned from getOpCtx().
+ * Implementations of this method cannot use the pointer returned from opCtx().
*/
virtual void doDetachFromOperationContext() {}
/**
* Does stage-specific attaching.
*
- * If an OperationContext* is needed, use getOpCtx(), which will return a valid
+ * If an OperationContext* is needed, use opCtx(), which will return a valid
* OperationContext* (the one to which the stage is reattaching).
*/
virtual void doReattachToOperationContext() {}
@@ -377,15 +380,23 @@ protected:
ClockSource* getClock() const;
- OperationContext* getOpCtx() const {
+ OperationContext* opCtx() const {
return _opCtx;
}
+ ExpressionContext* expCtx() const {
+ return _expCtx;
+ }
+
Children _children;
CommonStats _commonStats;
private:
OperationContext* _opCtx;
+
+ // The PlanExecutor holds a strong reference to this which ensures that this pointer remains
+ // valid for the entire lifetime of the PlanStage.
+ ExpressionContext* _expCtx;
};
} // namespace mongo
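This plan_stage.h hunk is the core of the change: the base class now receives an ExpressionContext*, caches the OperationContext it wraps, and renames the accessor from getOpCtx() to opCtx(), with a new expCtx() accessor alongside it. A rough sketch of what a derived stage looks like against the new base class follows; NoopStage is hypothetical and only illustrates the constructor contract, with the usual pure-virtual overrides given trivial bodies:

    #include <memory>

    #include "mongo/db/exec/plan_stage.h"

    namespace mongo {

    // Hypothetical stage, not part of this commit; shown to illustrate the new interface.
    class NoopStage final : public PlanStage {
    public:
        static constexpr const char* kStageType = "NOOP";

        // The base class extracts and caches expCtx->opCtx itself, so derived stages
        // no longer store an OperationContext* of their own.
        explicit NoopStage(ExpressionContext* expCtx) : PlanStage(kStageType, expCtx) {}

        bool isEOF() final {
            return true;
        }

        StageState doWork(WorkingSetID* out) final {
            // Anything that needs the OperationContext now calls opCtx(), e.g.
            // opCtx()->recoveryUnit()->getSnapshotId() as in the scan stages above.
            *out = WorkingSet::INVALID_ID;
            return PlanStage::IS_EOF;
        }

        // Reuses an existing StageType value purely for illustration.
        StageType stageType() const final {
            return STAGE_EOF;
        }

        std::unique_ptr<PlanStageStats> getStats() final {
            return std::make_unique<PlanStageStats>(_commonStats, STAGE_EOF);
        }

        const SpecificStats* getSpecificStats() const final {
            return nullptr;
        }
    };

    }  // namespace mongo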
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 6051f738c84..ea1d1623a75 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -117,12 +117,12 @@ auto rehydrateIndexKey(const BSONObj& keyPattern, const BSONObj& dehydratedKey)
}
} // namespace
-ProjectionStage::ProjectionStage(boost::intrusive_ptr<ExpressionContext> expCtx,
+ProjectionStage::ProjectionStage(ExpressionContext* expCtx,
const BSONObj& projObj,
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
const char* stageType)
- : PlanStage{expCtx->opCtx, std::move(child), stageType},
+ : PlanStage{expCtx, std::move(child), stageType},
_projObj{expCtx->explain ? boost::make_optional(projObj.getOwned()) : boost::none},
_ws{*ws} {}
@@ -178,7 +178,7 @@ ProjectionStageDefault::ProjectionStageDefault(boost::intrusive_ptr<ExpressionCo
const projection_ast::Projection* projection,
WorkingSet* ws,
std::unique_ptr<PlanStage> child)
- : ProjectionStage{expCtx, projObj, ws, std::move(child), "PROJECTION_DEFAULT"},
+ : ProjectionStage{expCtx.get(), projObj, ws, std::move(child), "PROJECTION_DEFAULT"},
_requestedMetadata{projection->metadataDeps()},
_projectType{projection->type()},
_executor{projection_executor::buildProjectionExecutor(
@@ -230,7 +230,7 @@ Status ProjectionStageDefault::transform(WorkingSetMember* member) const {
return Status::OK();
}
-ProjectionStageCovered::ProjectionStageCovered(boost::intrusive_ptr<ExpressionContext> expCtx,
+ProjectionStageCovered::ProjectionStageCovered(ExpressionContext* expCtx,
const BSONObj& projObj,
const projection_ast::Projection* projection,
WorkingSet* ws,
@@ -287,7 +287,7 @@ Status ProjectionStageCovered::transform(WorkingSetMember* member) const {
return Status::OK();
}
-ProjectionStageSimple::ProjectionStageSimple(boost::intrusive_ptr<ExpressionContext> expCtx,
+ProjectionStageSimple::ProjectionStageSimple(ExpressionContext* expCtx,
const BSONObj& projObj,
const projection_ast::Projection* projection,
WorkingSet* ws,
diff --git a/src/mongo/db/exec/projection.h b/src/mongo/db/exec/projection.h
index d84ba1168e0..5a5d525cc0a 100644
--- a/src/mongo/db/exec/projection.h
+++ b/src/mongo/db/exec/projection.h
@@ -43,7 +43,7 @@ namespace mongo {
*/
class ProjectionStage : public PlanStage {
protected:
- ProjectionStage(boost::intrusive_ptr<ExpressionContext> expCtx,
+ ProjectionStage(ExpressionContext* expCtx,
const BSONObj& projObj,
WorkingSet* ws,
std::unique_ptr<PlanStage> child,
@@ -117,7 +117,7 @@ public:
/**
* ProjectionNodeCovered should obtain a fast-path object through this constructor.
*/
- ProjectionStageCovered(boost::intrusive_ptr<ExpressionContext> expCtx,
+ ProjectionStageCovered(ExpressionContext* expCtx,
const BSONObj& projObj,
const projection_ast::Projection* projection,
WorkingSet* ws,
@@ -156,7 +156,7 @@ public:
/**
* ProjectionNodeSimple should obtain a fast-path object through this constructor.
*/
- ProjectionStageSimple(boost::intrusive_ptr<ExpressionContext> expCtx,
+ ProjectionStageSimple(ExpressionContext* expCtx,
const BSONObj& projObj,
const projection_ast::Projection* projection,
WorkingSet* ws,
diff --git a/src/mongo/db/exec/projection_executor_builder_test.cpp b/src/mongo/db/exec/projection_executor_builder_test.cpp
index c5c4f1240bc..58543a23d54 100644
--- a/src/mongo/db/exec/projection_executor_builder_test.cpp
+++ b/src/mongo/db/exec/projection_executor_builder_test.cpp
@@ -231,8 +231,9 @@ TEST_F(ProjectionExecutorTestWithFallBackToDefault, CanProjectFindElemMatch) {
}
TEST_F(ProjectionExecutorTestWithFallBackToDefault, ElemMatchRespectsCollator) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- getExpCtx()->setCollator(&collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
+ getExpCtx()->setCollator(std::move(collator));
auto proj = parseWithFindFeaturesEnabled(fromjson("{a: {$elemMatch: {$gte: 'abc'}}}"));
auto executor = createProjectionExecutor(proj);
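This test hunk reflects the second half of the patch: the collator is no longer borrowed by the ExpressionContext as a raw pointer but is owned by it, so setCollator() now takes a std::unique_ptr. A minimal sketch of the new hand-off, assuming the fixture's getExpCtx() accessor used in the hunk above:

    // The ExpressionContext takes ownership; the test keeps no raw pointer to the mock.
    auto collator =
        std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
    getExpCtx()->setCollator(std::move(collator));
    // Reads stay non-owning, e.g. expCtx->getCollator() as used by the sort stages above.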
diff --git a/src/mongo/db/exec/queued_data_stage.cpp b/src/mongo/db/exec/queued_data_stage.cpp
index 2eee4232245..c5f6339dfaa 100644
--- a/src/mongo/db/exec/queued_data_stage.cpp
+++ b/src/mongo/db/exec/queued_data_stage.cpp
@@ -41,8 +41,8 @@ using std::vector;
const char* QueuedDataStage::kStageType = "QUEUED_DATA";
-QueuedDataStage::QueuedDataStage(OperationContext* opCtx, WorkingSet* ws)
- : PlanStage(kStageType, opCtx), _ws(ws) {}
+QueuedDataStage::QueuedDataStage(ExpressionContext* expCtx, WorkingSet* ws)
+ : PlanStage(kStageType, expCtx), _ws(ws) {}
PlanStage::StageState QueuedDataStage::doWork(WorkingSetID* out) {
if (isEOF()) {
diff --git a/src/mongo/db/exec/queued_data_stage.h b/src/mongo/db/exec/queued_data_stage.h
index abf91132bef..b952062803e 100644
--- a/src/mongo/db/exec/queued_data_stage.h
+++ b/src/mongo/db/exec/queued_data_stage.h
@@ -48,7 +48,7 @@ class RecordId;
*/
class QueuedDataStage final : public PlanStage {
public:
- QueuedDataStage(OperationContext* opCtx, WorkingSet* ws);
+ QueuedDataStage(ExpressionContext* expCtx, WorkingSet* ws);
StageState doWork(WorkingSetID* out) final;
diff --git a/src/mongo/db/exec/queued_data_stage_test.cpp b/src/mongo/db/exec/queued_data_stage_test.cpp
index 7e07bf0ff44..46ef8d371e2 100644
--- a/src/mongo/db/exec/queued_data_stage_test.cpp
+++ b/src/mongo/db/exec/queued_data_stage_test.cpp
@@ -37,6 +37,7 @@
#include <memory>
#include "mongo/db/exec/working_set.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/unittest/unittest.h"
@@ -48,6 +49,8 @@ namespace {
using std::unique_ptr;
+const static NamespaceString kNss("db.dummy");
+
class QueuedDataStageTest : public ServiceContextMongoDTest {
public:
QueuedDataStageTest() {
@@ -56,7 +59,7 @@ public:
}
protected:
- OperationContext* getOpCtx() {
+ OperationContext* opCtx() {
return _opCtx.get();
}
@@ -69,7 +72,8 @@ private:
//
TEST_F(QueuedDataStageTest, getValidStats) {
WorkingSet ws;
- auto mock = std::make_unique<QueuedDataStage>(getOpCtx(), &ws);
+ auto expCtx = make_intrusive<ExpressionContext>(opCtx(), nullptr, kNss);
+ auto mock = std::make_unique<QueuedDataStage>(expCtx.get(), &ws);
const CommonStats* commonStats = mock->getCommonStats();
ASSERT_EQUALS(commonStats->works, static_cast<size_t>(0));
const SpecificStats* specificStats = mock->getSpecificStats();
@@ -84,7 +88,8 @@ TEST_F(QueuedDataStageTest, getValidStats) {
TEST_F(QueuedDataStageTest, validateStats) {
WorkingSet ws;
WorkingSetID wsID;
- auto mock = std::make_unique<QueuedDataStage>(getOpCtx(), &ws);
+ auto expCtx = make_intrusive<ExpressionContext>(opCtx(), nullptr, kNss);
+ auto mock = std::make_unique<QueuedDataStage>(expCtx.get(), &ws);
// make sure that we start at all zeros
const CommonStats* stats = mock->getCommonStats();
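For the exec unit tests, the practical consequence is one extra line of setup: a stage can no longer be built directly from the fixture's OperationContext, so an ExpressionContext is minted first. A condensed sketch of that pattern, using the kNss dummy namespace this test file defines and a null collator:

    // New test-side construction pattern (mirrors the hunks above).
    WorkingSet ws;
    auto expCtx = make_intrusive<ExpressionContext>(opCtx(), nullptr, kNss);
    auto stage = std::make_unique<QueuedDataStage>(expCtx.get(), &ws);
    ASSERT_EQUALS(stage->getCommonStats()->works, static_cast<size_t>(0));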
diff --git a/src/mongo/db/exec/record_store_fast_count.cpp b/src/mongo/db/exec/record_store_fast_count.cpp
index 392a8e09033..e7142458cb3 100644
--- a/src/mongo/db/exec/record_store_fast_count.cpp
+++ b/src/mongo/db/exec/record_store_fast_count.cpp
@@ -35,11 +35,11 @@ namespace mongo {
const char* RecordStoreFastCountStage::kStageType = "RECORD_STORE_FAST_COUNT";
-RecordStoreFastCountStage::RecordStoreFastCountStage(OperationContext* opCtx,
+RecordStoreFastCountStage::RecordStoreFastCountStage(ExpressionContext* expCtx,
Collection* collection,
long long skip,
long long limit)
- : RequiresCollectionStage(kStageType, opCtx, collection), _skip(skip), _limit(limit) {
+ : RequiresCollectionStage(kStageType, expCtx, collection), _skip(skip), _limit(limit) {
invariant(_skip >= 0);
invariant(_limit >= 0);
}
@@ -54,7 +54,7 @@ PlanStage::StageState RecordStoreFastCountStage::doWork(WorkingSetID* out) {
// This stage never returns a working set member.
*out = WorkingSet::INVALID_ID;
- long long nCounted = collection()->numRecords(getOpCtx());
+ long long nCounted = collection()->numRecords(opCtx());
if (_skip) {
nCounted -= _skip;
diff --git a/src/mongo/db/exec/record_store_fast_count.h b/src/mongo/db/exec/record_store_fast_count.h
index 973165969be..1986b333bb1 100644
--- a/src/mongo/db/exec/record_store_fast_count.h
+++ b/src/mongo/db/exec/record_store_fast_count.h
@@ -42,7 +42,7 @@ class RecordStoreFastCountStage final : public RequiresCollectionStage {
public:
static const char* kStageType;
- RecordStoreFastCountStage(OperationContext* opCtx,
+ RecordStoreFastCountStage(ExpressionContext* expCtx,
Collection* collection,
long long skip,
long long limit);
diff --git a/src/mongo/db/exec/requires_all_indices_stage.h b/src/mongo/db/exec/requires_all_indices_stage.h
index 68516960f3f..15beac52ebf 100644
--- a/src/mongo/db/exec/requires_all_indices_stage.h
+++ b/src/mongo/db/exec/requires_all_indices_stage.h
@@ -41,8 +41,10 @@ namespace mongo {
*/
class RequiresAllIndicesStage : public RequiresCollectionStage {
public:
- RequiresAllIndicesStage(const char* stageType, OperationContext* opCtx, const Collection* coll)
- : RequiresCollectionStage(stageType, opCtx, coll) {
+ RequiresAllIndicesStage(const char* stageType,
+ ExpressionContext* expCtx,
+ const Collection* coll)
+ : RequiresCollectionStage(stageType, expCtx, coll) {
auto allEntriesShared = coll->getIndexCatalog()->getAllReadyEntriesShared();
_indexCatalogEntries.reserve(allEntriesShared.size());
_indexNames.reserve(allEntriesShared.size());
diff --git a/src/mongo/db/exec/requires_collection_stage.cpp b/src/mongo/db/exec/requires_collection_stage.cpp
index 31ea9a4fa45..0d120e413cc 100644
--- a/src/mongo/db/exec/requires_collection_stage.cpp
+++ b/src/mongo/db/exec/requires_collection_stage.cpp
@@ -49,10 +49,10 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() {
// We should be holding a lock associated with the name of the collection prior to yielding,
// even if the collection was renamed during yield.
- dassert(getOpCtx()->lockState()->isCollectionLockedForMode(_nss, MODE_IS));
+ dassert(opCtx()->lockState()->isCollectionLockedForMode(_nss, MODE_IS));
- const CollectionCatalog& catalog = CollectionCatalog::get(getOpCtx());
- auto newNss = catalog.lookupNSSByUUID(getOpCtx(), _collectionUUID);
+ const CollectionCatalog& catalog = CollectionCatalog::get(opCtx());
+ auto newNss = catalog.lookupNSSByUUID(opCtx(), _collectionUUID);
uassert(ErrorCodes::QueryPlanKilled,
str::stream() << "collection dropped. UUID " << _collectionUUID,
newNss);
@@ -68,7 +68,7 @@ void RequiresCollectionStageBase<CollectionT>::doRestoreState() {
// restored locks on the correct name. It is now safe to restore the Collection pointer. The
// collection must exist, since we already successfully looked up the namespace string by UUID
// under the correct lock manager locks.
- _collection = catalog.lookupCollectionByUUID(getOpCtx(), _collectionUUID);
+ _collection = catalog.lookupCollectionByUUID(opCtx(), _collectionUUID);
invariant(_collection);
uassert(ErrorCodes::QueryPlanKilled,
diff --git a/src/mongo/db/exec/requires_collection_stage.h b/src/mongo/db/exec/requires_collection_stage.h
index 896d9eb5181..3b0520281e1 100644
--- a/src/mongo/db/exec/requires_collection_stage.h
+++ b/src/mongo/db/exec/requires_collection_stage.h
@@ -55,8 +55,8 @@ namespace mongo {
template <typename CollectionT>
class RequiresCollectionStageBase : public PlanStage {
public:
- RequiresCollectionStageBase(const char* stageType, OperationContext* opCtx, CollectionT coll)
- : PlanStage(stageType, opCtx),
+ RequiresCollectionStageBase(const char* stageType, ExpressionContext* expCtx, CollectionT coll)
+ : PlanStage(stageType, expCtx),
_collection(coll),
_collectionUUID(_collection->uuid()),
_databaseEpoch(getDatabaseEpoch(_collection)),
@@ -94,8 +94,8 @@ private:
// collection pointer 'coll' must be non-null and must point to a valid collection.
uint64_t getDatabaseEpoch(CollectionT coll) const {
invariant(coll);
- auto databaseHolder = DatabaseHolder::get(getOpCtx());
- auto db = databaseHolder->getDb(getOpCtx(), coll->ns().ns());
+ auto databaseHolder = DatabaseHolder::get(opCtx());
+ auto db = databaseHolder->getDb(opCtx(), coll->ns().ns());
invariant(db);
return db->epoch();
}
diff --git a/src/mongo/db/exec/requires_index_stage.cpp b/src/mongo/db/exec/requires_index_stage.cpp
index 8e3e2382c9d..1073330fcf0 100644
--- a/src/mongo/db/exec/requires_index_stage.cpp
+++ b/src/mongo/db/exec/requires_index_stage.cpp
@@ -34,10 +34,10 @@
namespace mongo {
RequiresIndexStage::RequiresIndexStage(const char* stageType,
- OperationContext* opCtx,
+ ExpressionContext* expCtx,
const IndexDescriptor* indexDescriptor,
WorkingSet* workingSet)
- : RequiresCollectionStage(stageType, opCtx, indexDescriptor->getCollection()),
+ : RequiresCollectionStage(stageType, expCtx, indexDescriptor->getCollection()),
_weakIndexCatalogEntry(collection()->getIndexCatalog()->getEntryShared(indexDescriptor)) {
auto indexCatalogEntry = _weakIndexCatalogEntry.lock();
_indexDescriptor = indexCatalogEntry->descriptor();
diff --git a/src/mongo/db/exec/requires_index_stage.h b/src/mongo/db/exec/requires_index_stage.h
index 81d5649970e..374dc5f0615 100644
--- a/src/mongo/db/exec/requires_index_stage.h
+++ b/src/mongo/db/exec/requires_index_stage.h
@@ -48,7 +48,7 @@ namespace mongo {
class RequiresIndexStage : public RequiresCollectionStage {
public:
RequiresIndexStage(const char* stageType,
- OperationContext* opCtx,
+ ExpressionContext* expCtx,
const IndexDescriptor* indexDescriptor,
WorkingSet* workingSet);
diff --git a/src/mongo/db/exec/return_key.h b/src/mongo/db/exec/return_key.h
index 687b949d182..c9981dced1c 100644
--- a/src/mongo/db/exec/return_key.h
+++ b/src/mongo/db/exec/return_key.h
@@ -46,12 +46,12 @@ class ReturnKeyStage : public PlanStage {
public:
static constexpr StringData kStageName = "RETURN_KEY"_sd;
- ReturnKeyStage(OperationContext* opCtx,
+ ReturnKeyStage(ExpressionContext* expCtx,
std::vector<FieldPath> sortKeyMetaFields,
WorkingSet* ws,
SortKeyFormat sortKeyFormat,
std::unique_ptr<PlanStage> child)
- : PlanStage(opCtx, std::move(child), kStageName.rawData()),
+ : PlanStage(expCtx, std::move(child), kStageName.rawData()),
_ws(*ws),
_sortKeyMetaFields(std::move(sortKeyMetaFields)),
_sortKeyFormat(sortKeyFormat) {}
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index 51c67dfe697..dad89927d60 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -50,11 +50,11 @@ using std::vector;
// static
const char* ShardFilterStage::kStageType = "SHARDING_FILTER";
-ShardFilterStage::ShardFilterStage(OperationContext* opCtx,
+ShardFilterStage::ShardFilterStage(ExpressionContext* expCtx,
ScopedCollectionFilter collectionFilter,
WorkingSet* ws,
std::unique_ptr<PlanStage> child)
- : PlanStage(kStageType, opCtx), _ws(ws), _shardFilterer(std::move(collectionFilter)) {
+ : PlanStage(kStageType, expCtx), _ws(ws), _shardFilterer(std::move(collectionFilter)) {
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/shard_filter.h b/src/mongo/db/exec/shard_filter.h
index b902e3f54ee..5f0d8df7ea9 100644
--- a/src/mongo/db/exec/shard_filter.h
+++ b/src/mongo/db/exec/shard_filter.h
@@ -71,7 +71,7 @@ namespace mongo {
*/
class ShardFilterStage final : public PlanStage {
public:
- ShardFilterStage(OperationContext* opCtx,
+ ShardFilterStage(ExpressionContext* expCtx,
ScopedCollectionFilter collectionFilter,
WorkingSet* ws,
std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index bc488c1b410..94bb81153e4 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -43,11 +43,11 @@ using std::vector;
// static
const char* SkipStage::kStageType = "SKIP";
-SkipStage::SkipStage(OperationContext* opCtx,
+SkipStage::SkipStage(ExpressionContext* expCtx,
long long toSkip,
WorkingSet* ws,
std::unique_ptr<PlanStage> child)
- : PlanStage(kStageType, opCtx), _ws(ws), _toSkip(toSkip) {
+ : PlanStage(kStageType, expCtx), _ws(ws), _toSkip(toSkip) {
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/skip.h b/src/mongo/db/exec/skip.h
index 8751cb22471..24937662d02 100644
--- a/src/mongo/db/exec/skip.h
+++ b/src/mongo/db/exec/skip.h
@@ -44,7 +44,7 @@ namespace mongo {
*/
class SkipStage final : public PlanStage {
public:
- SkipStage(OperationContext* opCtx,
+ SkipStage(ExpressionContext* expCtx,
long long toSkip,
WorkingSet* ws,
std::unique_ptr<PlanStage> child);
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 2fa2e6dfe3e..6b03db5b26f 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -40,7 +40,7 @@ SortStage::SortStage(boost::intrusive_ptr<ExpressionContext> expCtx,
SortPattern sortPattern,
bool addSortKeyMetadata,
std::unique_ptr<PlanStage> child)
- : PlanStage(kStageType.rawData(), expCtx->opCtx),
+ : PlanStage(kStageType.rawData(), expCtx.get()),
_ws(ws),
_sortKeyGen(sortPattern, expCtx->getCollator()),
_addSortKeyMetadata(addSortKeyMetadata) {
diff --git a/src/mongo/db/exec/sort_key_generator.cpp b/src/mongo/db/exec/sort_key_generator.cpp
index 98e2774a747..d01d2fdcc63 100644
--- a/src/mongo/db/exec/sort_key_generator.cpp
+++ b/src/mongo/db/exec/sort_key_generator.cpp
@@ -48,13 +48,13 @@ namespace mongo {
const char* SortKeyGeneratorStage::kStageType = "SORT_KEY_GENERATOR";
-SortKeyGeneratorStage::SortKeyGeneratorStage(const boost::intrusive_ptr<ExpressionContext>& pExpCtx,
+SortKeyGeneratorStage::SortKeyGeneratorStage(const boost::intrusive_ptr<ExpressionContext>& expCtx,
std::unique_ptr<PlanStage> child,
WorkingSet* ws,
const BSONObj& sortSpecObj)
- : PlanStage(kStageType, pExpCtx->opCtx),
+ : PlanStage(kStageType, expCtx.get()),
_ws(ws),
- _sortKeyGen({{sortSpecObj, pExpCtx}, pExpCtx->getCollator()}) {
+ _sortKeyGen({{sortSpecObj, expCtx}, expCtx->getCollator()}) {
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/sort_key_generator.h b/src/mongo/db/exec/sort_key_generator.h
index 5732f2008f6..2679902dd2e 100644
--- a/src/mongo/db/exec/sort_key_generator.h
+++ b/src/mongo/db/exec/sort_key_generator.h
@@ -50,7 +50,7 @@ class WorkingSetMember;
*/
class SortKeyGeneratorStage final : public PlanStage {
public:
- SortKeyGeneratorStage(const boost::intrusive_ptr<ExpressionContext>& pExpCtx,
+ SortKeyGeneratorStage(const boost::intrusive_ptr<ExpressionContext>& expCtx,
std::unique_ptr<PlanStage> child,
WorkingSet* ws,
const BSONObj& sortSpecObj);
diff --git a/src/mongo/db/exec/sort_test.cpp b/src/mongo/db/exec/sort_test.cpp
index 455b038dc3a..6c9bc2d9379 100644
--- a/src/mongo/db/exec/sort_test.cpp
+++ b/src/mongo/db/exec/sort_test.cpp
@@ -49,6 +49,8 @@ using namespace mongo;
namespace {
+static const NamespaceString kNss("db.dummy");
+
class SortStageDefaultTest : public ServiceContextMongoDTest {
public:
static constexpr uint64_t kMaxMemoryUsageBytes = 1024u * 1024u;
@@ -59,7 +61,7 @@ public:
CollatorFactoryInterface::set(getServiceContext(), std::make_unique<CollatorFactoryMock>());
}
- OperationContext* getOpCtx() {
+ OperationContext* opCtx() {
return _opCtx.get();
}
@@ -82,8 +84,11 @@ public:
// so it's fine to declare
WorkingSet ws;
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx(), CollatorInterface::cloneCollator(collator), kNss);
+
// QueuedDataStage will be owned by SortStageDefault.
- auto queuedDataStage = std::make_unique<QueuedDataStage>(getOpCtx(), &ws);
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(expCtx.get(), &ws);
BSONObj inputObj = fromjson(inputStr);
BSONElement inputElt = inputObj.getField("input");
ASSERT(inputElt.isABSONObj());
@@ -103,10 +108,6 @@ public:
auto sortPattern = fromjson(patternStr);
- // Create an ExpressionContext for the SortKeyGeneratorStage.
- auto expCtx =
- make_intrusive<ExpressionContext>(getOpCtx(), collator, NamespaceString("foo"));
-
auto sortKeyGen = std::make_unique<SortKeyGeneratorStage>(
expCtx, std::move(queuedDataStage), &ws, sortPattern);
@@ -168,11 +169,10 @@ private:
TEST_F(SortStageDefaultTest, SortEmptyWorkingSet) {
WorkingSet ws;
- // Create an ExpressionContext for the SortKeyGeneratorStage.
- auto expCtx = make_intrusive<ExpressionContext>(getOpCtx(), nullptr, NamespaceString("foo"));
+ auto expCtx = make_intrusive<ExpressionContext>(opCtx(), nullptr, kNss);
// QueuedDataStage will be owned by SortStageDefault.
- auto queuedDataStage = std::make_unique<QueuedDataStage>(getOpCtx(), &ws);
+ auto queuedDataStage = std::make_unique<QueuedDataStage>(expCtx.get(), &ws);
auto sortKeyGen =
std::make_unique<SortKeyGeneratorStage>(expCtx, std::move(queuedDataStage), &ws, BSONObj());
auto sortPattern = BSON("a" << 1);
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 34190125c51..7baabe4a011 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -148,6 +148,9 @@ public:
str::stream() << nss.toString() << " is not a valid namespace",
nss.isValid());
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), nss);
+
// Need a context to get the actual Collection*
// TODO A write lock is currently taken here to accommodate stages that perform writes
// (e.g. DeleteStage). This should be changed to use a read lock for read-only
@@ -172,12 +175,12 @@ public:
unique_ptr<WorkingSet> ws(new WorkingSet());
std::unique_ptr<PlanStage> userRoot{
- parseQuery(opCtx, collection, planObj, ws.get(), nss, &exprs)};
+ parseQuery(expCtx, collection, planObj, ws.get(), nss, &exprs)};
uassert(16911, "Couldn't parse plan from " + cmdObj.toString(), nullptr != userRoot);
// Add a fetch at the top for the user so we can get obj back for sure.
- unique_ptr<PlanStage> rootFetch =
- std::make_unique<FetchStage>(opCtx, ws.get(), std::move(userRoot), nullptr, collection);
+ unique_ptr<PlanStage> rootFetch = std::make_unique<FetchStage>(
+ expCtx.get(), ws.get(), std::move(userRoot), nullptr, collection);
auto statusWithPlanExecutor = PlanExecutor::make(
opCtx, std::move(ws), std::move(rootFetch), collection, PlanExecutor::YIELD_AUTO);
@@ -208,12 +211,14 @@ public:
return true;
}
- PlanStage* parseQuery(OperationContext* opCtx,
+ PlanStage* parseQuery(const boost::intrusive_ptr<ExpressionContext>& expCtx,
Collection* collection,
BSONObj obj,
WorkingSet* workingSet,
const NamespaceString& nss,
std::vector<std::unique_ptr<MatchExpression>>* exprs) {
+ OperationContext* opCtx = expCtx->opCtx;
+
BSONElement firstElt = obj.firstElement();
if (!firstElt.isABSONObj()) {
return nullptr;
@@ -235,9 +240,6 @@ public:
}
BSONObj argObj = e.Obj();
if (filterTag == e.fieldName()) {
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx, collator, nss));
auto statusWithMatcher =
MatchExpressionParser::parse(argObj,
expCtx,
@@ -299,12 +301,12 @@ public:
params.direction = nodeArgs["direction"].numberInt();
params.shouldDedup = desc->isMultikey();
- return new IndexScan(opCtx, params, workingSet, matcher);
+ return new IndexScan(expCtx.get(), params, workingSet, matcher);
} else if ("andHash" == nodeName) {
uassert(
16921, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
- auto andStage = std::make_unique<AndHashStage>(opCtx, workingSet);
+ auto andStage = std::make_unique<AndHashStage>(expCtx.get(), workingSet);
int nodesAdded = 0;
BSONObjIterator it(nodeArgs["nodes"].Obj());
@@ -313,7 +315,7 @@ public:
uassert(16922, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj());
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, e.Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, e.Obj(), workingSet, nss, exprs)};
uassert(16923,
"Can't parse sub-node of AND: " + e.Obj().toString(),
nullptr != subNode);
@@ -328,7 +330,7 @@ public:
uassert(
16924, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
- auto andStage = std::make_unique<AndSortedStage>(opCtx, workingSet);
+ auto andStage = std::make_unique<AndSortedStage>(expCtx.get(), workingSet);
int nodesAdded = 0;
BSONObjIterator it(nodeArgs["nodes"].Obj());
@@ -337,7 +339,7 @@ public:
uassert(16925, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj());
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, e.Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, e.Obj(), workingSet, nss, exprs)};
uassert(16926,
"Can't parse sub-node of AND: " + e.Obj().toString(),
nullptr != subNode);
@@ -353,15 +355,15 @@ public:
16934, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
uassert(16935, "Dedup argument must be provided to OR", !nodeArgs["dedup"].eoo());
BSONObjIterator it(nodeArgs["nodes"].Obj());
- auto orStage =
- std::make_unique<OrStage>(opCtx, workingSet, nodeArgs["dedup"].Bool(), matcher);
+ auto orStage = std::make_unique<OrStage>(
+ expCtx.get(), workingSet, nodeArgs["dedup"].Bool(), matcher);
while (it.more()) {
BSONElement e = it.next();
if (!e.isABSONObj()) {
return nullptr;
}
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, e.Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, e.Obj(), workingSet, nss, exprs)};
uassert(
16936, "Can't parse sub-node of OR: " + e.Obj().toString(), nullptr != subNode);
orStage->addChild(std::move(subNode));
@@ -372,11 +374,12 @@ public:
uassert(
16929, "Node argument must be provided to fetch", nodeArgs["node"].isABSONObj());
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs)};
uassert(28731,
"Can't parse sub-node of FETCH: " + nodeArgs["node"].Obj().toString(),
nullptr != subNode);
- return new FetchStage(opCtx, workingSet, std::move(subNode), matcher, collection);
+ return new FetchStage(
+ expCtx.get(), workingSet, std::move(subNode), matcher, collection);
} else if ("limit" == nodeName) {
uassert(16937,
"Limit stage doesn't have a filter (put it on the child)",
@@ -385,12 +388,12 @@ public:
16930, "Node argument must be provided to limit", nodeArgs["node"].isABSONObj());
uassert(16931, "Num argument must be provided to limit", nodeArgs["num"].isNumber());
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs)};
uassert(28732,
"Can't parse sub-node of LIMIT: " + nodeArgs["node"].Obj().toString(),
nullptr != subNode);
return new LimitStage(
- opCtx, nodeArgs["num"].numberInt(), workingSet, std ::move(subNode));
+ expCtx.get(), nodeArgs["num"].numberInt(), workingSet, std ::move(subNode));
} else if ("skip" == nodeName) {
uassert(16938,
"Skip stage doesn't have a filter (put it on the child)",
@@ -398,12 +401,12 @@ public:
uassert(16932, "Node argument must be provided to skip", nodeArgs["node"].isABSONObj());
uassert(16933, "Num argument must be provided to skip", nodeArgs["num"].isNumber());
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs)};
uassert(28733,
"Can't parse sub-node of SKIP: " + nodeArgs["node"].Obj().toString(),
nullptr != subNode);
return new SkipStage(
- opCtx, nodeArgs["num"].numberInt(), workingSet, std::move(subNode));
+ expCtx.get(), nodeArgs["num"].numberInt(), workingSet, std::move(subNode));
} else if ("cscan" == nodeName) {
CollectionScanParams params;
@@ -417,7 +420,7 @@ public:
params.direction = CollectionScanParams::BACKWARD;
}
- return new CollectionScan(opCtx, collection, params, workingSet, matcher);
+ return new CollectionScan(expCtx.get(), collection, params, workingSet, matcher);
} else if ("mergeSort" == nodeName) {
uassert(
16971, "Nodes argument must be provided to sort", nodeArgs["nodes"].isABSONObj());
@@ -429,7 +432,7 @@ public:
params.pattern = nodeArgs["pattern"].Obj();
// Dedup is true by default.
- auto mergeStage = std::make_unique<MergeSortStage>(opCtx, params, workingSet);
+ auto mergeStage = std::make_unique<MergeSortStage>(expCtx.get(), params, workingSet);
BSONObjIterator it(nodeArgs["nodes"].Obj());
while (it.more()) {
@@ -437,7 +440,7 @@ public:
uassert(16973, "node of mergeSort isn't an obj?: " + e.toString(), e.isABSONObj());
std::unique_ptr<PlanStage> subNode{
- parseQuery(opCtx, collection, e.Obj(), workingSet, nss, exprs)};
+ parseQuery(expCtx, collection, e.Obj(), workingSet, nss, exprs)};
uassert(16974,
"Can't parse sub-node of mergeSort: " + e.Obj().toString(),
nullptr != subNode);
@@ -475,7 +478,7 @@ public:
return nullptr;
}
- return new TextStage(opCtx, params, workingSet, matcher);
+ return new TextStage(expCtx.get(), params, workingSet, matcher);
} else if ("delete" == nodeName) {
uassert(18636,
"Delete stage doesn't have a filter (put it on the child)",
@@ -486,13 +489,14 @@ public:
"isMulti argument must be provided to delete",
nodeArgs["isMulti"].type() == Bool);
PlanStage* subNode =
- parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs);
+ parseQuery(expCtx, collection, nodeArgs["node"].Obj(), workingSet, nss, exprs);
uassert(28734,
"Can't parse sub-node of DELETE: " + nodeArgs["node"].Obj().toString(),
nullptr != subNode);
auto params = std::make_unique<DeleteStageParams>();
params->isMulti = nodeArgs["isMulti"].Bool();
- return new DeleteStage(opCtx, std::move(params), workingSet, collection, subNode);
+ return new DeleteStage(
+ expCtx.get(), std::move(params), workingSet, collection, subNode);
} else {
return nullptr;
}
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 32c4cfa0bb8..054f48e3a2b 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -59,12 +59,12 @@ using std::vector;
const char* SubplanStage::kStageType = "SUBPLAN";
-SubplanStage::SubplanStage(OperationContext* opCtx,
+SubplanStage::SubplanStage(ExpressionContext* expCtx,
const Collection* collection,
WorkingSet* ws,
const QueryPlannerParams& params,
CanonicalQuery* cq)
- : RequiresAllIndicesStage(kStageType, opCtx, collection),
+ : RequiresAllIndicesStage(kStageType, expCtx, collection),
_ws(ws),
_plannerParams(params),
_query(cq) {
@@ -122,7 +122,7 @@ Status SubplanStage::planSubqueries() {
MatchExpression* orChild = _orExpression->getChild(i);
// Turn the i-th child into its own query.
- auto statusWithCQ = CanonicalQuery::canonicalize(getOpCtx(), *_query, orChild);
+ auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), *_query, orChild);
if (!statusWithCQ.isOK()) {
str::stream ss;
ss << "Can't canonicalize subchild " << orChild->debugString() << " "
@@ -263,7 +263,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
// messages that can be generated if pickBestPlan yields.
invariant(_children.empty());
_children.emplace_back(
- std::make_unique<MultiPlanStage>(getOpCtx(),
+ std::make_unique<MultiPlanStage>(expCtx(),
collection(),
branchResult->canonicalQuery.get(),
MultiPlanStage::CachingMode::SometimesCache));
@@ -275,7 +275,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
// Dump all the solutions into the MPS.
for (size_t ix = 0; ix < branchResult->solutions.size(); ++ix) {
- auto nextPlanRoot = StageBuilder::build(getOpCtx(),
+ auto nextPlanRoot = StageBuilder::build(opCtx(),
collection(),
*branchResult->canonicalQuery,
*branchResult->solutions[ix],
@@ -362,8 +362,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
// Use the index tags from planning each branch to construct the composite solution,
// and set that solution as our child stage.
_ws->clear();
- auto root =
- StageBuilder::build(getOpCtx(), collection(), *_query, *_compositeSolution.get(), _ws);
+ auto root = StageBuilder::build(opCtx(), collection(), *_query, *_compositeSolution.get(), _ws);
invariant(_children.empty());
_children.emplace_back(std::move(root));
@@ -385,7 +384,7 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
if (1 == solutions.size()) {
// Only one possible plan. Run it. Build the stages from the solution.
- auto root = StageBuilder::build(getOpCtx(), collection(), *_query, *solutions[0], _ws);
+ auto root = StageBuilder::build(opCtx(), collection(), *_query, *solutions[0], _ws);
invariant(_children.empty());
_children.emplace_back(std::move(root));
@@ -398,7 +397,7 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
// Many solutions. Create a MultiPlanStage to pick the best, update the cache,
// and so on. The working set will be shared by all candidate plans.
invariant(_children.empty());
- _children.emplace_back(new MultiPlanStage(getOpCtx(), collection(), _query));
+ _children.emplace_back(new MultiPlanStage(expCtx(), collection(), _query));
MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(child().get());
for (size_t ix = 0; ix < solutions.size(); ++ix) {
@@ -407,7 +406,7 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
}
auto nextPlanRoot =
- StageBuilder::build(getOpCtx(), collection(), *_query, *solutions[ix], _ws);
+ StageBuilder::build(opCtx(), collection(), *_query, *solutions[ix], _ws);
multiPlanStage->addPlan(std::move(solutions[ix]), std::move(nextPlanRoot), _ws);
}
diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h
index f027d0412d3..07d8f956ca8 100644
--- a/src/mongo/db/exec/subplan.h
+++ b/src/mongo/db/exec/subplan.h
@@ -69,7 +69,7 @@ class OperationContext;
*/
class SubplanStage final : public RequiresAllIndicesStage {
public:
- SubplanStage(OperationContext* opCtx,
+ SubplanStage(ExpressionContext* expCtx,
const Collection* collection,
WorkingSet* ws,
const QueryPlannerParams& params,
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index 9efb4915c61..7b7e4598e43 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -56,12 +56,12 @@ using fts::MAX_WEIGHT;
const char* TextStage::kStageType = "TEXT";
-TextStage::TextStage(OperationContext* opCtx,
+TextStage::TextStage(ExpressionContext* expCtx,
const TextStageParams& params,
WorkingSet* ws,
const MatchExpression* filter)
- : PlanStage(kStageType, opCtx), _params(params) {
- _children.emplace_back(buildTextTree(opCtx, ws, filter, params.wantTextScore));
+ : PlanStage(kStageType, expCtx), _params(params) {
+ _children.emplace_back(buildTextTree(expCtx->opCtx, ws, filter, params.wantTextScore));
_specificStats.indexPrefix = _params.indexPrefix;
_specificStats.indexName = _params.index->indexName();
_specificStats.parsedTextQuery = _params.query.toBSON();
@@ -112,7 +112,7 @@ unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* opCtx,
ixparams.direction = -1;
ixparams.shouldDedup = _params.index->isMultikey();
- indexScanList.push_back(std::make_unique<IndexScan>(opCtx, ixparams, ws, nullptr));
+ indexScanList.push_back(std::make_unique<IndexScan>(expCtx(), ixparams, ws, nullptr));
}
// Build the union of the index scans as a TEXT_OR or an OR stage, depending on whether the
@@ -122,16 +122,16 @@ unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* opCtx,
// We use a TEXT_OR stage to get the union of the results from the index scans and then
// compute their text scores. This is a blocking operation.
auto textScorer =
- std::make_unique<TextOrStage>(opCtx, _params.spec, ws, filter, collection);
+ std::make_unique<TextOrStage>(expCtx(), _params.spec, ws, filter, collection);
textScorer->addChildren(std::move(indexScanList));
textMatchStage = std::make_unique<TextMatchStage>(
- opCtx, std::move(textScorer), _params.query, _params.spec, ws);
+ expCtx(), std::move(textScorer), _params.query, _params.spec, ws);
} else {
// Because we don't need the text score, we can use a non-blocking OR stage to get the union
// of the index scans.
- auto textSearcher = std::make_unique<OrStage>(opCtx, ws, true, filter);
+ auto textSearcher = std::make_unique<OrStage>(expCtx(), ws, true, filter);
textSearcher->addChildren(std::move(indexScanList));
@@ -140,10 +140,10 @@ unique_ptr<PlanStage> TextStage::buildTextTree(OperationContext* opCtx,
// WorkingSetMember inputs have fetched data.
const MatchExpression* emptyFilter = nullptr;
auto fetchStage = std::make_unique<FetchStage>(
- opCtx, ws, std::move(textSearcher), emptyFilter, collection);
+ expCtx(), ws, std::move(textSearcher), emptyFilter, collection);
textMatchStage = std::make_unique<TextMatchStage>(
- opCtx, std::move(fetchStage), _params.query, _params.spec, ws);
+ expCtx(), std::move(fetchStage), _params.query, _params.spec, ws);
}
return textMatchStage;
diff --git a/src/mongo/db/exec/text.h b/src/mongo/db/exec/text.h
index e96fbd5b602..1e601d769de 100644
--- a/src/mongo/db/exec/text.h
+++ b/src/mongo/db/exec/text.h
@@ -73,7 +73,7 @@ struct TextStageParams {
*/
class TextStage final : public PlanStage {
public:
- TextStage(OperationContext* opCtx,
+ TextStage(ExpressionContext* expCtx,
const TextStageParams& params,
WorkingSet* ws,
const MatchExpression* filter);
diff --git a/src/mongo/db/exec/text_match.cpp b/src/mongo/db/exec/text_match.cpp
index b405608c134..c0d15c0e2fb 100644
--- a/src/mongo/db/exec/text_match.cpp
+++ b/src/mongo/db/exec/text_match.cpp
@@ -45,12 +45,12 @@ using std::vector;
const char* TextMatchStage::kStageType = "TEXT_MATCH";
-TextMatchStage::TextMatchStage(OperationContext* opCtx,
+TextMatchStage::TextMatchStage(ExpressionContext* expCtx,
unique_ptr<PlanStage> child,
const FTSQueryImpl& query,
const FTSSpec& spec,
WorkingSet* ws)
- : PlanStage(kStageType, opCtx), _ftsMatcher(query, spec), _ws(ws) {
+ : PlanStage(kStageType, expCtx), _ftsMatcher(query, spec), _ws(ws) {
_children.emplace_back(std::move(child));
}
diff --git a/src/mongo/db/exec/text_match.h b/src/mongo/db/exec/text_match.h
index 6155de1d48e..9ba74a5ac19 100644
--- a/src/mongo/db/exec/text_match.h
+++ b/src/mongo/db/exec/text_match.h
@@ -56,7 +56,7 @@ class RecordID;
*/
class TextMatchStage final : public PlanStage {
public:
- TextMatchStage(OperationContext* opCtx,
+ TextMatchStage(ExpressionContext* expCtx,
std::unique_ptr<PlanStage> child,
const FTSQueryImpl& query,
const FTSSpec& spec,
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index 8202ad623c7..4f92025575a 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -52,12 +52,12 @@ using fts::FTSSpec;
const char* TextOrStage::kStageType = "TEXT_OR";
-TextOrStage::TextOrStage(OperationContext* opCtx,
+TextOrStage::TextOrStage(ExpressionContext* expCtx,
const FTSSpec& ftsSpec,
WorkingSet* ws,
const MatchExpression* filter,
const Collection* collection)
- : RequiresCollectionStage(kStageType, opCtx, collection),
+ : RequiresCollectionStage(kStageType, expCtx, collection),
_ftsSpec(ftsSpec),
_ws(ws),
_scoreIterator(_scores.end()),
@@ -97,7 +97,7 @@ void TextOrStage::doDetachFromOperationContext() {
void TextOrStage::doReattachToOperationContext() {
if (_recordCursor)
- _recordCursor->reattachToOperationContext(getOpCtx());
+ _recordCursor->reattachToOperationContext(opCtx());
}
std::unique_ptr<PlanStageStats> TextOrStage::getStats() {
@@ -152,7 +152,7 @@ PlanStage::StageState TextOrStage::doWork(WorkingSetID* out) {
PlanStage::StageState TextOrStage::initStage(WorkingSetID* out) {
*out = WorkingSet::INVALID_ID;
try {
- _recordCursor = collection()->getCursor(getOpCtx());
+ _recordCursor = collection()->getCursor(opCtx());
_internalState = State::kReadingTerms;
return PlanStage::NEED_TIME;
} catch (const WriteConflictException&) {
@@ -268,8 +268,7 @@ PlanStage::StageState TextOrStage::addTerm(WorkingSetID wsid, WorkingSetID* out)
// Our parent expects RID_AND_OBJ members, so we fetch the document here if we haven't
// already.
try {
- if (!WorkingSetCommon::fetch(
- getOpCtx(), _ws, wsid, _recordCursor, collection()->ns())) {
+ if (!WorkingSetCommon::fetch(opCtx(), _ws, wsid, _recordCursor, collection()->ns())) {
_ws->free(wsid);
textRecordData->score = -1;
return NEED_TIME;
diff --git a/src/mongo/db/exec/text_or.h b/src/mongo/db/exec/text_or.h
index 724ecba61a3..8b57b2f07e7 100644
--- a/src/mongo/db/exec/text_or.h
+++ b/src/mongo/db/exec/text_or.h
@@ -67,7 +67,7 @@ public:
kDone,
};
- TextOrStage(OperationContext* opCtx,
+ TextOrStage(ExpressionContext* expCtx,
const FTSSpec& ftsSpec,
WorkingSet* ws,
const MatchExpression* filter,
diff --git a/src/mongo/db/exec/trial_stage.cpp b/src/mongo/db/exec/trial_stage.cpp
index a9e6c678000..908640e71ec 100644
--- a/src/mongo/db/exec/trial_stage.cpp
+++ b/src/mongo/db/exec/trial_stage.cpp
@@ -46,13 +46,13 @@ namespace mongo {
const char* TrialStage::kStageType = "TRIAL";
-TrialStage::TrialStage(OperationContext* opCtx,
+TrialStage::TrialStage(ExpressionContext* expCtx,
WorkingSet* ws,
std::unique_ptr<PlanStage> trialPlan,
std::unique_ptr<PlanStage> backupPlan,
size_t maxTrialWorks,
double minWorkAdvancedRatio)
- : PlanStage(kStageType, opCtx), _ws(ws) {
+ : PlanStage(kStageType, expCtx), _ws(ws) {
invariant(minWorkAdvancedRatio > 0);
invariant(minWorkAdvancedRatio <= 1);
invariant(maxTrialWorks > 0);
@@ -64,7 +64,7 @@ TrialStage::TrialStage(OperationContext* opCtx,
_backupPlan = std::move(backupPlan);
// We need to cache results during the trial phase in case it succeeds.
- _queuedData = std::make_unique<QueuedDataStage>(opCtx, _ws);
+ _queuedData = std::make_unique<QueuedDataStage>(expCtx, _ws);
// Set up stats tracking specific to this stage.
_specificStats.successThreshold = minWorkAdvancedRatio;
@@ -175,8 +175,7 @@ void TrialStage::_assessTrialAndBuildFinalPlan() {
// The trial plan succeeded, but we need to build a plan that includes the queued data. Create a
// final plan which UNIONs across the QueuedDataStage and the trial plan.
- std::unique_ptr<PlanStage> unionPlan =
- std::make_unique<OrStage>(getOpCtx(), _ws, false, nullptr);
+ std::unique_ptr<PlanStage> unionPlan = std::make_unique<OrStage>(expCtx(), _ws, false, nullptr);
static_cast<OrStage*>(unionPlan.get())->addChild(std::move(_queuedData));
static_cast<OrStage*>(unionPlan.get())->addChild(std::move(_children.front()));
_replaceCurrentPlan(unionPlan);
@@ -208,19 +207,19 @@ void TrialStage::doDetachFromOperationContext() {
void TrialStage::doReattachToOperationContext() {
if (_backupPlan) {
- _backupPlan->reattachToOperationContext(getOpCtx());
+ _backupPlan->reattachToOperationContext(opCtx());
}
if (_queuedData) {
- _queuedData->reattachToOperationContext(getOpCtx());
+ _queuedData->reattachToOperationContext(opCtx());
}
}
void TrialStage::doDispose() {
if (_backupPlan) {
- _backupPlan->dispose(getOpCtx());
+ _backupPlan->dispose(opCtx());
}
if (_queuedData) {
- _queuedData->dispose(getOpCtx());
+ _queuedData->dispose(opCtx());
}
}
diff --git a/src/mongo/db/exec/trial_stage.h b/src/mongo/db/exec/trial_stage.h
index efa3c9c5fa5..9da1d04beab 100644
--- a/src/mongo/db/exec/trial_stage.h
+++ b/src/mongo/db/exec/trial_stage.h
@@ -58,7 +58,7 @@ public:
 * Constructor. Both 'trialPlan' and 'backupPlan' must be non-nullptr; 'maxTrialWorks' must be
* greater than 0, and 'minWorkAdvancedRatio' must be in the range (0,1].
*/
- TrialStage(OperationContext* opCtx,
+ TrialStage(ExpressionContext* expCtx,
WorkingSet* ws,
std::unique_ptr<PlanStage> trialPlan,
std::unique_ptr<PlanStage> backupPlan,
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 829ed4fd95f..6561aefea06 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -110,23 +110,23 @@ const char* UpdateStage::kStageType = "UPDATE";
const UpdateStats UpdateStage::kEmptyUpdateStats;
// Public constructor.
-UpdateStage::UpdateStage(OperationContext* opCtx,
+UpdateStage::UpdateStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection,
PlanStage* child)
- : UpdateStage(opCtx, params, ws, collection) {
+ : UpdateStage(expCtx, params, ws, collection) {
// We should never reach here if the request is an upsert.
invariant(!_params.request->isUpsert());
_children.emplace_back(child);
}
// Protected constructor.
-UpdateStage::UpdateStage(OperationContext* opCtx,
+UpdateStage::UpdateStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection)
- : RequiresMutableCollectionStage(kStageType, opCtx, collection),
+ : RequiresMutableCollectionStage(kStageType, expCtx, collection),
_params(params),
_ws(ws),
_doc(params.driver->getDocument()),
@@ -147,7 +147,7 @@ UpdateStage::UpdateStage(OperationContext* opCtx,
_shouldCheckForShardKeyUpdate =
!(request->isFromOplogApplication() || request->getNamespaceString().isConfigDB() ||
request->isFromMigration()) &&
- OperationShardingState::isOperationVersioned(opCtx);
+ OperationShardingState::isOperationVersioned(expCtx->opCtx);
_specificStats.isModUpdate = params.driver->type() == UpdateDriver::UpdateType::kOperator;
}
@@ -175,14 +175,14 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
bool docWasModified = false;
- auto* const css = CollectionShardingState::get(getOpCtx(), collection()->ns());
+ auto* const css = CollectionShardingState::get(opCtx(), collection()->ns());
auto metadata = css->getCurrentMetadata();
Status status = Status::OK();
- const bool validateForStorage = getOpCtx()->writesAreReplicated() && _enforceOkForStorage;
+ const bool validateForStorage = opCtx()->writesAreReplicated() && _enforceOkForStorage;
const bool isInsert = false;
FieldRefSet immutablePaths;
- if (getOpCtx()->writesAreReplicated() && !request->isFromMigration()) {
- if (metadata->isSharded() && !OperationShardingState::isOperationVersioned(getOpCtx())) {
+ if (opCtx()->writesAreReplicated() && !request->isFromMigration()) {
+ if (metadata->isSharded() && !OperationShardingState::isOperationVersioned(opCtx())) {
immutablePaths.fillFrom(metadata->getKeyPatternFields());
}
immutablePaths.keepShortest(&idFieldRef);
@@ -278,10 +278,10 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
}
}
- WriteUnitOfWork wunit(getOpCtx());
+ WriteUnitOfWork wunit(opCtx());
StatusWith<RecordData> newRecStatus = collection()->updateDocumentWithDamages(
- getOpCtx(), recordId, std::move(snap), source, _damages, &args);
- invariant(oldObj.snapshotId() == getOpCtx()->recoveryUnit()->getSnapshotId());
+ opCtx(), recordId, std::move(snap), source, _damages, &args);
+ invariant(oldObj.snapshotId() == opCtx()->recoveryUnit()->getSnapshotId());
wunit.commit();
newObj = uassertStatusOK(std::move(newRecStatus)).releaseToBson();
@@ -306,15 +306,15 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
}
}
- WriteUnitOfWork wunit(getOpCtx());
- newRecordId = collection()->updateDocument(getOpCtx(),
+ WriteUnitOfWork wunit(opCtx());
+ newRecordId = collection()->updateDocument(opCtx(),
recordId,
oldObj,
newObj,
driver->modsAffectIndices(),
_params.opDebug,
&args);
- invariant(oldObj.snapshotId() == getOpCtx()->recoveryUnit()->getSnapshotId());
+ invariant(oldObj.snapshotId() == opCtx()->recoveryUnit()->getSnapshotId());
wunit.commit();
}
}
@@ -498,7 +498,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
bool docStillMatches;
try {
docStillMatches = write_stage_common::ensureStillMatches(
- collection(), getOpCtx(), _ws, id, _params.canonicalQuery);
+ collection(), opCtx(), _ws, id, _params.canonicalQuery);
} catch (const WriteConflictException&) {
// There was a problem trying to detect if the document still exists, so retry.
memberFreer.dismiss();
@@ -544,8 +544,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// Set member's obj to be the doc we want to return.
if (_params.request->shouldReturnAnyDocs()) {
if (_params.request->shouldReturnNewDocs()) {
- member->resetDocument(getOpCtx()->recoveryUnit()->getSnapshotId(),
- newObj.getOwned());
+ member->resetDocument(opCtx()->recoveryUnit()->getSnapshotId(), newObj.getOwned());
} else {
invariant(_params.request->shouldReturnOldDocs());
member->resetDocument(oldSnapshot, oldObj);
@@ -628,8 +627,8 @@ void UpdateStage::doRestoreStateRequiresCollection() {
const NamespaceString& nsString(request.getNamespaceString());
// We may have stepped down during the yield.
- bool userInitiatedWritesAndNotPrimary = getOpCtx()->writesAreReplicated() &&
- !repl::ReplicationCoordinator::get(getOpCtx())->canAcceptWritesFor(getOpCtx(), nsString);
+ bool userInitiatedWritesAndNotPrimary = opCtx()->writesAreReplicated() &&
+ !repl::ReplicationCoordinator::get(opCtx())->canAcceptWritesFor(opCtx(), nsString);
if (userInitiatedWritesAndNotPrimary) {
uasserted(ErrorCodes::PrimarySteppedDown,
@@ -639,7 +638,7 @@ void UpdateStage::doRestoreStateRequiresCollection() {
// The set of indices may have changed during yield. Make sure that the update driver has up to
// date index information.
- const auto& updateIndexData = CollectionQueryInfo::get(collection()).getIndexKeys(getOpCtx());
+ const auto& updateIndexData = CollectionQueryInfo::get(collection()).getIndexKeys(opCtx());
_params.driver->refreshIndexKeys(&updateIndexData);
}
@@ -731,12 +730,12 @@ bool UpdateStage::checkUpdateChangesShardKeyFields(ScopedCollectionMetadata meta
uassert(ErrorCodes::IllegalOperation,
"Must run update to shard key field in a multi-statement transaction or with "
"retryWrites: true.",
- getOpCtx()->getTxnNumber() || !getOpCtx()->writesAreReplicated());
+ opCtx()->getTxnNumber() || !opCtx()->writesAreReplicated());
if (!metadata->keyBelongsToMe(newShardKey)) {
if (MONGO_unlikely(hangBeforeThrowWouldChangeOwningShard.shouldFail())) {
LOGV2(20605, "Hit hangBeforeThrowWouldChangeOwningShard failpoint");
- hangBeforeThrowWouldChangeOwningShard.pauseWhileSet(getOpCtx());
+ hangBeforeThrowWouldChangeOwningShard.pauseWhileSet(opCtx());
}
uasserted(WouldChangeOwningShardInfo(oldObj.value(), newObj, false /* upsert */),
diff --git a/src/mongo/db/exec/update_stage.h b/src/mongo/db/exec/update_stage.h
index cf167d603c0..a24cb2f50f9 100644
--- a/src/mongo/db/exec/update_stage.h
+++ b/src/mongo/db/exec/update_stage.h
@@ -81,7 +81,7 @@ class UpdateStage : public RequiresMutableCollectionStage {
UpdateStage& operator=(const UpdateStage&) = delete;
public:
- UpdateStage(OperationContext* opCtx,
+ UpdateStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection,
@@ -126,7 +126,7 @@ public:
const DuplicateKeyErrorInfo& errorInfo);
protected:
- UpdateStage(OperationContext* opCtx,
+ UpdateStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection);
diff --git a/src/mongo/db/exec/upsert_stage.cpp b/src/mongo/db/exec/upsert_stage.cpp
index 387613df82f..c3613e5242b 100644
--- a/src/mongo/db/exec/upsert_stage.cpp
+++ b/src/mongo/db/exec/upsert_stage.cpp
@@ -73,12 +73,12 @@ void getShardKeyAndImmutablePaths(OperationContext* opCtx,
}
} // namespace
-UpsertStage::UpsertStage(OperationContext* opCtx,
+UpsertStage::UpsertStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection,
PlanStage* child)
- : UpdateStage(opCtx, params, ws, collection) {
+ : UpdateStage(expCtx, params, ws, collection) {
// We should never create this stage for a non-upsert request.
invariant(_params.request->isUpsert());
_children.emplace_back(child);
@@ -114,7 +114,7 @@ PlanStage::StageState UpsertStage::doWork(WorkingSetID* out) {
// Determine whether this is a user-initiated or internal request.
const bool isInternalRequest =
- !getOpCtx()->writesAreReplicated() || _params.request->isFromMigration();
+ !opCtx()->writesAreReplicated() || _params.request->isFromMigration();
// Generate the new document to be inserted.
_specificStats.objInserted = _produceNewDocumentForInsert(isInternalRequest);
@@ -132,7 +132,7 @@ PlanStage::StageState UpsertStage::doWork(WorkingSetID* out) {
BSONObj newObj = _specificStats.objInserted;
*out = _ws->allocate();
WorkingSetMember* member = _ws->get(*out);
- member->resetDocument(getOpCtx()->recoveryUnit()->getSnapshotId(), newObj.getOwned());
+ member->resetDocument(opCtx()->recoveryUnit()->getSnapshotId(), newObj.getOwned());
member->transitionToOwnedObj();
return PlanStage::ADVANCED;
}
@@ -147,7 +147,7 @@ void UpsertStage::_performInsert(BSONObj newDocument) {
// 'q' field belong to this shard, but those in the 'u' field do not. In this case we need to
// throw so that MongoS can target the insert to the correct shard.
if (_shouldCheckForShardKeyUpdate) {
- auto* const css = CollectionShardingState::get(getOpCtx(), collection()->ns());
+ auto* const css = CollectionShardingState::get(opCtx(), collection()->ns());
const auto& metadata = css->getCurrentMetadata();
if (metadata->isSharded()) {
@@ -162,7 +162,7 @@ void UpsertStage::_performInsert(BSONObj newDocument) {
"query, since its shard key belongs on a different shard. Cross-shard "
"upserts are only allowed when running in a transaction or with "
"retryWrites: true.",
- getOpCtx()->getTxnNumber());
+ opCtx()->getTxnNumber());
uasserted(WouldChangeOwningShardInfo(
_params.request->getQuery(), newDocument, true /* upsert */),
"The document we are inserting belongs on a different shard");
@@ -172,13 +172,13 @@ void UpsertStage::_performInsert(BSONObj newDocument) {
if (MONGO_unlikely(hangBeforeUpsertPerformsInsert.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
- &hangBeforeUpsertPerformsInsert, getOpCtx(), "hangBeforeUpsertPerformsInsert");
+ &hangBeforeUpsertPerformsInsert, opCtx(), "hangBeforeUpsertPerformsInsert");
}
- writeConflictRetry(getOpCtx(), "upsert", collection()->ns().ns(), [&] {
- WriteUnitOfWork wunit(getOpCtx());
+ writeConflictRetry(opCtx(), "upsert", collection()->ns().ns(), [&] {
+ WriteUnitOfWork wunit(opCtx());
uassertStatusOK(
- collection()->insertDocument(getOpCtx(),
+ collection()->insertDocument(opCtx(),
InsertStatement(_params.request->getStmtId(), newDocument),
_params.opDebug,
_params.request->isFromMigration()));
@@ -192,13 +192,13 @@ void UpsertStage::_performInsert(BSONObj newDocument) {
BSONObj UpsertStage::_produceNewDocumentForInsert(bool isInternalRequest) {
// Obtain the sharding metadata. This will be needed to compute the shardKey paths. The metadata
// must remain in scope since it owns the pointers used by 'shardKeyPaths' and 'immutablePaths'.
- auto* css = CollectionShardingState::get(getOpCtx(), _params.request->getNamespaceString());
+ auto* css = CollectionShardingState::get(opCtx(), _params.request->getNamespaceString());
auto metadata = css->getCurrentMetadata();
// Compute the set of shard key paths and the set of immutable paths. Either may be empty.
FieldRefSet shardKeyPaths, immutablePaths;
getShardKeyAndImmutablePaths(
- getOpCtx(), metadata, isInternalRequest, &shardKeyPaths, &immutablePaths);
+ opCtx(), metadata, isInternalRequest, &shardKeyPaths, &immutablePaths);
// Reset the document into which we will be writing.
_doc.reset();
diff --git a/src/mongo/db/exec/upsert_stage.h b/src/mongo/db/exec/upsert_stage.h
index 092439bcf3d..8b149c26c7d 100644
--- a/src/mongo/db/exec/upsert_stage.h
+++ b/src/mongo/db/exec/upsert_stage.h
@@ -51,7 +51,7 @@ class UpsertStage final : public UpdateStage {
UpsertStage& operator=(const UpsertStage&) = delete;
public:
- UpsertStage(OperationContext* opCtx,
+ UpsertStage(ExpressionContext* expCtx,
const UpdateStageParams& params,
WorkingSet* ws,
Collection* collection,
diff --git a/src/mongo/db/index/sort_key_generator_test.cpp b/src/mongo/db/index/sort_key_generator_test.cpp
index 5f4834bbdce..17c66fe7d59 100644
--- a/src/mongo/db/index/sort_key_generator_test.cpp
+++ b/src/mongo/db/index/sort_key_generator_test.cpp
@@ -46,7 +46,7 @@ namespace {
std::unique_ptr<SortKeyGenerator> makeSortKeyGen(const BSONObj& sortSpec,
const CollatorInterface* collator) {
boost::intrusive_ptr<ExpressionContext> pExpCtx(new ExpressionContextForTest());
- pExpCtx->setCollator(collator);
+ pExpCtx->setCollator(CollatorInterface::cloneCollator(collator));
SortPattern sortPattern{sortSpec, pExpCtx};
return std::make_unique<SortKeyGenerator>(std::move(sortPattern), collator);
}
diff --git a/src/mongo/db/matcher/expression_algo_test.cpp b/src/mongo/db/matcher/expression_algo_test.cpp
index 68075fbb396..49a72946651 100644
--- a/src/mongo/db/matcher/expression_algo_test.cpp
+++ b/src/mongo/db/matcher/expression_algo_test.cpp
@@ -52,9 +52,9 @@ class ParsedMatchExpression {
public:
ParsedMatchExpression(const std::string& str, const CollatorInterface* collator = nullptr)
: _obj(fromjson(str)) {
- boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(collator);
- StatusWithMatchExpression result = MatchExpressionParser::parse(_obj, std::move(expCtx));
+ _expCtx = make_intrusive<ExpressionContextForTest>();
+ _expCtx->setCollator(CollatorInterface::cloneCollator(collator));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(_obj, _expCtx);
ASSERT_OK(result.getStatus());
_expr = std::move(result.getValue());
}
@@ -66,6 +66,7 @@ public:
private:
const BSONObj _obj;
std::unique_ptr<MatchExpression> _expr;
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
};
TEST(ExpressionAlgoIsSubsetOf, NullAndOmittedField) {
diff --git a/src/mongo/db/matcher/expression_expr.cpp b/src/mongo/db/matcher/expression_expr.cpp
index 138ce2d376a..01f3affa84c 100644
--- a/src/mongo/db/matcher/expression_expr.cpp
+++ b/src/mongo/db/matcher/expression_expr.cpp
@@ -92,8 +92,12 @@ bool ExprMatchExpression::equivalent(const MatchExpression* other) const {
}
void ExprMatchExpression::_doSetCollator(const CollatorInterface* collator) {
- _expCtx->setCollator(collator);
-
+ // This function is used to give match expression nodes which don't keep a pointer to the
+ // ExpressionContext access to the ExpressionContext's collator. Since the operation only ever
+ // has a single CollatorInterface, and since that collator is kept on the ExpressionContext,
+ // the collator pointer that we're propagating throughout the MatchExpression tree must match
+ // the one inside the ExpressionContext.
+ invariant(collator == _expCtx->getCollator());
if (_rewriteResult && _rewriteResult->matchExpression()) {
_rewriteResult->matchExpression()->setCollator(collator);
}
diff --git a/src/mongo/db/matcher/expression_expr_test.cpp b/src/mongo/db/matcher/expression_expr_test.cpp
index 87b9025510c..e9e2e6eab4b 100644
--- a/src/mongo/db/matcher/expression_expr_test.cpp
+++ b/src/mongo/db/matcher/expression_expr_test.cpp
@@ -59,8 +59,8 @@ public:
_matchExpression = MatchExpression::optimize(std::move(_matchExpression));
}
- void setCollator(CollatorInterface* collator) {
- _expCtx->setCollator(collator);
+ void setCollator(std::unique_ptr<CollatorInterface> collator) {
+ _expCtx->setCollator(std::move(collator));
if (_matchExpression) {
_matchExpression->setCollator(_expCtx->getCollator());
}
@@ -548,7 +548,7 @@ TEST_F(ExprMatchTest,
TEST_F(ExprMatchTest, InitialCollationUsedForComparisons) {
auto collator =
std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
- setCollator(collator.get());
+ setCollator(std::move(collator));
createMatcher(fromjson("{$expr: {$eq: ['$x', 'abc']}}"));
ASSERT_TRUE(matches(BSON("x"
@@ -563,7 +563,7 @@ TEST_F(ExprMatchTest, SetCollatorChangesCollationUsedForComparisons) {
auto collator =
std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
- setCollator(collator.get());
+ setCollator(std::move(collator));
ASSERT_TRUE(matches(BSON("x"
<< "AbC")));
diff --git a/src/mongo/db/matcher/expression_parser_array_test.cpp b/src/mongo/db/matcher/expression_parser_array_test.cpp
index 8ead6ff5d2b..c519ee2b374 100644
--- a/src/mongo/db/matcher/expression_parser_array_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_array_test.cpp
@@ -766,8 +766,9 @@ TEST(MatchExpressionParserArrayTest, AllStringNullCollation) {
TEST(MatchExpressionParserArrayTest, AllStringCollation) {
BSONObj query = BSON("x" << BSON("$all" << BSON_ARRAY("string")));
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
- expCtx->setCollator(&collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_TRUE(result.isOK());
ASSERT_EQUALS(MatchExpression::AND, result.getValue()->matchType());
@@ -775,6 +776,6 @@ TEST(MatchExpressionParserArrayTest, AllStringCollation) {
MatchExpression* child = result.getValue()->getChild(0);
ASSERT_EQUALS(MatchExpression::EQ, child->matchType());
EqualityMatchExpression* eqMatch = static_cast<EqualityMatchExpression*>(child);
- ASSERT_TRUE(eqMatch->getCollator() == &collator);
+ ASSERT_TRUE(eqMatch->getCollator() == expCtx->getCollator());
}
} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_parser_leaf_test.cpp b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
index b14ba97988d..3c1ac33dc3e 100644
--- a/src/mongo/db/matcher/expression_parser_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
@@ -61,14 +61,15 @@ TEST(MatchExpressionParserLeafTest, NullCollation) {
TEST(MatchExpressionParserLeafTest, Collation) {
BSONObj query = BSON("x"
<< "string");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::EQ, result.getValue()->matchType());
EqualityMatchExpression* match = static_cast<EqualityMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleEQ2) {
@@ -104,14 +105,15 @@ TEST(MatchExpressionParserLeafTest, EQNullCollation) {
TEST(MatchExpressionParserLeafTest, EQCollation) {
BSONObj query = BSON("x" << BSON("$eq"
<< "string"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::EQ, result.getValue()->matchType());
EqualityMatchExpression* match = static_cast<EqualityMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleGT1) {
@@ -139,14 +141,15 @@ TEST(MatchExpressionParserLeafTest, GTNullCollation) {
TEST(MatchExpressionParserLeafTest, GTCollation) {
BSONObj query = BSON("x" << BSON("$gt"
<< "abc"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::GT, result.getValue()->matchType());
GTMatchExpression* match = static_cast<GTMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleLT1) {
@@ -175,14 +178,15 @@ TEST(MatchExpressionParserLeafTest, LTNullCollation) {
TEST(MatchExpressionParserLeafTest, LTCollation) {
BSONObj query = BSON("x" << BSON("$lt"
<< "abc"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::LT, result.getValue()->matchType());
LTMatchExpression* match = static_cast<LTMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleGTE1) {
@@ -211,14 +215,15 @@ TEST(MatchExpressionParserLeafTest, GTENullCollation) {
TEST(MatchExpressionParserLeafTest, GTECollation) {
BSONObj query = BSON("x" << BSON("$gte"
<< "abc"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::GTE, result.getValue()->matchType());
GTEMatchExpression* match = static_cast<GTEMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleLTE1) {
@@ -247,14 +252,15 @@ TEST(MatchExpressionParserLeafTest, LTENullCollation) {
TEST(MatchExpressionParserLeafTest, LTECollation) {
BSONObj query = BSON("x" << BSON("$lte"
<< "abc"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::LTE, result.getValue()->matchType());
LTEMatchExpression* match = static_cast<LTEMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleNE1) {
@@ -285,16 +291,17 @@ TEST(MatchExpressionParserLeafTest, NENullCollation) {
TEST(MatchExpressionParserLeafTest, NECollation) {
BSONObj query = BSON("x" << BSON("$ne"
<< "string"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::NOT, result.getValue()->matchType());
MatchExpression* child = result.getValue()->getChild(0);
ASSERT_EQUALS(MatchExpression::EQ, child->matchType());
EqualityMatchExpression* eqMatch = static_cast<EqualityMatchExpression*>(child);
- ASSERT_TRUE(eqMatch->getCollator() == &collator);
+ ASSERT_TRUE(eqMatch->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, SimpleModBad1) {
@@ -357,22 +364,24 @@ TEST(MatchExpressionParserLeafTest, IdCollation) {
TEST(MatchExpressionParserLeafTest, IdNullCollation) {
BSONObj query = BSON("$id"
<< "string");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::EQ, result.getValue()->matchType());
EqualityMatchExpression* match = static_cast<EqualityMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, RefCollation) {
BSONObj query = BSON("$ref"
<< "coll");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::EQ, result.getValue()->matchType());
@@ -383,9 +392,10 @@ TEST(MatchExpressionParserLeafTest, RefCollation) {
TEST(MatchExpressionParserLeafTest, DbCollation) {
BSONObj query = BSON("$db"
<< "db");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::EQ, result.getValue()->matchType());
@@ -416,14 +426,15 @@ TEST(MatchExpressionParserLeafTest, INNullCollation) {
TEST(MatchExpressionParserLeafTest, INCollation) {
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY("string")));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::MATCH_IN, result.getValue()->matchType());
InMatchExpression* match = static_cast<InMatchExpression*>(result.getValue().get());
- ASSERT_TRUE(match->getCollator() == &collator);
+ ASSERT_TRUE(match->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
@@ -760,16 +771,17 @@ TEST(MatchExpressionParserLeafTest, NINNullCollation) {
TEST(MatchExpressionParserLeafTest, NINCollation) {
BSONObj query = BSON("x" << BSON("$nin" << BSON_ARRAY("string")));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
ASSERT_OK(result.getStatus());
ASSERT_EQUALS(MatchExpression::NOT, result.getValue()->matchType());
MatchExpression* child = result.getValue()->getChild(0);
ASSERT_EQUALS(MatchExpression::MATCH_IN, child->matchType());
InMatchExpression* inMatch = static_cast<InMatchExpression*>(child);
- ASSERT_TRUE(inMatch->getCollator() == &collator);
+ ASSERT_TRUE(inMatch->getCollator() == expCtx->getCollator());
}
TEST(MatchExpressionParserLeafTest, Regex1) {
diff --git a/src/mongo/db/matcher/expression_with_placeholder_test.cpp b/src/mongo/db/matcher/expression_with_placeholder_test.cpp
index a56264ad1df..fcf6d86e5d7 100644
--- a/src/mongo/db/matcher/expression_with_placeholder_test.cpp
+++ b/src/mongo/db/matcher/expression_with_placeholder_test.cpp
@@ -88,9 +88,10 @@ TEST(ExpressionWithPlaceholderTest, ParseElemMatch) {
}
TEST(ExpressionWithPlaceholderTest, ParseCollation) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
auto rawFilter = fromjson("{i: 'abc'}");
auto parsedFilter = assertGet(MatchExpressionParser::parse(rawFilter, expCtx));
auto filter = assertGet(ExpressionWithPlaceholder::make(std::move(parsedFilter)));
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
index 6c8dee32e26..f161bbd5eb2 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp
@@ -176,9 +176,10 @@ TEST(InternalSchemaObjectMatchExpression, EquivalentReturnsCorrectResults) {
}
TEST(InternalSchemaObjectMatchExpression, SubExpressionRespectsCollator) {
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kToLowerString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
auto query = fromjson(
"{a: {$_internalSchemaObjectMatch: {"
" b: {$eq: 'FOO'}"
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index 85b06c4e7db..21a7a04ed19 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -56,11 +56,8 @@ long long deleteObjects(OperationContext* opCtx,
ParsedDelete parsedDelete(opCtx, &request);
uassertStatusOK(parsedDelete.parseRequest());
- auto exec = uassertStatusOK(getExecutorDelete(opCtx,
- &CurOp::get(opCtx)->debug(),
- collection,
- &parsedDelete,
- boost::none /* verbosity */));
+ auto exec = uassertStatusOK(getExecutorDelete(
+ &CurOp::get(opCtx)->debug(), collection, &parsedDelete, boost::none /* verbosity */));
uassertStatusOK(exec->executePlan());
diff --git a/src/mongo/db/ops/parsed_delete.cpp b/src/mongo/db/ops/parsed_delete.cpp
index cc7039a10b9..be01c8d2863 100644
--- a/src/mongo/db/ops/parsed_delete.cpp
+++ b/src/mongo/db/ops/parsed_delete.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/ops/delete_request.h"
#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/util/assert_util.h"
@@ -59,6 +60,21 @@ Status ParsedDelete::parseRequest() {
// DeleteStage would not return the deleted document.
invariant(_request->getProj().isEmpty() || _request->shouldReturnDeleted());
+ std::unique_ptr<CollatorInterface> collator(nullptr);
+ if (!_request->getCollation().isEmpty()) {
+ auto statusWithCollator = CollatorFactoryInterface::get(_opCtx->getServiceContext())
+ ->makeFromBSON(_request->getCollation());
+
+ if (!statusWithCollator.isOK()) {
+ return statusWithCollator.getStatus();
+ }
+ collator = uassertStatusOK(std::move(statusWithCollator));
+ }
+ _expCtx = make_intrusive<ExpressionContext>(_opCtx,
+ std::move(collator),
+ _request->getNamespaceString(),
+ _request->getRuntimeConstants());
+
if (CanonicalQuery::isSimpleIdQuery(_request->getQuery())) {
return Status::OK();
}
@@ -94,11 +110,10 @@ Status ParsedDelete::parseQueryToCQ() {
qr->setRuntimeConstants(*runtimeConstants);
}
- const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
CanonicalQuery::canonicalize(_opCtx,
std::move(qr),
- std::move(expCtx),
+ _expCtx,
extensionsCallback,
MatchExpressionParser::kAllowAllSpecialFeatures);
diff --git a/src/mongo/db/ops/parsed_delete.h b/src/mongo/db/ops/parsed_delete.h
index 73f4bef19e4..09033065604 100644
--- a/src/mongo/db/ops/parsed_delete.h
+++ b/src/mongo/db/ops/parsed_delete.h
@@ -100,6 +100,21 @@ public:
*/
std::unique_ptr<CanonicalQuery> releaseParsedQuery();
+ /**
+ * This may return nullptr, specifically in cases where the query is IDHACK eligible.
+ */
+ const CanonicalQuery* parsedQuery() const {
+ return _canonicalQuery.get();
+ }
+
+ /**
+ * Always guaranteed to return a valid expression context.
+ */
+ boost::intrusive_ptr<ExpressionContext> expCtx() {
+ invariant(_expCtx.get());
+ return _expCtx;
+ }
+
private:
// Transactional context. Not owned by us.
OperationContext* _opCtx;
@@ -109,6 +124,8 @@ private:
// Parsed query object, or NULL if the query proves to be an id hack query.
std::unique_ptr<CanonicalQuery> _canonicalQuery;
+
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
};
} // namespace mongo
diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp
index 6ea2c5ddfa6..b495d48df9c 100644
--- a/src/mongo/db/ops/parsed_update.cpp
+++ b/src/mongo/db/ops/parsed_update.cpp
@@ -45,8 +45,9 @@ ParsedUpdate::ParsedUpdate(OperationContext* opCtx,
const ExtensionsCallback& extensionsCallback)
: _opCtx(opCtx),
_request(request),
- _driver(new ExpressionContext(
+ _expCtx(make_intrusive<ExpressionContext>(
opCtx, nullptr, _request->getNamespaceString(), _request->getRuntimeConstants())),
+ _driver(_expCtx),
_canonicalQuery(),
_extensionsCallback(extensionsCallback) {}
@@ -81,11 +82,11 @@ Status ParsedUpdate::parseRequest() {
if (!collator.isOK()) {
return collator.getStatus();
}
- _collator = std::move(collator.getValue());
+ _expCtx->setCollator(std::move(collator.getValue()));
}
- auto statusWithArrayFilters = parseArrayFilters(
- _request->getArrayFilters(), _opCtx, _collator.get(), _request->getNamespaceString());
+ auto statusWithArrayFilters =
+ parseArrayFilters(_expCtx, _request->getArrayFilters(), _request->getNamespaceString());
if (!statusWithArrayFilters.isOK()) {
return statusWithArrayFilters.getStatus();
}
@@ -147,9 +148,8 @@ Status ParsedUpdate::parseQueryToCQ() {
qr->setRuntimeConstants(*runtimeConstants);
}
- boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ = CanonicalQuery::canonicalize(
- _opCtx, std::move(qr), std::move(expCtx), _extensionsCallback, allowedMatcherFeatures);
+ _opCtx, std::move(qr), _expCtx, _extensionsCallback, allowedMatcherFeatures);
if (statusWithCQ.isOK()) {
_canonicalQuery = std::move(statusWithCQ.getValue());
}
@@ -165,7 +165,7 @@ Status ParsedUpdate::parseQueryToCQ() {
}
void ParsedUpdate::parseUpdate() {
- _driver.setCollator(_collator.get());
+ _driver.setCollator(_expCtx->getCollator());
_driver.setLogOp(true);
_driver.setFromOplogApplication(_request->isFromOplogApplication());
@@ -176,13 +176,11 @@ void ParsedUpdate::parseUpdate() {
}
StatusWith<std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>>>
-ParsedUpdate::parseArrayFilters(const std::vector<BSONObj>& rawArrayFiltersIn,
- OperationContext* opCtx,
- CollatorInterface* collator,
+ParsedUpdate::parseArrayFilters(const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ const std::vector<BSONObj>& rawArrayFiltersIn,
const NamespaceString& nss) {
std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> arrayFiltersOut;
for (auto rawArrayFilter : rawArrayFiltersIn) {
- boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, collator, nss));
auto parsedArrayFilter =
MatchExpressionParser::parse(rawArrayFilter,
std::move(expCtx),
@@ -240,12 +238,18 @@ UpdateDriver* ParsedUpdate::getDriver() {
}
void ParsedUpdate::setCollator(std::unique_ptr<CollatorInterface> collator) {
- _collator = std::move(collator);
+ auto* rawCollator = collator.get();
- _driver.setCollator(_collator.get());
+ if (_canonicalQuery) {
+ _canonicalQuery->setCollator(std::move(collator));
+ } else {
+ _expCtx->setCollator(std::move(collator));
+ }
+
+ _driver.setCollator(rawCollator);
for (auto&& arrayFilter : _arrayFilters) {
- arrayFilter.second->getFilter()->setCollator(_collator.get());
+ arrayFilter.second->getFilter()->setCollator(rawCollator);
}
}
diff --git a/src/mongo/db/ops/parsed_update.h b/src/mongo/db/ops/parsed_update.h
index 06176336b17..e9be9312389 100644
--- a/src/mongo/db/ops/parsed_update.h
+++ b/src/mongo/db/ops/parsed_update.h
@@ -64,9 +64,8 @@ public:
* Parses the array filters portion of the update request.
*/
static StatusWith<std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>>>
- parseArrayFilters(const std::vector<BSONObj>& rawArrayFiltersIn,
- OperationContext* opCtx,
- CollatorInterface* collator,
+ parseArrayFilters(const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ const std::vector<BSONObj>& rawArrayFiltersIn,
const NamespaceString& nss);
/**
@@ -128,13 +127,6 @@ public:
std::unique_ptr<CanonicalQuery> releaseParsedQuery();
/**
- * Get the collator of the parsed update.
- */
- const CollatorInterface* getCollator() const {
- return _collator.get();
- }
-
- /**
* Sets this ParsedUpdate's collator.
*
* This setter can be used to override the collator that was created from the update request
@@ -142,6 +134,13 @@ public:
*/
void setCollator(std::unique_ptr<CollatorInterface> collator);
+ /**
+ * Never returns nullptr.
+ */
+ boost::intrusive_ptr<ExpressionContext> expCtx() const {
+ return _expCtx;
+ }
+
private:
/**
* Parses the query portion of the update request.
@@ -159,12 +158,11 @@ private:
// Unowned pointer to the request object to process.
const UpdateRequest* const _request;
- // The collator for the parsed update. Owned here.
- std::unique_ptr<CollatorInterface> _collator;
-
// The array filters for the parsed update. Owned here.
std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> _arrayFilters;
+ boost::intrusive_ptr<ExpressionContext> _expCtx;
+
// Driver for processing updates on matched documents.
UpdateDriver _driver;
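[Editor's note] With the collator now owned by the ExpressionContext, parseArrayFilters() takes the context directly instead of a separate OperationContext/CollatorInterface pair. A minimal sketch of the new calling convention follows; 'opCtx', 'nss', and the filter document are assumptions for illustration, not part of the patch.

// Illustrative sketch only: calling the new ParsedUpdate::parseArrayFilters() overload.
// The ExpressionContext owns any collator; a null unique_ptr means the simple collation.
auto expCtx = make_intrusive<ExpressionContext>(
    opCtx, std::unique_ptr<CollatorInterface>(nullptr), nss);
std::vector<BSONObj> rawArrayFilters = {fromjson("{'elem.grade': {$gte: 85}}")};
auto swArrayFilters = ParsedUpdate::parseArrayFilters(expCtx, rawArrayFilters, nss);
if (!swArrayFilters.isOK()) {
    return swArrayFilters.getStatus();  // Propagate the parse failure as a Status.
}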
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 00b57ba533c..81683541275 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -97,8 +97,8 @@ UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest&
uassertStatusOK(parsedUpdate.parseRequest());
OpDebug* const nullOpDebug = nullptr;
- auto exec = uassertStatusOK(getExecutorUpdate(
- opCtx, nullOpDebug, collection, &parsedUpdate, boost::none /* verbosity */));
+ auto exec = uassertStatusOK(
+ getExecutorUpdate(nullOpDebug, collection, &parsedUpdate, boost::none /* verbosity */));
uassertStatusOK(exec->executePlan());
@@ -111,8 +111,8 @@ BSONObj applyUpdateOperators(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& from,
const BSONObj& operators) {
- const CollatorInterface* collator = nullptr;
- boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, collator, nss));
+ auto expCtx =
+ make_intrusive<ExpressionContext>(opCtx, std::unique_ptr<CollatorInterface>(nullptr), nss);
UpdateDriver driver(std::move(expCtx));
std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> arrayFilters;
driver.parse(operators, arrayFilters);
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 8560dff85af..149525ce366 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -669,11 +669,8 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
assertCanWrite_inlock(opCtx, ns);
- auto exec = uassertStatusOK(getExecutorUpdate(opCtx,
- &curOp.debug(),
- collection->getCollection(),
- &parsedUpdate,
- boost::none /* verbosity */));
+ auto exec = uassertStatusOK(getExecutorUpdate(
+ &curOp.debug(), collection->getCollection(), &parsedUpdate, boost::none /* verbosity */));
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -912,11 +909,8 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&hangWithLockDuringBatchRemove, opCtx, "hangWithLockDuringBatchRemove");
- auto exec = uassertStatusOK(getExecutorDelete(opCtx,
- &curOp.debug(),
- collection.getCollection(),
- &parsedDelete,
- boost::none /* verbosity */));
+ auto exec = uassertStatusOK(getExecutorDelete(
+ &curOp.debug(), collection.getCollection(), &parsedDelete, boost::none /* verbosity */));
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
diff --git a/src/mongo/db/pipeline/accumulator_test.cpp b/src/mongo/db/pipeline/accumulator_test.cpp
index 724e6a6838a..888887a2898 100644
--- a/src/mongo/db/pipeline/accumulator_test.cpp
+++ b/src/mongo/db/pipeline/accumulator_test.cpp
@@ -210,8 +210,9 @@ TEST(Accumulators, Min) {
TEST(Accumulators, MinRespectsCollation) {
intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- expCtx->setCollator(&collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
+ expCtx->setCollator(std::move(collator));
assertExpectedResults<AccumulatorMin>(expCtx,
{{{Value("abc"_sd), Value("cba"_sd)}, Value("cba"_sd)}});
}
@@ -236,8 +237,9 @@ TEST(Accumulators, Max) {
TEST(Accumulators, MaxRespectsCollation) {
intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- expCtx->setCollator(&collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
+ expCtx->setCollator(std::move(collator));
assertExpectedResults<AccumulatorMax>(expCtx,
{{{Value("abc"_sd), Value("cba"_sd)}, Value("abc"_sd)}});
}
@@ -333,8 +335,9 @@ TEST(Accumulators, Sum) {
TEST(Accumulators, AddToSetRespectsCollation) {
intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
- expCtx->setCollator(&collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ expCtx->setCollator(std::move(collator));
assertExpectedResults<AccumulatorAddToSet>(expCtx,
{{{Value("a"_sd), Value("b"_sd), Value("c"_sd)},
Value(std::vector<Value>{Value("a"_sd)})}});
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index 6710a1c78ce..3e04bb594bf 100644
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -255,7 +255,7 @@ public:
GetNextResult getNext() {
pExpCtx->checkForInterrupt();
- if (MONGO_likely(!pExpCtx->explain)) {
+ if (MONGO_likely(!pExpCtx->shouldCollectExecStats())) {
return doGetNext();
}
diff --git a/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp b/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp
index eace0a24f3d..1ef3ac840f5 100644
--- a/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp
+++ b/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp
@@ -261,7 +261,7 @@ TEST_F(CheckResumeTokenTest, ShouldFailIfTokenHasWrongNamespace) {
TEST_F(CheckResumeTokenTest, ShouldSucceedWithBinaryCollation) {
CollatorInterfaceMock collatorCompareLower(CollatorInterfaceMock::MockType::kToLowerString);
- getExpCtx()->setCollator(&collatorCompareLower);
+ getExpCtx()->setCollator(collatorCompareLower.clone());
Timestamp resumeTimestamp(100, 1);
diff --git a/src/mongo/db/pipeline/expression_context.cpp b/src/mongo/db/pipeline/expression_context.cpp
index f135b105c35..4e203f16d38 100644
--- a/src/mongo/db/pipeline/expression_context.cpp
+++ b/src/mongo/db/pipeline/expression_context.cpp
@@ -103,10 +103,9 @@ ExpressionContext::ExpressionContext(
? TimeZoneDatabase::get(opCtx->getServiceContext())
: nullptr),
variablesParseState(variables.useIdGenerator()),
- _ownedCollator(std::move(collator)),
- _unownedCollator(_ownedCollator.get()),
- _documentComparator(_unownedCollator),
- _valueComparator(_unownedCollator),
+ _collator(std::move(collator)),
+ _documentComparator(_collator.get()),
+ _valueComparator(_collator.get()),
_resolvedNamespaces(std::move(resolvedNamespaces)) {
if (runtimeConstants) {
@@ -127,7 +126,7 @@ ExpressionContext::ExpressionContext(
}
ExpressionContext::ExpressionContext(OperationContext* opCtx,
- const CollatorInterface* collator,
+ std::unique_ptr<CollatorInterface> collator,
const NamespaceString& nss,
const boost::optional<RuntimeConstants>& runtimeConstants)
: ns(nss),
@@ -137,9 +136,9 @@ ExpressionContext::ExpressionContext(OperationContext* opCtx,
? TimeZoneDatabase::get(opCtx->getServiceContext())
: nullptr),
variablesParseState(variables.useIdGenerator()),
- _unownedCollator(collator),
- _documentComparator(_unownedCollator),
- _valueComparator(_unownedCollator) {
+ _collator(std::move(collator)),
+ _documentComparator(_collator.get()),
+ _valueComparator(_collator.get()) {
if (runtimeConstants) {
variables.setRuntimeConstants(*runtimeConstants);
}
@@ -159,24 +158,12 @@ void ExpressionContext::checkForInterrupt() {
ExpressionContext::CollatorStash::CollatorStash(
const boost::intrusive_ptr<ExpressionContext>& expCtx,
std::unique_ptr<CollatorInterface> newCollator)
- : _expCtx(expCtx),
- _originalCollatorOwned(std::move(_expCtx->_ownedCollator)),
- _originalCollatorUnowned(_expCtx->_unownedCollator) {
+ : _expCtx(expCtx), _originalCollator(std::move(_expCtx->_collator)) {
_expCtx->setCollator(std::move(newCollator));
}
ExpressionContext::CollatorStash::~CollatorStash() {
- if (_originalCollatorOwned) {
- _expCtx->setCollator(std::move(_originalCollatorOwned));
- } else {
- _expCtx->setCollator(_originalCollatorUnowned);
- if (!_originalCollatorUnowned && _expCtx->_ownedCollator) {
- // If the original collation was 'nullptr', we cannot distinguish whether it was owned
- // or not. We always set '_ownedCollator' with the stash, so should reset it to null
- // here.
- _expCtx->_ownedCollator = nullptr;
- }
- }
+ _expCtx->setCollator(std::move(_originalCollator));
}
std::unique_ptr<ExpressionContext::CollatorStash> ExpressionContext::temporarilyChangeCollator(
@@ -185,14 +172,6 @@ std::unique_ptr<ExpressionContext::CollatorStash> ExpressionContext::temporarily
return std::unique_ptr<CollatorStash>(new CollatorStash(this, std::move(newCollator)));
}
-void ExpressionContext::setCollator(const CollatorInterface* collator) {
- _unownedCollator = collator;
-
- // Document/Value comparisons must be aware of the collation.
- _documentComparator = DocumentComparator(_unownedCollator);
- _valueComparator = ValueComparator(_unownedCollator);
-}
-
intrusive_ptr<ExpressionContext> ExpressionContext::copyWith(
NamespaceString ns,
boost::optional<UUID> uuid,
@@ -200,7 +179,7 @@ intrusive_ptr<ExpressionContext> ExpressionContext::copyWith(
auto collator = updatedCollator
? std::move(*updatedCollator)
- : (_ownedCollator ? _ownedCollator->clone() : std::unique_ptr<CollatorInterface>{});
+ : (_collator ? _collator->clone() : std::unique_ptr<CollatorInterface>{});
auto expCtx = make_intrusive<ExpressionContext>(opCtx,
explain,
@@ -223,16 +202,6 @@ intrusive_ptr<ExpressionContext> ExpressionContext::copyWith(
expCtx->useNewUpsert = useNewUpsert;
expCtx->jsHeapLimitMB = jsHeapLimitMB;
- // ExpressionContext is used both universally in Agg and in Find within a $expr. In the case
- // that this context is for use in $expr, the collator will be unowned and we will pass nullptr
- // in the constructor call above. If this is the case we must manually update the unowned
- // collator argument in the new ExpressionContext to match the old one. SERVER-31294 tracks an
- // effort to divorce the ExpressionContext from general Agg resources by creating an
- // AggregationContext. If that effort comes to fruition, this special-case collator handling
- // will be made unnecessary.
- if (!updatedCollator && !collator && _unownedCollator)
- expCtx->setCollator(_unownedCollator);
-
expCtx->variables = variables;
expCtx->variablesParseState = variablesParseState.copyWith(expCtx->variables.useIdGenerator());
diff --git a/src/mongo/db/pipeline/expression_context.h b/src/mongo/db/pipeline/expression_context.h
index 26a1da21a70..0fba82ac610 100644
--- a/src/mongo/db/pipeline/expression_context.h
+++ b/src/mongo/db/pipeline/expression_context.h
@@ -95,8 +95,7 @@ public:
boost::intrusive_ptr<ExpressionContext> _expCtx;
- std::unique_ptr<CollatorInterface> _originalCollatorOwned;
- const CollatorInterface* _originalCollatorUnowned{nullptr};
+ std::unique_ptr<CollatorInterface> _originalCollator;
};
/**
@@ -136,7 +135,7 @@ public:
* If 'collator' is null, the simple collator will be used.
*/
ExpressionContext(OperationContext* opCtx,
- const CollatorInterface* collator,
+ std::unique_ptr<CollatorInterface> collator,
const NamespaceString& ns,
const boost::optional<RuntimeConstants>& runtimeConstants = boost::none);
@@ -168,7 +167,11 @@ public:
}
const CollatorInterface* getCollator() const {
- return _unownedCollator;
+ return _collator.get();
+ }
+
+ bool shouldCollectExecStats() const {
+ return static_cast<bool>(explain);
}
/**
@@ -181,10 +184,22 @@ public:
* the ExpressionContext.
*/
BSONObj getCollatorBSON() const {
- return _unownedCollator ? _unownedCollator->getSpec().toBSON() : CollationSpec::kSimpleSpec;
+ return _collator ? _collator->getSpec().toBSON() : CollationSpec::kSimpleSpec;
}
- void setCollator(const CollatorInterface* collator);
+ /**
+ * Sets '_collator' and resets '_documentComparator' and '_valueComparator'.
+ *
+ * Use with caution - '_collator' is used in the context of a Pipeline, and it is illegal
+ * to change the collation once a Pipeline has been parsed with this ExpressionContext.
+ */
+ void setCollator(std::unique_ptr<CollatorInterface> collator) {
+ _collator = std::move(collator);
+
+ // Document/Value comparisons must be aware of the collation.
+ _documentComparator = DocumentComparator(_collator.get());
+ _valueComparator = ValueComparator(_collator.get());
+ }
const DocumentComparator& getDocumentComparator() const {
return _documentComparator;
@@ -343,27 +358,10 @@ public:
protected:
static const int kInterruptCheckPeriod = 128;
- /**
- * Sets '_ownedCollator' and resets '_unownedCollator', 'documentComparator' and
- * 'valueComparator'.
- *
- * Use with caution - '_ownedCollator' is used in the context of a Pipeline, and it is illegal
- * to change the collation once a Pipeline has been parsed with this ExpressionContext.
- */
- void setCollator(std::unique_ptr<CollatorInterface> collator) {
- _ownedCollator = std::move(collator);
- setCollator(_ownedCollator.get());
- }
-
friend class CollatorStash;
- // Collator used for comparisons. This is owned in the context of a Pipeline.
- // TODO SERVER-31294: Move ownership of an aggregation's collator elsewhere.
- std::unique_ptr<CollatorInterface> _ownedCollator;
-
- // Collator used for comparisons. If '_ownedCollator' is non-null, then this must point to the
- // same collator object.
- const CollatorInterface* _unownedCollator = nullptr;
+ // Collator used for comparisons.
+ std::unique_ptr<CollatorInterface> _collator;
// Used for all comparisons of Document/Value during execution of the aggregation operation.
// Must not be changed after parsing a Pipeline with this ExpressionContext.
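[Editor's note] Since the ExpressionContext is now the sole owner of the collator, callers hand over a std::unique_ptr and the comparators are rebuilt in place. A small sketch of the owned-collator API, using the CollatorInterfaceMock seen in the test changes above; 'opCtx' is an assumed OperationContext* from the caller.

// Construct a context with the simple (null) collation.
auto expCtx = make_intrusive<ExpressionContext>(
    opCtx, std::unique_ptr<CollatorInterface>(nullptr), NamespaceString("test.coll"));

// Hand ownership of a collator to the context; document/value comparators are rebuilt in place.
expCtx->setCollator(
    std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString));
invariant(expCtx->getCollator() != nullptr);

{
    // Temporarily swap in a different collator; the stash restores the original on destruction.
    auto stash = expCtx->temporarilyChangeCollator(
        std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual));
    // ... evaluate expressions under the always-equal collation ...
}
// The reverse-string collator is owned by 'expCtx' again here.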
diff --git a/src/mongo/db/pipeline/expression_trim_test.cpp b/src/mongo/db/pipeline/expression_trim_test.cpp
index b21acb67ff4..a54ee025014 100644
--- a/src/mongo/db/pipeline/expression_trim_test.cpp
+++ b/src/mongo/db/pipeline/expression_trim_test.cpp
@@ -449,7 +449,7 @@ TEST(ExpressionTrimTest, TrimComparisonsShouldNotRespectCollation) {
intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
auto caseInsensitive =
std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
- expCtx->setCollator(caseInsensitive.get());
+ expCtx->setCollator(std::move(caseInsensitive));
auto trim = Expression::parseExpression(expCtx,
BSON("$trim" << BSON("input"
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index a93d355f549..0ce4c83f8eb 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -104,7 +104,12 @@ namespace {
* percentage of the collection.
*/
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorExecutor(
- Collection* coll, OperationContext* opCtx, long long sampleSize, long long numRecords) {
+ Collection* coll,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ long long sampleSize,
+ long long numRecords) {
+ OperationContext* opCtx = expCtx->opCtx;
+
// Verify that we are already under a collection lock. We avoid taking locks ourselves in this
// function because double-locking forces any PlanExecutor we create to adopt a NO_YIELD policy.
invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_IS));
@@ -123,7 +128,8 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
// Build a MultiIteratorStage and pass it the random-sampling RecordCursor.
auto ws = std::make_unique<WorkingSet>();
- std::unique_ptr<PlanStage> root = std::make_unique<MultiIteratorStage>(opCtx, ws.get(), coll);
+ std::unique_ptr<PlanStage> root =
+ std::make_unique<MultiIteratorStage>(expCtx.get(), ws.get(), coll);
static_cast<MultiIteratorStage*>(root.get())->addIterator(std::move(rsRandCursor));
// If the incoming operation is sharded, use the CSS to infer the filtering metadata for the
@@ -145,15 +151,15 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
const auto minWorkAdvancedRatio = std::max(
sampleSize / (numRecords * kMaxSampleRatioForRandCursor), kMaxSampleRatioForRandCursor);
// The trial plan is SHARDING_FILTER-MULTI_ITERATOR.
- auto randomCursorPlan =
- std::make_unique<ShardFilterStage>(opCtx, collectionFilter, ws.get(), std::move(root));
+ auto randomCursorPlan = std::make_unique<ShardFilterStage>(
+ expCtx.get(), collectionFilter, ws.get(), std::move(root));
// The backup plan is SHARDING_FILTER-COLLSCAN.
std::unique_ptr<PlanStage> collScanPlan = std::make_unique<CollectionScan>(
- opCtx, coll, CollectionScanParams{}, ws.get(), nullptr);
+ expCtx.get(), coll, CollectionScanParams{}, ws.get(), nullptr);
collScanPlan = std::make_unique<ShardFilterStage>(
- opCtx, collectionFilter, ws.get(), std::move(collScanPlan));
+ expCtx.get(), collectionFilter, ws.get(), std::move(collScanPlan));
// Place a TRIAL stage at the root of the plan tree, and pass it the trial and backup plans.
- root = std::make_unique<TrialStage>(opCtx,
+ root = std::make_unique<TrialStage>(expCtx.get(),
ws.get(),
std::move(randomCursorPlan),
std::move(collScanPlan),
@@ -229,11 +235,8 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> attemptToGetExe
// 2) We do not want a plan that will return separate values for each array element. For
// example, if we have a document {a: [1,2]} and group by "a", a DISTINCT_SCAN on an "a"
// index would produce one result for '1' and another for '2', which would be incorrect.
- auto distinctExecutor =
- getExecutorDistinct(expCtx->opCtx,
- collection,
- plannerOpts | QueryPlannerParams::STRICT_DISTINCT_ONLY,
- &parsedDistinct);
+ auto distinctExecutor = getExecutorDistinct(
+ collection, plannerOpts | QueryPlannerParams::STRICT_DISTINCT_ONLY, &parsedDistinct);
if (!distinctExecutor.isOK()) {
return distinctExecutor.getStatus().withContext(
"Unable to use distinct scan to optimize $group stage");
@@ -319,7 +322,7 @@ PipelineD::buildInnerQueryExecutor(Collection* collection,
const long long sampleSize = sampleStage->getSampleSize();
const long long numRecords = collection->getRecordStore()->numRecords(expCtx->opCtx);
auto exec = uassertStatusOK(
- createRandomCursorExecutor(collection, expCtx->opCtx, sampleSize, numRecords));
+ createRandomCursorExecutor(collection, expCtx, sampleSize, numRecords));
if (exec) {
// For sharded collections, the root of the plan tree is a TrialStage that may have
// chosen either a random-sampling cursor trial plan or a COLLSCAN backup plan. We
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 5d977f17737..fdb8cc496ec 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -152,8 +152,8 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
// Make MatchExpression.
boost::intrusive_ptr<ExpressionContext> newExpCtx;
if (!expCtx.get()) {
- newExpCtx.reset(
- new ExpressionContext(opCtx, collator.get(), qr->nss(), qr->getRuntimeConstants()));
+ newExpCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::move(collator), qr->nss(), qr->getRuntimeConstants());
} else {
newExpCtx = expCtx;
invariant(CollatorInterface::collatorsMatch(collator.get(), expCtx->getCollator()));
@@ -175,7 +175,6 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
std::move(qr),
parsingCanProduceNoopMatchNodes(extensionsCallback, allowedFeatures),
std::move(me),
- std::move(collator),
projectionPolicies);
if (!initStatus.isOK()) {
@@ -200,11 +199,6 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
return qrStatus;
}
- std::unique_ptr<CollatorInterface> collator;
- if (baseQuery.getCollator()) {
- collator = baseQuery.getCollator()->clone();
- }
-
// Make the CQ we'll hopefully return.
std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
Status initStatus = cq->init(opCtx,
@@ -212,7 +206,6 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
std::move(qr),
baseQuery.canHaveNoopMatchNodes(),
root->shallowClone(),
- std::move(collator),
ProjectionPolicies::findProjectionPolicies());
if (!initStatus.isOK()) {
@@ -226,11 +219,9 @@ Status CanonicalQuery::init(OperationContext* opCtx,
std::unique_ptr<QueryRequest> qr,
bool canHaveNoopMatchNodes,
std::unique_ptr<MatchExpression> root,
- std::unique_ptr<CollatorInterface> collator,
const ProjectionPolicies& projectionPolicies) {
_expCtx = expCtx;
_qr = std::move(qr);
- _collator = std::move(collator);
_canHaveNoopMatchNodes = canHaveNoopMatchNodes;
@@ -305,15 +296,13 @@ void CanonicalQuery::initSortPattern(QueryMetadataBitSet unavailableMetadata) {
}
void CanonicalQuery::setCollator(std::unique_ptr<CollatorInterface> collator) {
- _collator = std::move(collator);
+ auto collatorRaw = collator.get();
+ // We must give the ExpressionContext the same collator.
+ _expCtx->setCollator(std::move(collator));
// The collator associated with the match expression tree is now invalid, since we have reset
- // the object owned by '_collator'. We must associate the match expression tree with the new
- // value of '_collator'.
- _root->setCollator(_collator.get());
-
- // In a similar vein, we must give the ExpressionContext the same collator.
- _expCtx->setCollator(_collator.get());
+ // the collator owned by the ExpressionContext.
+ _root->setCollator(collatorRaw);
}
// static
diff --git a/src/mongo/db/query/canonical_query.h b/src/mongo/db/query/canonical_query.h
index 2598ac3157c..e1261805e20 100644
--- a/src/mongo/db/query/canonical_query.h
+++ b/src/mongo/db/query/canonical_query.h
@@ -154,7 +154,7 @@ public:
}
const CollatorInterface* getCollator() const {
- return _collator.get();
+ return _expCtx->getCollator();
}
/**
@@ -227,7 +227,6 @@ private:
std::unique_ptr<QueryRequest> qr,
bool canHaveNoopMatchNodes,
std::unique_ptr<MatchExpression> root,
- std::unique_ptr<CollatorInterface> collator,
const ProjectionPolicies& projectionPolicies);
// Initializes '_sortPattern', adding any metadata dependencies implied by the sort.
@@ -250,8 +249,6 @@ private:
// Keeps track of what metadata has been explicitly requested.
QueryMetadataBitSet _metadataDeps;
- std::unique_ptr<CollatorInterface> _collator;
-
bool _canHaveNoopMatchNodes = false;
};
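[Editor's note] A consequence worth noting: CanonicalQuery no longer stores its own copy of the collator, so setCollator() forwards ownership to the ExpressionContext and re-points the MatchExpression tree at the same object. A hedged sketch, assuming 'cq' is a canonicalized query and 'collection' has a non-simple default collation:

cq->setCollator(collection->getDefaultCollator()->clone());
// Both accessors now observe the single collator owned by the ExpressionContext.
invariant(cq->getCollator() == cq->getExpCtx()->getCollator());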
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index cba75f67e00..6388f5986b0 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -97,6 +97,19 @@ using std::string;
using std::unique_ptr;
using std::vector;
+boost::intrusive_ptr<ExpressionContext> makeExpressionContextForGetExecutor(
+ OperationContext* opCtx, const BSONObj& requestCollation, const NamespaceString& nss) {
+ invariant(opCtx);
+
+ auto expCtx = make_intrusive<ExpressionContext>(opCtx, nullptr, nss);
+ if (!requestCollation.isEmpty()) {
+ auto statusWithCollator = CollatorFactoryInterface::get(expCtx->opCtx->getServiceContext())
+ ->makeFromBSON(requestCollation);
+ expCtx->setCollator(uassertStatusOK(std::move(statusWithCollator)));
+ }
+ return expCtx;
+}
+
// static
void filterAllowedIndexEntries(const AllowedIndicesFilter& allowedIndicesFilter,
std::vector<IndexEntry>* indexEntries) {
@@ -368,7 +381,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
"Collection {ns} does not exist. Using EOF plan: {canonicalQuery_Short}",
"ns"_attr = ns,
"canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()));
- root = std::make_unique<EOFStage>(opCtx);
+ root = std::make_unique<EOFStage>(canonicalQuery->getExpCtx().get());
return PrepareExecutionResult(std::move(canonicalQuery), nullptr, std::move(root));
}
@@ -377,10 +390,10 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
plannerParams.options = plannerOptions;
fillOutPlannerParams(opCtx, collection, canonicalQuery.get(), &plannerParams);
- // If the canonical query does not have a user-specified collation, set it from the collection
- // default.
+ // If the canonical query does not have a user-specified collation and no one has given the
+ // CanonicalQuery a collation already, set it from the collection default.
if (canonicalQuery->getQueryRequest().getCollation().isEmpty() &&
- collection->getDefaultCollator()) {
+ canonicalQuery->getCollator() == nullptr && collection->getDefaultCollator()) {
canonicalQuery->setCollator(collection->getDefaultCollator()->clone());
}
@@ -393,12 +406,13 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
"Using idhack: {canonicalQuery_Short}",
"canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()));
- root = std::make_unique<IDHackStage>(opCtx, canonicalQuery.get(), ws, descriptor);
+ root = std::make_unique<IDHackStage>(
+ canonicalQuery->getExpCtx().get(), canonicalQuery.get(), ws, descriptor);
// Might have to filter out orphaned docs.
if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
root = std::make_unique<ShardFilterStage>(
- opCtx,
+ canonicalQuery->getExpCtx().get(),
CollectionShardingState::get(opCtx, canonicalQuery->nss())
->getOwnershipFilter(opCtx),
ws,
@@ -410,7 +424,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
// Add a SortKeyGeneratorStage if the query requested sortKey metadata.
if (canonicalQuery->metadataDeps()[DocumentMetadataFields::kSortKey]) {
root = std::make_unique<SortKeyGeneratorStage>(
- canonicalQuery->getExpCtx(),
+ canonicalQuery->getExpCtx().get(),
std::move(root),
ws,
canonicalQuery->getQueryRequest().getSort());
@@ -422,7 +436,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
// the exception the $meta sortKey projection, which can be used along with the
// returnKey.
root = std::make_unique<ReturnKeyStage>(
- opCtx,
+ canonicalQuery->getExpCtx().get(),
cqProjection
? QueryPlannerCommon::extractSortKeyMetaFieldsFromProjection(*cqProjection)
: std::vector<FieldPath>{},
@@ -443,7 +457,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
std::move(root));
} else {
root = std::make_unique<ProjectionStageSimple>(
- canonicalQuery->getExpCtx(),
+ canonicalQuery->getExpCtx().get(),
canonicalQuery->getQueryRequest().getProj(),
canonicalQuery->getProj(),
ws,
@@ -498,13 +512,14 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
//
// 'decisionWorks' is used to determine whether the existing cache entry should
// be evicted, and the query replanned.
- auto cachedPlanStage = std::make_unique<CachedPlanStage>(opCtx,
- collection,
- ws,
- canonicalQuery.get(),
- plannerParams,
- cs->decisionWorks,
- std::move(root));
+ auto cachedPlanStage =
+ std::make_unique<CachedPlanStage>(canonicalQuery->getExpCtx().get(),
+ collection,
+ ws,
+ canonicalQuery.get(),
+ plannerParams,
+ cs->decisionWorks,
+ std::move(root));
return PrepareExecutionResult(std::move(canonicalQuery),
std::move(querySolution),
std::move(cachedPlanStage));
@@ -520,7 +535,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
"canonicalQuery_Short"_attr = redact(canonicalQuery->toStringShort()));
root = std::make_unique<SubplanStage>(
- opCtx, collection, ws, plannerParams, canonicalQuery.get());
+ canonicalQuery->getExpCtx().get(), collection, ws, plannerParams, canonicalQuery.get());
return PrepareExecutionResult(std::move(canonicalQuery), nullptr, std::move(root));
}
@@ -572,8 +587,8 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
} else {
// Many solutions. Create a MultiPlanStage to pick the best, update the cache,
// and so on. The working set will be shared by all candidate plans.
- auto multiPlanStage =
- std::make_unique<MultiPlanStage>(opCtx, collection, canonicalQuery.get());
+ auto multiPlanStage = std::make_unique<MultiPlanStage>(
+ canonicalQuery->getExpCtx().get(), collection, canonicalQuery.get());
for (size_t ix = 0; ix < solutions.size(); ++ix) {
if (solutions[ix]->cacheData.get()) {
@@ -715,11 +730,12 @@ StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* opCtx,
//
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
- OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedDelete* parsedDelete,
boost::optional<ExplainOptions::Verbosity> verbosity) {
+ auto expCtx = parsedDelete->expCtx();
+ OperationContext* opCtx = expCtx->opCtx;
const DeleteRequest* request = parsedDelete->getRequest();
const NamespaceString& nss(request->getNamespaceString());
@@ -763,7 +779,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
"nss_ns"_attr = nss.ns(),
"request_getQuery"_attr = redact(request->getQuery()));
return PlanExecutor::make(
- opCtx, std::move(ws), std::make_unique<EOFStage>(opCtx), nullptr, policy, nss);
+ opCtx, std::move(ws), std::make_unique<EOFStage>(expCtx.get()), nullptr, policy, nss);
}
if (!parsedDelete->hasParsedQuery()) {
@@ -794,9 +810,13 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
"unparsedQuery"_attr = redact(unparsedQuery));
auto idHackStage = std::make_unique<IDHackStage>(
- opCtx, unparsedQuery["_id"].wrap(), ws.get(), descriptor);
- unique_ptr<DeleteStage> root = std::make_unique<DeleteStage>(
- opCtx, std::move(deleteStageParams), ws.get(), collection, idHackStage.release());
+ expCtx.get(), unparsedQuery["_id"].wrap(), ws.get(), descriptor);
+ unique_ptr<DeleteStage> root =
+ std::make_unique<DeleteStage>(expCtx.get(),
+ std::move(deleteStageParams),
+ ws.get(),
+ collection,
+ idHackStage.release());
return PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, policy);
}
@@ -831,7 +851,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
invariant(root);
root = std::make_unique<DeleteStage>(
- opCtx, std::move(deleteStageParams), ws.get(), collection, root.release());
+ cq->getExpCtx().get(), std::move(deleteStageParams), ws.get(), collection, root.release());
if (!request->getProj().isEmpty()) {
invariant(request->shouldReturnDeleted());
@@ -861,11 +881,13 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
//
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
- OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedUpdate* parsedUpdate,
boost::optional<ExplainOptions::Verbosity> verbosity) {
+ auto expCtx = parsedUpdate->expCtx();
+ OperationContext* opCtx = expCtx->opCtx;
+
const UpdateRequest* request = parsedUpdate->getRequest();
UpdateDriver* driver = parsedUpdate->getDriver();
@@ -918,7 +940,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
"nss_ns"_attr = nss.ns(),
"request_getQuery"_attr = redact(request->getQuery()));
return PlanExecutor::make(
- opCtx, std::move(ws), std::make_unique<EOFStage>(opCtx), nullptr, policy, nss);
+ opCtx, std::move(ws), std::make_unique<EOFStage>(expCtx.get()), nullptr, policy, nss);
}
// Pass index information to the update driver, so that it can determine for us whether the
@@ -937,7 +959,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(opCtx);
const bool hasCollectionDefaultCollation = CollatorInterface::collatorsMatch(
- parsedUpdate->getCollator(), collection->getDefaultCollator());
+ expCtx->getCollator(), collection->getDefaultCollator());
if (descriptor && CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
request->getProj().isEmpty() && hasCollectionDefaultCollation) {
@@ -988,10 +1010,12 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
updateStageParams.canonicalQuery = cq.get();
const bool isUpsert = updateStageParams.request->isUpsert();
- root = (isUpsert ? std::make_unique<UpsertStage>(
- opCtx, updateStageParams, ws.get(), collection, root.release())
- : std::make_unique<UpdateStage>(
- opCtx, updateStageParams, ws.get(), collection, root.release()));
+ root =
+ (isUpsert
+ ? std::make_unique<UpsertStage>(
+ cq->getExpCtx().get(), updateStageParams, ws.get(), collection, root.release())
+ : std::make_unique<UpdateStage>(
+ cq->getExpCtx().get(), updateStageParams, ws.get(), collection, root.release()));
if (!request->getProj().isEmpty()) {
invariant(request->shouldReturnAnyDocs());
@@ -1157,11 +1181,12 @@ bool getDistinctNodeIndex(const std::vector<IndexEntry>& indices,
} // namespace
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
- OperationContext* opCtx,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
Collection* collection,
const CountCommand& request,
bool explain,
const NamespaceString& nss) {
+ OperationContext* opCtx = expCtx->opCtx;
unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
auto qr = std::make_unique<QueryRequest>(nss);
@@ -1171,7 +1196,6 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
qr->setHint(request.getHint());
qr->setExplain(explain);
- const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ = CanonicalQuery::canonicalize(
opCtx,
std::move(qr),
@@ -1197,7 +1221,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
// machinery always assumes that the root stage for a count operation is a CountStage, so in
// this case we put a CountStage on top of an EOFStage.
unique_ptr<PlanStage> root = std::make_unique<CountStage>(
- opCtx, collection, limit, skip, ws.get(), new EOFStage(opCtx));
+ expCtx.get(), collection, limit, skip, ws.get(), new EOFStage(expCtx.get()));
return PlanExecutor::make(opCtx, std::move(ws), std::move(root), nullptr, yieldPolicy, nss);
}
@@ -1212,7 +1236,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
if (useRecordStoreCount) {
unique_ptr<PlanStage> root =
- std::make_unique<RecordStoreFastCountStage>(opCtx, collection, skip, limit);
+ std::make_unique<RecordStoreFastCountStage>(expCtx.get(), collection, skip, limit);
return PlanExecutor::make(opCtx, std::move(ws), std::move(root), nullptr, yieldPolicy, nss);
}
@@ -1233,7 +1257,8 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
invariant(root);
// Make a CountStage to be the new root.
- root = std::make_unique<CountStage>(opCtx, collection, limit, skip, ws.get(), root.release());
+ root = std::make_unique<CountStage>(
+ expCtx.get(), collection, limit, skip, ws.get(), root.release());
// We must have a tree of stages in order to have a valid plan executor, but the query
// solution may be NULL. Takes ownership of all args other than 'collection' and 'opCtx'
return PlanExecutor::make(std::move(cq),
@@ -1641,10 +1666,9 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorWithoutPr
} // namespace
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDistinct(
- OperationContext* opCtx,
- Collection* collection,
- size_t plannerOptions,
- ParsedDistinct* parsedDistinct) {
+ Collection* collection, size_t plannerOptions, ParsedDistinct* parsedDistinct) {
+ auto expCtx = parsedDistinct->getQuery()->getExpCtx();
+ OperationContext* opCtx = expCtx->opCtx;
const auto yieldPolicy = opCtx->inMultiDocumentTransaction() ? PlanExecutor::INTERRUPT_ONLY
: PlanExecutor::YIELD_AUTO;
@@ -1652,7 +1676,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDistinct(
// Treat collections that do not exist as empty collections.
return PlanExecutor::make(parsedDistinct->releaseQuery(),
std::make_unique<WorkingSet>(),
- std::make_unique<EOFStage>(opCtx),
+ std::make_unique<EOFStage>(expCtx.get()),
collection,
yieldPolicy);
}
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index 99e1657c6a7..4390557ef05 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -49,6 +49,17 @@ class Collection;
class CountRequest;
/**
+ * Make an ExpressionContext to be used for non-aggregate commands. The result of this can be passed
+ * into any of the getExecutor* functions.
+ *
+ * Note that the getExecutor* functions may change the collation on the returned ExpressionContext
+ * if the collection has a default collation and no collation was specifically requested
+ * ('requestCollation' is empty).
+ */
+boost::intrusive_ptr<ExpressionContext> makeExpressionContextForGetExecutor(
+ OperationContext* opCtx, const BSONObj& requestCollation, const NamespaceString& nss);
+
+/**
* Filter indexes retrieved from index catalog by
* allowed indices in query settings.
* Used by getExecutor().
@@ -192,10 +203,7 @@ bool turnIxscanIntoDistinctIxscan(QuerySolution* soln,
* distinct.
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDistinct(
- OperationContext* opCtx,
- Collection* collection,
- size_t plannerOptions,
- ParsedDistinct* parsedDistinct);
+ Collection* collection, size_t plannerOptions, ParsedDistinct* parsedDistinct);
/*
* Get a PlanExecutor for a query executing as part of a count command.
@@ -205,7 +213,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDist
* executing a count.
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
- OperationContext* opCtx,
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
Collection* collection,
const CountCommand& request,
bool explain,
@@ -231,7 +239,6 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCoun
* If the query cannot be executed, returns a Status indicating why.
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDelete(
- OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedDelete* parsedDelete,
@@ -258,7 +265,6 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDele
* If the query cannot be executed, returns a Status indicating why.
*/
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorUpdate(
- OperationContext* opCtx,
OpDebug* opDebug,
Collection* collection,
ParsedUpdate* parsedUpdate,
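[Editor's note] Taken together, the new entry point and the trimmed getExecutor* signatures give commands one pattern: build the ExpressionContext up front, then hand it (or a ParsedUpdate/ParsedDelete carrying it) to the executor factory. A sketch for count, assuming the caller already holds the collection lock and has a parsed CountCommand in 'request':

// An empty collation BSONObj means no user-specified collation; the getExecutor* machinery
// may still install the collection default on the returned context.
auto expCtx = makeExpressionContextForGetExecutor(opCtx, BSONObj(), nss);
auto exec = uassertStatusOK(
    getExecutorCount(expCtx, collection, request, false /* explain */, nss));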
diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp
index a9bc1b5021a..238f6636e0f 100644
--- a/src/mongo/db/query/internal_plans.cpp
+++ b/src/mongo/db/query/internal_plans.cpp
@@ -55,8 +55,11 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collection
const Direction direction) {
std::unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>();
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), NamespaceString(ns));
+
if (nullptr == collection) {
- auto eof = std::make_unique<EOFStage>(opCtx);
+ auto eof = std::make_unique<EOFStage>(expCtx.get());
// Takes ownership of 'ws' and 'eof'.
auto statusWithPlanExecutor = PlanExecutor::make(
opCtx, std::move(ws), std::move(eof), nullptr, yieldPolicy, NamespaceString(ns));
@@ -66,7 +69,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collection
invariant(ns == collection->ns().ns());
- auto cs = _collectionScan(opCtx, ws.get(), collection, direction);
+ auto cs = _collectionScan(expCtx, ws.get(), collection, direction);
// Takes ownership of 'ws' and 'cs'.
auto statusWithPlanExecutor =
@@ -84,10 +87,13 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
invariant(collection);
auto ws = std::make_unique<WorkingSet>();
- auto root = _collectionScan(opCtx, ws.get(), collection, direction);
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
+
+ auto root = _collectionScan(expCtx, ws.get(), collection, direction);
root = std::make_unique<DeleteStage>(
- opCtx, std::move(params), ws.get(), collection, root.release());
+ expCtx.get(), std::move(params), ws.get(), collection, root.release());
auto executor =
PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, yieldPolicy);
@@ -108,7 +114,10 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::indexScan(
int options) {
auto ws = std::make_unique<WorkingSet>();
- std::unique_ptr<PlanStage> root = _indexScan(opCtx,
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
+
+ std::unique_ptr<PlanStage> root = _indexScan(expCtx,
ws.get(),
collection,
descriptor,
@@ -137,7 +146,10 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
invariant(collection);
auto ws = std::make_unique<WorkingSet>();
- std::unique_ptr<PlanStage> root = _indexScan(opCtx,
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
+
+ std::unique_ptr<PlanStage> root = _indexScan(expCtx,
ws.get(),
collection,
descriptor,
@@ -148,7 +160,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::deleteWith
InternalPlanner::IXSCAN_FETCH);
root = std::make_unique<DeleteStage>(
- opCtx, std::move(params), ws.get(), collection, root.release());
+ expCtx.get(), std::move(params), ws.get(), collection, root.release());
auto executor =
PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, yieldPolicy);
@@ -166,13 +178,16 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::updateWith
invariant(collection);
auto ws = std::make_unique<WorkingSet>();
- auto idHackStage = std::make_unique<IDHackStage>(opCtx, key, ws.get(), descriptor);
+ auto expCtx = make_intrusive<ExpressionContext>(
+ opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
+
+ auto idHackStage = std::make_unique<IDHackStage>(expCtx.get(), key, ws.get(), descriptor);
const bool isUpsert = params.request->isUpsert();
auto root = (isUpsert ? std::make_unique<UpsertStage>(
- opCtx, params, ws.get(), collection, idHackStage.release())
+ expCtx.get(), params, ws.get(), collection, idHackStage.release())
: std::make_unique<UpdateStage>(
- opCtx, params, ws.get(), collection, idHackStage.release()));
+ expCtx.get(), params, ws.get(), collection, idHackStage.release()));
auto executor =
PlanExecutor::make(opCtx, std::move(ws), std::move(root), collection, yieldPolicy);
@@ -180,14 +195,16 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::updateWith
return std::move(executor.getValue());
}
-std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* opCtx,
- WorkingSet* ws,
- const Collection* collection,
- Direction direction) {
+std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ WorkingSet* ws,
+ const Collection* collection,
+ Direction direction) {
invariant(collection);
CollectionScanParams params;
- params.shouldWaitForOplogVisibility = shouldWaitForOplogVisibility(opCtx, collection, false);
+ params.shouldWaitForOplogVisibility =
+ shouldWaitForOplogVisibility(expCtx->opCtx, collection, false);
if (FORWARD == direction) {
params.direction = CollectionScanParams::FORWARD;
@@ -195,22 +212,23 @@ std::unique_ptr<PlanStage> InternalPlanner::_collectionScan(OperationContext* op
params.direction = CollectionScanParams::BACKWARD;
}
- return std::make_unique<CollectionScan>(opCtx, collection, params, ws, nullptr);
+ return std::make_unique<CollectionScan>(expCtx.get(), collection, params, ws, nullptr);
}
-std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* opCtx,
- WorkingSet* ws,
- const Collection* collection,
- const IndexDescriptor* descriptor,
- const BSONObj& startKey,
- const BSONObj& endKey,
- BoundInclusion boundInclusion,
- Direction direction,
- int options) {
+std::unique_ptr<PlanStage> InternalPlanner::_indexScan(
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ WorkingSet* ws,
+ const Collection* collection,
+ const IndexDescriptor* descriptor,
+ const BSONObj& startKey,
+ const BSONObj& endKey,
+ BoundInclusion boundInclusion,
+ Direction direction,
+ int options) {
invariant(collection);
invariant(descriptor);
- IndexScanParams params(opCtx, descriptor);
+ IndexScanParams params(expCtx->opCtx, descriptor);
params.direction = direction;
params.bounds.isSimpleRange = true;
params.bounds.startKey = startKey;
@@ -219,10 +237,10 @@ std::unique_ptr<PlanStage> InternalPlanner::_indexScan(OperationContext* opCtx,
params.shouldDedup = descriptor->isMultikey();
std::unique_ptr<PlanStage> root =
- std::make_unique<IndexScan>(opCtx, std::move(params), ws, nullptr);
+ std::make_unique<IndexScan>(expCtx.get(), std::move(params), ws, nullptr);
if (InternalPlanner::IXSCAN_FETCH & options) {
- root = std::make_unique<FetchStage>(opCtx, ws, std::move(root), nullptr, collection);
+ root = std::make_unique<FetchStage>(expCtx.get(), ws, std::move(root), nullptr, collection);
}
return root;
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index a846a55b60b..228f6f4fd97 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -130,25 +130,27 @@ private:
*
* Used as a helper for collectionScan() and deleteWithCollectionScan().
*/
- static std::unique_ptr<PlanStage> _collectionScan(OperationContext* opCtx,
- WorkingSet* ws,
- const Collection* collection,
- Direction direction);
+ static std::unique_ptr<PlanStage> _collectionScan(
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ WorkingSet* ws,
+ const Collection* collection,
+ Direction direction);
/**
* Returns a plan stage that is either an index scan or an index scan with a fetch stage.
*
* Used as a helper for indexScan() and deleteWithIndexScan().
*/
- static std::unique_ptr<PlanStage> _indexScan(OperationContext* opCtx,
- WorkingSet* ws,
- const Collection* collection,
- const IndexDescriptor* descriptor,
- const BSONObj& startKey,
- const BSONObj& endKey,
- BoundInclusion boundInclusion,
- Direction direction = FORWARD,
- int options = IXSCAN_DEFAULT);
+ static std::unique_ptr<PlanStage> _indexScan(
+ const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ WorkingSet* ws,
+ const Collection* collection,
+ const IndexDescriptor* descriptor,
+ const BSONObj& startKey,
+ const BSONObj& endKey,
+ BoundInclusion boundInclusion,
+ Direction direction = FORWARD,
+ int options = IXSCAN_DEFAULT);
};
} // namespace mongo
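[Editor's note] The same pattern shows up in the internal planner: each internal plan now materializes a short-lived ExpressionContext with the simple (null) collator and passes the raw pointer to the PlanStage constructors. A minimal sketch of that stage-building step; 'opCtx' and 'collection' are assumed to be supplied by the internal caller.

// Sketch only: mirrors what InternalPlanner::_collectionScan() does after this change.
auto expCtx = make_intrusive<ExpressionContext>(
    opCtx, std::unique_ptr<CollatorInterface>(nullptr), collection->ns());
auto ws = std::make_unique<WorkingSet>();
std::unique_ptr<PlanStage> root = std::make_unique<CollectionScan>(
    expCtx.get(), collection, CollectionScanParams{}, ws.get(), nullptr /* filter */);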
diff --git a/src/mongo/db/query/plan_cache_indexability_test.cpp b/src/mongo/db/query/plan_cache_indexability_test.cpp
index 471e57bfafb..2748bfd85ac 100644
--- a/src/mongo/db/query/plan_cache_indexability_test.cpp
+++ b/src/mongo/db/query/plan_cache_indexability_test.cpp
@@ -41,10 +41,19 @@
namespace mongo {
namespace {
-std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj,
- const CollatorInterface* collator = nullptr) {
- boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(collator);
+/**
+ * Produce a MatchExpression from BSON.
+ *
+ * If the caller would like the MatchExpression to have a collation associated with it, they may
+ * pass in an ExpressionContext owning the collation. Otherwise the caller may pass nullptr and a
+ * default-constructed ExpressionContextForTest will be used.
+ */
+std::unique_ptr<MatchExpression> parseMatchExpression(
+ const BSONObj& obj, boost::intrusive_ptr<ExpressionContext> expCtx = nullptr) {
+ if (!expCtx) {
+ expCtx = make_intrusive<ExpressionContextForTest>();
+ }
+
StatusWithMatchExpression status = MatchExpressionParser::parse(obj, std::move(expCtx));
if (!status.isOK()) {
FAIL(str::stream() << "failed to parse query: " << obj.toString()
@@ -400,6 +409,9 @@ TEST(PlanCacheIndexabilityTest, DiscriminatorForCollationIndicatesWhenCollations
entry.collator = &collator;
state.updateDiscriminators({entry});
+ boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
+ expCtx->setCollator(collator.clone());
+
auto discriminators = state.getDiscriminators("a");
ASSERT_EQ(1U, discriminators.size());
ASSERT(discriminators.find("a_1") != discriminators.end());
@@ -409,14 +421,13 @@ TEST(PlanCacheIndexabilityTest, DiscriminatorForCollationIndicatesWhenCollations
// Index collator matches query collator.
ASSERT_EQ(true,
disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: 'abc'}"), &collator).get()));
+ parseMatchExpression(fromjson("{a: 'abc'}"), expCtx).get()));
ASSERT_EQ(true,
disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: {$in: ['abc', 'xyz']}}"), &collator).get()));
- ASSERT_EQ(
- true,
- disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: {$_internalExprEq: 'abc'}}}"), &collator).get()));
+ parseMatchExpression(fromjson("{a: {$in: ['abc', 'xyz']}}"), expCtx).get()));
+ ASSERT_EQ(true,
+ disc.isMatchCompatibleWithIndex(
+ parseMatchExpression(fromjson("{a: {$_internalExprEq: 'abc'}}}"), expCtx).get()));
// Expression is not a ComparisonMatchExpression, InternalExprEqMatchExpression or
// InMatchExpression.
@@ -547,6 +558,10 @@ TEST(PlanCacheIndexabilityTest, WildcardWithCollationDiscriminator) {
auto entryProjExecPair = makeWildcardEntry(BSON("a.$**" << 1));
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
entryProjExecPair.first.collator = &collator;
+
+ boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
+ expCtx->setCollator(collator.clone());
+
state.updateDiscriminators({entryProjExecPair.first});
const auto unindexedPathDiscriminators = state.buildWildcardDiscriminators("notIndexed");
@@ -563,7 +578,7 @@ TEST(PlanCacheIndexabilityTest, WildcardWithCollationDiscriminator) {
parseMatchExpression(fromjson("{a: \"hello world\"}"), nullptr).get()));
// Match expression which uses the same collation as the index is.
ASSERT_TRUE(disc.isMatchCompatibleWithIndex(
- parseMatchExpression(fromjson("{a: \"hello world\"}"), &collator).get()));
+ parseMatchExpression(fromjson("{a: \"hello world\"}"), expCtx).get()));
}
TEST(PlanCacheIndexabilityTest, WildcardPartialIndexDiscriminator) {
diff --git a/src/mongo/db/query/projection_test.cpp b/src/mongo/db/query/projection_test.cpp
index b52d27bfb80..9050416deef 100644
--- a/src/mongo/db/query/projection_test.cpp
+++ b/src/mongo/db/query/projection_test.cpp
@@ -55,9 +55,8 @@ projection_ast::Projection createProjection(const BSONObj& query,
ProjectionPolicies policies = {}) {
QueryTestServiceContext serviceCtx;
auto opCtx = serviceCtx.makeOperationContext();
- const CollatorInterface* collator = nullptr;
const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx.get(), collator, kTestNss));
+ new ExpressionContext(opCtx.get(), std::unique_ptr<CollatorInterface>(nullptr), kTestNss));
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(query, std::move(expCtx));
ASSERT_OK(statusWithMatcher.getStatus());
@@ -85,9 +84,8 @@ void assertInvalidProjection(const char* queryStr, const char* projStr) {
BSONObj projObj = fromjson(projStr);
QueryTestServiceContext serviceCtx;
auto opCtx = serviceCtx.makeOperationContext();
- const CollatorInterface* collator = nullptr;
const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx.get(), collator, kTestNss));
+ new ExpressionContext(opCtx.get(), std::unique_ptr<CollatorInterface>(nullptr), kTestNss));
StatusWithMatchExpression statusWithMatcher =
MatchExpressionParser::parse(query, std::move(expCtx));
ASSERT_OK(statusWithMatcher.getStatus());
diff --git a/src/mongo/db/query/query_planner_partialidx_test.cpp b/src/mongo/db/query/query_planner_partialidx_test.cpp
index 124b75c7518..ecb574fa728 100644
--- a/src/mongo/db/query/query_planner_partialidx_test.cpp
+++ b/src/mongo/db/query/query_planner_partialidx_test.cpp
@@ -36,6 +36,8 @@
namespace mongo {
namespace {
+const static NamespaceString kNs("db.dummyNs");
+
TEST_F(QueryPlannerTest, PartialIndexEq) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
BSONObj filterObj(fromjson("{a: {$gt: 0}}"));
@@ -448,9 +450,14 @@ TEST_F(QueryPlannerTest, PartialIndexNor) {
TEST_F(QueryPlannerTest, PartialIndexStringComparisonMatchingCollators) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
BSONObj filterObj(fromjson("{a: {$gt: 'cba'}}"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
- std::unique_ptr<MatchExpression> filterExpr = parseMatchExpression(filterObj, &collator);
- addIndex(fromjson("{a: 1}"), filterExpr.get(), &collator);
+
+ auto expCtxForPartialFilter = make_intrusive<ExpressionContext>(
+ opCtx.get(),
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString),
+ kNs);
+ std::unique_ptr<MatchExpression> filterExpr =
+ parseMatchExpression(filterObj, expCtxForPartialFilter);
+ addIndex(fromjson("{a: 1}"), filterExpr.get(), expCtxForPartialFilter->getCollator());
runQueryAsCommand(
fromjson("{find: 'testns', filter: {a: 'abc'}, collation: {locale: 'reverse'}}"));
@@ -468,9 +475,14 @@ TEST_F(QueryPlannerTest, PartialIndexStringComparisonMatchingCollators) {
TEST_F(QueryPlannerTest, PartialIndexNoStringComparisonNonMatchingCollators) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
BSONObj filterObj(fromjson("{a: {$gt: 0}}"));
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
- std::unique_ptr<MatchExpression> filterExpr = parseMatchExpression(filterObj, &collator);
- addIndex(fromjson("{a: 1}"), filterExpr.get(), &collator);
+
+ auto expCtxForPartialFilter = make_intrusive<ExpressionContext>(
+ opCtx.get(),
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString),
+ kNs);
+ std::unique_ptr<MatchExpression> filterExpr =
+ parseMatchExpression(filterObj, expCtxForPartialFilter);
+ addIndex(fromjson("{a: 1}"), filterExpr.get(), expCtxForPartialFilter->getCollator());
runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1}, collation: {locale: 'reverse'}}"));
assertNumSolutions(1U);
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index 0ea76a16783..805fccdbc1f 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -53,6 +53,8 @@ const NamespaceString QueryPlannerTest::nss("test.collection");
void QueryPlannerTest::setUp() {
opCtx = serviceContext.makeOperationContext();
+ expCtx = make_intrusive<ExpressionContext>(
+ opCtx.get(), std::unique_ptr<CollatorInterface>(nullptr), nss);
internalQueryPlannerEnableHashIntersection.store(true);
params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
addIndex(BSON("_id" << 1));
@@ -62,6 +64,7 @@ void QueryPlannerTest::clearState() {
plannerStatus = Status::OK();
solns.clear();
cq.reset();
+ expCtx.reset();
relaxBoundsCheck = false;
}
@@ -327,7 +330,6 @@ void QueryPlannerTest::runQueryFull(const BSONObj& query,
qr->setHint(hint);
qr->setMin(minObj);
qr->setMax(maxObj);
- const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
CanonicalQuery::canonicalize(opCtx.get(),
std::move(qr),
@@ -408,7 +410,6 @@ void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
qr->setHint(hint);
qr->setMin(minObj);
qr->setMax(maxObj);
- const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
CanonicalQuery::canonicalize(opCtx.get(),
std::move(qr),
@@ -432,7 +433,6 @@ void QueryPlannerTest::runQueryAsCommand(const BSONObj& cmdObj) {
std::unique_ptr<QueryRequest> qr(
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
- const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
CanonicalQuery::canonicalize(opCtx.get(),
std::move(qr),
@@ -456,7 +456,6 @@ void QueryPlannerTest::runInvalidQueryAsCommand(const BSONObj& cmdObj) {
std::unique_ptr<QueryRequest> qr(
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
- const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
CanonicalQuery::canonicalize(opCtx.get(),
std::move(qr),
@@ -550,10 +549,13 @@ void QueryPlannerTest::assertHasOnlyCollscan() const {
}
std::unique_ptr<MatchExpression> QueryPlannerTest::parseMatchExpression(
- const BSONObj& obj, const CollatorInterface* collator) {
- boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(collator);
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj, std::move(expCtx));
+ const BSONObj& obj, const boost::intrusive_ptr<ExpressionContext>& optionalExpCtx) {
+ auto expCtx = optionalExpCtx;
+ if (!expCtx.get()) {
+ expCtx = make_intrusive<ExpressionContextForTest>();
+ }
+
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj, expCtx);
if (!status.isOK()) {
FAIL(str::stream() << "failed to parse query: " << obj.toString()
<< ". Reason: " << status.getStatus().toString());
diff --git a/src/mongo/db/query/query_planner_test_fixture.h b/src/mongo/db/query/query_planner_test_fixture.h
index 4743f505fa2..d133a83002c 100644
--- a/src/mongo/db/query/query_planner_test_fixture.h
+++ b/src/mongo/db/query/query_planner_test_fixture.h
@@ -222,9 +222,16 @@ protected:
/**
* Helper function to parse a MatchExpression.
+ *
+ * If the caller wants a collator to be used with the match expression, pass an expression
+ * context owning that collator as the second argument. The expression context passed must
+ * outlive the returned match expression.
+ *
+ * If no ExpressionContext is passed, a default-constructed ExpressionContextForTest will be
+ * used.
*/
- static std::unique_ptr<MatchExpression> parseMatchExpression(
- const BSONObj& obj, const CollatorInterface* collator = nullptr);
+ std::unique_ptr<MatchExpression> parseMatchExpression(
+ const BSONObj& obj, const boost::intrusive_ptr<ExpressionContext>& expCtx = nullptr);
//
// Data members.
@@ -234,6 +241,8 @@ protected:
QueryTestServiceContext serviceContext;
ServiceContext::UniqueOperationContext opCtx;
+ boost::intrusive_ptr<ExpressionContext> expCtx;
+
BSONObj queryObj;
std::unique_ptr<CanonicalQuery> cq;
QueryPlannerParams params;
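An illustrative sketch (an editor's note, not part of the patch) of how a planner test might use
the new parseMatchExpression() overload. It assumes the fixture's nss and opCtx members from the
hunks above, and the ExpressionContext must outlive the returned MatchExpression:

    // Build an ExpressionContext that owns the collator the filter should be parsed with.
    auto expCtxWithCollator = make_intrusive<ExpressionContext>(
        opCtx.get(),
        std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString),
        nss);
    // Parse the filter against that context; the owned collator is picked up automatically.
    std::unique_ptr<MatchExpression> filter =
        parseMatchExpression(fromjson("{a: {$gt: 'abc'}}"), expCtxWithCollator);
    // Keep expCtxWithCollator alive for as long as 'filter' is used.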
diff --git a/src/mongo/db/query/query_planner_test_lib.cpp b/src/mongo/db/query/query_planner_test_lib.cpp
index dc1ee631607..2abe9e00aa0 100644
--- a/src/mongo/db/query/query_planner_test_lib.cpp
+++ b/src/mongo/db/query/query_planner_test_lib.cpp
@@ -76,9 +76,8 @@ bool filterMatches(const BSONObj& testFilter,
}
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(testCollator.get());
- StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(testFilter, std::move(expCtx));
+ expCtx->setCollator(std::move(testCollator));
+ StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(testFilter, expCtx);
if (!statusWithMatcher.isOK()) {
return false;
}
diff --git a/src/mongo/db/query/query_solution_test.cpp b/src/mongo/db/query/query_solution_test.cpp
index 142ed16fd2a..fb79d1e69b6 100644
--- a/src/mongo/db/query/query_solution_test.cpp
+++ b/src/mongo/db/query/query_solution_test.cpp
@@ -717,9 +717,8 @@ TEST(QuerySolutionTest, IndexScanNodeHasFieldExcludesSimpleBoundsStringFieldWhen
auto createMatchExprAndProjection(const BSONObj& query, const BSONObj& projObj) {
QueryTestServiceContext serviceCtx;
auto opCtx = serviceCtx.makeOperationContext();
- const CollatorInterface* collator = nullptr;
- const boost::intrusive_ptr<ExpressionContext> expCtx(
- new ExpressionContext(opCtx.get(), collator, NamespaceString("test.dummy")));
+ const boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(
+ opCtx.get(), std::unique_ptr<CollatorInterface>(nullptr), NamespaceString("test.dummy")));
StatusWithMatchExpression queryMatchExpr =
MatchExpressionParser::parse(query, std::move(expCtx));
ASSERT(queryMatchExpr.isOK());
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 907f8018f27..01005defd04 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -74,6 +74,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
const QuerySolution& qsol,
const QuerySolutionNode* root,
WorkingSet* ws) {
+ auto* const expCtx = cq.getExpCtx().get();
switch (root->getType()) {
case STAGE_COLLSCAN: {
const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(root);
@@ -89,7 +90,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
params.resumeAfterRecordId = csn->resumeAfterRecordId;
params.stopApplyingFilterAfterFirstMatch = csn->stopApplyingFilterAfterFirstMatch;
return std::make_unique<CollectionScan>(
- opCtx, collection, params, ws, csn->filter.get());
+ expCtx, collection, params, ws, csn->filter.get());
}
case STAGE_IXSCAN: {
const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root);
@@ -113,13 +114,13 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
params.direction = ixn->direction;
params.addKeyMetadata = ixn->addKeyMetadata;
params.shouldDedup = ixn->shouldDedup;
- return std::make_unique<IndexScan>(opCtx, std::move(params), ws, ixn->filter.get());
+ return std::make_unique<IndexScan>(expCtx, std::move(params), ws, ixn->filter.get());
}
case STAGE_FETCH: {
const FetchNode* fn = static_cast<const FetchNode*>(root);
auto childStage = buildStages(opCtx, collection, cq, qsol, fn->children[0], ws);
return std::make_unique<FetchStage>(
- opCtx, ws, std::move(childStage), fn->filter.get(), collection);
+ expCtx, ws, std::move(childStage), fn->filter.get(), collection);
}
case STAGE_SORT_DEFAULT: {
auto snDefault = static_cast<const SortNodeDefault*>(root);
@@ -155,7 +156,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
auto returnKeyNode = static_cast<const ReturnKeyNode*>(root);
auto childStage =
buildStages(opCtx, collection, cq, qsol, returnKeyNode->children[0], ws);
- return std::make_unique<ReturnKeyStage>(opCtx,
+ return std::make_unique<ReturnKeyStage>(expCtx,
std::move(returnKeyNode->sortKeyMetaFields),
ws,
cq.getExpCtx()->sortKeyFormat,
@@ -173,7 +174,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
case STAGE_PROJECTION_COVERED: {
auto pn = static_cast<const ProjectionNodeCovered*>(root);
auto childStage = buildStages(opCtx, collection, cq, qsol, pn->children[0], ws);
- return std::make_unique<ProjectionStageCovered>(cq.getExpCtx(),
+ return std::make_unique<ProjectionStageCovered>(cq.getExpCtx().get(),
cq.getQueryRequest().getProj(),
cq.getProj(),
ws,
@@ -183,7 +184,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
case STAGE_PROJECTION_SIMPLE: {
auto pn = static_cast<const ProjectionNodeSimple*>(root);
auto childStage = buildStages(opCtx, collection, cq, qsol, pn->children[0], ws);
- return std::make_unique<ProjectionStageSimple>(cq.getExpCtx(),
+ return std::make_unique<ProjectionStageSimple>(cq.getExpCtx().get(),
cq.getQueryRequest().getProj(),
cq.getProj(),
ws,
@@ -192,16 +193,16 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
case STAGE_LIMIT: {
const LimitNode* ln = static_cast<const LimitNode*>(root);
auto childStage = buildStages(opCtx, collection, cq, qsol, ln->children[0], ws);
- return std::make_unique<LimitStage>(opCtx, ln->limit, ws, std::move(childStage));
+ return std::make_unique<LimitStage>(expCtx, ln->limit, ws, std::move(childStage));
}
case STAGE_SKIP: {
const SkipNode* sn = static_cast<const SkipNode*>(root);
auto childStage = buildStages(opCtx, collection, cq, qsol, sn->children[0], ws);
- return std::make_unique<SkipStage>(opCtx, sn->skip, ws, std::move(childStage));
+ return std::make_unique<SkipStage>(expCtx, sn->skip, ws, std::move(childStage));
}
case STAGE_AND_HASH: {
const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
- auto ret = std::make_unique<AndHashStage>(opCtx, ws);
+ auto ret = std::make_unique<AndHashStage>(expCtx, ws);
for (size_t i = 0; i < ahn->children.size(); ++i) {
auto childStage = buildStages(opCtx, collection, cq, qsol, ahn->children[i], ws);
ret->addChild(std::move(childStage));
@@ -210,7 +211,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
}
case STAGE_OR: {
const OrNode* orn = static_cast<const OrNode*>(root);
- auto ret = std::make_unique<OrStage>(opCtx, ws, orn->dedup, orn->filter.get());
+ auto ret = std::make_unique<OrStage>(expCtx, ws, orn->dedup, orn->filter.get());
for (size_t i = 0; i < orn->children.size(); ++i) {
auto childStage = buildStages(opCtx, collection, cq, qsol, orn->children[i], ws);
ret->addChild(std::move(childStage));
@@ -219,7 +220,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
}
case STAGE_AND_SORTED: {
const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
- auto ret = std::make_unique<AndSortedStage>(opCtx, ws);
+ auto ret = std::make_unique<AndSortedStage>(expCtx, ws);
for (size_t i = 0; i < asn->children.size(); ++i) {
auto childStage = buildStages(opCtx, collection, cq, qsol, asn->children[i], ws);
ret->addChild(std::move(childStage));
@@ -232,7 +233,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
params.dedup = msn->dedup;
params.pattern = msn->sort;
params.collator = cq.getCollator();
- auto ret = std::make_unique<MergeSortStage>(opCtx, params, ws);
+ auto ret = std::make_unique<MergeSortStage>(expCtx, params, ws);
for (size_t i = 0; i < msn->children.size(); ++i) {
auto childStage = buildStages(opCtx, collection, cq, qsol, msn->children[i], ws);
ret->addChild(std::move(childStage));
@@ -254,7 +255,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
opCtx, node->index.identifier.catalogName);
invariant(twoDIndex);
- return std::make_unique<GeoNear2DStage>(params, opCtx, ws, twoDIndex);
+ return std::make_unique<GeoNear2DStage>(params, expCtx, ws, twoDIndex);
}
case STAGE_GEO_NEAR_2DSPHERE: {
const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(root);
@@ -271,7 +272,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
opCtx, node->index.identifier.catalogName);
invariant(s2Index);
- return std::make_unique<GeoNear2DSphereStage>(params, opCtx, ws, s2Index);
+ return std::make_unique<GeoNear2DSphereStage>(params, expCtx, ws, s2Index);
}
case STAGE_TEXT: {
const TextNode* node = static_cast<const TextNode*>(root);
@@ -291,7 +292,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
// created by planning a query that contains "no-op" expressions.
params.query = static_cast<FTSQueryImpl&>(*node->ftsQuery);
params.wantTextScore = cq.metadataDeps()[DocumentMetadataFields::kTextScore];
- return std::make_unique<TextStage>(opCtx, params, ws, node->filter.get());
+ return std::make_unique<TextStage>(expCtx, params, ws, node->filter.get());
}
case STAGE_SHARDING_FILTER: {
const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(root);
@@ -299,7 +300,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
auto css = CollectionShardingState::get(opCtx, collection->ns());
return std::make_unique<ShardFilterStage>(
- opCtx, css->getOwnershipFilter(opCtx), ws, std::move(childStage));
+ expCtx, css->getOwnershipFilter(opCtx), ws, std::move(childStage));
}
case STAGE_DISTINCT_SCAN: {
const DistinctNode* dn = static_cast<const DistinctNode*>(root);
@@ -320,7 +321,7 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
params.scanDirection = dn->direction;
params.bounds = dn->bounds;
params.fieldNo = dn->fieldNo;
- return std::make_unique<DistinctScan>(opCtx, std::move(params), ws);
+ return std::make_unique<DistinctScan>(expCtx, std::move(params), ws);
}
case STAGE_COUNT_SCAN: {
const CountScanNode* csn = static_cast<const CountScanNode*>(root);
@@ -342,13 +343,13 @@ std::unique_ptr<PlanStage> buildStages(OperationContext* opCtx,
params.startKeyInclusive = csn->startKeyInclusive;
params.endKey = csn->endKey;
params.endKeyInclusive = csn->endKeyInclusive;
- return std::make_unique<CountScan>(opCtx, std::move(params), ws);
+ return std::make_unique<CountScan>(expCtx, std::move(params), ws);
}
case STAGE_ENSURE_SORTED: {
const EnsureSortedNode* esn = static_cast<const EnsureSortedNode*>(root);
auto childStage = buildStages(opCtx, collection, cq, qsol, esn->children[0], ws);
return std::make_unique<EnsureSortedStage>(
- opCtx, esn->pattern, ws, std::move(childStage));
+ expCtx, esn->pattern, ws, std::move(childStage));
}
case STAGE_CACHED_PLAN:
case STAGE_CHANGE_STREAM_PROXY:
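Taken together, the buildStages() changes follow one pattern: each PlanStage is now constructed
with the CanonicalQuery's ExpressionContext rather than the bare OperationContext, while recursion
into child nodes still threads opCtx through buildStages(). A hedged sketch of the shape of a
call site, using only names that appear in the hunks above:

    // Raw pointer into the ExpressionContext owned by the CanonicalQuery.
    auto* const expCtx = cq.getExpCtx().get();
    // Children are built as before; the stage itself now takes expCtx.
    auto childStage = buildStages(opCtx, collection, cq, qsol, ln->children[0], ws);
    return std::make_unique<LimitStage>(expCtx, ln->limit, ws, std::move(childStage));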
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 96582c70720..20129013287 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -321,7 +321,8 @@ Status _checkPrecondition(OperationContext* opCtx,
// applyOps does not allow any extensions, such as $text, $where, $geoNear, $near,
// $nearSphere, or $expr.
- boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, collator, nss));
+ boost::intrusive_ptr<ExpressionContext> expCtx(
+ new ExpressionContext(opCtx, CollatorInterface::cloneCollator(collator), nss));
Matcher matcher(preCondition["res"].Obj(), std::move(expCtx));
if (!matcher.matches(realres)) {
result->append("got", realres);
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index f62634da128..8344fc7d500 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -875,7 +875,7 @@ Status _updateWithQuery(OperationContext* opCtx,
}
auto planExecutorResult = mongo::getExecutorUpdate(
- opCtx, nullptr, collection, &parsedUpdate, boost::none /* verbosity */);
+ nullptr, collection, &parsedUpdate, boost::none /* verbosity */);
if (!planExecutorResult.isOK()) {
return planExecutorResult.getStatus();
}
@@ -1004,7 +1004,7 @@ Status StorageInterfaceImpl::deleteByFilter(OperationContext* opCtx,
auto collection = collectionResult.getValue();
auto planExecutorResult = mongo::getExecutorDelete(
- opCtx, nullptr, collection, &parsedDelete, boost::none /* verbosity */);
+ nullptr, collection, &parsedDelete, boost::none /* verbosity */);
if (!planExecutorResult.isOK()) {
return planExecutorResult.getStatus();
}
diff --git a/src/mongo/db/update/addtoset_node_test.cpp b/src/mongo/db/update/addtoset_node_test.cpp
index 426d84aa0ba..9c3cac056be 100644
--- a/src/mongo/db/update/addtoset_node_test.cpp
+++ b/src/mongo/db/update/addtoset_node_test.cpp
@@ -321,9 +321,10 @@ TEST_F(AddToSetNodeTest, ApplyCreateEmptyArrayIsNotNoop) {
TEST_F(AddToSetNodeTest, ApplyDeduplicationOfElementsToAddRespectsCollation) {
auto update = fromjson("{$addToSet: {a: {$each: ['abc', 'ABC', 'def', 'abc']}}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kToLowerString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
AddToSetNode node;
ASSERT_OK(node.init(update["$addToSet"]["a"], expCtx));
@@ -341,9 +342,10 @@ TEST_F(AddToSetNodeTest, ApplyDeduplicationOfElementsToAddRespectsCollation) {
TEST_F(AddToSetNodeTest, ApplyComparisonToExistingElementsRespectsCollation) {
auto update = fromjson("{$addToSet: {a: {$each: ['abc', 'def']}}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kToLowerString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
AddToSetNode node;
ASSERT_OK(node.init(update["$addToSet"]["a"], expCtx));
@@ -385,12 +387,13 @@ DEATH_TEST_REGEX(AddToSetNodeTest,
CannotSetCollatorIfCollatorIsNonNull,
"Invariant failure.*!_collator") {
auto update = fromjson("{$addToSet: {a: 1}}");
- CollatorInterfaceMock caseInsensitiveCollator(CollatorInterfaceMock::MockType::kToLowerString);
+ auto caseInsensitiveCollator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&caseInsensitiveCollator);
+ expCtx->setCollator(std::move(caseInsensitiveCollator));
AddToSetNode node;
ASSERT_OK(node.init(update["$addToSet"]["a"], expCtx));
- node.setCollator(&caseInsensitiveCollator);
+ node.setCollator(expCtx->getCollator());
}
DEATH_TEST_REGEX(AddToSetNodeTest, CannotSetCollatorTwice, "Invariant failure.*!_collator") {
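A minimal sketch (an editor's illustration, not part of the patch) of the collator-ownership
pattern these update-node test changes converge on: the ExpressionContext takes the collator by
std::unique_ptr, and code that still needs a raw pointer borrows it back via getCollator().
The 'update' BSONObj is assumed from the surrounding test:

    // The ExpressionContext now owns the collator.
    auto collator =
        std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
    boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
    expCtx->setCollator(std::move(collator));

    // Consumers that previously held a stack-allocated collator borrow the owned one instead.
    AddToSetNode node;
    ASSERT_OK(node.init(update["$addToSet"]["a"], expCtx));
    node.setCollator(expCtx->getCollator());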
diff --git a/src/mongo/db/update/compare_node_test.cpp b/src/mongo/db/update/compare_node_test.cpp
index 698842855f0..246b933f7f7 100644
--- a/src/mongo/db/update/compare_node_test.cpp
+++ b/src/mongo/db/update/compare_node_test.cpp
@@ -296,9 +296,10 @@ TEST_F(CompareNodeTest, ApplyExistingEmbeddedDocMaxNumber) {
TEST_F(CompareNodeTest, ApplyMinRespectsCollation) {
auto update = fromjson("{$min: {a: 'dba'}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
+ expCtx->setCollator(std::move(collator));
CompareNode node(CompareNode::CompareMode::kMin);
ASSERT_OK(node.init(update["$min"]["a"], expCtx));
@@ -359,12 +360,14 @@ DEATH_TEST_REGEX(CompareNodeTest,
CannotSetCollatorIfCollatorIsNonNull,
"Invariant failure.*!_collator") {
auto update = fromjson("{$max: {a: 1}}");
- CollatorInterfaceMock caseInsensitiveCollator(CollatorInterfaceMock::MockType::kToLowerString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
+
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&caseInsensitiveCollator);
+ expCtx->setCollator(std::move(collator));
CompareNode node(CompareNode::CompareMode::kMax);
ASSERT_OK(node.init(update["$max"]["a"], expCtx));
- node.setCollator(&caseInsensitiveCollator);
+ node.setCollator(expCtx->getCollator());
}
DEATH_TEST_REGEX(CompareNodeTest, CannotSetCollatorTwice, "Invariant failure.*!_collator") {
diff --git a/src/mongo/db/update/pull_node_test.cpp b/src/mongo/db/update/pull_node_test.cpp
index 39f41ba06f1..bca1b93a18f 100644
--- a/src/mongo/db/update/pull_node_test.cpp
+++ b/src/mongo/db/update/pull_node_test.cpp
@@ -319,9 +319,10 @@ TEST_F(PullNodeTest, ApplyWithCollation) {
// With the collation, this update will pull any string whose reverse is greater than the
// reverse of the "abc" string.
auto update = fromjson("{$pull : {a: {$gt: 'abc'}}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullNode node;
ASSERT_OK(node.init(update["$pull"]["a"], expCtx));
@@ -338,9 +339,10 @@ TEST_F(PullNodeTest, ApplyWithCollation) {
TEST_F(PullNodeTest, ApplyWithCollationDoesNotAffectNonStringMatches) {
auto update = fromjson("{$pull : {a: {$lt: 1}}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullNode node;
ASSERT_OK(node.init(update["$pull"]["a"], expCtx));
@@ -357,9 +359,10 @@ TEST_F(PullNodeTest, ApplyWithCollationDoesNotAffectNonStringMatches) {
TEST_F(PullNodeTest, ApplyWithCollationDoesNotAffectRegexMatches) {
auto update = fromjson("{$pull : {a: /a/}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullNode node;
ASSERT_OK(node.init(update["$pull"]["a"], expCtx));
@@ -376,9 +379,10 @@ TEST_F(PullNodeTest, ApplyWithCollationDoesNotAffectRegexMatches) {
TEST_F(PullNodeTest, ApplyStringLiteralMatchWithCollation) {
auto update = fromjson("{$pull : {a: 'c'}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullNode node;
ASSERT_OK(node.init(update["$pull"]["a"], expCtx));
@@ -395,9 +399,10 @@ TEST_F(PullNodeTest, ApplyStringLiteralMatchWithCollation) {
TEST_F(PullNodeTest, ApplyCollationDoesNotAffectNumberLiteralMatches) {
auto update = fromjson("{$pull : {a: 99}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullNode node;
ASSERT_OK(node.init(update["$pull"]["a"], expCtx));
@@ -584,9 +589,10 @@ TEST_F(PullNodeTest, ApplyComplexDocAndMatching3) {
TEST_F(PullNodeTest, ApplyFullPredicateWithCollation) {
auto update = fromjson("{$pull: {'a.b': {x: 'blah'}}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kAlwaysEqual);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullNode node;
ASSERT_OK(node.init(update["$pull"]["a.b"], expCtx));
diff --git a/src/mongo/db/update/pullall_node_test.cpp b/src/mongo/db/update/pullall_node_test.cpp
index dd77b411dcf..6ca896d45b0 100644
--- a/src/mongo/db/update/pullall_node_test.cpp
+++ b/src/mongo/db/update/pullall_node_test.cpp
@@ -254,9 +254,10 @@ TEST_F(PullAllNodeTest, ApplyWithAllArrayElementsAndThenSome) {
TEST_F(PullAllNodeTest, ApplyWithCollator) {
auto update = fromjson("{$pullAll : {a: ['FOO', 'BAR']}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kToLowerString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kToLowerString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PullAllNode node;
ASSERT_OK(node.init(update["$pullAll"]["a"], expCtx));
diff --git a/src/mongo/db/update/push_node_test.cpp b/src/mongo/db/update/push_node_test.cpp
index 985ee81ca2c..b6fa5e59aaa 100644
--- a/src/mongo/db/update/push_node_test.cpp
+++ b/src/mongo/db/update/push_node_test.cpp
@@ -611,9 +611,10 @@ TEST_F(PushNodeTest, ApplyWithEmbeddedFieldSort) {
TEST_F(PushNodeTest, ApplySortWithCollator) {
auto update = fromjson("{$push: {a: {$each: ['ha'], $sort: 1}}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
PushNode node;
ASSERT_OK(node.init(update["$push"]["a"], expCtx));
diff --git a/src/mongo/db/update/update_driver.cpp b/src/mongo/db/update/update_driver.cpp
index 8f7024f80c1..9899fd44366 100644
--- a/src/mongo/db/update/update_driver.cpp
+++ b/src/mongo/db/update/update_driver.cpp
@@ -304,8 +304,6 @@ Status UpdateDriver::update(StringData matchedField,
}
void UpdateDriver::setCollator(const CollatorInterface* collator) {
- _expCtx->setCollator(collator);
-
if (_updateExecutor) {
_updateExecutor->setCollator(collator);
}
diff --git a/src/mongo/db/update/update_object_node_test.cpp b/src/mongo/db/update/update_object_node_test.cpp
index 1025f93fb0e..e1c0282eaab 100644
--- a/src/mongo/db/update/update_object_node_test.cpp
+++ b/src/mongo/db/update/update_object_node_test.cpp
@@ -2017,9 +2017,10 @@ TEST_F(UpdateObjectNodeTest, ChildrenShouldBeAppliedInAlphabeticalOrder) {
TEST_F(UpdateObjectNodeTest, CollatorShouldNotAffectUpdateOrder) {
auto setUpdate = fromjson("{$set: {abc: 5, cba: 6}}");
- CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ auto collator =
+ std::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString);
boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
- expCtx->setCollator(&collator);
+ expCtx->setCollator(std::move(collator));
std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> arrayFilters;
std::set<std::string> foundIdentifiers;
UpdateObjectNode root;