diff options
author | Arun Banala <arun.banala@mongodb.com> | 2019-05-23 11:12:19 +0100 |
---|---|---|
committer | Arun Banala <arun.banala@mongodb.com> | 2019-05-24 14:24:51 +0100 |
commit | 7536959e9afa7e5dd0ef7bc807630630e48d5706 (patch) | |
tree | 202f6bf9dd8264f1c8de61737e48a86e1ff2ab6a /src/mongo/db/pipeline/document_source.h | |
parent | 4f034e89cc7978317d4a6ef34bc718a83ab55ecb (diff) | |
download | mongo-7536959e9afa7e5dd0ef7bc807630630e48d5706.tar.gz |
SERVER-41180 Rename 'MergingLogic' to 'DistributedPlanLogic' to avoid confusion with $merge
Diffstat (limited to 'src/mongo/db/pipeline/document_source.h')
-rw-r--r-- | src/mongo/db/pipeline/document_source.h | 6 |
1 file changed, 3 insertions, 3 deletions
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index 8bc5d419321..5e8244e7984 100644
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -220,7 +220,7 @@ public:
      * A struct representing the information needed to execute this stage on a distributed
      * collection. Describes how a pipeline should be split for sharded execution.
      */
-    struct MergingLogic {
+    struct DistributedPlanLogic {
         // A stage which executes on each shard in parallel, or nullptr if nothing can be done in
         // parallel. For example, a partial $group before a subsequent global $group.
         boost::intrusive_ptr<DocumentSource> shardsStage = nullptr;
@@ -454,10 +454,10 @@ public:
      * Otherwise, returns a struct representing what needs to be done to merge each shard's pipeline
      * into a single stream of results. Must not mutate the existing source object; if different
      * behaviour is required, a new source should be created and configured appropriately. It is an
-     * error for the returned MergingLogic to have identical pointers for 'shardsStage' and
+     * error for the returned DistributedPlanLogic to have identical pointers for 'shardsStage' and
      * 'mergingStage'.
      */
-    virtual boost::optional<MergingLogic> mergingLogic() = 0;
+    virtual boost::optional<DistributedPlanLogic> distributedPlanLogic() = 0;

     /**
      * Returns true if it would be correct to execute this stage in parallel across the shards in