path: root/src/mongo/s/query/cluster_aggregation_planner.h
/**
 * Copyright (C) 2018 MongoDB Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License, version 3,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * As a special exception, the copyright holders give permission to link the
 * code of portions of this program with the OpenSSL library under certain
 * conditions as described in each individual source file and distribute
 * linked combinations including the program with the OpenSSL library. You
 * must comply with the GNU Affero General Public License in all respects
 * for all of the code used other than as permitted herein. If you modify
 * file(s) with this exception, you may extend this exception to your
 * version of the file(s), but you are not obligated to do so. If you do not
 * wish to do so, delete this exception statement from your version. If you
 * delete this exception statement from all source files in the program,
 * then also delete it in the license file.
 */

#pragma once

#include "mongo/db/pipeline/exchange_spec_gen.h"
#include "mongo/db/pipeline/lite_parsed_pipeline.h"
#include "mongo/db/pipeline/pipeline.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/query/cluster_client_cursor_impl.h"
#include "mongo/s/query/owned_remote_cursor.h"
#include "mongo/s/shard_id.h"

namespace mongo {
namespace cluster_aggregation_planner {

/**
 * Represents the two halves of a pipeline that will execute in a sharded cluster. 'shardsPipeline'
 * will execute in parallel on each shard, and 'mergePipeline' will execute on the merge host -
 * either one of the shards or a mongos.
 */
struct SplitPipeline {
    SplitPipeline(std::unique_ptr<Pipeline, PipelineDeleter> shardsPipeline,
                  std::unique_ptr<Pipeline, PipelineDeleter> mergePipeline,
                  boost::optional<BSONObj> shardCursorsSortSpec)
        : shardsPipeline(std::move(shardsPipeline)),
          mergePipeline(std::move(mergePipeline)),
          shardCursorsSortSpec(std::move(shardCursorsSortSpec)) {}

    std::unique_ptr<Pipeline, PipelineDeleter> shardsPipeline;
    std::unique_ptr<Pipeline, PipelineDeleter> mergePipeline;

    // If set, the cursors from the shards are expected to be sorted according to this spec, and to
    // have populated a "$sortKey" metadata field which can be used to compare the results.
    boost::optional<BSONObj> shardCursorsSortSpec;
};
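
// Illustrative note (not part of the original header): for a split whose shards half ends in a
// {$sort: {a: 1}} stage, 'shardCursorsSortSpec' would typically reflect that {a: 1} order, and the
// merging side would compare the "$sortKey" metadata that each shard attaches to its results.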

/**
 * Split the current Pipeline into a Pipeline for each shard, and a Pipeline that combines the
 * results within a merging process. This call also performs optimizations with the aim of reducing
 * computing time and network traffic when a pipeline has been split into two pieces.
 *
 * The 'mergePipeline' returned as part of the SplitPipeline here is not ready to execute until the
 * 'shardsPipeline' has been sent to the shards and cursors have been established. Once cursors have
 * been established, the merge pipeline can be made executable by calling
 * 'addMergeCursorsSource()', as illustrated in the sketch below.
 */
SplitPipeline splitPipeline(std::unique_ptr<Pipeline, PipelineDeleter> pipeline);

/**
 * Creates a new DocumentSourceMergeCursors from the provided 'remoteCursors' and adds it to the
 * front of 'mergePipeline'.
 */
void addMergeCursorsSource(Pipeline* mergePipeline,
                           const LiteParsedPipeline&,
                           BSONObj cmdSentToShards,
                           std::vector<OwnedRemoteCursor> remoteCursors,
                           const std::vector<ShardId>& targetedShards,
                           boost::optional<BSONObj> shardCursorsSortSpec,
                           executor::TaskExecutor*);
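
/**
 * Illustrative sketch only (not part of the original header): how splitPipeline() and
 * addMergeCursorsSource() are typically wired together. Everything other than the two functions
 * declared above ('buildCommandForShards', 'establishRemoteCursors', 'litePipe', 'targetedShards',
 * 'executor', 'opCtx') is a hypothetical stand-in for dispatch machinery that lives elsewhere.
 *
 *   SplitPipeline split = cluster_aggregation_planner::splitPipeline(std::move(pipeline));
 *
 *   // Send the shards half to the targeted shards and collect one remote cursor per shard.
 *   BSONObj cmdForShards = buildCommandForShards(*split.shardsPipeline);
 *   std::vector<OwnedRemoteCursor> cursors =
 *       establishRemoteCursors(opCtx, targetedShards, cmdForShards);
 *
 *   // Only once the shard cursors exist can the merge half be made executable: attach a
 *   // $mergeCursors source that consumes them, preserving any sort order the shards produced.
 *   cluster_aggregation_planner::addMergeCursorsSource(split.mergePipeline.get(),
 *                                                      litePipe,
 *                                                      cmdForShards,
 *                                                      std::move(cursors),
 *                                                      targetedShards,
 *                                                      split.shardCursorsSortSpec,
 *                                                      executor);
 */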

/**
 * Builds a ClusterClientCursor which will execute 'pipeline'. If 'pipeline' consists entirely of
 * $skip and $limit stages, the pipeline is eliminated entirely and replaced with a RouterExecStage
 * tree that does the same thing but avoids using a RouterStagePipeline. Avoiding a
 * RouterStagePipeline will remove an expensive conversion from BSONObj -> Document for each result.
 */
ClusterClientCursorGuard buildClusterCursor(OperationContext* opCtx,
                                            std::unique_ptr<Pipeline, PipelineDeleter> pipeline,
                                            ClusterClientCursorParams&&);
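
/**
 * Illustrative sketch only (not part of the original header): handing a merge pipeline that is
 * just [{$skip: 10}, {$limit: 5}] to buildClusterCursor(). In that case the returned cursor is
 * built from plain RouterExecStages rather than a RouterStagePipeline. 'opCtx', 'mergePipeline',
 * and 'params' are assumed to have been prepared by the caller.
 *
 *   auto cursor = cluster_aggregation_planner::buildClusterCursor(
 *       opCtx, std::move(mergePipeline), std::move(params));
 */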

struct ShardedExchangePolicy {
    // The exchange specification that will be sent to shards as part of the aggregate command.
    // It will be used by producers to determine how to distribute documents to consumers.
    ExchangeSpec exchangeSpec;

    // Shards that will run the consumer part of the exchange.
    std::vector<ShardId> consumerShards;
};

/**
 * If the merging pipeline is eligible for an $exchange merge optimization, returns the information
 * required to set that up.
 */
boost::optional<ShardedExchangePolicy> checkIfEligibleForExchange(OperationContext* opCtx,
                                                                  const Pipeline* mergePipeline);
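
/**
 * Illustrative sketch only (not part of the original header): consulting the exchange eligibility
 * check before dispatching the shards pipeline. 'opCtx' and 'split' are assumed to come from the
 * surrounding dispatch code; how the spec is attached to the shard command is described in the
 * comment rather than shown.
 *
 *   if (auto policy = cluster_aggregation_planner::checkIfEligibleForExchange(
 *           opCtx, split.mergePipeline.get())) {
 *       // 'policy->exchangeSpec' would be forwarded to the shards as part of the aggregate
 *       // command, and 'policy->consumerShards' lists the shards that run the consumer side.
 *   }
 */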
}  // namespace cluster_aggregation_planner
}  // namespace mongo