/**
* Copyright (c) 2012-2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link the
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the GNU Affero General Public License in all respects for
* all of the code used other than as permitted herein. If you modify file(s)
* with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also delete
* it in the license file.
*/
#include "mongo/platform/basic.h"
#include "mongo/db/pipeline/pipeline_d.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/pipeline/document_source.h"
#include "mongo/db/pipeline/pipeline.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/s/d_state.h"
namespace mongo {

using boost::intrusive_ptr;
using std::shared_ptr;
using std::string;

namespace {
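
// Adapter that gives DocumentSources access to mongod-only functionality:
// a DBDirectClient for local operations, sharding state queries,
// capped-collection checks, and inserts that honor the document-validation
// bypass flag on the expression context.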
class MongodImplementation final : public DocumentSourceNeedsMongod::MongodInterface {
public:
    MongodImplementation(const intrusive_ptr<ExpressionContext>& ctx)
        : _ctx(ctx), _client(ctx->opCtx) {}
DBClientBase* directClient() final {
// opCtx may have changed since our last call
invariant(_ctx->opCtx);
_client.setOpCtx(_ctx->opCtx);
return &_client;
}
bool isSharded(const NamespaceString& ns) final {
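        // The collection is sharded iff the shard version we have cached for
        // this namespace is something other than the special "unsharded"
        // version (0|0).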
const ChunkVersion unsharded(0, 0, OID());
return !(shardingState.getVersion(ns.ns()).isWriteCompatibleWith(unsharded));
}
bool isCapped(const NamespaceString& ns) final {
AutoGetCollectionForRead ctx(_ctx->opCtx, ns.ns());
Collection* collection = ctx.getCollection();
return collection && collection->isCapped();
}
BSONObj insert(const NamespaceString& ns, const std::vector<BSONObj>& objs) final {
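        // If the aggregation was run with bypassDocumentValidation, suppress
        // validation for the duration of this insert via the RAII guard below.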
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (_ctx->bypassDocumentValidation)
maybeDisableValidation.emplace(_ctx->opCtx);
_client.insert(ns.ns(), objs);
return _client.getLastErrorDetailed();
}
private:
intrusive_ptr<ExpressionContext> _ctx;
DBDirectClient _client;
};
}  // namespace
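
// Prepares the query-layer source for the pipeline: injects the mongod
// interface into stages that need it, folds a leading $match (and, when the
// indexes allow it, a leading $sort) into a PlanExecutor, and installs that
// executor at the front of the pipeline as a DocumentSourceCursor. Returns
// the executor, or null if the pipeline already has a valid initial source
// and needs no cursor.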
shared_ptr<PlanExecutor> PipelineD::prepareCursorSource(
OperationContext* txn,
Collection* collection,
const intrusive_ptr<Pipeline>& pPipeline,
const intrusive_ptr<ExpressionContext>& pExpCtx) {
// get the full "namespace" name
const string& fullName = pExpCtx->ns.ns();
// We will be modifying the source vector as we go
Pipeline::SourceContainer& sources = pPipeline->sources;
    // Inject a MongodImplementation into the sources that need one.
for (size_t i = 0; i < sources.size(); i++) {
DocumentSourceNeedsMongod* needsMongod =
dynamic_cast<DocumentSourceNeedsMongod*>(sources[i].get());
if (needsMongod) {
needsMongod->injectMongodInterface(
std::make_shared<MongodImplementation>(pExpCtx));
}
}
if (!sources.empty() && sources.front()->isValidInitialSource()) {
if (dynamic_cast<DocumentSourceMergeCursors*>(sources.front().get())) {
// Enable the hooks for setting up authentication on the subsequent internal
// connections we are going to create. This would normally have been done
// when SetShardVersion was called, but since SetShardVersion is never called
// on secondaries, this is needed.
ShardedConnectionInfo::addHook();
}
return std::shared_ptr<PlanExecutor>(); // don't need a cursor
}
    // Look for an initial match. This works whether or not we were given an
    // initial query; if we weren't, this produces an empty "{}" query, which
    // is what we want in that case.
const BSONObj queryObj = pPipeline->getInitialQuery();
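    // For example, a pipeline that begins with {$match: {x: 1}} yields
    // queryObj == {x: 1}; without a leading $match, queryObj is {}.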
if (!queryObj.isEmpty()) {
        // This will get built into the Cursor we'll create, so
        // remove the match from the pipeline.
sources.pop_front();
}
// Find the set of fields in the source documents depended on by this pipeline.
const DepsTracker deps = pPipeline->getDependencies(queryObj);
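    // For example, for the pipeline [{$group: {_id: "$a", sum: {$sum: "$b"}}}]
    // only the fields "a" and "b" need to be fetched from each input document.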
    // Pass the query an empty projection, since it is faster to use
    // ParsedDeps::extractFields(). This will need to change to support covering
    // indexes (SERVER-12015). There is an exception for textScore, since that
    // can only be retrieved by a query projection.
const BSONObj projectionForQuery = deps.needTextScore ? deps.toProjection() : BSONObj();
/*
Look for an initial sort; we'll try to add this to the
Cursor we create. If we're successful in doing that (further down),
we'll remove the $sort from the pipeline, because the documents
will already come sorted in the specified order as a result of the
index scan.
*/
intrusive_ptr<DocumentSourceSort> sortStage;
BSONObj sortObj;
if (!sources.empty()) {
sortStage = dynamic_cast<DocumentSourceSort*>(sources.front().get());
if (sortStage) {
// build the sort key
sortObj = sortStage->serializeSortKey(/*explain*/false).toBson();
}
}
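    // For example, a pipeline whose first remaining stage is {$sort: {ts: -1}}
    // produces sortObj == {ts: -1}; if an index can provide that order, the
    // $sort stage is removed from the pipeline below.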
// Create the PlanExecutor.
//
    // If we try to create a PlanExecutor that includes both the match and the
    // sort, and the two are incompatible with respect to the available indexes,
    // then we don't get a PlanExecutor back.
    //
    // So we try to use both first. If that fails, we try again without the
    // sort.
    //
    // If we don't have a sort, jump straight to creating a PlanExecutor
    // without the sort.
//
// If we are able to incorporate the sort into the PlanExecutor, remove it
// from the head of the pipeline.
//
    // LATER - we should be able to find this out before we create the
    // cursor. Either way, we can then apply other optimizations for which
    // there are tickets, such as SERVER-4507.
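    //
    // For example, with queryObj {a: 1} and sortObj {b: 1} but only an index
    // on {a: 1}, the first attempt fails (NO_BLOCKING_SORT rules out an
    // in-memory sort), so we retry without the sort and the $sort stage stays
    // in the pipeline.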
    const size_t runnerOptions = QueryPlannerParams::DEFAULT |
        QueryPlannerParams::INCLUDE_SHARD_FILTER | QueryPlannerParams::NO_BLOCKING_SORT;
std::shared_ptr<PlanExecutor> exec;
bool sortInRunner = false;
const WhereCallbackReal whereCallback(pExpCtx->opCtx, pExpCtx->ns.db());
if (sortStage) {
CanonicalQuery* cq;
Status status =
CanonicalQuery::canonicalize(pExpCtx->ns,
queryObj,
sortObj,
projectionForQuery,
&cq,
whereCallback);
PlanExecutor* rawExec;
if (status.isOK() && getExecutor(txn,
collection,
cq,
PlanExecutor::YIELD_AUTO,
&rawExec,
runnerOptions).isOK()) {
// success: The PlanExecutor will handle sorting for us using an index.
exec.reset(rawExec);
sortInRunner = true;
sources.pop_front();
if (sortStage->getLimitSrc()) {
// need to reinsert coalesced $limit after removing $sort
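                // (A trailing $limit coalesces into the $sort stage so it can
                // run as a top-k sort; e.g. [{$sort: {a: 1}}, {$limit: 5}].)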
sources.push_front(sortStage->getLimitSrc());
}
}
}
    if (!exec) {
const BSONObj noSort;
CanonicalQuery* cq;
uassertStatusOK(
CanonicalQuery::canonicalize(pExpCtx->ns,
queryObj,
noSort,
projectionForQuery,
&cq,
whereCallback));
PlanExecutor* rawExec;
uassertStatusOK(getExecutor(txn,
collection,
cq,
PlanExecutor::YIELD_AUTO,
&rawExec,
runnerOptions));
exec.reset(rawExec);
}
// DocumentSourceCursor expects a yielding PlanExecutor that has had its state saved. We
// deregister the PlanExecutor so that it can be registered with ClientCursor.
exec->deregisterExec();
exec->saveState();
// Put the PlanExecutor into a DocumentSourceCursor and add it to the front of the pipeline.
intrusive_ptr<DocumentSourceCursor> pSource =
DocumentSourceCursor::create(fullName, exec, pExpCtx);
// Note the query, sort, and projection for explain.
pSource->setQuery(queryObj);
if (sortInRunner)
pSource->setSort(sortObj);
pSource->setProjection(deps.toProjection(), deps.toParsedDeps());
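    // Let the new cursor stage absorb any leading stages it can evaluate
    // itself, e.g. a leading $limit folds into the cursor's own limit.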
while (!sources.empty() && pSource->coalesce(sources.front())) {
sources.pop_front();
}
pPipeline->addInitialSource(pSource);
return exec;
}
} // namespace mongo