summaryrefslogtreecommitdiff
path: root/src/mongo/db/exec/update_stage.h
blob: 9393616654425f2bbffa6fa31feacbc22236c8c6 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
/**
 *    Copyright (C) 2018-present MongoDB, Inc.
 *
 *    This program is free software: you can redistribute it and/or modify
 *    it under the terms of the Server Side Public License, version 1,
 *    as published by MongoDB, Inc.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    Server Side Public License for more details.
 *
 *    You should have received a copy of the Server Side Public License
 *    along with this program. If not, see
 *    <http://www.mongodb.com/licensing/server-side-public-license>.
 *
 *    As a special exception, the copyright holders give permission to link the
 *    code of portions of this program with the OpenSSL library under certain
 *    conditions as described in each individual source file and distribute
 *    linked combinations including the program with the OpenSSL library. You
 *    must comply with the Server Side Public License in all respects for
 *    all of the code used other than as permitted herein. If you modify file(s)
 *    with this exception, you may extend this exception to your version of the
 *    file(s), but you are not obligated to do so. If you do not wish to do so,
 *    delete this exception statement from your version. If you delete this
 *    exception statement from all source files in the program, then also delete
 *    it in the license file.
 */

#pragma once


#include "mongo/db/catalog/collection.h"
#include "mongo/db/exec/requires_collection_stage.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/ops/parsed_update.h"
#include "mongo/db/ops/update_request.h"
#include "mongo/db/ops/update_result.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/update/update_driver.h"

namespace mongo {

class OperationContext;
class OpDebug;
struct PlanSummaryStats;

struct UpdateStageParams {
    UpdateStageParams(const UpdateRequest* r, UpdateDriver* d, OpDebug* o)
        : request(r), driver(d), opDebug(o), canonicalQuery(nullptr) {}

    // Contains update parameters like whether it's a multi update or an upsert. Not owned.
    // Must outlive the UpdateStage.
    const UpdateRequest* request;

    // Contains the logic for applying mods to documents. Not owned. Must outlive
    // the UpdateStage.
    UpdateDriver* driver;

    // Needed to pass to Collection::updateDocument(...). Not owned.
    OpDebug* opDebug;

    // The canonical query driving the update, if any. Starts out null; callers may set it
    // after construction. Not owned here.
    CanonicalQuery* canonicalQuery;

    // A params object is meaningless without a request and a driver, so default
    // construction is explicitly disallowed.
    UpdateStageParams() = delete;
};

/**
 * Execution stage responsible for updates to documents and upserts. If the prior or
 * newly-updated version of the document was requested to be returned, then ADVANCED is
 * returned after updating or inserting a document. Otherwise, NEED_TIME is returned after
 * updating or inserting a document.
 *
 * Callers of doWork() must be holding a write lock.
 */
class UpdateStage final : public RequiresMutableCollectionStage {
    UpdateStage(const UpdateStage&) = delete;
    UpdateStage& operator=(const UpdateStage&) = delete;

public:
    UpdateStage(OperationContext* opCtx,
                const UpdateStageParams& params,
                WorkingSet* ws,
                Collection* collection,
                PlanStage* child);

    bool isEOF() final;
    StageState doWork(WorkingSetID* out) final;

    StageType stageType() const final {
        return STAGE_UPDATE;
    }

    std::unique_ptr<PlanStageStats> getStats() final;

    const SpecificStats* getSpecificStats() const final;

    static const char* kStageType;

    /**
     * Gets a pointer to the UpdateStats inside 'exec'.
     *
     * The 'exec' must have an UPDATE stage as its root stage, and the plan must be EOF before
     * calling this method.
     */
    static const UpdateStats* getUpdateStats(const PlanExecutor* exec);

    /**
     * Populate 'opDebug' with stats from 'updateStats' describing the execution of this update.
     */
    static void recordUpdateStatsInOpDebug(const UpdateStats* updateStats, OpDebug* opDebug);

    /**
     * Converts 'updateStats' into an UpdateResult.
     */
    static UpdateResult makeUpdateResult(const UpdateStats* updateStats);

    /**
     * Computes the document to insert if the upsert flag is set to true and no matching
     * documents are found in the database. The document to upsert is computed using the
     * query 'cq' and the update mods contained in 'driver'.
     *
     * If 'cq' is null, which can happen for the idhack update fast path, then 'query' is
     * used to compute the doc to insert instead of 'cq'.
     *
     * 'doc' is the mutable BSON document which you would like the update driver to use
     * when computing the document to insert.
     *
     * Set 'isInternalRequest' to true if the upsert was issued by the replication or
     * sharding systems.
     *
     * Returns the document to insert.
     */
    static BSONObj applyUpdateOpsForInsert(OperationContext* opCtx,
                                           const CanonicalQuery* cq,
                                           const BSONObj& query,
                                           UpdateDriver* driver,
                                           mutablebson::Document* doc,
                                           bool isInternalRequest,
                                           const NamespaceString& ns,
                                           bool enforceOkForStorage,
                                           UpdateStats* stats);

    /**
     * Returns true if an update failure due to a given DuplicateKey error is eligible for retry.
     * Requires that parsedUpdate.hasParsedQuery() is true.
     */
    static bool shouldRetryDuplicateKeyException(const ParsedUpdate& parsedUpdate,
                                                 const DuplicateKeyErrorInfo& errorInfo);

protected:
    void doSaveStateRequiresCollection() final {}

    void doRestoreStateRequiresCollection() final;

private:
    static const UpdateStats kEmptyUpdateStats;

    /**
     * Returns whether a given MatchExpression contains is a MatchType::EQ or a MatchType::AND node
     * with only MatchType::EQ children.
     */
    static bool matchContainsOnlyAndedEqualityNodes(const MatchExpression& root);

    /**
     * Computes the result of applying mods to the document 'oldObj' at RecordId 'recordId' in
     * memory, then commits these changes to the database. Returns a possibly unowned copy
     * of the newly-updated version of the document.
     */
    BSONObj transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& recordId);

    /**
     * Computes the document to insert and inserts it into the collection. Used if the
     * user requested an upsert and no matching documents were found.
     */
    void doInsert();

    /**
     * Have we performed all necessary updates? Even if this is true, we might not be EOF,
     * as we might still have to do an insert.
     */
    bool doneUpdating();

    /**
     * Examines the stats / update request and returns whether there is still an insert left
     * to do. If so then this stage is not EOF yet.
     */
    bool needInsert();

    /**
     * Stores 'idToRetry' in '_idRetrying' so the update can be retried during the next call to
     * doWork(). Always returns NEED_YIELD and sets 'out' to WorkingSet::INVALID_ID.
     */
    StageState prepareToRetryWSM(WorkingSetID idToRetry, WorkingSetID* out);

    /**
     * Checks that the updated doc has all required shard key fields and throws if it does not.
     *
     * Also checks if the updated doc still belongs to this node and throws if it does not. If the
     * doc no longer belongs to this shard, this means that one or more shard key field values have
     * been updated to a value belonging to a chunk that is not owned by this shard. We cannot apply
     * this update atomically.
     *
     * If the update changes shard key fields but the new shard key remains on the same node,
     * returns true. If the update does not change shard key fields, returns false.
     */
    bool checkUpdateChangesShardKeyFields(ScopedCollectionMetadata metadata,
                                          const Snapshotted<BSONObj>& oldObj);

    UpdateStageParams _params;

    // Not owned by us. Must outlive this stage.
    WorkingSet* _ws;

    // If not WorkingSet::INVALID_ID, we use this rather than asking our child what to do next.
    WorkingSetID _idRetrying;

    // If not WorkingSet::INVALID_ID, we return this member to our caller.
    WorkingSetID _idReturning;

    // Stats tracking the execution of this stage; exposed via getSpecificStats().
    UpdateStats _specificStats;

    // True if updated documents should be validated with storage_validation::storageValid().
    bool _enforceOkForStorage;

    // True if the request should be checked for an update to the shard key.
    bool _shouldCheckForShardKeyUpdate;

    // If the update was in-place, we may see it again.  This only matters if we're doing
    // a multi-update; if we're not doing a multi-update we stop after one update and we
    // won't see any more docs.
    //
    // For example: If we're scanning an index {x:1} and performing {$inc:{x:5}}, we'll keep
    // moving the document forward and it will continue to reappear in our index scan.
    // Unless the index is multikey, the underlying query machinery won't de-dup.
    //
    // If the update wasn't in-place we may see it again.  Our query may return the new
    // document and we wouldn't want to update that.
    //
    // So, no matter what, we keep track of where the doc wound up.
    using RecordIdSet = stdx::unordered_set<RecordId, RecordId::Hasher>;
    const std::unique_ptr<RecordIdSet> _updatedRecordIds;

    // These get reused for each update.
    mutablebson::Document& _doc;
    mutablebson::DamageVector _damages;
};

}  // namespace mongo