author     James Wahlin <james.wahlin@10gen.com>    2016-04-21 15:55:32 -0400
committer  James Wahlin <james.wahlin@10gen.com>    2016-04-21 16:48:06 -0400
commit     dc3d30af10b6859124d1ce8790ad8e097d1f06c7 (patch)
tree       4cf90aec523a535c5974007a08fcc95e0f8528bb /src
parent     1a371955e7a93f846a5ddddb19f33d6b270a3991 (diff)
download   mongo-dc3d30af10b6859124d1ce8790ad8e097d1f06c7.tar.gz
SERVER-23271 Add keysInserted and keysDeleted metrics for CRUD ops
Diffstat (limited to 'src')
57 files changed, 550 insertions, 348 deletions
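
The commit threads an optional OpDebug* through the collection and index-catalog write paths so each CRUD operation can report how many index keys it inserted and removed. Below is a minimal, self-contained sketch of that accumulation contract using stand-in types (this toy OpDebug carries only the two new counters, and nIndexes is illustrative); the real signatures are in the collection.h and index_catalog.h hunks further down.

#include <cstdint>
#include <iostream>

struct OpDebug {
    // Initialized to 0 (not -1) so CRUD code can use '+=' unconditionally.
    long long keysInserted = 0;
    long long keysDeleted = 0;
};

// Models IndexCatalog::indexRecords: reset the out-counter once, then
// accumulate one count per index touched.
void indexRecords(int nIndexes, int64_t* keysInsertedOut) {
    if (keysInsertedOut) {
        *keysInsertedOut = 0;
    }
    for (int i = 0; i < nIndexes; ++i) {
        if (keysInsertedOut) {
            *keysInsertedOut += 1;  // one key per index in this toy model
        }
    }
}

// Models Collection::_insertDocuments: report into OpDebug only when the
// caller supplied one; internal writers pass a null OpDebug*.
void insertDocument(OpDebug* opDebug) {
    int64_t keysInserted;
    indexRecords(3, &keysInserted);
    if (opDebug) {
        opDebug->keysInserted += keysInserted;
    }
}

int main() {
    OpDebug debug;
    insertDocument(&debug);
    insertDocument(nullptr);  // the 'nullOpDebug' idiom used throughout the diff
    std::cout << "keysInserted:" << debug.keysInserted << '\n';  // keysInserted:3
}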
diff --git a/src/mongo/base/error_codes.err b/src/mongo/base/error_codes.err
index 8c5eb8349e3..67d0f4af107 100644
--- a/src/mongo/base/error_codes.err
+++ b/src/mongo/base/error_codes.err
@@ -154,6 +154,7 @@ error_code("OplogOutOfOrder", 152)
error_code("ChunkTooBig", 153)
error_code("InconsistentShardIdentity", 154)
error_code("CannotApplyOplogWhilePrimary", 155)
+error_code("NeedsDocumentMove", 156)

# Non-sequential error codes (for compatibility only)
error_code("SocketException", 9001)

diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index b11a17dd1cd..2883cb26439 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -182,7 +182,9 @@ Status cloneCollectionAsCapped(OperationContext* txn,
        }

        WriteUnitOfWork wunit(txn);
-       toCollection->insertDocument(txn, objToClone.value(), true, txn->writesAreReplicated());
+       OpDebug* const nullOpDebug = nullptr;
+       toCollection->insertDocument(
+           txn, objToClone.value(), nullOpDebug, true, txn->writesAreReplicated());
        wunit.commit();

        // Go to the next document

diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 2523a4779be..d83a825903b 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -36,7 +36,6 @@
#include "mongo/db/catalog/collection.h"

-
#include "mongo/base/counter.h"
#include "mongo/base/owned_pointer_map.h"
#include "mongo/bson/ordering.h"
@@ -333,6 +332,7 @@ Status Collection::insertDocument(OperationContext* txn, const DocWriter* doc, b
Status Collection::insertDocuments(OperationContext* txn,
                                   const vector<BSONObj>::const_iterator begin,
                                   const vector<BSONObj>::const_iterator end,
+                                  OpDebug* opDebug,
                                   bool enforceQuota,
                                   bool fromMigrate) {
    // Should really be done in the collection object at creation and updated on index create.
@@ -355,7 +355,7 @@ Status Collection::insertDocuments(OperationContext* txn,
    if (_mustTakeCappedLockOnInsert)
        synchronizeOnCappedInFlightResource(txn->lockState(), _ns);

-   Status status = _insertDocuments(txn, begin, end, enforceQuota);
+   Status status = _insertDocuments(txn, begin, end, enforceQuota, opDebug);
    if (!status.isOK())
        return status;
    invariant(sid == txn->recoveryUnit()->getSnapshotId());
@@ -371,11 +371,12 @@ Status Collection::insertDocuments(OperationContext* txn,

Status Collection::insertDocument(OperationContext* txn,
                                  const BSONObj& docToInsert,
+                                 OpDebug* opDebug,
                                  bool enforceQuota,
                                  bool fromMigrate) {
    vector<BSONObj> docs;
    docs.push_back(docToInsert);
-   return insertDocuments(txn, docs.begin(), docs.end(), enforceQuota, fromMigrate);
+   return insertDocuments(txn, docs.begin(), docs.end(), opDebug, enforceQuota, fromMigrate);
}

Status Collection::insertDocument(OperationContext* txn,
@@ -418,7 +419,8 @@ Status Collection::insertDocument(OperationContext* txn,
Status Collection::_insertDocuments(OperationContext* txn,
                                    const vector<BSONObj>::const_iterator begin,
                                    const vector<BSONObj>::const_iterator end,
-                                   bool enforceQuota) {
+                                   bool enforceQuota,
+                                   OpDebug* opDebug) {
    dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));

    if (isCapped() && _indexCatalog.haveAnyIndexes() && std::distance(begin, end) > 1) {
@@ -457,7 +459,13 @@ Status Collection::_insertDocuments(OperationContext* txn,
        bsonRecords.push_back(bsonRecord);
    }

-   return _indexCatalog.indexRecords(txn, bsonRecords);
+   int64_t keysInserted;
+   status = _indexCatalog.indexRecords(txn, bsonRecords, &keysInserted);
+   if (opDebug) {
+       opDebug->keysInserted += keysInserted;
+   }
+
+   return status;
}

void Collection::notifyCappedWaitersIfNeeded() {
@@ -475,15 +483,20 @@ Status Collection::aboutToDeleteCapped(OperationContext* txn,
    _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);

    BSONObj doc = data.releaseToBson();
-   _indexCatalog.unindexRecord(txn, doc, loc, false);
+   int64_t* const nullKeysDeleted = nullptr;
+   _indexCatalog.unindexRecord(txn, doc, loc, false, nullKeysDeleted);
+
+   // We are not capturing and reporting to OpDebug the 'keysDeleted' by unindexRecord(). It is
+   // questionable whether reporting will add diagnostic value to users and may instead be
+   // confusing as it depends on our internal capped collection document removal strategy.
+   // We can consider adding either keysDeleted or a new metric reporting document removal if
+   // justified by user demand.

    return Status::OK();
}

-void Collection::deleteDocument(OperationContext* txn,
-                                const RecordId& loc,
-                                bool fromMigrate,
-                                bool noWarn) {
+void Collection::deleteDocument(
+    OperationContext* txn, const RecordId& loc, OpDebug* opDebug, bool fromMigrate, bool noWarn) {
    if (isCapped()) {
        log() << "failing remove on a capped ns " << _ns << endl;
        uasserted(10089, "cannot remove from a capped collection");
@@ -500,7 +513,11 @@ void Collection::deleteDocument(OperationContext* txn,

    /* check if any cursors point to us.  if so, advance them. */
    _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);

-   _indexCatalog.unindexRecord(txn, doc.value(), loc, noWarn);
+   int64_t keysDeleted;
+   _indexCatalog.unindexRecord(txn, doc.value(), loc, noWarn, &keysDeleted);
+   if (opDebug) {
+       opDebug->keysDeleted += keysDeleted;
+   }

    _recordStore->deleteRecord(txn, loc);

@@ -517,7 +534,7 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
                                                const BSONObj& newDoc,
                                                bool enforceQuota,
                                                bool indexesAffected,
-                                               OpDebug* debug,
+                                               OpDebug* opDebug,
                                                OplogUpdateEntryArgs* args) {
    {
        auto status = checkValidation(txn, newDoc);
@@ -597,45 +614,19 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
        }
    }

-   // This can call back into Collection::recordStoreGoingToMove. If that happens, the old
-   // object is removed from all indexes.
-   StatusWith<RecordId> newLocation = _recordStore->updateRecord(
+   Status updateStatus = _recordStore->updateRecord(
        txn, oldLocation, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota), this);

-   if (!newLocation.isOK()) {
-       return newLocation;
-   }
-
-   // At this point, the old object may or may not still be indexed, depending on if it was
-   // moved. If the object did move, we need to add the new location to all indexes.
-   if (newLocation.getValue() != oldLocation) {
-       if (debug) {
-           if (debug->nmoved == -1)  // default of -1 rather than 0
-               debug->nmoved = 1;
-           else
-               debug->nmoved += 1;
-       }
-
-       std::vector<BsonRecord> bsonRecords;
-       BsonRecord bsonRecord = {newLocation.getValue(), &newDoc};
-       bsonRecords.push_back(bsonRecord);
-       Status s = _indexCatalog.indexRecords(txn, bsonRecords);
-       if (!s.isOK())
-           return StatusWith<RecordId>(s);
-
-       invariant(sid == txn->recoveryUnit()->getSnapshotId());
-       args->updatedDoc = newDoc;
-
-       auto opObserver = getGlobalServiceContext()->getOpObserver();
-       if (opObserver)
-           opObserver->onUpdate(txn, *args);
-
-       return newLocation;
+   if (updateStatus == ErrorCodes::NeedsDocumentMove) {
+       return _updateDocumentWithMove(
+           txn, oldLocation, oldDoc, newDoc, enforceQuota, opDebug, args, sid);
+   } else if (!updateStatus.isOK()) {
+       return updateStatus;
    }

    // Object did not move.  We update each index with each respective UpdateTicket.
-
-   if (debug)
-       debug->keyUpdates = 0;
+   if (opDebug)
+       opDebug->keyUpdates = 0;

    if (indexesAffected) {
        IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true);
@@ -643,12 +634,17 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
            IndexDescriptor* descriptor = ii.next();
            IndexAccessMethod* iam = ii.accessMethod(descriptor);

-           int64_t updatedKeys;
-           Status ret = iam->update(txn, *updateTickets.mutableMap()[descriptor], &updatedKeys);
+           int64_t keysInserted;
+           int64_t keysDeleted;
+           Status ret = iam->update(
+               txn, *updateTickets.mutableMap()[descriptor], &keysInserted, &keysDeleted);
            if (!ret.isOK())
                return StatusWith<RecordId>(ret);
-           if (debug)
-               debug->keyUpdates += updatedKeys;
+           if (opDebug) {
+               opDebug->keyUpdates += keysInserted;
+               opDebug->keysInserted += keysInserted;
+               opDebug->keysDeleted += keysDeleted;
+           }
        }
    }

@@ -659,17 +655,58 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
    if (opObserver)
        opObserver->onUpdate(txn, *args);

-   return newLocation;
+   return {oldLocation};
}

-Status Collection::recordStoreGoingToMove(OperationContext* txn,
-                                          const RecordId& oldLocation,
-                                          const char* oldBuffer,
-                                          size_t oldSize) {
-   moveCounter.increment();
+StatusWith<RecordId> Collection::_updateDocumentWithMove(OperationContext* txn,
+                                                         const RecordId& oldLocation,
+                                                         const Snapshotted<BSONObj>& oldDoc,
+                                                         const BSONObj& newDoc,
+                                                         bool enforceQuota,
+                                                         OpDebug* opDebug,
+                                                         OplogUpdateEntryArgs* args,
+                                                         const SnapshotId& sid) {
+   // Insert new record.
+   StatusWith<RecordId> newLocation = _recordStore->insertRecord(
+       txn, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota));
+   if (!newLocation.isOK()) {
+       return newLocation;
+   }
+
+   invariant(newLocation.getValue() != oldLocation);
+
    _cursorManager.invalidateDocument(txn, oldLocation, INVALIDATION_DELETION);
-   _indexCatalog.unindexRecord(txn, BSONObj(oldBuffer), oldLocation, true);
-   return Status::OK();
+
+   // Remove indexes for old record.
+   int64_t keysDeleted;
+   _indexCatalog.unindexRecord(txn, oldDoc.value(), oldLocation, true, &keysDeleted);
+
+   // Remove old record.
+   _recordStore->deleteRecord(txn, oldLocation);
+
+   std::vector<BsonRecord> bsonRecords;
+   BsonRecord bsonRecord = {newLocation.getValue(), &newDoc};
+   bsonRecords.push_back(bsonRecord);
+
+   // Add indexes for new record.
+   int64_t keysInserted;
+   Status status = _indexCatalog.indexRecords(txn, bsonRecords, &keysInserted);
+   if (!status.isOK()) {
+       return StatusWith<RecordId>(status);
+   }
+
+   invariant(sid == txn->recoveryUnit()->getSnapshotId());
+   args->updatedDoc = newDoc;
+   txn->getServiceContext()->getOpObserver()->onUpdate(txn, *args);
+
+   moveCounter.increment();
+   if (opDebug) {
+       opDebug->nmoved++;
+       opDebug->keysInserted += keysInserted;
+       opDebug->keysDeleted += keysDeleted;
+   }
+
+   return newLocation;
}

Status Collection::recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) {
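
With recordStoreGoingToMove gone, the move protocol inverts: the record store reports ErrorCodes::NeedsDocumentMove and Collection::updateDocument dispatches to _updateDocumentWithMove, which performs insert-new / invalidate-cursors / unindex-old / delete-old / index-new in that order. A toy model of the dispatch follows, with stand-in types and illustrative key counts; it is a sketch of the control flow, not the real implementation.

#include <cassert>
#include <cstddef>

enum class ErrorCode { OK, NeedsDocumentMove };  // stand-in for mongo::Status

// Models RecordStoreV1Base::updateRecord: update in place when the new
// document fits, otherwise hand the move back to the caller via a status.
ErrorCode updateRecord(std::size_t oldLen, std::size_t newLen) {
    return newLen <= oldLen ? ErrorCode::OK : ErrorCode::NeedsDocumentMove;
}

// Models Collection::_updateDocumentWithMove: the collection, not the record
// store, now owns the move. Steps (per the hunk above):
//   1. insert the new record
//   2. invalidate cursors pointing at the old location
//   3. unindex the old document (counting keysDeleted)
//   4. delete the old record
//   5. index the new document (counting keysInserted)
ErrorCode updateDocumentWithMove(long long* keysInserted, long long* keysDeleted) {
    *keysDeleted += 2;  // illustrative counts: two keys per document
    *keysInserted += 2;
    return ErrorCode::OK;
}

ErrorCode updateDocument(std::size_t oldLen, std::size_t newLen,
                         long long* keysInserted, long long* keysDeleted) {
    ErrorCode status = updateRecord(oldLen, newLen);
    if (status == ErrorCode::NeedsDocumentMove) {
        return updateDocumentWithMove(keysInserted, keysDeleted);
    }
    return status;
}

int main() {
    long long ins = 0, del = 0;
    assert(updateDocument(64, 128, &ins, &del) == ErrorCode::OK);  // move path taken
    assert(ins == 2 && del == 2);
}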
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index cb7417c4871..e1c68377b10 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -250,12 +250,14 @@ public:
     * so should be ignored by the user as an internal maintenance operation and not a
     * real delete.
     * 'loc' key to uniquely identify a record in a collection.
+    * 'opDebug' Optional argument. When not null, will be used to record operation statistics.
     * 'cappedOK' if true, allows deletes on capped collections (Cloner::copyDB uses this).
     * 'noWarn' if unindexing the record causes an error, if noWarn is true the error
     *          will not be logged.
     */
    void deleteDocument(OperationContext* txn,
                        const RecordId& loc,
+                       OpDebug* opDebug,
                        bool fromMigrate = false,
                        bool noWarn = false);

@@ -263,10 +265,13 @@ public:
     * Inserts all documents inside one WUOW.
     * Caller should ensure vector is appropriately sized for this.
     * If any errors occur (including WCE), caller should retry documents individually.
+    *
+    * 'opDebug' Optional argument. When not null, will be used to record operation statistics.
     */
    Status insertDocuments(OperationContext* txn,
                           std::vector<BSONObj>::const_iterator begin,
                           std::vector<BSONObj>::const_iterator end,
+                          OpDebug* opDebug,
                           bool enforceQuota,
                           bool fromMigrate = false);

@@ -274,10 +279,12 @@ public:
     * this does NOT modify the doc before inserting
     * i.e. will not add an _id field for documents that are missing it
     *
-    * If enforceQuota is false, quotas will be ignored.
+    * 'opDebug' Optional argument. When not null, will be used to record operation statistics.
+    * 'enforceQuota' If false, quotas will be ignored.
     */
    Status insertDocument(OperationContext* txn,
                          const BSONObj& doc,
+                         OpDebug* opDebug,
                          bool enforceQuota,
                          bool fromMigrate = false);

@@ -298,6 +305,7 @@ public:
     * If the document fits in the old space, it is put there; if not, it is moved.
     * Sets 'args.updatedDoc' to the updated version of the document with damages applied, on
     * success.
+    * 'opDebug' Optional argument. When not null, will be used to record operation statistics.
     * @return the post update location of the doc (may or may not be the same as oldLocation)
     */
    StatusWith<RecordId> updateDocument(OperationContext* txn,
@@ -306,7 +314,7 @@ public:
                                        const BSONObj& newDoc,
                                        bool enforceQuota,
                                        bool indexesAffected,
-                                       OpDebug* debug,
+                                       OpDebug* opDebug,
                                        OplogUpdateEntryArgs* args);

    bool updateWithDamagesSupported() const;
@@ -437,11 +445,6 @@ private:
     */
    StatusWithMatchExpression parseValidator(const BSONObj& validator) const;

-   Status recordStoreGoingToMove(OperationContext* txn,
-                                 const RecordId& oldLocation,
-                                 const char* oldBuffer,
-                                 size_t oldSize);
-
    Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc);

    Status aboutToDeleteCapped(OperationContext* txn, const RecordId& loc, RecordData data);
@@ -456,7 +459,21 @@ private:
    Status _insertDocuments(OperationContext* txn,
                            std::vector<BSONObj>::const_iterator begin,
                            std::vector<BSONObj>::const_iterator end,
-                           bool enforceQuota);
+                           bool enforceQuota,
+                           OpDebug* opDebug);
+
+
+   /**
+    * Perform update when document move will be required.
+    */
+   StatusWith<RecordId> _updateDocumentWithMove(OperationContext* txn,
+                                                const RecordId& oldLocation,
+                                                const Snapshotted<BSONObj>& oldDoc,
+                                                const BSONObj& newDoc,
+                                                bool enforceQuota,
+                                                OpDebug* opDebug,
+                                                OplogUpdateEntryArgs* args,
+                                                const SnapshotId& sid);

    bool _enforceQuota(bool userEnforeQuota) const;
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index eb0dfa8a1b9..a64f43284ca 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -1135,7 +1135,8 @@ bool isDupsAllowed(IndexDescriptor* desc) {

Status IndexCatalog::_indexFilteredRecords(OperationContext* txn,
                                           IndexCatalogEntry* index,
-                                          const std::vector<BsonRecord>& bsonRecords) {
+                                          const std::vector<BsonRecord>& bsonRecords,
+                                          int64_t* keysInsertedOut) {
    InsertDeleteOptions options;
    options.logIfError = false;
    options.dupsAllowed = isDupsAllowed(index->descriptor());
@@ -1147,16 +1148,21 @@ Status IndexCatalog::_indexFilteredRecords(OperationContext* txn,
            txn, *bsonRecord.docPtr, bsonRecord.id, options, &inserted);
        if (!status.isOK())
            return status;
+
+       if (keysInsertedOut) {
+           *keysInsertedOut += inserted;
+       }
    }
    return Status::OK();
}

Status IndexCatalog::_indexRecords(OperationContext* txn,
                                   IndexCatalogEntry* index,
-                                  const std::vector<BsonRecord>& bsonRecords) {
+                                  const std::vector<BsonRecord>& bsonRecords,
+                                  int64_t* keysInsertedOut) {
    const MatchExpression* filter = index->getFilterExpression();
    if (!filter)
-       return _indexFilteredRecords(txn, index, bsonRecords);
+       return _indexFilteredRecords(txn, index, bsonRecords, keysInsertedOut);

    std::vector<BsonRecord> filteredBsonRecords;
    for (auto bsonRecord : bsonRecords) {
@@ -1164,14 +1170,15 @@ Status IndexCatalog::_indexRecords(OperationContext* txn,
            filteredBsonRecords.push_back(bsonRecord);
    }

-   return _indexFilteredRecords(txn, index, filteredBsonRecords);
+   return _indexFilteredRecords(txn, index, filteredBsonRecords, keysInsertedOut);
}

Status IndexCatalog::_unindexRecord(OperationContext* txn,
                                    IndexCatalogEntry* index,
                                    const BSONObj& obj,
                                    const RecordId& loc,
-                                   bool logIfError) {
+                                   bool logIfError,
+                                   int64_t* keysDeletedOut) {
    InsertDeleteOptions options;
    options.logIfError = logIfError;
    options.dupsAllowed = isDupsAllowed(index->descriptor());
@@ -1189,15 +1196,24 @@ Status IndexCatalog::_unindexRecord(OperationContext* txn,
                  << _collection->ns() << ". Status: " << status.toString();
    }

+   if (keysDeletedOut) {
+       *keysDeletedOut += removed;
+   }
+
    return Status::OK();
}

Status IndexCatalog::indexRecords(OperationContext* txn,
-                                 const std::vector<BsonRecord>& bsonRecords) {
+                                 const std::vector<BsonRecord>& bsonRecords,
+                                 int64_t* keysInsertedOut) {
+   if (keysInsertedOut) {
+       *keysInsertedOut = 0;
+   }
+
    for (IndexCatalogEntryContainer::const_iterator i = _entries.begin(); i != _entries.end();
         ++i) {
-       Status s = _indexRecords(txn, *i, bsonRecords);
+       Status s = _indexRecords(txn, *i, bsonRecords, keysInsertedOut);
        if (!s.isOK())
            return s;
    }
@@ -1208,14 +1224,19 @@ Status IndexCatalog::indexRecords(OperationContext* txn,
void IndexCatalog::unindexRecord(OperationContext* txn,
                                 const BSONObj& obj,
                                 const RecordId& loc,
-                                bool noWarn) {
+                                bool noWarn,
+                                int64_t* keysDeletedOut) {
+   if (keysDeletedOut) {
+       *keysDeletedOut = 0;
+   }
+
    for (IndexCatalogEntryContainer::const_iterator i = _entries.begin(); i != _entries.end();
         ++i) {
        IndexCatalogEntry* entry = *i;

        // If it's a background index, we DO NOT want to log anything.
        bool logIfError = entry->isReady(txn) ? !noWarn : false;
-       _unindexRecord(txn, entry, obj, loc, logIfError);
+       _unindexRecord(txn, entry, obj, loc, logIfError, keysDeletedOut);
    }
}
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 022aba664fa..2ccfef9a1d8 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -263,10 +263,25 @@ public:

    // ----- data modifiers ------

-   // this throws for now
-   Status indexRecords(OperationContext* txn, const std::vector<BsonRecord>& bsonRecords);
+   /**
+    * When 'keysInsertedOut' is not null, it will be set to the number of index keys inserted by
+    * this operation.
+    *
+    * This method may throw.
+    */
+   Status indexRecords(OperationContext* txn,
+                       const std::vector<BsonRecord>& bsonRecords,
+                       int64_t* keysInsertedOut);

-   void unindexRecord(OperationContext* txn, const BSONObj& obj, const RecordId& loc, bool noWarn);
+   /**
+    * When 'keysDeletedOut' is not null, it will be set to the number of index keys removed by
+    * this operation.
+    */
+   void unindexRecord(OperationContext* txn,
+                      const BSONObj& obj,
+                      const RecordId& loc,
+                      bool noWarn,
+                      int64_t* keysDeletedOut);

    // ------- temp internal -------

@@ -297,17 +312,20 @@ private:

    Status _indexFilteredRecords(OperationContext* txn,
                                 IndexCatalogEntry* index,
-                                const std::vector<BsonRecord>& bsonRecords);
+                                const std::vector<BsonRecord>& bsonRecords,
+                                int64_t* keysInsertedOut);

    Status _indexRecords(OperationContext* txn,
                         IndexCatalogEntry* index,
-                        const std::vector<BsonRecord>& bsonRecords);
+                        const std::vector<BsonRecord>& bsonRecords,
+                        int64_t* keysInsertedOut);

    Status _unindexRecord(OperationContext* txn,
                          IndexCatalogEntry* index,
                          const BSONObj& obj,
                          const RecordId& loc,
-                         bool logIfError);
+                         bool logIfError,
+                         int64_t* keysDeletedOut);

    /**
     * this does no sanity checks
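
Note the counting contract in the catalog: the public indexRecords/unindexRecord entry points zero the out-counter once (when non-null), while the private per-index helpers only accumulate, so the total sums correctly across every index entry and every record. A small self-contained model of that split, where Record and numKeys are stand-ins for BsonRecord and the per-document key count:

#include <cstdint>
#include <iostream>
#include <vector>

struct Record { int numKeys; };  // stand-in for BsonRecord

// Models IndexCatalog::_indexFilteredRecords: accumulate only, never reset.
void indexFilteredRecords(const std::vector<Record>& records, int64_t* keysInsertedOut) {
    for (const Record& r : records) {
        if (keysInsertedOut) {
            *keysInsertedOut += r.numKeys;
        }
    }
}

// Models IndexCatalog::indexRecords: reset once, then walk every index entry.
void indexRecords(int numIndexes, const std::vector<Record>& records,
                  int64_t* keysInsertedOut) {
    if (keysInsertedOut) {
        *keysInsertedOut = 0;
    }
    for (int i = 0; i < numIndexes; ++i) {
        indexFilteredRecords(records, keysInsertedOut);
    }
}

int main() {
    int64_t keysInserted = -1;  // stale value is overwritten by the reset
    indexRecords(2, {{1}, {3}}, &keysInserted);
    std::cout << keysInserted << '\n';  // 8: (1 + 3) keys across 2 indexes
}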
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index e7789c2ac92..cc09104ab10 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -217,7 +217,8 @@ struct Cloner::Fun {
                WriteUnitOfWork wunit(txn);

                BSONObj doc = tmp;
-               Status status = collection->insertDocument(txn, doc, true);
+               OpDebug* const nullOpDebug = nullptr;
+               Status status = collection->insertDocument(txn, doc, nullOpDebug, true);
                if (!status.isOK()) {
                    error() << "error: exception cloning object in " << from_collection << ' '
                            << status << " obj:" << doc;
@@ -610,7 +611,8 @@ Status Cloner::copyDb(OperationContext* txn,
            // dupsAllowed in IndexCatalog::_unindexRecord and SERVER-17487.
            for (set<RecordId>::const_iterator it = dups.begin(); it != dups.end(); ++it) {
                WriteUnitOfWork wunit(txn);
-               c->deleteDocument(txn, *it, false, true);
+               OpDebug* const nullOpDebug = nullptr;
+               c->deleteDocument(txn, *it, nullOpDebug, false, true);
                wunit.commit();
            }

diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 17f4b3d6be5..7d89e39e548 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -245,6 +245,7 @@ public:
        const FindAndModifyRequest& args = parseStatus.getValue();
        const NamespaceString& nsString = args.getNamespaceString();
+       OpDebug* opDebug = &CurOp::get(txn)->debug();

        if (args.isRemove()) {
            DeleteRequest request(nsString);
@@ -269,7 +270,8 @@ public:
            css->checkShardVersionOrThrow(txn);

            Collection* const collection = autoColl.getCollection();
-           auto statusWithPlanExecutor = getExecutorDelete(txn, collection, &parsedDelete);
+           auto statusWithPlanExecutor =
+               getExecutorDelete(txn, opDebug, collection, &parsedDelete);
            if (!statusWithPlanExecutor.isOK()) {
                return statusWithPlanExecutor.getStatus();
            }
@@ -288,8 +290,6 @@ public:
                return parsedUpdateStatus;
            }

-           OpDebug* opDebug = &CurOp::get(txn)->debug();
-
            // Explain calls of the findAndModify command are read-only, but we take write
            // locks so that the timing information is more accurate.
            AutoGetCollection autoColl(txn, nsString, MODE_IX);
@@ -303,7 +303,7 @@ public:

            Collection* collection = autoColl.getCollection();
            auto statusWithPlanExecutor =
-               getExecutorUpdate(txn, collection, &parsedUpdate, opDebug);
+               getExecutorUpdate(txn, opDebug, collection, &parsedUpdate);
            if (!statusWithPlanExecutor.isOK()) {
                return statusWithPlanExecutor.getStatus();
            }
@@ -353,6 +353,8 @@ public:
            lastOpSetterGuard.Dismiss();
        }

+       OpDebug* opDebug = &CurOp::get(txn)->debug();
+
        // Although usually the PlanExecutor handles WCE internally, it will throw WCEs when it is
        // executing a findAndModify. This is done to ensure that we can always match, modify, and
        // return the document under concurrency, if a matching document exists.
@@ -387,7 +389,8 @@ public:
            }

            Collection* const collection = autoDb.getDb()->getCollection(nsString.ns());
-           auto statusWithPlanExecutor = getExecutorDelete(txn, collection, &parsedDelete);
+           auto statusWithPlanExecutor =
+               getExecutorDelete(txn, opDebug, collection, &parsedDelete);
            if (!statusWithPlanExecutor.isOK()) {
                return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
            }
@@ -425,8 +428,6 @@ public:
                return appendCommandStatus(result, parsedUpdateStatus);
            }

-           OpDebug* opDebug = &CurOp::get(txn)->debug();
-
            AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
            Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
@@ -476,7 +477,7 @@ public:
            }

            auto statusWithPlanExecutor =
-               getExecutorUpdate(txn, collection, &parsedUpdate, opDebug);
+               getExecutorUpdate(txn, opDebug, collection, &parsedUpdate);
            if (!statusWithPlanExecutor.isOK()) {
                return appendCommandStatus(result, statusWithPlanExecutor.getStatus());
            }
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 8f125382095..155525578ee 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -707,7 +707,10 @@ void State::insert(const string& ns, const BSONObj& o) {
            if (!res.getValue().isEmpty()) {
                bo = res.getValue();
            }
-           uassertStatusOK(coll->insertDocument(_txn, bo, true));
+
+           // TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261.
+           OpDebug* const nullOpDebug = nullptr;
+           uassertStatusOK(coll->insertDocument(_txn, bo, nullOpDebug, true));
            wuow.commit();
        }
        MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R insert", ns);
@@ -737,7 +740,10 @@ void State::_insertToInc(BSONObj& o) {
                          << ". size in bytes: " << o.objsize()
                          << ", max size: " << BSONObjMaxUserSize);
        }
-       uassertStatusOK(coll->insertDocument(_txn, o, true, false));
+
+       // TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261.
+       OpDebug* const nullOpDebug = nullptr;
+       uassertStatusOK(coll->insertDocument(_txn, o, nullOpDebug, true, false));
        wuow.commit();
    }
    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R insertToInc", _config.incLong);

diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index b286aa3f252..5bcfe71e365 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -101,7 +101,8 @@ public:
                return false;
            }
        }
-       Status status = collection->insertDocument(txn, obj, false);
+       OpDebug* const nullOpDebug = nullptr;
+       Status status = collection->insertDocument(txn, obj, nullOpDebug, false);
        if (status.isOK()) {
            wunit.commit();
        }

diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index e20696f3394..7886c225b2a 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -929,7 +929,8 @@ static void insertOne(WriteBatchExecutor::ExecInsertsState* state, WriteOpResult
                                                   state->getCollection()->ns().ns(), MODE_IX));
            WriteUnitOfWork wunit(txn);
-           Status status = state->getCollection()->insertDocument(txn, insertDoc, true);
+           Status status = state->getCollection()->insertDocument(
+               txn, insertDoc, &CurOp::get(txn)->debug(), true);

            if (status.isOK()) {
                result->getStats().n++;
@@ -1118,7 +1119,7 @@ static void multiUpdate(OperationContext* txn,
        try {
            invariant(collection);
            std::unique_ptr<PlanExecutor> exec =
-               uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug));
+               uassertStatusOK(getExecutorUpdate(txn, debug, collection, &parsedUpdate));

            uassertStatusOK(exec->executePlan());

@@ -1205,6 +1206,8 @@ static void multiRemove(OperationContext* txn,
        lastOpSetterGuard.Dismiss();
    }

+   OpDebug* opDebug = &CurOp::get(txn)->debug();
+
    int attempt = 1;
    while (1) {
        try {
@@ -1239,7 +1242,7 @@ static void multiRemove(OperationContext* txn,
            auto collection = autoDb.getDb()->getCollection(nss);

            std::unique_ptr<PlanExecutor> exec =
-               uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete));
+               uassertStatusOK(getExecutorDelete(txn, opDebug, collection, &parsedDelete));

            // Execute the delete and retrieve the number deleted.
            uassertStatusOK(exec->executePlan());
@@ -1250,6 +1253,7 @@ static void multiRemove(OperationContext* txn,
            if (collection) {
                collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed);
            }
+
            CurOp::get(txn)->debug().setPlanSummaryMetrics(summary);

            if (repl::ReplClientInfo::forClient(client).getLastOp() != lastOpAtOperationStart) {
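
On the command side, the pattern is uniform: each write path fetches the CurOp-owned OpDebug once and hands it to getExecutorUpdate/getExecutorDelete, where OpDebug* is now the second parameter. A rough sketch of that wiring with stand-in types; the toy CurOp::get singleton merely stands in for the real per-operation lookup, and the PlanExecutor here is a bare struct.

#include <memory>

struct OpDebug { long long keysInserted = 0; long long keysDeleted = 0; };

struct CurOp {
    OpDebug& debug() { return _debug; }
    static CurOp* get(void* opCtx) {  // toy singleton; the real lookup is per-operation
        (void)opCtx;
        static CurOp op;
        return &op;
    }
private:
    OpDebug _debug;
};

struct PlanExecutor { OpDebug* opDebug; };

// Models getExecutorDelete(txn, opDebug, collection, parsedDelete): the
// OpDebug pointer rides along in the stage params so the delete stage can
// report keysDeleted when it commits each delete.
std::unique_ptr<PlanExecutor> getExecutorDelete(void* txn, OpDebug* opDebug) {
    (void)txn;
    return std::unique_ptr<PlanExecutor>(new PlanExecutor{opDebug});
}

int main() {
    void* txn = nullptr;
    OpDebug* opDebug = &CurOp::get(txn)->debug();  // fetched once per operation
    auto exec = getExecutorDelete(txn, opDebug);
    exec->opDebug->keysDeleted += 1;  // what DeleteStage does via Collection::deleteDocument
}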
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 7e56675ef3c..ee86ad4be96 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -178,6 +178,8 @@ Status WriteCmd::explain(OperationContext* txn,
    // Get a reference to the singleton batch item (it's the 0th item in the batch).
    BatchItemRef batchItem(&request, 0);

+   OpDebug* opDebug = &CurOp::get(txn)->debug();
+
    if (BatchedCommandRequest::BatchType_Update == _writeType) {
        // Create the update request.
        UpdateRequest updateRequest(request.getNS());
@@ -192,8 +194,6 @@ Status WriteCmd::explain(OperationContext* txn,
        // Explained updates can yield.
        updateRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);

-       OpDebug* debug = &CurOp::get(txn)->debug();
-
        ParsedUpdate parsedUpdate(txn, &updateRequest);
        Status parseStatus = parsedUpdate.parseRequest();
        if (!parseStatus.isOK()) {
@@ -212,7 +212,7 @@ Status WriteCmd::explain(OperationContext* txn,
        }

        std::unique_ptr<PlanExecutor> exec =
-           uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug));
+           uassertStatusOK(getExecutorUpdate(txn, opDebug, collection, &parsedUpdate));

        // Explain the plan tree.
        Explain::explainStages(exec.get(), verbosity, out);
@@ -248,7 +248,7 @@ Status WriteCmd::explain(OperationContext* txn,
        }

        std::unique_ptr<PlanExecutor> exec =
-           uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete));
+           uassertStatusOK(getExecutorDelete(txn, opDebug, collection, &parsedDelete));

        // Explain the plan tree.
        Explain::explainStages(exec.get(), verbosity, out);

diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 6b8618b3843..086f0215b82 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -517,7 +517,6 @@ string OpDebug::report(const CurOp& curop, const SingleThreadedLockStats& lockSt
    OPDEBUG_TOSTRING_HELP_BOOL(hasSortStage);
    OPDEBUG_TOSTRING_HELP_BOOL(fromMultiPlanner);
    OPDEBUG_TOSTRING_HELP_BOOL(replanned);
-   OPDEBUG_TOSTRING_HELP(nmoved);
    OPDEBUG_TOSTRING_HELP(nMatched);
    OPDEBUG_TOSTRING_HELP(nModified);
    OPDEBUG_TOSTRING_HELP(ninserted);
@@ -528,6 +527,18 @@ string OpDebug::report(const CurOp& curop, const SingleThreadedLockStats& lockSt
    OPDEBUG_TOSTRING_HELP_BOOL(cursorExhausted);

    OPDEBUG_TOSTRING_HELP(keyUpdates);

+   if (nmoved > 0) {
+       s << " nmoved:" << nmoved;
+   }
+
+   if (keysInserted > 0) {
+       s << " keysInserted:" << keysInserted;
+   }
+
+   if (keysDeleted > 0) {
+       s << " keysDeleted:" << keysDeleted;
+   }
+
    if (writeConflicts > 0) {
        s << " writeConflicts:" << writeConflicts;
    }
@@ -638,8 +649,6 @@ void OpDebug::append(const CurOp& curop,
    OPDEBUG_APPEND_BOOL(hasSortStage);
    OPDEBUG_APPEND_BOOL(fromMultiPlanner);
    OPDEBUG_APPEND_BOOL(replanned);
-   OPDEBUG_APPEND_BOOL(moved);
-   OPDEBUG_APPEND_NUMBER(nmoved);
    OPDEBUG_APPEND_NUMBER(nMatched);
    OPDEBUG_APPEND_NUMBER(nModified);
    OPDEBUG_APPEND_NUMBER(ninserted);
@@ -649,6 +658,19 @@ void OpDebug::append(const CurOp& curop,
    OPDEBUG_APPEND_BOOL(upsert);
    OPDEBUG_APPEND_BOOL(cursorExhausted);
    OPDEBUG_APPEND_NUMBER(keyUpdates);
+   OPDEBUG_APPEND_BOOL(moved);
+
+   if (nmoved > 0) {
+       b.appendNumber("nmoved", nmoved);
+   }
+
+   if (keysInserted > 0) {
+       b.appendNumber("keysInserted", keysInserted);
+   }
+
+   if (keysDeleted > 0) {
+       b.appendNumber("keysDeleted", keysDeleted);
+   }

    if (writeConflicts > 0) {
        b.appendNumber("writeConflicts", writeConflicts);
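
Unlike the older counters that default to -1, the new nmoved/keysInserted/keysDeleted fields start at 0 and are only emitted when positive, so reads and no-op writes do not grow the profiler output. A compilable miniature of that reporting behavior; this OpDebug::report is a reduced stand-in for the real method:

#include <iostream>
#include <sstream>
#include <string>

struct OpDebug {
    long long nmoved = 0;
    long long keysInserted = 0;
    long long keysDeleted = 0;

    // Mirrors the curop.cpp change: each counter is appended only when > 0.
    std::string report() const {
        std::ostringstream s;
        if (nmoved > 0)
            s << " nmoved:" << nmoved;
        if (keysInserted > 0)
            s << " keysInserted:" << keysInserted;
        if (keysDeleted > 0)
            s << " keysDeleted:" << keysDeleted;
        return s.str();
    }
};

int main() {
    OpDebug debug;
    std::cout << "[" << debug.report() << "]\n";  // []: nothing emitted at defaults
    debug.keysInserted = 4;
    debug.keysDeleted = 4;
    std::cout << "[" << debug.report() << "]\n";  // [ keysInserted:4 keysDeleted:4]
}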
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index 06539aeaacd..9d98191a0b5 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -184,15 +184,20 @@ public:

    long long nMatched{-1};   // number of records that match the query
    long long nModified{-1};  // number of records written (no no-ops)
-   long long nmoved{-1};     // updates resulted in a move (moves are expensive)
    long long ninserted{-1};
    long long ndeleted{-1};
    bool fastmod{false};
    bool fastmodinsert{false};  // upsert of an $operation.  builds a default object
    bool upsert{false};         // true if the update actually did an insert
    bool cursorExhausted{
-       false};  // true if the cursor has been closed at end a find/getMore operation
-   int keyUpdates{-1};
+       false};              // true if the cursor has been closed at end a find/getMore operation
+   int keyUpdates{-1};      // TODO SERVER-23272: Remove this metric.
+
+   // The following metrics are initialized with 0 rather than -1 in order to simplify use by the
+   // CRUD path.
+   long long nmoved{0};        // updates resulted in a move (moves are expensive)
+   long long keysInserted{0};  // Number of index keys inserted.
+   long long keysDeleted{0};   // Number of index keys removed.
    long long writeConflicts{0};

    // New Query Framework debugging/profiling info

diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 6242ddaf8cc..84e696aabfd 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -256,7 +256,9 @@ static void logStartup(OperationContext* txn) {
        collection = db->getCollection(startupLogCollectionName);
    }
    invariant(collection);
-   uassertStatusOK(collection->insertDocument(txn, o, false));
+
+   OpDebug* const nullOpDebug = nullptr;
+   uassertStatusOK(collection->insertDocument(txn, o, nullOpDebug, false));
    wunit.commit();
}

diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index fa7824f0768..b266fa268e9 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -440,7 +440,8 @@ long long Helpers::removeRange(OperationContext* txn,
                if (callback)
                    callback->goingToDelete(obj);

-               collection->deleteDocument(txn, rloc, fromMigrate);
+               OpDebug* const nullOpDebug = nullptr;
+               collection->deleteDocument(txn, rloc, nullOpDebug, fromMigrate);
                wuow.commit();
                numDeleted++;
            }

diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 0e9d7d28fa0..e1e88a3333a 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -34,6 +34,7 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/curop.h"
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/exec/write_stage_common.h"
@@ -214,7 +215,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
        if (!_params.isExplain) {
            try {
                WriteUnitOfWork wunit(getOpCtx());
-               _collection->deleteDocument(getOpCtx(), recordId, _params.fromMigrate);
+               _collection->deleteDocument(getOpCtx(), recordId, _params.opDebug, _params.fromMigrate);
                wunit.commit();
            } catch (const WriteConflictException& wce) {
                memberFreer.Dismiss();  // Keep this member around so we can retry deleting it.

diff --git a/src/mongo/db/exec/delete.h b/src/mongo/db/exec/delete.h
index 9a71c597e63..5c2435a2122 100644
--- a/src/mongo/db/exec/delete.h
+++ b/src/mongo/db/exec/delete.h
@@ -35,6 +35,7 @@
namespace mongo {

class CanonicalQuery;
+class OpDebug;
class OperationContext;
class PlanExecutor;

@@ -44,7 +45,8 @@ struct DeleteStageParams {
          fromMigrate(false),
          isExplain(false),
          returnDeleted(false),
-         canonicalQuery(nullptr) {}
+         canonicalQuery(nullptr),
+         opDebug(nullptr) {}

    // Should we delete all documents returned from the child (a "multi delete"), or at most one
    // (a "single delete")?
@@ -65,6 +67,9 @@ struct DeleteStageParams {

    // The user-requested sort specification. Currently used just for findAndModify.
    BSONObj sort;
+
+   // Optional. When not null, delete metrics are recorded here.
+   OpDebug* opDebug;
};

/**
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index 1b52796a5a5..31b7147a90b 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -740,7 +740,7 @@ void UpdateStage::doInsert() {
    invariant(_collection);
    const bool enforceQuota = !request->isGod();
    uassertStatusOK(_collection->insertDocument(
-       getOpCtx(), newObj, enforceQuota, request->isFromMigration()));
+       getOpCtx(), newObj, _params.opDebug, enforceQuota, request->isFromMigration()));

    // Technically, we should save/restore state here, but since we are going to return
    // immediately after, it would just be wasted work.

diff --git a/src/mongo/db/exec/update.h b/src/mongo/db/exec/update.h
index c39717f9748..5548d0f1192 100644
--- a/src/mongo/db/exec/update.h
+++ b/src/mongo/db/exec/update.h
@@ -39,6 +39,7 @@
namespace mongo {

class OperationContext;
+class OpDebug;
struct PlanSummaryStats;

struct UpdateStageParams {

diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index f99d3104c30..b10bc6add71 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -102,8 +102,8 @@ Status IndexAccessMethod::insert(OperationContext* txn,
                                 const RecordId& loc,
                                 const InsertDeleteOptions& options,
                                 int64_t* numInserted) {
+   invariant(numInserted);
    *numInserted = 0;
-
    BSONObjSet keys;
    // Delegate to the subclass.
    getKeys(obj, &keys);
@@ -179,9 +179,10 @@ Status IndexAccessMethod::remove(OperationContext* txn,
                                 const RecordId& loc,
                                 const InsertDeleteOptions& options,
                                 int64_t* numDeleted) {
+   invariant(numDeleted);
+   *numDeleted = 0;
    BSONObjSet keys;
    getKeys(obj, &keys);
-   *numDeleted = 0;

    for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
        removeOneKey(txn, *i, loc, options.dupsAllowed);
@@ -291,7 +292,14 @@ Status IndexAccessMethod::validateUpdate(OperationContext* txn,

Status IndexAccessMethod::update(OperationContext* txn,
                                 const UpdateTicket& ticket,
-                                int64_t* numUpdated) {
+                                int64_t* numInserted,
+                                int64_t* numDeleted) {
+   invariant(numInserted);
+   invariant(numDeleted);
+
+   *numInserted = 0;
+   *numDeleted = 0;
+
    if (!ticket._isValid) {
        return Status(ErrorCodes::InternalError, "Invalid UpdateTicket in update");
    }
@@ -317,7 +325,8 @@ Status IndexAccessMethod::update(OperationContext* txn,
        }
    }

-   *numUpdated = ticket.added.size();
+   *numInserted = ticket.added.size();
+   *numDeleted = ticket.removed.size();

    return Status::OK();
}

diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index f0376af716a..1509500d916 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -71,7 +71,7 @@ public:
    /**
     * Internally generate the keys {k1, ..., kn} for 'obj'.  For each key k, insert (k ->
-    * 'loc') into the index.  'obj' is the object at the location 'loc'.  If not NULL,
+    * 'loc') into the index.  'obj' is the object at the location 'loc'.
     * 'numInserted' will be set to the number of keys added to the index for the document.  If
     * there is more than one key for 'obj', either all keys will be inserted or none will.
     *
@@ -84,8 +84,8 @@ public:
                  int64_t* numInserted);

    /**
-    * Analogous to above, but remove the records instead of inserting them.  If not NULL,
-    * numDeleted will be set to the number of keys removed from the index for the document.
+    * Analogous to above, but remove the records instead of inserting them.
+    * 'numDeleted' will be set to the number of keys removed from the index for the document.
     */
    Status remove(OperationContext* txn,
                  const BSONObj& obj,
@@ -118,8 +118,14 @@ public:
     * 'from' will remain.  Assumes that the index has not changed since validateUpdate was
     * called.  If the index was changed, we may return an error, as our ticket may have been
     * invalidated.
+    *
+    * 'numInserted' will be set to the number of keys inserted into the index for the document.
+    * 'numDeleted' will be set to the number of keys removed from the index for the document.
     */
-   Status update(OperationContext* txn, const UpdateTicket& ticket, int64_t* numUpdated);
+   Status update(OperationContext* txn,
+                 const UpdateTicket& ticket,
+                 int64_t* numInserted,
+                 int64_t* numDeleted);

    /**
     * Returns an unpositioned cursor over 'this' index.
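
IndexAccessMethod::update previously reported a single numUpdated; it now surfaces both directions of the change, taking the counts straight from the UpdateTicket's added and removed key sets and asserting that both out-parameters are supplied. A toy version of that contract, where the ticket's keys are plain strings rather than BSONObj:

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

struct UpdateTicket {
    std::vector<std::string> added;    // keys the update will insert
    std::vector<std::string> removed;  // keys the update will remove
};

void update(const UpdateTicket& ticket, int64_t* numInserted, int64_t* numDeleted) {
    assert(numInserted);  // the new code invariants that both are non-null
    assert(numDeleted);
    *numInserted = static_cast<int64_t>(ticket.added.size());
    *numDeleted = static_cast<int64_t>(ticket.removed.size());
}

int main() {
    UpdateTicket ticket{{"a", "b"}, {"c"}};
    int64_t ins = 0, del = 0;
    update(ticket, &ins, &del);
    assert(ins == 2 && del == 1);  // both directions now visible to OpDebug
}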
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index bce793e9c6c..4ad5616781a 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -719,7 +719,7 @@ void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Mess
        // The common case: no implicit collection creation
        if (!upsert || collection != NULL) {
            unique_ptr<PlanExecutor> exec =
-               uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, &op.debug()));
+               uassertStatusOK(getExecutorUpdate(txn, &op.debug(), collection, &parsedUpdate));

            // Run the plan and get stats out.
            uassertStatusOK(exec->executePlan());
@@ -786,7 +786,7 @@ void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Mess
        auto collection = ctx.db()->getCollection(nsString);
        invariant(collection);
        unique_ptr<PlanExecutor> exec =
-           uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, &op.debug()));
+           uassertStatusOK(getExecutorUpdate(txn, &op.debug(), collection, &parsedUpdate));

        // Run the plan and get stats out.
        uassertStatusOK(exec->executePlan());
@@ -865,7 +865,7 @@ void receivedDelete(OperationContext* txn, const NamespaceString& nsString, Mess
        auto collection = ctx.db()->getCollection(nsString);

        unique_ptr<PlanExecutor> exec =
-           uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete));
+           uassertStatusOK(getExecutorDelete(txn, &op.debug(), collection, &parsedDelete));

        // Run the plan and get the number of docs deleted.
        uassertStatusOK(exec->executePlan());
@@ -984,7 +984,7 @@ void insertMultiSingletons(OperationContext* txn,
                    invariant(collection);
                }

-               uassertStatusOK(collection->insertDocument(txn, *it, true));
+               uassertStatusOK(collection->insertDocument(txn, *it, &op.debug(), true));
                wouw.commit();
            }
            MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "insert", ns);
@@ -1015,7 +1015,7 @@ void insertMultiVector(OperationContext* txn,
            invariant(collection);
        }

-       uassertStatusOK(collection->insertDocuments(txn, begin, end, true, false));
+       uassertStatusOK(collection->insertDocuments(txn, begin, end, &op.debug(), true, false));
        wunit.commit();

        int inserted = end - begin;
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 1893f38b87e..55ae0dba84d 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -132,7 +132,8 @@ void profile(OperationContext* txn, NetworkOp op) {
            Collection* const coll = db->getCollection(db->getProfilingNS());
            if (coll) {
                WriteUnitOfWork wuow(txn);
-               coll->insertDocument(txn, p, false);
+               OpDebug* const nullOpDebug = nullptr;
+               coll->insertDocument(txn, p, nullOpDebug, false);
                wuow.commit();

                break;

diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index 870deb66dd5..7f509308ad4 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -66,8 +66,8 @@ long long deleteObjects(OperationContext* txn,
    auto client = txn->getClient();
    auto lastOpAtOperationStart = repl::ReplClientInfo::forClient(client).getLastOp();

-   std::unique_ptr<PlanExecutor> exec =
-       uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete));
+   std::unique_ptr<PlanExecutor> exec = uassertStatusOK(
+       getExecutorDelete(txn, &CurOp::get(txn)->debug(), collection, &parsedDelete));

    uassertStatusOK(exec->executePlan());

diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 010abf6be3b..82a816e5b3c 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -114,7 +114,7 @@ UpdateResult update(OperationContext* txn,
    uassertStatusOK(parsedUpdate.parseRequest());

    std::unique_ptr<PlanExecutor> exec =
-       uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, opDebug));
+       uassertStatusOK(getExecutorUpdate(txn, opDebug, collection, &parsedUpdate));

    uassertStatusOK(exec->executePlan());

    if (repl::ReplClientInfo::forClient(client).getLastOp() != lastOpAtOperationStart) {

diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index d2c70e1d673..933f60f34f8 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -637,6 +637,7 @@ StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
//

StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
+                                                      OpDebug* opDebug,
                                                       Collection* collection,
                                                       ParsedDelete* parsedDelete) {
    const DeleteRequest* request = parsedDelete->getRequest();
@@ -672,6 +673,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
    deleteStageParams.isExplain = request->isExplain();
    deleteStageParams.returnDeleted = request->shouldReturnDeleted();
    deleteStageParams.sort = request->getSort();
+   deleteStageParams.opDebug = opDebug;

    unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
    PlanExecutor::YieldPolicy policy =
@@ -777,9 +779,9 @@ inline void validateUpdate(const char* ns, const BSONObj& updateobj, const BSONO
}  // namespace

StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
+                                                      OpDebug* opDebug,
                                                       Collection* collection,
-                                                      ParsedUpdate* parsedUpdate,
-                                                      OpDebug* opDebug) {
+                                                      ParsedUpdate* parsedUpdate) {
    const UpdateRequest* request = parsedUpdate->getRequest();
    UpdateDriver* driver = parsedUpdate->getDriver();
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index 49b83166ac5..34c8cf0f0e8 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -145,6 +145,7 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn
 * If the query cannot be executed, returns a Status indicating why.
 */
StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn,
+                                                           OpDebug* opDebug,
                                                            Collection* collection,
                                                            ParsedDelete* parsedDelete);

@@ -164,9 +165,9 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* tx
 * If the query cannot be executed, returns a Status indicating why.
 */
StatusWith<std::unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn,
+                                                           OpDebug* opDebug,
                                                            Collection* collection,
-                                                           ParsedUpdate* parsedUpdate,
-                                                           OpDebug* opDebug);
+                                                           ParsedUpdate* parsedUpdate);

/**
 * Get a PlanExecutor for a group operation.

diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 94602815666..50f0dc5f039 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -488,8 +488,9 @@ OpTime writeOpsToOplog(OperationContext* txn, const std::vector<BSONObj>& ops) {
        OldClientContext ctx(txn, rsOplogName, _localDB);
        WriteUnitOfWork wunit(txn);

-       checkOplogInsert(
-           _localOplogCollection->insertDocuments(txn, ops.begin(), ops.end(), false));
+       OpDebug* const nullOpDebug = nullptr;
+       checkOplogInsert(_localOplogCollection->insertDocuments(
+           txn, ops.begin(), ops.end(), nullOpDebug, false));
        lastOptime =
            fassertStatusOK(ErrorCodes::InvalidBSON, OpTime::parseFromOplogEntry(ops.back()));
        wunit.commit();
@@ -820,7 +821,9 @@ Status applyOperation_inlock(OperationContext* txn,
            }

            WriteUnitOfWork wuow(txn);
-           status = collection->insertDocuments(txn, insertObjs.begin(), insertObjs.end(), true);
+           OpDebug* const nullOpDebug = nullptr;
+           status = collection->insertDocuments(
+               txn, insertObjs.begin(), insertObjs.end(), nullOpDebug, true);
            if (!status.isOK()) {
                return status;
            }
@@ -851,7 +854,8 @@ Status applyOperation_inlock(OperationContext* txn,
            {
                WriteUnitOfWork wuow(txn);
                try {
-                   status = collection->insertDocument(txn, o, true);
+                   OpDebug* const nullOpDebug = nullptr;
+                   status = collection->insertDocument(txn, o, nullOpDebug, true);
                } catch (DBException dbe) {
                    status = dbe.toStatus();
                }

diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index a50570d4160..4f1ba8a5cbe 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -772,9 +772,12 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
            coll = autoDb.getDb()->createCollection(_txn.get(), "test.t");
        }
        ASSERT(coll);
-       ASSERT_OK(coll->insertDocument(_txn.get(), BSON("_id" << 1 << "v" << 2), false));
-       ASSERT_OK(coll->insertDocument(_txn.get(), BSON("_id" << 2 << "v" << 4), false));
-       ASSERT_OK(coll->insertDocument(_txn.get(), BSON("_id" << 4), false));
+       OpDebug* const nullOpDebug = nullptr;
+       ASSERT_OK(
+           coll->insertDocument(_txn.get(), BSON("_id" << 1 << "v" << 2), nullOpDebug, false));
+       ASSERT_OK(
+           coll->insertDocument(_txn.get(), BSON("_id" << 2 << "v" << 4), nullOpDebug, false));
+       ASSERT_OK(coll->insertDocument(_txn.get(), BSON("_id" << 4), nullOpDebug, false));
        wuow.commit();
    }
    const auto commonOperation =
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 0c8d5af2264..d151976c6f8 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -946,7 +946,8 @@ bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) {
            Collection* const coll = db->getOrCreateCollection(txn, nss.toString());
            invariant(coll);

-           Status status = coll->insertDocument(txn, missingObj, true);
+           OpDebug* const nullOpDebug = nullptr;
+           Status status = coll->insertDocument(txn, missingObj, nullOpDebug, true);
            uassert(15917,
                    str::stream() << "failed to insert missing doc: " << status.toString(),
                    status.isOK());

diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
index 084761c01ea..3ff3ae5ce96 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
@@ -111,13 +111,13 @@ public:
        return StatusWith<RecordId>(RecordId(6, 4));
    }

-   virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
-                                             const RecordId& oldLocation,
-                                             const char* data,
-                                             int len,
-                                             bool enforceQuota,
-                                             UpdateNotifier* notifier) {
-       return StatusWith<RecordId>(oldLocation);
+   virtual Status updateRecord(OperationContext* txn,
+                               const RecordId& oldLocation,
+                               const char* data,
+                               int len,
+                               bool enforceQuota,
+                               UpdateNotifier* notifier) {
+       return Status::OK();
    }

    virtual bool updateWithDamagesSupported() const {

diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
index 6f1c48663dd..7fa168612b0 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
@@ -430,12 +430,12 @@ StatusWith<RecordId> EphemeralForTestRecordStore::insertRecord(OperationContext*
    return StatusWith<RecordId>(loc);
}

-StatusWith<RecordId> EphemeralForTestRecordStore::updateRecord(OperationContext* txn,
-                                                               const RecordId& loc,
-                                                               const char* data,
-                                                               int len,
-                                                               bool enforceQuota,
-                                                               UpdateNotifier* notifier) {
+Status EphemeralForTestRecordStore::updateRecord(OperationContext* txn,
+                                                 const RecordId& loc,
+                                                 const char* data,
+                                                 int len,
+                                                 bool enforceQuota,
+                                                 UpdateNotifier* notifier) {
    EphemeralForTestRecord* oldRecord = recordFor(loc);
    int oldLen = oldRecord->size;

@@ -447,7 +447,7 @@ StatusWith<RecordId> EphemeralForTestRecordStore::updateRecord(OperationContext*
        // doc-locking), and therefore must notify that it is updating a document.
        Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(txn, loc);
        if (!callbackStatus.isOK()) {
-           return StatusWith<RecordId>(callbackStatus);
+           return callbackStatus;
        }
    }

@@ -460,7 +460,7 @@ StatusWith<RecordId> EphemeralForTestRecordStore::updateRecord(OperationContext*

    cappedDeleteAsNeeded(txn);

-   return StatusWith<RecordId>(loc);
+   return Status::OK();
}

bool EphemeralForTestRecordStore::updateWithDamagesSupported() const {
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
index 324a30653eb..8f83cfe9dfd 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
@@ -69,12 +69,12 @@ public:
                                              const DocWriter* doc,
                                              bool enforceQuota);

-   virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
-                                             const RecordId& oldLocation,
-                                             const char* data,
-                                             int len,
-                                             bool enforceQuota,
-                                             UpdateNotifier* notifier);
+   virtual Status updateRecord(OperationContext* txn,
+                               const RecordId& oldLocation,
+                               const char* data,
+                               int len,
+                               bool enforceQuota,
+                               UpdateNotifier* notifier);

    virtual bool updateWithDamagesSupported() const;

diff --git a/src/mongo/db/storage/kv/kv_catalog.cpp b/src/mongo/db/storage/kv/kv_catalog.cpp
index 6a6d1a8e437..bff423434b4 100644
--- a/src/mongo/db/storage/kv/kv_catalog.cpp
+++ b/src/mongo/db/storage/kv/kv_catalog.cpp
@@ -290,10 +290,8 @@ void KVCatalog::putMetaData(OperationContext* opCtx,
    }

    LOG(3) << "recording new metadata: " << obj;
-   StatusWith<RecordId> status =
-       _rs->updateRecord(opCtx, loc, obj.objdata(), obj.objsize(), false, NULL);
-   fassert(28521, status.getStatus());
-   invariant(status.getValue() == loc);
+   Status status = _rs->updateRecord(opCtx, loc, obj.objdata(), obj.objsize(), false, NULL);
+   fassert(28521, status.isOK());
}

Status KVCatalog::renameCollection(OperationContext* opCtx,
@@ -322,10 +320,8 @@ Status KVCatalog::renameCollection(OperationContext* opCtx,
        b.appendElementsUnique(old);
        BSONObj obj = b.obj();
-       StatusWith<RecordId> status =
-           _rs->updateRecord(opCtx, loc, obj.objdata(), obj.objsize(), false, NULL);
-       fassert(28522, status.getStatus());
-       invariant(status.getValue() == loc);
+       Status status = _rs->updateRecord(opCtx, loc, obj.objdata(), obj.objsize(), false, NULL);
+       fassert(28522, status.isOK());
    }

    stdx::lock_guard<stdx::mutex> lk(_identsLock);

diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
index 278474cabb0..a1c6cf5eb4f 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
@@ -360,10 +360,21 @@ void NamespaceDetailsCollectionCatalogEntry::_updateSystemNamespaces(OperationCo
    RecordData entry = _namespacesRecordStore->dataFor(txn, _namespacesRecordId);

    const BSONObj newEntry = applyUpdateOperators(entry.releaseToBson(), update);
-   StatusWith<RecordId> result = _namespacesRecordStore->updateRecord(
+
+   Status result = _namespacesRecordStore->updateRecord(
        txn, _namespacesRecordId, newEntry.objdata(), newEntry.objsize(), false, NULL);
-   fassert(17486, result.getStatus());
-   setNamespacesRecordId(txn, result.getValue());
+
+   if (ErrorCodes::NeedsDocumentMove == result) {
+       StatusWith<RecordId> newLocation = _namespacesRecordStore->insertRecord(
+           txn, newEntry.objdata(), newEntry.objsize(), false);
+       fassert(40074, newLocation.getStatus().isOK());
+
+       _namespacesRecordStore->deleteRecord(txn, _namespacesRecordId);
+
+       setNamespacesRecordId(txn, newLocation.getValue());
+   } else {
+       fassert(17486, result.isOK());
+   }
}

void NamespaceDetailsCollectionCatalogEntry::updateFlags(OperationContext* txn, int newValue) {
diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
index a496f1ff31e..4e984a4469d 100644
--- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
+++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
@@ -74,12 +74,12 @@ public:

    // ------------------------------

-   virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
-                                             const RecordId& oldLocation,
-                                             const char* data,
-                                             int len,
-                                             bool enforceQuota,
-                                             UpdateNotifier* notifier) {
+   virtual Status updateRecord(OperationContext* txn,
+                               const RecordId& oldLocation,
+                               const char* data,
+                               int len,
+                               bool enforceQuota,
+                               UpdateNotifier* notifier) {
        invariant(false);
    }

diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index 0105d10c4df..f5089ef787f 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -367,49 +367,30 @@ StatusWith<RecordId> RecordStoreV1Base::_insertRecord(OperationContext* txn,
    return StatusWith<RecordId>(loc.getValue().toRecordId());
}

-StatusWith<RecordId> RecordStoreV1Base::updateRecord(OperationContext* txn,
-                                                     const RecordId& oldLocation,
-                                                     const char* data,
-                                                     int dataSize,
-                                                     bool enforceQuota,
-                                                     UpdateNotifier* notifier) {
+Status RecordStoreV1Base::updateRecord(OperationContext* txn,
+                                       const RecordId& oldLocation,
+                                       const char* data,
+                                       int dataSize,
+                                       bool enforceQuota,
+                                       UpdateNotifier* notifier) {
    MmapV1RecordHeader* oldRecord = recordFor(DiskLoc::fromRecordId(oldLocation));
    if (oldRecord->netLength() >= dataSize) {
        // Make sure to notify other queries before we do an in-place update.
        if (notifier) {
            Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(txn, oldLocation);
            if (!callbackStatus.isOK())
-               return StatusWith<RecordId>(callbackStatus);
+               return callbackStatus;
        }

        // we fit
        memcpy(txn->recoveryUnit()->writingPtr(oldRecord->data(), dataSize), data, dataSize);
-       return StatusWith<RecordId>(oldLocation);
+       return Status::OK();
    }

    // We enforce the restriction of unchanging capped doc sizes above the storage layer.
    invariant(!isCapped());

-   // we have to move
-   if (dataSize + MmapV1RecordHeader::HeaderSize > MaxAllowedAllocation) {
-       return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be <= 16.5MB");
-   }
-
-   StatusWith<RecordId> newLocation = _insertRecord(txn, data, dataSize, enforceQuota);
-   if (!newLocation.isOK())
-       return newLocation;
-
-   // insert worked, so we delete old record
-   if (notifier) {
-       Status moveStatus = notifier->recordStoreGoingToMove(
-           txn, oldLocation, oldRecord->data(), oldRecord->netLength());
-       if (!moveStatus.isOK())
-           return StatusWith<RecordId>(moveStatus);
-   }
-
-   deleteRecord(txn, oldLocation);
-
-   return newLocation;
+   return {ErrorCodes::NeedsDocumentMove, "Update requires document move"};
}

bool RecordStoreV1Base::updateWithDamagesSupported() const {
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
index d34c5a9b3f0..4520256f901 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
@@ -199,12 +199,12 @@ public:
                                              const DocWriter* doc,
                                              bool enforceQuota);

-   virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
-                                             const RecordId& oldLocation,
-                                             const char* data,
-                                             int len,
-                                             bool enforceQuota,
-                                             UpdateNotifier* notifier);
+   virtual Status updateRecord(OperationContext* txn,
+                               const RecordId& oldLocation,
+                               const char* data,
+                               int len,
+                               bool enforceQuota,
+                               UpdateNotifier* notifier);

    virtual bool updateWithDamagesSupported() const;

diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index 2beb8368bf4..64f93e50c62 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -76,10 +76,6 @@ public:
class UpdateNotifier {
public:
    virtual ~UpdateNotifier() {}
-   virtual Status recordStoreGoingToMove(OperationContext* txn,
-                                         const RecordId& oldLocation,
-                                         const char* oldBuffer,
-                                         size_t oldSize) = 0;
    virtual Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) = 0;
};

@@ -386,22 +382,23 @@ public:
    }

    /**
-    * @param notifier - Only used by record stores which do not support doc-locking.
-    *                   In the case of a document move, this is called after the document
-    *                   has been written to the new location, but before it is deleted from
-    *                   the old location.
-    *                   In the case of an in-place update, this is called just before the
-    *                   in-place write occurs.
-    * @return Status or RecordId, RecordId might be different
+    * @param notifier - Only used by record stores which do not support doc-locking. Called only
+    *                   in the case of an in-place update. Called just before the in-place write
+    *                   occurs.
+    * @return Status - If a document move is required (MMAPv1 only) then a status of
+    *                  ErrorCodes::NeedsDocumentMove will be returned. On receipt of this status
+    *                  no update will be performed. It is the caller's responsibility to:
+    *                  1. Remove the existing document and associated index keys.
+    *                  2. Insert a new document and index keys.
     *
     * For capped record stores, the record size will never change.
     */
-   virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
-                                             const RecordId& oldLocation,
-                                             const char* data,
-                                             int len,
-                                             bool enforceQuota,
-                                             UpdateNotifier* notifier) = 0;
+   virtual Status updateRecord(OperationContext* txn,
+                               const RecordId& oldLocation,
+                               const char* data,
+                               int len,
+                               bool enforceQuota,
+                               UpdateNotifier* notifier) = 0;

    /**
     * @return Returns 'false' if this record store does not implement
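
The contract documented above shifts the move to the caller: on NeedsDocumentMove nothing has been written, and the caller must remove the old document (and its index keys) and insert the new one, adopting the returned RecordId. A compact sketch of that caller-side handling with stand-in types, mirroring the test rewrites that follow; the 16-byte fit threshold is arbitrary.

#include <cassert>
#include <string>

enum class Code { OK, NeedsDocumentMove };  // stand-in for mongo::Status
using RecordId = long long;

struct ToyRecordStore {
    // Pretend an in-place update only fits for short payloads.
    Code updateRecord(RecordId, const std::string& data) {
        return data.size() > 16 ? Code::NeedsDocumentMove : Code::OK;
    }
    RecordId insertRecord(const std::string&) { return ++_nextId; }
    void deleteRecord(RecordId) {}
    RecordId _nextId = 0;
};

RecordId updateAt(ToyRecordStore& rs, RecordId loc, const std::string& data) {
    Code status = rs.updateRecord(loc, data);
    if (status == Code::NeedsDocumentMove) {
        // Caller owns the move: insert the new document (and its index keys),
        // remove the old one, and adopt the new RecordId.
        RecordId newLoc = rs.insertRecord(data);
        rs.deleteRecord(loc);
        return newLoc;
    }
    assert(status == Code::OK);
    return loc;  // updated in place
}

int main() {
    ToyRecordStore rs;
    RecordId loc = rs.insertRecord("small");
    assert(updateAt(rs, loc, "small") == loc);                        // in place
    assert(updateAt(rs, loc, "much longer replacement doc") != loc);  // moved
}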
*/ - virtual StatusWith<RecordId> updateRecord(OperationContext* txn, - const RecordId& oldLocation, - const char* data, - int len, - bool enforceQuota, - UpdateNotifier* notifier) = 0; + virtual Status updateRecord(OperationContext* txn, + const RecordId& oldLocation, + const char* data, + int len, + bool enforceQuota, + UpdateNotifier* notifier) = 0; /** * @return Returns 'false' if this record store does not implement diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp index 95fd4c993b0..8fc234ab9cd 100644 --- a/src/mongo/db/storage/record_store_test_harness.cpp +++ b/src/mongo/db/storage/record_store_test_harness.cpp @@ -257,10 +257,23 @@ TEST(RecordStoreTestHarness, Update1) { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); { WriteUnitOfWork uow(opCtx.get()); - StatusWith<RecordId> res = + Status status = rs->updateRecord(opCtx.get(), loc, s2.c_str(), s2.size() + 1, false, NULL); - ASSERT_OK(res.getStatus()); - loc = res.getValue(); + + if (ErrorCodes::NeedsDocumentMove == status) { + // NeedsDocumentMove should only be possible under MMAPv1. We have no way to check + // the storage engine directly here, but asserting that 'supportsDocLocking()' returns + // false is an equivalent check, as only MMAPv1 should return false. + ASSERT_FALSE(harnessHelper->supportsDocLocking()); + StatusWith<RecordId> newLocation = + rs->insertRecord(opCtx.get(), s2.c_str(), s2.size() + 1, false); + ASSERT_OK(newLocation.getStatus()); + rs->deleteRecord(opCtx.get(), loc); + loc = newLocation.getValue(); + } else { + ASSERT_OK(status); + } + uow.commit(); } } diff --git a/src/mongo/db/storage/record_store_test_updaterecord.cpp b/src/mongo/db/storage/record_store_test_updaterecord.cpp index 0d7c9433503..cd27acf9c69 100644 --- a/src/mongo/db/storage/record_store_test_updaterecord.cpp +++ b/src/mongo/db/storage/record_store_test_updaterecord.cpp @@ -77,10 +77,19 @@ TEST(RecordStoreTestHarness, UpdateRecord) { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); { WriteUnitOfWork uow(opCtx.get()); - StatusWith<RecordId> res = + Status res = rs->updateRecord(opCtx.get(), loc, data.c_str(), data.size() + 1, false, NULL); - ASSERT_OK(res.getStatus()); - loc = res.getValue(); + + if (ErrorCodes::NeedsDocumentMove == res) { + StatusWith<RecordId> newLocation = + rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false); + ASSERT_OK(newLocation.getStatus()); + rs->deleteRecord(opCtx.get(), loc); + loc = newLocation.getValue(); + } else { + ASSERT_OK(res); + } + uow.commit(); } } @@ -136,10 +145,19 @@ TEST(RecordStoreTestHarness, UpdateMultipleRecords) { string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); - StatusWith<RecordId> res = + Status res = rs->updateRecord(opCtx.get(), locs[i], data.c_str(), data.size() + 1, false, NULL); - ASSERT_OK(res.getStatus()); - locs[i] = res.getValue(); + + if (ErrorCodes::NeedsDocumentMove == res) { + StatusWith<RecordId> newLocation = + rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false); + ASSERT_OK(newLocation.getStatus()); + rs->deleteRecord(opCtx.get(), locs[i]); + locs[i] = newLocation.getValue(); + } else { + ASSERT_OK(res); + } + uow.commit(); } } @@ -194,21 +212,21 @@ TEST(RecordStoreTestHarness, UpdateRecordWithMoveNotifier) { UpdateNotifierSpy umn(opCtx.get(), loc, oldData.c_str(), oldData.size()); WriteUnitOfWork uow(opCtx.get()); - StatusWith<RecordId> res = rs->updateRecord( + Status res = rs->updateRecord( opCtx.get(), loc,
newData.c_str(), newData.size() + 1, false, &umn); - ASSERT_OK(res.getStatus()); - // UpdateNotifier::recordStoreGoingToMove() called only if - // the RecordId for the record changes - if (loc == res.getValue()) { - ASSERT_EQUALS(0, umn.numMoveCallbacks()); - // Only MMAP v1 is required to use the UpdateNotifier for in-place updates, - // so the number of callbacks is expected to be 0 for non-MMAP storage engines. - ASSERT_GTE(1, umn.numInPlaceCallbacks()); - } else { - ASSERT_EQUALS(1, umn.numMoveCallbacks()); + + if (ErrorCodes::NeedsDocumentMove == res) { + StatusWith<RecordId> newLocation = + rs->insertRecord(opCtx.get(), newData.c_str(), newData.size() + 1, false); + ASSERT_OK(newLocation.getStatus()); + rs->deleteRecord(opCtx.get(), loc); + loc = newLocation.getValue(); ASSERT_EQUALS(0, umn.numInPlaceCallbacks()); + } else { + ASSERT_OK(res); + ASSERT_GTE(1, umn.numInPlaceCallbacks()); } - loc = res.getValue(); + uow.commit(); } } diff --git a/src/mongo/db/storage/record_store_test_updaterecord.h b/src/mongo/db/storage/record_store_test_updaterecord.h index f82feb6b592..be52887cf2b 100644 --- a/src/mongo/db/storage/record_store_test_updaterecord.h +++ b/src/mongo/db/storage/record_store_test_updaterecord.h @@ -41,21 +41,10 @@ namespace { class UpdateNotifierSpy : public UpdateNotifier { public: UpdateNotifierSpy(OperationContext* txn, const RecordId& loc, const char* buf, size_t size) - : _txn(txn), _loc(loc), _data(buf, size), nMoveCalls(0), nInPlaceCalls(0) {} + : _txn(txn), _loc(loc), _data(buf, size), nInPlaceCalls(0) {} ~UpdateNotifierSpy() {} - Status recordStoreGoingToMove(OperationContext* txn, - const RecordId& oldLocation, - const char* oldBuffer, - size_t oldSize) { - nMoveCalls++; - ASSERT_EQUALS(_txn, txn); - ASSERT_EQUALS(_loc, oldLocation); - ASSERT_EQUALS(_data, oldBuffer); - return Status::OK(); - } - Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) { nInPlaceCalls++; ASSERT_EQUALS(_txn, txn); @@ -63,10 +52,6 @@ public: return Status::OK(); } - int numMoveCallbacks() const { - return nMoveCalls; - } - int numInPlaceCallbacks() const { return nInPlaceCalls; } @@ -77,7 +62,6 @@ private: std::string _data; // To verify the number of callbacks to the notifier. 
- int nMoveCalls; int nInPlaceCalls; }; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp index f2ebc977f34..636f2d86ffd 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp @@ -1367,12 +1367,12 @@ StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* txn, return insertRecord(txn, buf.get(), len, enforceQuota); } -StatusWith<RecordId> WiredTigerRecordStore::updateRecord(OperationContext* txn, - const RecordId& id, - const char* data, - int len, - bool enforceQuota, - UpdateNotifier* notifier) { +Status WiredTigerRecordStore::updateRecord(OperationContext* txn, + const RecordId& id, + const char* data, + int len, + bool enforceQuota, + UpdateNotifier* notifier) { WiredTigerCursor curwrap(_uri, _tableId, true, txn); curwrap.assertInActiveTxn(); WT_CURSOR* c = curwrap.get(); @@ -1402,7 +1402,7 @@ StatusWith<RecordId> WiredTigerRecordStore::updateRecord(OperationContext* txn, cappedDeleteAsNeeded(txn, id); } - return StatusWith<RecordId>(id); + return Status::OK(); } bool WiredTigerRecordStore::updateWithDamagesSupported() const { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h index 3335e774c4c..9e7cc01f276 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h @@ -131,12 +131,12 @@ public: const DocWriter* doc, bool enforceQuota); - virtual StatusWith<RecordId> updateRecord(OperationContext* txn, - const RecordId& oldLocation, - const char* data, - int len, - bool enforceQuota, - UpdateNotifier* notifier); + virtual Status updateRecord(OperationContext* txn, + const RecordId& oldLocation, + const char* data, + int len, + bool enforceQuota, + UpdateNotifier* notifier); virtual bool updateWithDamagesSupported() const; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp index 7dcc4033f70..e09ccdf3e65 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp @@ -239,8 +239,8 @@ TEST(WiredTigerRecordStoreTest, Isolation1) { rs->dataFor(t1.get(), id1); rs->dataFor(t2.get(), id1); - ASSERT_OK(rs->updateRecord(t1.get(), id1, "b", 2, false, NULL).getStatus()); - ASSERT_OK(rs->updateRecord(t1.get(), id2, "B", 2, false, NULL).getStatus()); + ASSERT_OK(rs->updateRecord(t1.get(), id1, "b", 2, false, NULL)); + ASSERT_OK(rs->updateRecord(t1.get(), id2, "B", 2, false, NULL)); try { // this should fail @@ -289,7 +289,7 @@ TEST(WiredTigerRecordStoreTest, Isolation2) { { WriteUnitOfWork w(t1.get()); - ASSERT_OK(rs->updateRecord(t1.get(), id1, "b", 2, false, NULL).getStatus()); + ASSERT_OK(rs->updateRecord(t1.get(), id1, "b", 2, false, NULL)); w.commit(); } diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp index f6f0f48c858..2e28621b833 100644 --- a/src/mongo/dbtests/counttests.cpp +++ b/src/mongo/dbtests/counttests.cpp @@ -90,6 +90,7 @@ protected: void insert(const char* s) { WriteUnitOfWork wunit(&_txn); const BSONObj o = fromjson(s); + OpDebug* const nullOpDebug = nullptr; if (o["_id"].eoo()) { BSONObjBuilder b; @@ -97,9 +98,9 @@ protected: oid.init(); b.appendOID("_id", &oid); b.appendElements(o); - _collection->insertDocument(&_txn, b.obj(), 
false); + _collection->insertDocument(&_txn, b.obj(), nullOpDebug, false); } else { - _collection->insertDocument(&_txn, o, false); + _collection->insertDocument(&_txn, o, nullOpDebug, false); } wunit.commit(); } diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp index e81c45bdd94..3b3c3531ec9 100644 --- a/src/mongo/dbtests/indexupdatetests.cpp +++ b/src/mongo/dbtests/indexupdatetests.cpp @@ -351,13 +351,16 @@ public: db->dropCollection(&_txn, _ns); coll = db->createCollection(&_txn, _ns); + OpDebug* const nullOpDebug = nullptr; coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << "dup"), + nullOpDebug, true); coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << "dup"), + nullOpDebug, true); wunit.commit(); } @@ -394,13 +397,16 @@ public: db->dropCollection(&_txn, _ns); coll = db->createCollection(&_txn, _ns); + OpDebug* const nullOpDebug = nullptr; coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << "dup"), + nullOpDebug, true); coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << "dup"), + nullOpDebug, true); wunit.commit(); } @@ -436,13 +442,16 @@ public: db->dropCollection(&_txn, _ns); coll = db->createCollection(&_txn, _ns); + OpDebug* const nullOpDebug = nullptr; ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << "dup"), + nullOpDebug, true)); ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << "dup"), + nullOpDebug, true)); wunit.commit(); } @@ -488,8 +497,9 @@ public: coll->getIndexCatalog()->dropAllIndexes(&_txn, true); // Insert some documents with enforceQuota=true. int32_t nDocs = 1000; + OpDebug* const nullOpDebug = nullptr; for (int32_t i = 0; i < nDocs; ++i) { - coll->insertDocument(&_txn, BSON("a" << i), true); + coll->insertDocument(&_txn, BSON("a" << i), nullOpDebug, true); } wunit.commit(); } @@ -520,8 +530,9 @@ public: coll->getIndexCatalog()->dropAllIndexes(&_txn, true); // Insert some documents. int32_t nDocs = 1000; + OpDebug* const nullOpDebug = nullptr; for (int32_t i = 0; i < nDocs; ++i) { - coll->insertDocument(&_txn, BSON("a" << i), true); + coll->insertDocument(&_txn, BSON("a" << i), nullOpDebug, true); } wunit.commit(); } @@ -555,8 +566,9 @@ public: coll->getIndexCatalog()->dropAllIndexes(&_txn, true); // Insert some documents. int32_t nDocs = 1000; + OpDebug* const nullOpDebug = nullptr; for (int32_t i = 0; i < nDocs; ++i) { - coll->insertDocument(&_txn, BSON("_id" << i), true); + coll->insertDocument(&_txn, BSON("_id" << i), nullOpDebug, true); } wunit.commit(); } @@ -590,8 +602,9 @@ public: coll->getIndexCatalog()->dropAllIndexes(&_txn, true); // Insert some documents. 
int32_t nDocs = 1000; + OpDebug* const nullOpDebug = nullptr; for (int32_t i = 0; i < nDocs; ++i) { - coll->insertDocument(&_txn, BSON("_id" << i), true); + coll->insertDocument(&_txn, BSON("_id" << i), nullOpDebug, true); } wunit.commit(); } diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp index 943f0a124fd..311e5a5e20d 100644 --- a/src/mongo/dbtests/pdfiletests.cpp +++ b/src/mongo/dbtests/pdfiletests.cpp @@ -76,13 +76,14 @@ public: BSONObj x = BSON("x" << 1); ASSERT(x["_id"].type() == 0); Collection* collection = _context.db()->getOrCreateCollection(&_txn, ns()); - ASSERT(!collection->insertDocument(&_txn, x, true).isOK()); + OpDebug* const nullOpDebug = nullptr; + ASSERT(!collection->insertDocument(&_txn, x, nullOpDebug, true).isOK()); StatusWith<BSONObj> fixed = fixDocumentForInsert(x); ASSERT(fixed.isOK()); x = fixed.getValue(); ASSERT(x["_id"].type() == jstOID); - ASSERT(collection->insertDocument(&_txn, x, true).isOK()); + ASSERT(collection->insertDocument(&_txn, x, nullOpDebug, true).isOK()); wunit.commit(); } }; diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp index be91a55fdf1..865ee18dacc 100644 --- a/src/mongo/dbtests/query_stage_cached_plan.cpp +++ b/src/mongo/dbtests/query_stage_cached_plan.cpp @@ -93,7 +93,8 @@ public: WriteUnitOfWork wuow(&_txn); const bool enforceQuota = false; - ASSERT_OK(collection->insertDocument(&_txn, obj, enforceQuota)); + OpDebug* const nullOpDebug = nullptr; + ASSERT_OK(collection->insertDocument(&_txn, obj, nullOpDebug, enforceQuota)); wuow.commit(); } diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp index de3d484dbaa..f325004216f 100644 --- a/src/mongo/dbtests/query_stage_count.cpp +++ b/src/mongo/dbtests/query_stage_count.cpp @@ -105,13 +105,15 @@ public: void insert(const BSONObj& doc) { WriteUnitOfWork wunit(&_txn); - _coll->insertDocument(&_txn, doc, false); + OpDebug* const nullOpDebug = nullptr; + _coll->insertDocument(&_txn, doc, nullOpDebug, false); wunit.commit(); } void remove(const RecordId& recordId) { WriteUnitOfWork wunit(&_txn); - _coll->deleteDocument(&_txn, recordId); + OpDebug* const nullOpDebug = nullptr; + _coll->deleteDocument(&_txn, recordId, nullOpDebug); wunit.commit(); } diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp index 12e8f33fb3c..8b619d9dfce 100644 --- a/src/mongo/dbtests/query_stage_ixscan.cpp +++ b/src/mongo/dbtests/query_stage_ixscan.cpp @@ -64,7 +64,8 @@ public: void insert(const BSONObj& doc) { WriteUnitOfWork wunit(&_txn); - ASSERT_OK(_coll->insertDocument(&_txn, doc, false)); + OpDebug* const nullOpDebug = nullptr; + ASSERT_OK(_coll->insertDocument(&_txn, doc, nullOpDebug, false)); wunit.commit(); } diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp index 676da68abcd..fabf10ba05b 100644 --- a/src/mongo/dbtests/query_stage_sort.cpp +++ b/src/mongo/dbtests/query_stage_sort.cpp @@ -459,10 +459,11 @@ public: // We should have read in the first 'firstRead' recordIds. Invalidate the first. 
exec->saveState(); + OpDebug* const nullOpDebug = nullptr; set<RecordId>::iterator it = recordIds.begin(); { WriteUnitOfWork wuow(&_txn); - coll->deleteDocument(&_txn, *it++); + coll->deleteDocument(&_txn, *it++, nullOpDebug); wuow.commit(); } exec->restoreState(); @@ -478,7 +479,7 @@ public: while (it != recordIds.end()) { { WriteUnitOfWork wuow(&_txn); - coll->deleteDocument(&_txn, *it++); + coll->deleteDocument(&_txn, *it++, nullOpDebug); wuow.commit(); } } diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp index 49c11d6dd44..8cf6d28d1a5 100644 --- a/src/mongo/dbtests/querytests.cpp +++ b/src/mongo/dbtests/querytests.cpp @@ -99,15 +99,16 @@ protected: void insert(const BSONObj& o) { WriteUnitOfWork wunit(&_txn); + OpDebug* const nullOpDebug = nullptr; if (o["_id"].eoo()) { BSONObjBuilder b; OID oid; oid.init(); b.appendOID("_id", &oid); b.appendElements(o); - _collection->insertDocument(&_txn, b.obj(), false); + _collection->insertDocument(&_txn, b.obj(), nullOpDebug, false); } else { - _collection->insertDocument(&_txn, o, false); + _collection->insertDocument(&_txn, o, nullOpDebug, false); } wunit.commit(); } diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp index 0becd7c9fba..ec092669747 100644 --- a/src/mongo/dbtests/repltests.cpp +++ b/src/mongo/dbtests/repltests.cpp @@ -249,9 +249,10 @@ protected: coll = db->createCollection(&_txn, ns()); } + OpDebug* const nullOpDebug = nullptr; if (o.hasField("_id")) { _txn.setReplicatedWrites(false); - coll->insertDocument(&_txn, o, true); + coll->insertDocument(&_txn, o, nullOpDebug, true); _txn.setReplicatedWrites(true); wunit.commit(); return; @@ -263,7 +264,7 @@ protected: b.appendOID("_id", &id); b.appendElements(o); _txn.setReplicatedWrites(false); - coll->insertDocument(&_txn, b.obj(), true); + coll->insertDocument(&_txn, b.obj(), nullOpDebug, true); _txn.setReplicatedWrites(true); wunit.commit(); } diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp index 41405295e14..f437cb68c64 100644 --- a/src/mongo/dbtests/rollbacktests.cpp +++ b/src/mongo/dbtests/rollbacktests.cpp @@ -88,7 +88,8 @@ Status truncateCollection(OperationContext* txn, const NamespaceString& nss) { void insertRecord(OperationContext* txn, const NamespaceString& nss, const BSONObj& data) { Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns()); - ASSERT_OK(coll->insertDocument(txn, data, false)); + OpDebug* const nullOpDebug = nullptr; + ASSERT_OK(coll->insertDocument(txn, data, nullOpDebug, false)); } void assertOnlyRecord(OperationContext* txn, const NamespaceString& nss, const BSONObj& data) { Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns()); diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp index fe82b1c5589..308c81482d1 100644 --- a/src/mongo/dbtests/validate_tests.cpp +++ b/src/mongo/dbtests/validate_tests.cpp @@ -106,13 +106,14 @@ public: Collection* coll; RecordId id1; { + OpDebug* const nullOpDebug = nullptr; WriteUnitOfWork wunit(&_txn); ASSERT_OK(db->dropCollection(&_txn, _ns)); coll = db->createCollection(&_txn, _ns); - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1), true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1), nullOpDebug, true)); id1 = coll->getCursor(&_txn)->next()->id; - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2), true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2), nullOpDebug, true)); wunit.commit(); } @@ -155,12 +156,13 @@ 
public: Collection* coll; RecordId id1; { + OpDebug* const nullOpDebug = nullptr; WriteUnitOfWork wunit(&_txn); ASSERT_OK(db->dropCollection(&_txn, _ns)); coll = db->createCollection(&_txn, _ns); - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true)); id1 = coll->getCursor(&_txn)->next()->id; - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true)); wunit.commit(); } @@ -208,16 +210,17 @@ public: void run() { // Create a new collection, insert three records. Database* db = _ctx.db(); + OpDebug* const nullOpDebug = nullptr; Collection* coll; RecordId id1; { WriteUnitOfWork wunit(&_txn); ASSERT_OK(db->dropCollection(&_txn, _ns)); coll = db->createCollection(&_txn, _ns); - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true)); id1 = coll->getCursor(&_txn)->next()->id; - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), true)); - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "b" << 3), true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "b" << 3), nullOpDebug, true)); wunit.commit(); } @@ -239,11 +242,10 @@ public: { WriteUnitOfWork wunit(&_txn); auto doc = BSON("_id" << 1 << "a" << 9); - auto statusW = rs->updateRecord( + auto updateStatus = rs->updateRecord( &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL); - ASSERT_OK(statusW.getStatus()); - // Assert the RecordId has not changed after an in-place update. - ASSERT_EQ(id1, statusW.getValue()); + + ASSERT_OK(updateStatus); wunit.commit(); } @@ -258,6 +260,7 @@ public: void run() { // Create a new collection, insert records {_id: 1} and {_id: 2} and check it's valid. Database* db = _ctx.db(); + OpDebug* const nullOpDebug = nullptr; Collection* coll; RecordId id1; { @@ -265,9 +268,9 @@ public: ASSERT_OK(db->dropCollection(&_txn, _ns)); coll = db->createCollection(&_txn, _ns); - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1), true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1), nullOpDebug, true)); id1 = coll->getCursor(&_txn)->next()->id; - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2), true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2), nullOpDebug, true)); wunit.commit(); } @@ -280,11 +283,9 @@ public: { WriteUnitOfWork wunit(&_txn); auto doc = BSON("_id" << 9); - auto statusW = rs->updateRecord( + auto updateStatus = rs->updateRecord( &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL); - ASSERT_OK(statusW.getStatus()); - // Assert the RecordId has not changed after an in-place update. - ASSERT_EQ(id1, statusW.getValue()); + ASSERT_OK(updateStatus); wunit.commit(); } @@ -294,10 +295,9 @@ public: { WriteUnitOfWork wunit(&_txn); auto doc = BSON("_id" << 1); - auto statusW = rs->updateRecord( + auto updateStatus = rs->updateRecord( &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL); - ASSERT_OK(statusW.getStatus()); - id1 = statusW.getValue(); + ASSERT_OK(updateStatus); wunit.commit(); } @@ -326,6 +326,7 @@ public: void run() { // Create a new collection, insert three records and check it's valid. 
Database* db = _ctx.db(); + OpDebug* const nullOpDebug = nullptr; Collection* coll; RecordId id1; // {a: [b: 1, c: 2]}, {a: [b: 2, c: 2]}, {a: [b: 1, c: 1]} @@ -343,10 +344,10 @@ public: coll = db->createCollection(&_txn, _ns); - ASSERT_OK(coll->insertDocument(&_txn, doc1, true)); + ASSERT_OK(coll->insertDocument(&_txn, doc1, nullOpDebug, true)); id1 = coll->getCursor(&_txn)->next()->id; - ASSERT_OK(coll->insertDocument(&_txn, doc2, true)); - ASSERT_OK(coll->insertDocument(&_txn, doc3, true)); + ASSERT_OK(coll->insertDocument(&_txn, doc2, nullOpDebug, true)); + ASSERT_OK(coll->insertDocument(&_txn, doc3, nullOpDebug, true)); wunit.commit(); } @@ -369,10 +370,9 @@ public: // Update a document's indexed field without updating the index. { WriteUnitOfWork wunit(&_txn); - auto statusW = rs->updateRecord( + auto updateStatus = rs->updateRecord( &_txn, id1, doc1_b.objdata(), doc1_b.objsize(), /*enforceQuota*/ false, NULL); - ASSERT_OK(statusW.getStatus()); - id1 = statusW.getValue(); + ASSERT_OK(updateStatus); wunit.commit(); } @@ -382,9 +382,9 @@ public: // Index validation should still be valid. { WriteUnitOfWork wunit(&_txn); - auto statusW = rs->updateRecord( + auto updateStatus = rs->updateRecord( &_txn, id1, doc1_c.objdata(), doc1_c.objsize(), /*enforceQuota*/ false, NULL); - ASSERT_OK(statusW.getStatus()); + ASSERT_OK(updateStatus); wunit.commit(); } @@ -399,6 +399,7 @@ public: void run() { // Create a new collection, insert three records and check it's valid. Database* db = _ctx.db(); + OpDebug* const nullOpDebug = nullptr; Collection* coll; RecordId id1; { @@ -406,10 +407,10 @@ public: ASSERT_OK(db->dropCollection(&_txn, _ns)); coll = db->createCollection(&_txn, _ns); - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true)); id1 = coll->getCursor(&_txn)->next()->id; - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), true)); - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "b" << 1), true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "b" << 1), nullOpDebug, true)); wunit.commit(); } @@ -431,9 +432,9 @@ public: { WriteUnitOfWork wunit(&_txn); auto doc = BSON("_id" << 2 << "a" << 3); - auto statusW = rs->updateRecord( + auto updateStatus = rs->updateRecord( &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL); - ASSERT_OK(statusW.getStatus()); + ASSERT_OK(updateStatus); wunit.commit(); } @@ -448,6 +449,7 @@ public: void run() { // Create a new collection, insert two records and check it's valid. 
Database* db = _ctx.db(); + OpDebug* const nullOpDebug = nullptr; Collection* coll; RecordId id1; { @@ -455,9 +457,9 @@ public: ASSERT_OK(db->dropCollection(&_txn, _ns)); coll = db->createCollection(&_txn, _ns); - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true)); id1 = coll->getCursor(&_txn)->next()->id; - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true)); wunit.commit(); } @@ -480,9 +482,9 @@ public: { WriteUnitOfWork wunit(&_txn); auto doc = BSON("_id" << 1); - auto statusW = rs->updateRecord( + auto updateStatus = rs->updateRecord( &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL); - ASSERT_OK(statusW.getStatus()); + ASSERT_OK(updateStatus); wunit.commit(); } @@ -497,6 +499,7 @@ public: void run() { // Create a new collection, insert five records and check it's valid. Database* db = _ctx.db(); + OpDebug* const nullOpDebug = nullptr; Collection* coll; RecordId id1; { @@ -504,12 +507,14 @@ public: ASSERT_OK(db->dropCollection(&_txn, _ns)); coll = db->createCollection(&_txn, _ns); - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1 << "b" << 4), true)); + ASSERT_OK(coll->insertDocument( + &_txn, BSON("_id" << 1 << "a" << 1 << "b" << 4), nullOpDebug, true)); id1 = coll->getCursor(&_txn)->next()->id; - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2 << "b" << 5), true)); - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "a" << 3), true)); - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 4 << "b" << 6), true)); - ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 5 << "c" << 7), true)); + ASSERT_OK(coll->insertDocument( + &_txn, BSON("_id" << 2 << "a" << 2 << "b" << 5), nullOpDebug, true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 3 << "a" << 3), nullOpDebug, true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 4 << "b" << 6), nullOpDebug, true)); + ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 5 << "c" << 7), nullOpDebug, true)); wunit.commit(); } @@ -541,9 +546,9 @@ public: { WriteUnitOfWork wunit(&_txn); auto doc = BSON("_id" << 1 << "a" << 1 << "b" << 3); - auto statusW = rs->updateRecord( + auto updateStatus = rs->updateRecord( &_txn, id1, doc.objdata(), doc.objsize(), /*enforceQuota*/ false, NULL); - ASSERT_OK(statusW.getStatus()); + ASSERT_OK(updateStatus); wunit.commit(); }
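
The call sites above all pass a null OpDebug because these tests do not report metrics. As an illustrative sketch only (not part of the patch): a metrics-aware caller would instead pass the current operation's OpDebug. Here 'txn', 'coll' and 'recordId' are assumed to be a valid OperationContext*, Collection* and RecordId with appropriate locks held, and the counter names follow this commit's OpDebug fields:

    // A caller that wants the keysInserted/keysDeleted metrics passes the current
    // operation's OpDebug instead of nullptr.
    OpDebug* const opDebug = &CurOp::get(txn)->debug();

    WriteUnitOfWork wunit(txn);
    uassertStatusOK(coll->insertDocument(txn, BSON("_id" << 1), opDebug, /*enforceQuota*/ true));
    coll->deleteDocument(txn, recordId, opDebug);  // deleteDocument() returns void
    wunit.commit();
    // opDebug->keysInserted and opDebug->keysDeleted now include the index keys
    // written and removed by these two operations.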