author    Benety Goh <benety@mongodb.com>    2016-04-21 14:31:09 -0400
committer Benety Goh <benety@mongodb.com>    2016-04-21 14:31:09 -0400
commit    0bd67d53681e4826a44f2b45b136f50cd36de44b
tree      6de928d1480421d36774bc1061fde8942c80f3f1 /src/mongo
parent    42415534834c82518dfdb4c75c3b22b75edb5eff
download  mongo-0bd67d53681e4826a44f2b45b136f50cd36de44b.tar.gz
Revert "SERVER-23271 Add keysInserted and keysDeleted metrics for CRUD ops"
This reverts commit 6bbaee174447ee1c9177c72bdd07f050ab07e901.
Diffstat (limited to 'src/mongo')
57 files changed, 306 insertions, 515 deletions
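For orientation before the diff body: the heart of this revert is the RecordStore::updateRecord contract. The reverted commit had updateRecord return a plain Status and signal ErrorCodes::NeedsDocumentMove so that the caller (Collection::_updateDocumentWithMove) performed the document move; this revert restores the older contract, in which updateRecord returns StatusWith<RecordId>, performs the move itself, and notifies the caller through UpdateNotifier::recordStoreGoingToMove. The following is an illustrative sketch only, with simplified stand-in types (not MongoDB source), of the caller-side pattern that Collection::updateDocument returns to in this commit:

    // Sketch only: stand-in types, not mongo::StatusWith or mongo::RecordId.
    #include <cstdint>
    #include <iostream>

    struct RecordId {
        int64_t repr = 0;
    };
    bool operator!=(const RecordId& a, const RecordId& b) {
        return a.repr != b.repr;
    }

    // Minimal stand-in for StatusWith<RecordId>: either an error or a value.
    struct StatusWithRecordId {
        bool ok;
        RecordId value;  // valid only when ok
    };

    // Hypothetical store: moves the record when the new payload no longer fits.
    StatusWithRecordId updateRecord(const RecordId& oldLoc, int newLen, int oldLen) {
        if (newLen <= oldLen) {
            return {true, oldLoc};  // in-place update, location unchanged
        }
        // Under the restored contract the record store performs the move itself
        // (unindexing the old location via UpdateNotifier::recordStoreGoingToMove)
        // and reports the new location back to the caller.
        return {true, RecordId{oldLoc.repr + 1}};
    }

    int main() {
        RecordId oldLoc{42};
        StatusWithRecordId res = updateRecord(oldLoc, /*newLen=*/128, /*oldLen=*/64);
        if (!res.ok) {
            return 1;  // propagate the error, as Collection::updateDocument does
        }
        if (res.value != oldLoc) {
            // Document moved: the caller re-indexes the document at the new
            // location and bumps OpDebug::nmoved, mirroring the restored code.
            std::cout << "moved to " << res.value.repr << "\n";
        }
        return 0;
    }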
diff --git a/src/mongo/base/error_codes.err b/src/mongo/base/error_codes.err index 67d0f4af107..8c5eb8349e3 100644 --- a/src/mongo/base/error_codes.err +++ b/src/mongo/base/error_codes.err @@ -154,7 +154,6 @@ error_code("OplogOutOfOrder", 152) error_code("ChunkTooBig", 153) error_code("InconsistentShardIdentity", 154) error_code("CannotApplyOplogWhilePrimary", 155) -error_code("NeedsDocumentMove", 156) # Non-sequential error codes (for compatibility only) error_code("SocketException", 9001) diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp index 2883cb26439..b11a17dd1cd 100644 --- a/src/mongo/db/catalog/capped_utils.cpp +++ b/src/mongo/db/catalog/capped_utils.cpp @@ -182,9 +182,7 @@ Status cloneCollectionAsCapped(OperationContext* txn, } WriteUnitOfWork wunit(txn); - OpDebug* const nullOpDebug = nullptr; - toCollection->insertDocument( - txn, objToClone.value(), nullOpDebug, true, txn->writesAreReplicated()); + toCollection->insertDocument(txn, objToClone.value(), true, txn->writesAreReplicated()); wunit.commit(); // Go to the next document diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp index bfb250608b1..2523a4779be 100644 --- a/src/mongo/db/catalog/collection.cpp +++ b/src/mongo/db/catalog/collection.cpp @@ -36,6 +36,7 @@ #include "mongo/db/catalog/collection.h" + #include "mongo/base/counter.h" #include "mongo/base/owned_pointer_map.h" #include "mongo/bson/ordering.h" @@ -332,7 +333,6 @@ Status Collection::insertDocument(OperationContext* txn, const DocWriter* doc, b Status Collection::insertDocuments(OperationContext* txn, const vector<BSONObj>::const_iterator begin, const vector<BSONObj>::const_iterator end, - OpDebug* opDebug, bool enforceQuota, bool fromMigrate) { // Should really be done in the collection object at creation and updated on index create. 
@@ -355,7 +355,7 @@ Status Collection::insertDocuments(OperationContext* txn, if (_mustTakeCappedLockOnInsert) synchronizeOnCappedInFlightResource(txn->lockState(), _ns); - Status status = _insertDocuments(txn, begin, end, enforceQuota, opDebug); + Status status = _insertDocuments(txn, begin, end, enforceQuota); if (!status.isOK()) return status; invariant(sid == txn->recoveryUnit()->getSnapshotId()); @@ -371,12 +371,11 @@ Status Collection::insertDocuments(OperationContext* txn, Status Collection::insertDocument(OperationContext* txn, const BSONObj& docToInsert, - OpDebug* opDebug, bool enforceQuota, bool fromMigrate) { vector<BSONObj> docs; docs.push_back(docToInsert); - return insertDocuments(txn, docs.begin(), docs.end(), opDebug, enforceQuota, fromMigrate); + return insertDocuments(txn, docs.begin(), docs.end(), enforceQuota, fromMigrate); } Status Collection::insertDocument(OperationContext* txn, @@ -419,8 +418,7 @@ Status Collection::insertDocument(OperationContext* txn, Status Collection::_insertDocuments(OperationContext* txn, const vector<BSONObj>::const_iterator begin, const vector<BSONObj>::const_iterator end, - bool enforceQuota, - OpDebug* opDebug) { + bool enforceQuota) { dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX)); if (isCapped() && _indexCatalog.haveAnyIndexes() && std::distance(begin, end) > 1) { @@ -459,13 +457,7 @@ Status Collection::_insertDocuments(OperationContext* txn, bsonRecords.push_back(bsonRecord); } - int64_t keysInserted; - status = _indexCatalog.indexRecords(txn, bsonRecords, &keysInserted); - if (opDebug) { - opDebug->keysInserted += keysInserted; - } - - return status; + return _indexCatalog.indexRecords(txn, bsonRecords); } void Collection::notifyCappedWaitersIfNeeded() { @@ -483,21 +475,13 @@ Status Collection::aboutToDeleteCapped(OperationContext* txn, _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION); BSONObj doc = data.releaseToBson(); - int64_t* const nullKeysDeleted = nullptr; - _indexCatalog.unindexRecord(txn, doc, loc, false, nullKeysDeleted); - - // We are not capturing and reporting to OpDebug the 'keysDeleted' by unindexRecord(). It is - // questionable whether reporting will add diagnostic value to users and may instead be - // confusing as it depends on our internal capped collection document removal strategy. - // We can consider adding either keysDeleted or a new metric reporting document removal if - // justified by user demand. + _indexCatalog.unindexRecord(txn, doc, loc, false); return Status::OK(); } void Collection::deleteDocument(OperationContext* txn, const RecordId& loc, - OpDebug* opDebug, bool fromMigrate, bool noWarn) { if (isCapped()) { @@ -516,11 +500,7 @@ void Collection::deleteDocument(OperationContext* txn, /* check if any cursors point to us. if so, advance them. 
*/ _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION); - int64_t keysDeleted; - _indexCatalog.unindexRecord(txn, doc.value(), loc, noWarn, &keysDeleted); - if (opDebug) { - opDebug->keysDeleted += keysDeleted; - } + _indexCatalog.unindexRecord(txn, doc.value(), loc, noWarn); _recordStore->deleteRecord(txn, loc); @@ -537,7 +517,7 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn, const BSONObj& newDoc, bool enforceQuota, bool indexesAffected, - OpDebug* opDebug, + OpDebug* debug, OplogUpdateEntryArgs* args) { { auto status = checkValidation(txn, newDoc); @@ -617,19 +597,45 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn, } } - Status updateStatus = _recordStore->updateRecord( + // This can call back into Collection::recordStoreGoingToMove. If that happens, the old + // object is removed from all indexes. + StatusWith<RecordId> newLocation = _recordStore->updateRecord( txn, oldLocation, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota), this); - if (updateStatus == ErrorCodes::NeedsDocumentMove) { - return _updateDocumentWithMove( - txn, oldLocation, oldDoc, newDoc, enforceQuota, opDebug, args, sid); - } else if (!updateStatus.isOK()) { - return updateStatus; + if (!newLocation.isOK()) { + return newLocation; + } + + // At this point, the old object may or may not still be indexed, depending on if it was + // moved. If the object did move, we need to add the new location to all indexes. + if (newLocation.getValue() != oldLocation) { + if (debug) { + if (debug->nmoved == -1) // default of -1 rather than 0 + debug->nmoved = 1; + else + debug->nmoved += 1; + } + + std::vector<BsonRecord> bsonRecords; + BsonRecord bsonRecord = {newLocation.getValue(), &newDoc}; + bsonRecords.push_back(bsonRecord); + Status s = _indexCatalog.indexRecords(txn, bsonRecords); + if (!s.isOK()) + return StatusWith<RecordId>(s); + invariant(sid == txn->recoveryUnit()->getSnapshotId()); + args->updatedDoc = newDoc; + + auto opObserver = getGlobalServiceContext()->getOpObserver(); + if (opObserver) + opObserver->onUpdate(txn, *args); + + return newLocation; } // Object did not move. We update each index with each respective UpdateTicket. 
- if (opDebug) - opDebug->keyUpdates = 0; + + if (debug) + debug->keyUpdates = 0; if (indexesAffected) { IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true); @@ -637,17 +643,12 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn, IndexDescriptor* descriptor = ii.next(); IndexAccessMethod* iam = ii.accessMethod(descriptor); - int64_t keysInserted; - int64_t keysDeleted; - Status ret = iam->update( - txn, *updateTickets.mutableMap()[descriptor], &keysInserted, &keysDeleted); + int64_t updatedKeys; + Status ret = iam->update(txn, *updateTickets.mutableMap()[descriptor], &updatedKeys); if (!ret.isOK()) return StatusWith<RecordId>(ret); - if (opDebug) { - opDebug->keyUpdates += keysInserted; - opDebug->keysInserted += keysInserted; - opDebug->keysDeleted += keysDeleted; - } + if (debug) + debug->keyUpdates += updatedKeys; } } @@ -658,58 +659,17 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn, if (opObserver) opObserver->onUpdate(txn, *args); - return {oldLocation}; + return newLocation; } -StatusWith<RecordId> Collection::_updateDocumentWithMove(OperationContext* txn, - const RecordId& oldLocation, - const Snapshotted<BSONObj>& oldDoc, - const BSONObj& newDoc, - bool enforceQuota, - OpDebug* opDebug, - OplogUpdateEntryArgs* args, - const SnapshotId& sid) { - // Insert new record. - StatusWith<RecordId> newLocation = _recordStore->insertRecord( - txn, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota)); - if (!newLocation.isOK()) { - return newLocation; - } - - invariant(newLocation.getValue() != oldLocation); - - _cursorManager.invalidateDocument(txn, oldLocation, INVALIDATION_DELETION); - - // Remove indexes for old record. - int64_t keysDeleted; - _indexCatalog.unindexRecord(txn, oldDoc.value(), oldLocation, true, &keysDeleted); - - // Remove old record. - _recordStore->deleteRecord(txn, oldLocation); - - std::vector<BsonRecord> bsonRecords; - BsonRecord bsonRecord = {newLocation.getValue(), &newDoc}; - bsonRecords.push_back(bsonRecord); - - // Add indexes for new record. - int64_t keysInserted; - Status status = _indexCatalog.indexRecords(txn, bsonRecords, &keysInserted); - if (!status.isOK()) { - return StatusWith<RecordId>(status); - } - - invariant(sid == txn->recoveryUnit()->getSnapshotId()); - args->updatedDoc = newDoc; - txn->getServiceContext()->getOpObserver()->onUpdate(txn, *args); - +Status Collection::recordStoreGoingToMove(OperationContext* txn, + const RecordId& oldLocation, + const char* oldBuffer, + size_t oldSize) { moveCounter.increment(); - if (opDebug) { - opDebug->nmoved++; - opDebug->keysInserted += keysInserted; - opDebug->keysDeleted += keysDeleted; - } - - return newLocation; + _cursorManager.invalidateDocument(txn, oldLocation, INVALIDATION_DELETION); + _indexCatalog.unindexRecord(txn, BSONObj(oldBuffer), oldLocation, true); + return Status::OK(); } Status Collection::recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) { diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h index e1c68377b10..cb7417c4871 100644 --- a/src/mongo/db/catalog/collection.h +++ b/src/mongo/db/catalog/collection.h @@ -250,14 +250,12 @@ public: * so should be ignored by the user as an internal maintenance operation and not a * real delete. * 'loc' key to uniquely identify a record in a collection. - * 'opDebug' Optional argument. When not null, will be used to record operation statistics. 
* 'cappedOK' if true, allows deletes on capped collections (Cloner::copyDB uses this). * 'noWarn' if unindexing the record causes an error, if noWarn is true the error * will not be logged. */ void deleteDocument(OperationContext* txn, const RecordId& loc, - OpDebug* opDebug, bool fromMigrate = false, bool noWarn = false); @@ -265,13 +263,10 @@ public: * Inserts all documents inside one WUOW. * Caller should ensure vector is appropriately sized for this. * If any errors occur (including WCE), caller should retry documents individually. - * - * 'opDebug' Optional argument. When not null, will be used to record operation statistics. */ Status insertDocuments(OperationContext* txn, std::vector<BSONObj>::const_iterator begin, std::vector<BSONObj>::const_iterator end, - OpDebug* opDebug, bool enforceQuota, bool fromMigrate = false); @@ -279,12 +274,10 @@ public: * this does NOT modify the doc before inserting * i.e. will not add an _id field for documents that are missing it * - * 'opDebug' Optional argument. When not null, will be used to record operation statistics. - * 'enforceQuota' If false, quotas will be ignored. + * If enforceQuota is false, quotas will be ignored. */ Status insertDocument(OperationContext* txn, const BSONObj& doc, - OpDebug* opDebug, bool enforceQuota, bool fromMigrate = false); @@ -305,7 +298,6 @@ public: * If the document fits in the old space, it is put there; if not, it is moved. * Sets 'args.updatedDoc' to the updated version of the document with damages applied, on * success. - * 'opDebug' Optional argument. When not null, will be used to record operation statistics. * @return the post update location of the doc (may or may not be the same as oldLocation) */ StatusWith<RecordId> updateDocument(OperationContext* txn, @@ -314,7 +306,7 @@ public: const BSONObj& newDoc, bool enforceQuota, bool indexesAffected, - OpDebug* opDebug, + OpDebug* debug, OplogUpdateEntryArgs* args); bool updateWithDamagesSupported() const; @@ -445,6 +437,11 @@ private: */ StatusWithMatchExpression parseValidator(const BSONObj& validator) const; + Status recordStoreGoingToMove(OperationContext* txn, + const RecordId& oldLocation, + const char* oldBuffer, + size_t oldSize); + Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc); Status aboutToDeleteCapped(OperationContext* txn, const RecordId& loc, RecordData data); @@ -459,21 +456,7 @@ private: Status _insertDocuments(OperationContext* txn, std::vector<BSONObj>::const_iterator begin, std::vector<BSONObj>::const_iterator end, - bool enforceQuota, - OpDebug* opDebug); - - - /** - * Perform update when document move will be required. 
- */ - StatusWith<RecordId> _updateDocumentWithMove(OperationContext* txn, - const RecordId& oldLocation, - const Snapshotted<BSONObj>& oldDoc, - const BSONObj& newDoc, - bool enforceQuota, - OpDebug* opDebug, - OplogUpdateEntryArgs* args, - const SnapshotId& sid); + bool enforceQuota); bool _enforceQuota(bool userEnforeQuota) const; diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp index a64f43284ca..eb0dfa8a1b9 100644 --- a/src/mongo/db/catalog/index_catalog.cpp +++ b/src/mongo/db/catalog/index_catalog.cpp @@ -1135,8 +1135,7 @@ bool isDupsAllowed(IndexDescriptor* desc) { Status IndexCatalog::_indexFilteredRecords(OperationContext* txn, IndexCatalogEntry* index, - const std::vector<BsonRecord>& bsonRecords, - int64_t* keysInsertedOut) { + const std::vector<BsonRecord>& bsonRecords) { InsertDeleteOptions options; options.logIfError = false; options.dupsAllowed = isDupsAllowed(index->descriptor()); @@ -1148,21 +1147,16 @@ Status IndexCatalog::_indexFilteredRecords(OperationContext* txn, txn, *bsonRecord.docPtr, bsonRecord.id, options, &inserted); if (!status.isOK()) return status; - - if (keysInsertedOut) { - *keysInsertedOut += inserted; - } } return Status::OK(); } Status IndexCatalog::_indexRecords(OperationContext* txn, IndexCatalogEntry* index, - const std::vector<BsonRecord>& bsonRecords, - int64_t* keysInsertedOut) { + const std::vector<BsonRecord>& bsonRecords) { const MatchExpression* filter = index->getFilterExpression(); if (!filter) - return _indexFilteredRecords(txn, index, bsonRecords, keysInsertedOut); + return _indexFilteredRecords(txn, index, bsonRecords); std::vector<BsonRecord> filteredBsonRecords; for (auto bsonRecord : bsonRecords) { @@ -1170,15 +1164,14 @@ Status IndexCatalog::_indexRecords(OperationContext* txn, filteredBsonRecords.push_back(bsonRecord); } - return _indexFilteredRecords(txn, index, filteredBsonRecords, keysInsertedOut); + return _indexFilteredRecords(txn, index, filteredBsonRecords); } Status IndexCatalog::_unindexRecord(OperationContext* txn, IndexCatalogEntry* index, const BSONObj& obj, const RecordId& loc, - bool logIfError, - int64_t* keysDeletedOut) { + bool logIfError) { InsertDeleteOptions options; options.logIfError = logIfError; options.dupsAllowed = isDupsAllowed(index->descriptor()); @@ -1196,24 +1189,15 @@ Status IndexCatalog::_unindexRecord(OperationContext* txn, << _collection->ns() << ". Status: " << status.toString(); } - if (keysDeletedOut) { - *keysDeletedOut += removed; - } - return Status::OK(); } Status IndexCatalog::indexRecords(OperationContext* txn, - const std::vector<BsonRecord>& bsonRecords, - int64_t* keysInsertedOut) { - if (keysInsertedOut) { - *keysInsertedOut = 0; - } - + const std::vector<BsonRecord>& bsonRecords) { for (IndexCatalogEntryContainer::const_iterator i = _entries.begin(); i != _entries.end(); ++i) { - Status s = _indexRecords(txn, *i, bsonRecords, keysInsertedOut); + Status s = _indexRecords(txn, *i, bsonRecords); if (!s.isOK()) return s; } @@ -1224,19 +1208,14 @@ Status IndexCatalog::indexRecords(OperationContext* txn, void IndexCatalog::unindexRecord(OperationContext* txn, const BSONObj& obj, const RecordId& loc, - bool noWarn, - int64_t* keysDeletedOut) { - if (keysDeletedOut) { - *keysDeletedOut = 0; - } - + bool noWarn) { for (IndexCatalogEntryContainer::const_iterator i = _entries.begin(); i != _entries.end(); ++i) { IndexCatalogEntry* entry = *i; // If it's a background index, we DO NOT want to log anything. bool logIfError = entry->isReady(txn) ? 
!noWarn : false; - _unindexRecord(txn, entry, obj, loc, logIfError, keysDeletedOut); + _unindexRecord(txn, entry, obj, loc, logIfError); } } diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h index 2ccfef9a1d8..022aba664fa 100644 --- a/src/mongo/db/catalog/index_catalog.h +++ b/src/mongo/db/catalog/index_catalog.h @@ -263,25 +263,10 @@ public: // ----- data modifiers ------ - /** - * When 'keysInsertedOut' is not null, it will be set to the number of index keys inserted by - * this operation. - * - * This method may throw. - */ - Status indexRecords(OperationContext* txn, - const std::vector<BsonRecord>& bsonRecords, - int64_t* keysInsertedOut); + // this throws for now + Status indexRecords(OperationContext* txn, const std::vector<BsonRecord>& bsonRecords); - /** - * When 'keysDeletedOut' is not null, it will be set to the number of index keys removed by - * this operation. - */ - void unindexRecord(OperationContext* txn, - const BSONObj& obj, - const RecordId& loc, - bool noWarn, - int64_t* keysDeletedOut); + void unindexRecord(OperationContext* txn, const BSONObj& obj, const RecordId& loc, bool noWarn); // ------- temp internal ------- @@ -312,20 +297,17 @@ private: Status _indexFilteredRecords(OperationContext* txn, IndexCatalogEntry* index, - const std::vector<BsonRecord>& bsonRecords, - int64_t* keysInsertedOut); + const std::vector<BsonRecord>& bsonRecords); Status _indexRecords(OperationContext* txn, IndexCatalogEntry* index, - const std::vector<BsonRecord>& bsonRecords, - int64_t* keysInsertedOut); + const std::vector<BsonRecord>& bsonRecords); Status _unindexRecord(OperationContext* txn, IndexCatalogEntry* index, const BSONObj& obj, const RecordId& loc, - bool logIfError, - int64_t* keysDeletedOut); + bool logIfError); /** * this does no sanity checks diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp index cc09104ab10..e7789c2ac92 100644 --- a/src/mongo/db/cloner.cpp +++ b/src/mongo/db/cloner.cpp @@ -217,8 +217,7 @@ struct Cloner::Fun { WriteUnitOfWork wunit(txn); BSONObj doc = tmp; - OpDebug* const nullOpDebug = nullptr; - Status status = collection->insertDocument(txn, doc, nullOpDebug, true); + Status status = collection->insertDocument(txn, doc, true); if (!status.isOK()) { error() << "error: exception cloning object in " << from_collection << ' ' << status << " obj:" << doc; @@ -611,8 +610,7 @@ Status Cloner::copyDb(OperationContext* txn, // dupsAllowed in IndexCatalog::_unindexRecord and SERVER-17487. 
for (set<RecordId>::const_iterator it = dups.begin(); it != dups.end(); ++it) { WriteUnitOfWork wunit(txn); - OpDebug* const nullOpDebug = nullptr; - c->deleteDocument(txn, *it, nullOpDebug, false, true); + c->deleteDocument(txn, *it, false, true); wunit.commit(); } diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp index 7d89e39e548..17f4b3d6be5 100644 --- a/src/mongo/db/commands/find_and_modify.cpp +++ b/src/mongo/db/commands/find_and_modify.cpp @@ -245,7 +245,6 @@ public: const FindAndModifyRequest& args = parseStatus.getValue(); const NamespaceString& nsString = args.getNamespaceString(); - OpDebug* opDebug = &CurOp::get(txn)->debug(); if (args.isRemove()) { DeleteRequest request(nsString); @@ -270,8 +269,7 @@ public: css->checkShardVersionOrThrow(txn); Collection* const collection = autoColl.getCollection(); - auto statusWithPlanExecutor = - getExecutorDelete(txn, opDebug, collection, &parsedDelete); + auto statusWithPlanExecutor = getExecutorDelete(txn, collection, &parsedDelete); if (!statusWithPlanExecutor.isOK()) { return statusWithPlanExecutor.getStatus(); } @@ -290,6 +288,8 @@ public: return parsedUpdateStatus; } + OpDebug* opDebug = &CurOp::get(txn)->debug(); + // Explain calls of the findAndModify command are read-only, but we take write // locks so that the timing information is more accurate. AutoGetCollection autoColl(txn, nsString, MODE_IX); @@ -303,7 +303,7 @@ public: Collection* collection = autoColl.getCollection(); auto statusWithPlanExecutor = - getExecutorUpdate(txn, opDebug, collection, &parsedUpdate); + getExecutorUpdate(txn, collection, &parsedUpdate, opDebug); if (!statusWithPlanExecutor.isOK()) { return statusWithPlanExecutor.getStatus(); } @@ -353,8 +353,6 @@ public: lastOpSetterGuard.Dismiss(); } - OpDebug* opDebug = &CurOp::get(txn)->debug(); - // Although usually the PlanExecutor handles WCE internally, it will throw WCEs when it is // executing a findAndModify. This is done to ensure that we can always match, modify, and // return the document under concurrency, if a matching document exists. @@ -389,8 +387,7 @@ public: } Collection* const collection = autoDb.getDb()->getCollection(nsString.ns()); - auto statusWithPlanExecutor = - getExecutorDelete(txn, opDebug, collection, &parsedDelete); + auto statusWithPlanExecutor = getExecutorDelete(txn, collection, &parsedDelete); if (!statusWithPlanExecutor.isOK()) { return appendCommandStatus(result, statusWithPlanExecutor.getStatus()); } @@ -428,6 +425,8 @@ public: return appendCommandStatus(result, parsedUpdateStatus); } + OpDebug* opDebug = &CurOp::get(txn)->debug(); + AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX); Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX); @@ -477,7 +476,7 @@ public: } auto statusWithPlanExecutor = - getExecutorUpdate(txn, opDebug, collection, &parsedUpdate); + getExecutorUpdate(txn, collection, &parsedUpdate, opDebug); if (!statusWithPlanExecutor.isOK()) { return appendCommandStatus(result, statusWithPlanExecutor.getStatus()); } diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp index 155525578ee..8f125382095 100644 --- a/src/mongo/db/commands/mr.cpp +++ b/src/mongo/db/commands/mr.cpp @@ -707,10 +707,7 @@ void State::insert(const string& ns, const BSONObj& o) { if (!res.getValue().isEmpty()) { bo = res.getValue(); } - - // TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261. 
- OpDebug* const nullOpDebug = nullptr; - uassertStatusOK(coll->insertDocument(_txn, bo, nullOpDebug, true)); + uassertStatusOK(coll->insertDocument(_txn, bo, true)); wuow.commit(); } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R insert", ns); @@ -740,10 +737,7 @@ void State::_insertToInc(BSONObj& o) { << ". size in bytes: " << o.objsize() << ", max size: " << BSONObjMaxUserSize); } - - // TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261. - OpDebug* const nullOpDebug = nullptr; - uassertStatusOK(coll->insertDocument(_txn, o, nullOpDebug, true, false)); + uassertStatusOK(coll->insertDocument(_txn, o, true, false)); wuow.commit(); } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "M/R insertToInc", _config.incLong); diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp index 5bcfe71e365..b286aa3f252 100644 --- a/src/mongo/db/commands/test_commands.cpp +++ b/src/mongo/db/commands/test_commands.cpp @@ -101,8 +101,7 @@ public: return false; } } - OpDebug* const nullOpDebug = nullptr; - Status status = collection->insertDocument(txn, obj, nullOpDebug, false); + Status status = collection->insertDocument(txn, obj, false); if (status.isOK()) { wunit.commit(); } diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp index 7886c225b2a..e20696f3394 100644 --- a/src/mongo/db/commands/write_commands/batch_executor.cpp +++ b/src/mongo/db/commands/write_commands/batch_executor.cpp @@ -929,8 +929,7 @@ static void insertOne(WriteBatchExecutor::ExecInsertsState* state, WriteOpResult state->getCollection()->ns().ns(), MODE_IX)); WriteUnitOfWork wunit(txn); - Status status = state->getCollection()->insertDocument( - txn, insertDoc, &CurOp::get(txn)->debug(), true); + Status status = state->getCollection()->insertDocument(txn, insertDoc, true); if (status.isOK()) { result->getStats().n++; @@ -1119,7 +1118,7 @@ static void multiUpdate(OperationContext* txn, try { invariant(collection); std::unique_ptr<PlanExecutor> exec = - uassertStatusOK(getExecutorUpdate(txn, debug, collection, &parsedUpdate)); + uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug)); uassertStatusOK(exec->executePlan()); @@ -1206,8 +1205,6 @@ static void multiRemove(OperationContext* txn, lastOpSetterGuard.Dismiss(); } - OpDebug* opDebug = &CurOp::get(txn)->debug(); - int attempt = 1; while (1) { try { @@ -1242,7 +1239,7 @@ static void multiRemove(OperationContext* txn, auto collection = autoDb.getDb()->getCollection(nss); std::unique_ptr<PlanExecutor> exec = - uassertStatusOK(getExecutorDelete(txn, opDebug, collection, &parsedDelete)); + uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete)); // Execute the delete and retrieve the number deleted. 
uassertStatusOK(exec->executePlan()); @@ -1253,7 +1250,6 @@ static void multiRemove(OperationContext* txn, if (collection) { collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed); } - CurOp::get(txn)->debug().setPlanSummaryMetrics(summary); if (repl::ReplClientInfo::forClient(client).getLastOp() != lastOpAtOperationStart) { diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp index ee86ad4be96..7e56675ef3c 100644 --- a/src/mongo/db/commands/write_commands/write_commands.cpp +++ b/src/mongo/db/commands/write_commands/write_commands.cpp @@ -178,8 +178,6 @@ Status WriteCmd::explain(OperationContext* txn, // Get a reference to the singleton batch item (it's the 0th item in the batch). BatchItemRef batchItem(&request, 0); - OpDebug* opDebug = &CurOp::get(txn)->debug(); - if (BatchedCommandRequest::BatchType_Update == _writeType) { // Create the update request. UpdateRequest updateRequest(request.getNS()); @@ -194,6 +192,8 @@ Status WriteCmd::explain(OperationContext* txn, // Explained updates can yield. updateRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO); + OpDebug* debug = &CurOp::get(txn)->debug(); + ParsedUpdate parsedUpdate(txn, &updateRequest); Status parseStatus = parsedUpdate.parseRequest(); if (!parseStatus.isOK()) { @@ -212,7 +212,7 @@ Status WriteCmd::explain(OperationContext* txn, } std::unique_ptr<PlanExecutor> exec = - uassertStatusOK(getExecutorUpdate(txn, opDebug, collection, &parsedUpdate)); + uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug)); // Explain the plan tree. Explain::explainStages(exec.get(), verbosity, out); @@ -248,7 +248,7 @@ Status WriteCmd::explain(OperationContext* txn, } std::unique_ptr<PlanExecutor> exec = - uassertStatusOK(getExecutorDelete(txn, opDebug, collection, &parsedDelete)); + uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete)); // Explain the plan tree. 
Explain::explainStages(exec.get(), verbosity, out); diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp index 086f0215b82..6b8618b3843 100644 --- a/src/mongo/db/curop.cpp +++ b/src/mongo/db/curop.cpp @@ -517,6 +517,7 @@ string OpDebug::report(const CurOp& curop, const SingleThreadedLockStats& lockSt OPDEBUG_TOSTRING_HELP_BOOL(hasSortStage); OPDEBUG_TOSTRING_HELP_BOOL(fromMultiPlanner); OPDEBUG_TOSTRING_HELP_BOOL(replanned); + OPDEBUG_TOSTRING_HELP(nmoved); OPDEBUG_TOSTRING_HELP(nMatched); OPDEBUG_TOSTRING_HELP(nModified); OPDEBUG_TOSTRING_HELP(ninserted); @@ -527,18 +528,6 @@ string OpDebug::report(const CurOp& curop, const SingleThreadedLockStats& lockSt OPDEBUG_TOSTRING_HELP_BOOL(cursorExhausted); OPDEBUG_TOSTRING_HELP(keyUpdates); - if (nmoved > 0) { - s << " nmoved:" << nmoved; - } - - if (keysInserted > 0) { - s << " keysInserted:" << keysInserted; - } - - if (keysDeleted > 0) { - s << " keysDeleted:" << keysDeleted; - } - if (writeConflicts > 0) { s << " writeConflicts:" << writeConflicts; } @@ -649,6 +638,8 @@ void OpDebug::append(const CurOp& curop, OPDEBUG_APPEND_BOOL(hasSortStage); OPDEBUG_APPEND_BOOL(fromMultiPlanner); OPDEBUG_APPEND_BOOL(replanned); + OPDEBUG_APPEND_BOOL(moved); + OPDEBUG_APPEND_NUMBER(nmoved); OPDEBUG_APPEND_NUMBER(nMatched); OPDEBUG_APPEND_NUMBER(nModified); OPDEBUG_APPEND_NUMBER(ninserted); @@ -658,19 +649,6 @@ void OpDebug::append(const CurOp& curop, OPDEBUG_APPEND_BOOL(upsert); OPDEBUG_APPEND_BOOL(cursorExhausted); OPDEBUG_APPEND_NUMBER(keyUpdates); - OPDEBUG_APPEND_BOOL(moved); - - if (nmoved > 0) { - b.appendNumber("nmoved", nmoved); - } - - if (keysInserted > 0) { - b.appendNumber("keysInserted", keysInserted); - } - - if (keysDeleted > 0) { - b.appendNumber("keysDeleted", keysDeleted); - } if (writeConflicts > 0) { b.appendNumber("writeConflicts", writeConflicts); diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h index 9d98191a0b5..06539aeaacd 100644 --- a/src/mongo/db/curop.h +++ b/src/mongo/db/curop.h @@ -184,20 +184,15 @@ public: long long nMatched{-1}; // number of records that match the query long long nModified{-1}; // number of records written (no no-ops) + long long nmoved{-1}; // updates resulted in a move (moves are expensive) long long ninserted{-1}; long long ndeleted{-1}; bool fastmod{false}; bool fastmodinsert{false}; // upsert of an $operation. builds a default object bool upsert{false}; // true if the update actually did an insert bool cursorExhausted{ - false}; // true if the cursor has been closed at end a find/getMore operation - int keyUpdates{-1}; // TODO SERVER-23272: Remove this metric. - - // The following metrics are initialized with 0 rather than -1 in order to simplify use by the - // CRUD path. - long long nmoved{0}; // updates resulted in a move (moves are expensive) - long long keysInserted{0}; // Number of index keys inserted. - long long keysDeleted{0}; // Number of index keys removed. 
+ false}; // true if the cursor has been closed at end a find/getMore operation + int keyUpdates{-1}; long long writeConflicts{0}; // New Query Framework debugging/profiling info diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp index 84e696aabfd..6242ddaf8cc 100644 --- a/src/mongo/db/db.cpp +++ b/src/mongo/db/db.cpp @@ -256,9 +256,7 @@ static void logStartup(OperationContext* txn) { collection = db->getCollection(startupLogCollectionName); } invariant(collection); - - OpDebug* const nullOpDebug = nullptr; - uassertStatusOK(collection->insertDocument(txn, o, nullOpDebug, false)); + uassertStatusOK(collection->insertDocument(txn, o, false)); wunit.commit(); } diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp index b266fa268e9..fa7824f0768 100644 --- a/src/mongo/db/dbhelpers.cpp +++ b/src/mongo/db/dbhelpers.cpp @@ -440,8 +440,7 @@ long long Helpers::removeRange(OperationContext* txn, if (callback) callback->goingToDelete(obj); - OpDebug* const nullOpDebug = nullptr; - collection->deleteDocument(txn, rloc, nullOpDebug, fromMigrate); + collection->deleteDocument(txn, rloc, fromMigrate); wuow.commit(); numDeleted++; } diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp index e1e88a3333a..0e9d7d28fa0 100644 --- a/src/mongo/db/exec/delete.cpp +++ b/src/mongo/db/exec/delete.cpp @@ -34,7 +34,6 @@ #include "mongo/db/catalog/collection.h" #include "mongo/db/concurrency/write_conflict_exception.h" -#include "mongo/db/curop.h" #include "mongo/db/exec/scoped_timer.h" #include "mongo/db/exec/working_set_common.h" #include "mongo/db/exec/write_stage_common.h" @@ -215,7 +214,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) { if (!_params.isExplain) { try { WriteUnitOfWork wunit(getOpCtx()); - _collection->deleteDocument(getOpCtx(), recordId, _params.opDebug, _params.fromMigrate); + _collection->deleteDocument(getOpCtx(), recordId, _params.fromMigrate); wunit.commit(); } catch (const WriteConflictException& wce) { memberFreer.Dismiss(); // Keep this member around so we can retry deleting it. diff --git a/src/mongo/db/exec/delete.h b/src/mongo/db/exec/delete.h index 5c2435a2122..9a71c597e63 100644 --- a/src/mongo/db/exec/delete.h +++ b/src/mongo/db/exec/delete.h @@ -35,7 +35,6 @@ namespace mongo { class CanonicalQuery; -class OpDebug; class OperationContext; class PlanExecutor; @@ -45,8 +44,7 @@ struct DeleteStageParams { fromMigrate(false), isExplain(false), returnDeleted(false), - canonicalQuery(nullptr), - opDebug(nullptr) {} + canonicalQuery(nullptr) {} // Should we delete all documents returned from the child (a "multi delete"), or at most one // (a "single delete")? @@ -67,9 +65,6 @@ struct DeleteStageParams { // The user-requested sort specification. Currently used just for findAndModify. BSONObj sort; - - // Optional. When not null, delete metrics are recorded here. - OpDebug* opDebug; }; /** diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp index 31b7147a90b..1b52796a5a5 100644 --- a/src/mongo/db/exec/update.cpp +++ b/src/mongo/db/exec/update.cpp @@ -740,7 +740,7 @@ void UpdateStage::doInsert() { invariant(_collection); const bool enforceQuota = !request->isGod(); uassertStatusOK(_collection->insertDocument( - getOpCtx(), newObj, _params.opDebug, enforceQuota, request->isFromMigration())); + getOpCtx(), newObj, enforceQuota, request->isFromMigration())); // Technically, we should save/restore state here, but since we are going to return // immediately after, it would just be wasted work. 
diff --git a/src/mongo/db/exec/update.h b/src/mongo/db/exec/update.h index 5548d0f1192..c39717f9748 100644 --- a/src/mongo/db/exec/update.h +++ b/src/mongo/db/exec/update.h @@ -39,7 +39,6 @@ namespace mongo { class OperationContext; -class OpDebug; struct PlanSummaryStats; struct UpdateStageParams { diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp index b10bc6add71..f99d3104c30 100644 --- a/src/mongo/db/index/index_access_method.cpp +++ b/src/mongo/db/index/index_access_method.cpp @@ -102,8 +102,8 @@ Status IndexAccessMethod::insert(OperationContext* txn, const RecordId& loc, const InsertDeleteOptions& options, int64_t* numInserted) { - invariant(numInserted); *numInserted = 0; + BSONObjSet keys; // Delegate to the subclass. getKeys(obj, &keys); @@ -179,10 +179,9 @@ Status IndexAccessMethod::remove(OperationContext* txn, const RecordId& loc, const InsertDeleteOptions& options, int64_t* numDeleted) { - invariant(numDeleted); - *numDeleted = 0; BSONObjSet keys; getKeys(obj, &keys); + *numDeleted = 0; for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) { removeOneKey(txn, *i, loc, options.dupsAllowed); @@ -292,14 +291,7 @@ Status IndexAccessMethod::validateUpdate(OperationContext* txn, Status IndexAccessMethod::update(OperationContext* txn, const UpdateTicket& ticket, - int64_t* numInserted, - int64_t* numDeleted) { - invariant(numInserted); - invariant(numDeleted); - - *numInserted = 0; - *numDeleted = 0; - + int64_t* numUpdated) { if (!ticket._isValid) { return Status(ErrorCodes::InternalError, "Invalid UpdateTicket in update"); } @@ -325,8 +317,7 @@ Status IndexAccessMethod::update(OperationContext* txn, } } - *numInserted = ticket.added.size(); - *numDeleted = ticket.removed.size(); + *numUpdated = ticket.added.size(); return Status::OK(); } diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h index 1509500d916..f0376af716a 100644 --- a/src/mongo/db/index/index_access_method.h +++ b/src/mongo/db/index/index_access_method.h @@ -71,7 +71,7 @@ public: /** * Internally generate the keys {k1, ..., kn} for 'obj'. For each key k, insert (k -> - * 'loc') into the index. 'obj' is the object at the location 'loc'. + * 'loc') into the index. 'obj' is the object at the location 'loc'. If not NULL, * 'numInserted' will be set to the number of keys added to the index for the document. If * there is more than one key for 'obj', either all keys will be inserted or none will. * @@ -84,8 +84,8 @@ public: int64_t* numInserted); /** - * Analogous to above, but remove the records instead of inserting them. - * 'numDeleted' will be set to the number of keys removed from the index for the document. + * Analogous to above, but remove the records instead of inserting them. If not NULL, + * numDeleted will be set to the number of keys removed from the index for the document. */ Status remove(OperationContext* txn, const BSONObj& obj, @@ -118,14 +118,8 @@ public: * 'from' will remain. Assumes that the index has not changed since validateUpdate was * called. If the index was changed, we may return an error, as our ticket may have been * invalidated. - * - * 'numInserted' will be set to the number of keys inserted into the index for the document. - * 'numDeleted' will be set to the number of keys removed from the index for the document. 
*/ - Status update(OperationContext* txn, - const UpdateTicket& ticket, - int64_t* numInserted, - int64_t* numDeleted); + Status update(OperationContext* txn, const UpdateTicket& ticket, int64_t* numUpdated); /** * Returns an unpositioned cursor over 'this' index. diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp index 4ad5616781a..bce793e9c6c 100644 --- a/src/mongo/db/instance.cpp +++ b/src/mongo/db/instance.cpp @@ -719,7 +719,7 @@ void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Mess // The common case: no implicit collection creation if (!upsert || collection != NULL) { unique_ptr<PlanExecutor> exec = - uassertStatusOK(getExecutorUpdate(txn, &op.debug(), collection, &parsedUpdate)); + uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, &op.debug())); // Run the plan and get stats out. uassertStatusOK(exec->executePlan()); @@ -786,7 +786,7 @@ void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Mess auto collection = ctx.db()->getCollection(nsString); invariant(collection); unique_ptr<PlanExecutor> exec = - uassertStatusOK(getExecutorUpdate(txn, &op.debug(), collection, &parsedUpdate)); + uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, &op.debug())); // Run the plan and get stats out. uassertStatusOK(exec->executePlan()); @@ -865,7 +865,7 @@ void receivedDelete(OperationContext* txn, const NamespaceString& nsString, Mess auto collection = ctx.db()->getCollection(nsString); unique_ptr<PlanExecutor> exec = - uassertStatusOK(getExecutorDelete(txn, &op.debug(), collection, &parsedDelete)); + uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete)); // Run the plan and get the number of docs deleted. uassertStatusOK(exec->executePlan()); @@ -984,7 +984,7 @@ void insertMultiSingletons(OperationContext* txn, invariant(collection); } - uassertStatusOK(collection->insertDocument(txn, *it, &op.debug(), true)); + uassertStatusOK(collection->insertDocument(txn, *it, true)); wouw.commit(); } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "insert", ns); @@ -1015,7 +1015,7 @@ void insertMultiVector(OperationContext* txn, invariant(collection); } - uassertStatusOK(collection->insertDocuments(txn, begin, end, &op.debug(), true, false)); + uassertStatusOK(collection->insertDocuments(txn, begin, end, true, false)); wunit.commit(); int inserted = end - begin; diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp index 55ae0dba84d..1893f38b87e 100644 --- a/src/mongo/db/introspect.cpp +++ b/src/mongo/db/introspect.cpp @@ -132,8 +132,7 @@ void profile(OperationContext* txn, NetworkOp op) { Collection* const coll = db->getCollection(db->getProfilingNS()); if (coll) { WriteUnitOfWork wuow(txn); - OpDebug* const nullOpDebug = nullptr; - coll->insertDocument(txn, p, nullOpDebug, false); + coll->insertDocument(txn, p, false); wuow.commit(); break; diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp index 7f509308ad4..870deb66dd5 100644 --- a/src/mongo/db/ops/delete.cpp +++ b/src/mongo/db/ops/delete.cpp @@ -66,8 +66,8 @@ long long deleteObjects(OperationContext* txn, auto client = txn->getClient(); auto lastOpAtOperationStart = repl::ReplClientInfo::forClient(client).getLastOp(); - std::unique_ptr<PlanExecutor> exec = uassertStatusOK( - getExecutorDelete(txn, &CurOp::get(txn)->debug(), collection, &parsedDelete)); + std::unique_ptr<PlanExecutor> exec = + uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete)); uassertStatusOK(exec->executePlan()); diff --git 
a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp index 82a816e5b3c..010abf6be3b 100644 --- a/src/mongo/db/ops/update.cpp +++ b/src/mongo/db/ops/update.cpp @@ -114,7 +114,7 @@ UpdateResult update(OperationContext* txn, uassertStatusOK(parsedUpdate.parseRequest()); std::unique_ptr<PlanExecutor> exec = - uassertStatusOK(getExecutorUpdate(txn, opDebug, collection, &parsedUpdate)); + uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, opDebug)); uassertStatusOK(exec->executePlan()); if (repl::ReplClientInfo::forClient(client).getLastOp() != lastOpAtOperationStart) { diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp index 933f60f34f8..d2c70e1d673 100644 --- a/src/mongo/db/query/get_executor.cpp +++ b/src/mongo/db/query/get_executor.cpp @@ -637,7 +637,6 @@ StatusWith<unique_ptr<PlanStage>> applyProjection(OperationContext* txn, // StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn, - OpDebug* opDebug, Collection* collection, ParsedDelete* parsedDelete) { const DeleteRequest* request = parsedDelete->getRequest(); @@ -673,7 +672,6 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn, deleteStageParams.isExplain = request->isExplain(); deleteStageParams.returnDeleted = request->shouldReturnDeleted(); deleteStageParams.sort = request->getSort(); - deleteStageParams.opDebug = opDebug; unique_ptr<WorkingSet> ws = make_unique<WorkingSet>(); PlanExecutor::YieldPolicy policy = @@ -779,9 +777,9 @@ inline void validateUpdate(const char* ns, const BSONObj& updateobj, const BSONO } // namespace StatusWith<unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn, - OpDebug* opDebug, Collection* collection, - ParsedUpdate* parsedUpdate) { + ParsedUpdate* parsedUpdate, + OpDebug* opDebug) { const UpdateRequest* request = parsedUpdate->getRequest(); UpdateDriver* driver = parsedUpdate->getDriver(); diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h index 34c8cf0f0e8..49b83166ac5 100644 --- a/src/mongo/db/query/get_executor.h +++ b/src/mongo/db/query/get_executor.h @@ -145,7 +145,6 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn * If the query cannot be executed, returns a Status indicating why. */ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* txn, - OpDebug* opDebug, Collection* collection, ParsedDelete* parsedDelete); @@ -165,9 +164,9 @@ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorDelete(OperationContext* tx * If the query cannot be executed, returns a Status indicating why. */ StatusWith<std::unique_ptr<PlanExecutor>> getExecutorUpdate(OperationContext* txn, - OpDebug* opDebug, Collection* collection, - ParsedUpdate* parsedUpdate); + ParsedUpdate* parsedUpdate, + OpDebug* opDebug); /** * Get a PlanExecutor for a group operation. 
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index 50f0dc5f039..94602815666 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -488,9 +488,8 @@ OpTime writeOpsToOplog(OperationContext* txn, const std::vector<BSONObj>& ops) { OldClientContext ctx(txn, rsOplogName, _localDB); WriteUnitOfWork wunit(txn); - OpDebug* const nullOpDebug = nullptr; - checkOplogInsert(_localOplogCollection->insertDocuments( - txn, ops.begin(), ops.end(), nullOpDebug, false)); + checkOplogInsert( + _localOplogCollection->insertDocuments(txn, ops.begin(), ops.end(), false)); lastOptime = fassertStatusOK(ErrorCodes::InvalidBSON, OpTime::parseFromOplogEntry(ops.back())); wunit.commit(); @@ -821,9 +820,7 @@ Status applyOperation_inlock(OperationContext* txn, } WriteUnitOfWork wuow(txn); - OpDebug* const nullOpDebug = nullptr; - status = collection->insertDocuments( - txn, insertObjs.begin(), insertObjs.end(), nullOpDebug, true); + status = collection->insertDocuments(txn, insertObjs.begin(), insertObjs.end(), true); if (!status.isOK()) { return status; } @@ -854,8 +851,7 @@ Status applyOperation_inlock(OperationContext* txn, { WriteUnitOfWork wuow(txn); try { - OpDebug* const nullOpDebug = nullptr; - status = collection->insertDocument(txn, o, nullOpDebug, true); + status = collection->insertDocument(txn, o, true); } catch (DBException dbe) { status = dbe.toStatus(); } diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp index 4f1ba8a5cbe..a50570d4160 100644 --- a/src/mongo/db/repl/rs_rollback_test.cpp +++ b/src/mongo/db/repl/rs_rollback_test.cpp @@ -772,12 +772,9 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) { coll = autoDb.getDb()->createCollection(_txn.get(), "test.t"); } ASSERT(coll); - OpDebug* const nullOpDebug = nullptr; - ASSERT_OK( - coll->insertDocument(_txn.get(), BSON("_id" << 1 << "v" << 2), nullOpDebug, false)); - ASSERT_OK( - coll->insertDocument(_txn.get(), BSON("_id" << 2 << "v" << 4), nullOpDebug, false)); - ASSERT_OK(coll->insertDocument(_txn.get(), BSON("_id" << 4), nullOpDebug, false)); + ASSERT_OK(coll->insertDocument(_txn.get(), BSON("_id" << 1 << "v" << 2), false)); + ASSERT_OK(coll->insertDocument(_txn.get(), BSON("_id" << 2 << "v" << 4), false)); + ASSERT_OK(coll->insertDocument(_txn.get(), BSON("_id" << 4), false)); wuow.commit(); } const auto commonOperation = diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp index d151976c6f8..0c8d5af2264 100644 --- a/src/mongo/db/repl/sync_tail.cpp +++ b/src/mongo/db/repl/sync_tail.cpp @@ -946,8 +946,7 @@ bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) { Collection* const coll = db->getOrCreateCollection(txn, nss.toString()); invariant(coll); - OpDebug* const nullOpDebug = nullptr; - Status status = coll->insertDocument(txn, missingObj, nullOpDebug, true); + Status status = coll->insertDocument(txn, missingObj, true); uassert(15917, str::stream() << "failed to insert missing doc: " << status.toString(), status.isOK()); diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp index 3ff3ae5ce96..084761c01ea 100644 --- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp +++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp @@ -111,13 +111,13 @@ public: return StatusWith<RecordId>(RecordId(6, 4)); } - virtual Status updateRecord(OperationContext* txn, - const RecordId& oldLocation, - const char* data, - int len, - bool enforceQuota, - UpdateNotifier* 
notifier) { - return Status::OK(); + virtual StatusWith<RecordId> updateRecord(OperationContext* txn, + const RecordId& oldLocation, + const char* data, + int len, + bool enforceQuota, + UpdateNotifier* notifier) { + return StatusWith<RecordId>(oldLocation); } virtual bool updateWithDamagesSupported() const { diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp index 7fa168612b0..6f1c48663dd 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp @@ -430,12 +430,12 @@ StatusWith<RecordId> EphemeralForTestRecordStore::insertRecord(OperationContext* return StatusWith<RecordId>(loc); } -Status EphemeralForTestRecordStore::updateRecord(OperationContext* txn, - const RecordId& loc, - const char* data, - int len, - bool enforceQuota, - UpdateNotifier* notifier) { +StatusWith<RecordId> EphemeralForTestRecordStore::updateRecord(OperationContext* txn, + const RecordId& loc, + const char* data, + int len, + bool enforceQuota, + UpdateNotifier* notifier) { EphemeralForTestRecord* oldRecord = recordFor(loc); int oldLen = oldRecord->size; @@ -447,7 +447,7 @@ Status EphemeralForTestRecordStore::updateRecord(OperationContext* txn, // doc-locking), and therefore must notify that it is updating a document. Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(txn, loc); if (!callbackStatus.isOK()) { - return callbackStatus; + return StatusWith<RecordId>(callbackStatus); } } @@ -460,7 +460,7 @@ Status EphemeralForTestRecordStore::updateRecord(OperationContext* txn, cappedDeleteAsNeeded(txn); - return Status::OK(); + return StatusWith<RecordId>(loc); } bool EphemeralForTestRecordStore::updateWithDamagesSupported() const { diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h index 8f83cfe9dfd..324a30653eb 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h +++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h @@ -69,12 +69,12 @@ public: const DocWriter* doc, bool enforceQuota); - virtual Status updateRecord(OperationContext* txn, - const RecordId& oldLocation, - const char* data, - int len, - bool enforceQuota, - UpdateNotifier* notifier); + virtual StatusWith<RecordId> updateRecord(OperationContext* txn, + const RecordId& oldLocation, + const char* data, + int len, + bool enforceQuota, + UpdateNotifier* notifier); virtual bool updateWithDamagesSupported() const; diff --git a/src/mongo/db/storage/kv/kv_catalog.cpp b/src/mongo/db/storage/kv/kv_catalog.cpp index bff423434b4..6a6d1a8e437 100644 --- a/src/mongo/db/storage/kv/kv_catalog.cpp +++ b/src/mongo/db/storage/kv/kv_catalog.cpp @@ -290,8 +290,10 @@ void KVCatalog::putMetaData(OperationContext* opCtx, } LOG(3) << "recording new metadata: " << obj; - Status status = _rs->updateRecord(opCtx, loc, obj.objdata(), obj.objsize(), false, NULL); - fassert(28521, status.isOK()); + StatusWith<RecordId> status = + _rs->updateRecord(opCtx, loc, obj.objdata(), obj.objsize(), false, NULL); + fassert(28521, status.getStatus()); + invariant(status.getValue() == loc); } Status KVCatalog::renameCollection(OperationContext* opCtx, @@ -320,8 +322,10 @@ Status KVCatalog::renameCollection(OperationContext* opCtx, b.appendElementsUnique(old); BSONObj obj = b.obj(); - Status 
status = _rs->updateRecord(opCtx, loc, obj.objdata(), obj.objsize(), false, NULL); - fassert(28522, status.isOK()); + StatusWith<RecordId> status = + _rs->updateRecord(opCtx, loc, obj.objdata(), obj.objsize(), false, NULL); + fassert(28522, status.getStatus()); + invariant(status.getValue() == loc); } stdx::lock_guard<stdx::mutex> lk(_identsLock); diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp index 20d5e99f89c..278474cabb0 100644 --- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp +++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp @@ -358,30 +358,12 @@ void NamespaceDetailsCollectionCatalogEntry::_updateSystemNamespaces(OperationCo if (!_namespacesRecordStore) return; - RecordId currentRecordId = _namespacesRecordId; - - RecordData entry = _namespacesRecordStore->dataFor(txn, currentRecordId); + RecordData entry = _namespacesRecordStore->dataFor(txn, _namespacesRecordId); const BSONObj newEntry = applyUpdateOperators(entry.releaseToBson(), update); - - Status result = _namespacesRecordStore->updateRecord( - txn, currentRecordId, newEntry.objdata(), newEntry.objsize(), false, NULL); - - if (ErrorCodes::NeedsDocumentMove == result) { - StatusWith<RecordId> newLocation = _namespacesRecordStore->insertRecord( - txn, newEntry.objdata(), newEntry.objsize(), false); - fassert(40051, newLocation.getStatus().isOK()); - currentRecordId = newLocation.getValue(); - setNamespacesRecordId(txn, currentRecordId); - - // Intentionally not deleting the old MMAPv1 record on move. The reasoning for this is: - // - It might be possible that there are other parts in the code base that reference this - // RecordId and removing could introduce an MMAPv1 bug. - // - It is not harmful leaving the old RecordId in place. On document move, the space - // allocated for the new document is double the old. This puts a practical limit on the - // potential number of old 'leaked' documents. 
- } else { - fassert(17486, result.isOK()); - } + StatusWith<RecordId> result = _namespacesRecordStore->updateRecord( + txn, _namespacesRecordId, newEntry.objdata(), newEntry.objsize(), false, NULL); + fassert(17486, result.getStatus()); + setNamespacesRecordId(txn, result.getValue()); } void NamespaceDetailsCollectionCatalogEntry::updateFlags(OperationContext* txn, int newValue) { diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h index 4e984a4469d..a496f1ff31e 100644 --- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h +++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h @@ -74,12 +74,12 @@ public: // ------------------------------ - virtual Status updateRecord(OperationContext* txn, - const RecordId& oldLocation, - const char* data, - int len, - bool enforceQuota, - UpdateNotifier* notifier) { + virtual StatusWith<RecordId> updateRecord(OperationContext* txn, + const RecordId& oldLocation, + const char* data, + int len, + bool enforceQuota, + UpdateNotifier* notifier) { invariant(false); } diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp index f5089ef787f..0105d10c4df 100644 --- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp +++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp @@ -367,30 +367,49 @@ StatusWith<RecordId> RecordStoreV1Base::_insertRecord(OperationContext* txn, return StatusWith<RecordId>(loc.getValue().toRecordId()); } -Status RecordStoreV1Base::updateRecord(OperationContext* txn, - const RecordId& oldLocation, - const char* data, - int dataSize, - bool enforceQuota, - UpdateNotifier* notifier) { +StatusWith<RecordId> RecordStoreV1Base::updateRecord(OperationContext* txn, + const RecordId& oldLocation, + const char* data, + int dataSize, + bool enforceQuota, + UpdateNotifier* notifier) { MmapV1RecordHeader* oldRecord = recordFor(DiskLoc::fromRecordId(oldLocation)); if (oldRecord->netLength() >= dataSize) { // Make sure to notify other queries before we do an in-place update. if (notifier) { Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(txn, oldLocation); if (!callbackStatus.isOK()) - return callbackStatus; + return StatusWith<RecordId>(callbackStatus); } // we fit memcpy(txn->recoveryUnit()->writingPtr(oldRecord->data(), dataSize), data, dataSize); - return Status::OK(); + return StatusWith<RecordId>(oldLocation); } // We enforce the restriction of unchanging capped doc sizes above the storage layer. 
invariant(!isCapped()); - return {ErrorCodes::NeedsDocumentMove, "Update requires document move"}; + // we have to move + if (dataSize + MmapV1RecordHeader::HeaderSize > MaxAllowedAllocation) { + return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be <= 16.5MB"); + } + + StatusWith<RecordId> newLocation = _insertRecord(txn, data, dataSize, enforceQuota); + if (!newLocation.isOK()) + return newLocation; + + // insert worked, so we delete old record + if (notifier) { + Status moveStatus = notifier->recordStoreGoingToMove( + txn, oldLocation, oldRecord->data(), oldRecord->netLength()); + if (!moveStatus.isOK()) + return StatusWith<RecordId>(moveStatus); + } + + deleteRecord(txn, oldLocation); + + return newLocation; } bool RecordStoreV1Base::updateWithDamagesSupported() const { diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h index 4520256f901..d34c5a9b3f0 100644 --- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h +++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h @@ -199,12 +199,12 @@ public: const DocWriter* doc, bool enforceQuota); - virtual Status updateRecord(OperationContext* txn, - const RecordId& oldLocation, - const char* data, - int len, - bool enforceQuota, - UpdateNotifier* notifier); + virtual StatusWith<RecordId> updateRecord(OperationContext* txn, + const RecordId& oldLocation, + const char* data, + int len, + bool enforceQuota, + UpdateNotifier* notifier); virtual bool updateWithDamagesSupported() const; diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h index 64f93e50c62..2beb8368bf4 100644 --- a/src/mongo/db/storage/record_store.h +++ b/src/mongo/db/storage/record_store.h @@ -76,6 +76,10 @@ public: class UpdateNotifier { public: virtual ~UpdateNotifier() {} + virtual Status recordStoreGoingToMove(OperationContext* txn, + const RecordId& oldLocation, + const char* oldBuffer, + size_t oldSize) = 0; virtual Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) = 0; }; @@ -382,23 +386,22 @@ public: } /** - * @param notifier - Only used by record stores which do not support doc-locking. Called only - * in the case of an in-place update. Called just before the in-place write - * occurs. - * @return Status - If a document move is required (MMAPv1 only) then a status of - * ErrorCodes::NeedsDocumentMove will be returned. On receipt of this status - * no update will be performed. It is the caller's responsibility to: - * 1. Remove the existing document and associated index keys. - * 2. Insert a new document and index keys. + * @param notifier - Only used by record stores which do not support doc-locking. + * In the case of a document move, this is called after the document + * has been written to the new location, but before it is deleted from + * the old location. + * In the case of an in-place update, this is called just before the + * in-place write occurs. + * @return Status or RecordId, RecordId might be different * * For capped record stores, the record size will never change. 
diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp
index 8fc234ab9cd..95fd4c993b0 100644
--- a/src/mongo/db/storage/record_store_test_harness.cpp
+++ b/src/mongo/db/storage/record_store_test_harness.cpp
@@ -257,23 +257,10 @@ TEST(RecordStoreTestHarness, Update1) {
         unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
         {
             WriteUnitOfWork uow(opCtx.get());
-            Status status =
+            StatusWith<RecordId> res =
                 rs->updateRecord(opCtx.get(), loc, s2.c_str(), s2.size() + 1, false, NULL);
-
-            if (ErrorCodes::NeedsDocumentMove == status) {
-                // NeedsDocumentMove should only be possible under MMAPv1. We don't have the means
-                // to check storageEngine here but asserting 'supportsDocLocking()' is false
-                // provides an equivalent check as only MMAPv1 will/should return false.
-                ASSERT_FALSE(harnessHelper->supportsDocLocking());
-                StatusWith<RecordId> newLocation =
-                    rs->insertRecord(opCtx.get(), s2.c_str(), s2.size() + 1, false);
-                ASSERT_OK(newLocation.getStatus());
-                rs->deleteRecord(opCtx.get(), loc);
-                loc = newLocation.getValue();
-            } else {
-                ASSERT_OK(status);
-            }
-
+            ASSERT_OK(res.getStatus());
+            loc = res.getValue();
             uow.commit();
         }
     }
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.cpp b/src/mongo/db/storage/record_store_test_updaterecord.cpp
index cd27acf9c69..0d7c9433503 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.cpp
+++ b/src/mongo/db/storage/record_store_test_updaterecord.cpp
@@ -77,19 +77,10 @@ TEST(RecordStoreTestHarness, UpdateRecord) {
         unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
         {
             WriteUnitOfWork uow(opCtx.get());
-            Status res =
+            StatusWith<RecordId> res =
                 rs->updateRecord(opCtx.get(), loc, data.c_str(), data.size() + 1, false, NULL);
-
-            if (ErrorCodes::NeedsDocumentMove == res) {
-                StatusWith<RecordId> newLocation =
-                    rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
-                ASSERT_OK(newLocation.getStatus());
-                rs->deleteRecord(opCtx.get(), loc);
-                loc = newLocation.getValue();
-            } else {
-                ASSERT_OK(res);
-            }
-
+            ASSERT_OK(res.getStatus());
+            loc = res.getValue();
             uow.commit();
         }
     }
@@ -145,19 +136,10 @@ TEST(RecordStoreTestHarness, UpdateMultipleRecords) {
             string data = ss.str();
 
             WriteUnitOfWork uow(opCtx.get());
-            Status res =
+            StatusWith<RecordId> res =
                 rs->updateRecord(opCtx.get(), locs[i], data.c_str(), data.size() + 1, false, NULL);
-
-            if (ErrorCodes::NeedsDocumentMove == res) {
-                StatusWith<RecordId> newLocation =
-                    rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
-                ASSERT_OK(newLocation.getStatus());
-                rs->deleteRecord(opCtx.get(), locs[i]);
-                locs[i] = newLocation.getValue();
-            } else {
-                ASSERT_OK(res);
-            }
-
+            ASSERT_OK(res.getStatus());
+            locs[i] = res.getValue();
             uow.commit();
         }
     }
@@ -212,21 +194,21 @@ TEST(RecordStoreTestHarness, UpdateRecordWithMoveNotifier) {
             UpdateNotifierSpy umn(opCtx.get(), loc, oldData.c_str(), oldData.size());
 
             WriteUnitOfWork uow(opCtx.get());
-            Status res = rs->updateRecord(
+            StatusWith<RecordId> res = rs->updateRecord(
                 opCtx.get(), loc, newData.c_str(), newData.size() + 1, false, &umn);
-
-            if (ErrorCodes::NeedsDocumentMove == res) {
-                StatusWith<RecordId> newLocation =
-                    rs->insertRecord(opCtx.get(), newData.c_str(), newData.size() + 1, false);
-                ASSERT_OK(newLocation.getStatus());
-                rs->deleteRecord(opCtx.get(), loc);
-                loc = newLocation.getValue();
-                ASSERT_EQUALS(0, umn.numInPlaceCallbacks());
-            } else {
-                ASSERT_OK(res);
+            ASSERT_OK(res.getStatus());
+            // UpdateNotifier::recordStoreGoingToMove() called only if
+            // the RecordId for the record changes
+            if (loc == res.getValue()) {
+                ASSERT_EQUALS(0, umn.numMoveCallbacks());
+                // Only MMAP v1 is required to use the UpdateNotifier for in-place updates,
+                // so the number of callbacks is expected to be 0 for non-MMAP storage engines.
                 ASSERT_GTE(1, umn.numInPlaceCallbacks());
+            } else {
+                ASSERT_EQUALS(1, umn.numMoveCallbacks());
+                ASSERT_EQUALS(0, umn.numInPlaceCallbacks());
             }
-
+            loc = res.getValue();
             uow.commit();
         }
     }
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.h b/src/mongo/db/storage/record_store_test_updaterecord.h
index be52887cf2b..f82feb6b592 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.h
+++ b/src/mongo/db/storage/record_store_test_updaterecord.h
@@ -41,10 +41,21 @@ namespace {
 class UpdateNotifierSpy : public UpdateNotifier {
 public:
     UpdateNotifierSpy(OperationContext* txn, const RecordId& loc, const char* buf, size_t size)
-        : _txn(txn), _loc(loc), _data(buf, size), nInPlaceCalls(0) {}
+        : _txn(txn), _loc(loc), _data(buf, size), nMoveCalls(0), nInPlaceCalls(0) {}
 
     ~UpdateNotifierSpy() {}
 
+    Status recordStoreGoingToMove(OperationContext* txn,
+                                  const RecordId& oldLocation,
+                                  const char* oldBuffer,
+                                  size_t oldSize) {
+        nMoveCalls++;
+        ASSERT_EQUALS(_txn, txn);
+        ASSERT_EQUALS(_loc, oldLocation);
+        ASSERT_EQUALS(_data, oldBuffer);
+        return Status::OK();
+    }
+
     Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) {
         nInPlaceCalls++;
         ASSERT_EQUALS(_txn, txn);
@@ -52,6 +63,10 @@ public:
         return Status::OK();
     }
 
+    int numMoveCallbacks() const {
+        return nMoveCalls;
+    }
+
     int numInPlaceCallbacks() const {
         return nInPlaceCalls;
     }
@@ -62,6 +77,7 @@ private:
     std::string _data;
 
     // To verify the number of callbacks to the notifier.
+    int nMoveCalls;
    int nInPlaceCalls;
 };
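UpdateNotifierSpy above only counts the two callbacks; the ordering it verifies is easiest to see in a toy store that drives a notifier-shaped interface itself. In the standalone sketch below (simplified stand-ins, not the real RecordStore API), the move callback fires after the new copy is written but before the old one is deleted, and the in-place callback fires just before the overwrite, matching the doc-comment restored in record_store.h.

#include <cassert>
#include <map>
#include <string>

// Simplified stand-in for mongo's UpdateNotifier, counting calls like the spy.
struct Notifier {
    int moveCalls = 0;
    int inPlaceCalls = 0;
    void goingToMove() { ++moveCalls; }
    void goingToUpdateInPlace() { ++inPlaceCalls; }
};

class ToyStore {
public:
    int insert(const std::string& v) {
        _data[_next] = v;
        return _next++;
    }

    // Mirrors the documented ordering: in-place -> notify, then write;
    // move -> insert new copy, notify, then delete the old copy.
    int update(int id, const std::string& v, Notifier* n) {
        if (_data[id].size() >= v.size()) {
            if (n) n->goingToUpdateInPlace();  // just before the overwrite
            _data[id] = v;
            return id;
        }
        int newId = insert(v);  // new copy written first
        if (n) n->goingToMove();  // old copy is still readable here
        _data.erase(id);          // old copy removed last
        return newId;
    }

private:
    std::map<int, std::string> _data;
    int _next = 0;
};

int main() {
    ToyStore store;
    Notifier spy;

    int loc = store.insert("abcdef");
    int res = store.update(loc, "abc", &spy);  // fits in place
    assert(res == loc && spy.inPlaceCalls == 1 && spy.moveCalls == 0);

    res = store.update(loc, "abcdefghij", &spy);  // forces a move
    assert(res != loc && spy.moveCalls == 1);
    return 0;
}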
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 636f2d86ffd..f2ebc977f34 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -1367,12 +1367,12 @@ StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* txn,
     return insertRecord(txn, buf.get(), len, enforceQuota);
 }
 
-Status WiredTigerRecordStore::updateRecord(OperationContext* txn,
-                                           const RecordId& id,
-                                           const char* data,
-                                           int len,
-                                           bool enforceQuota,
-                                           UpdateNotifier* notifier) {
+StatusWith<RecordId> WiredTigerRecordStore::updateRecord(OperationContext* txn,
+                                                         const RecordId& id,
+                                                         const char* data,
+                                                         int len,
+                                                         bool enforceQuota,
+                                                         UpdateNotifier* notifier) {
     WiredTigerCursor curwrap(_uri, _tableId, true, txn);
     curwrap.assertInActiveTxn();
     WT_CURSOR* c = curwrap.get();
@@ -1402,7 +1402,7 @@ Status WiredTigerRecordStore::updateRecord(OperationContext* txn,
         cappedDeleteAsNeeded(txn, id);
     }
 
-    return Status::OK();
+    return StatusWith<RecordId>(id);
 }
 
 bool WiredTigerRecordStore::updateWithDamagesSupported() const {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 9e7cc01f276..3335e774c4c 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -131,12 +131,12 @@ public:
                                               const DocWriter* doc,
                                               bool enforceQuota);
 
-    virtual Status updateRecord(OperationContext* txn,
-                                const RecordId& oldLocation,
-                                const char* data,
-                                int len,
-                                bool enforceQuota,
-                                UpdateNotifier* notifier);
+    virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
+                                              const RecordId& oldLocation,
+                                              const char* data,
+                                              int len,
+                                              bool enforceQuota,
+                                              UpdateNotifier* notifier);
 
     virtual bool updateWithDamagesSupported() const;
 
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index e09ccdf3e65..7dcc4033f70 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -239,8 +239,8 @@ TEST(WiredTigerRecordStoreTest, Isolation1) {
             rs->dataFor(t1.get(), id1);
             rs->dataFor(t2.get(), id1);
 
-            ASSERT_OK(rs->updateRecord(t1.get(), id1, "b", 2, false, NULL));
-            ASSERT_OK(rs->updateRecord(t1.get(), id2, "B", 2, false, NULL));
+            ASSERT_OK(rs->updateRecord(t1.get(), id1, "b", 2, false, NULL).getStatus());
+            ASSERT_OK(rs->updateRecord(t1.get(), id2, "B", 2, false, NULL).getStatus());
 
             try {
                 // this should fail
@@ -289,7 +289,7 @@ TEST(WiredTigerRecordStoreTest, Isolation2) {
 
         {
             WriteUnitOfWork w(t1.get());
-            ASSERT_OK(rs->updateRecord(t1.get(), id1, "b", 2, false, NULL));
+            ASSERT_OK(rs->updateRecord(t1.get(), id1, "b", 2, false, NULL).getStatus());
             w.commit();
         }
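The WiredTiger hunks return StatusWith<RecordId>(id) unconditionally because a doc-locking engine overwrites the value stored under an unchanging key, so an update can never relocate a record. A few standalone lines, with an ordinary hash map standing in for the WT table (all names illustrative), make the invariant concrete:

#include <cassert>
#include <string>
#include <unordered_map>

// The RecordId is the key; updating the value cannot change it, which is
// why a doc-locking store always hands back the id it was given.
class KeyValueStore {
public:
    void insert(long id, const std::string& v) { _table[id] = v; }

    long update(long id, const std::string& v) {
        _table[id] = v;  // overwrite under the same key, whatever the size
        return id;       // the "new" location is always the old one
    }

private:
    std::unordered_map<long, std::string> _table;
};

int main() {
    KeyValueStore kv;
    kv.insert(42, "short");
    assert(kv.update(42, std::string(1000, 'x')) == 42);  // never moves
    return 0;
}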
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 2e28621b833..f6f0f48c858 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -90,7 +90,6 @@ protected:
     void insert(const char* s) {
         WriteUnitOfWork wunit(&_txn);
         const BSONObj o = fromjson(s);
-        OpDebug* const nullOpDebug = nullptr;
 
         if (o["_id"].eoo()) {
             BSONObjBuilder b;
@@ -98,9 +97,9 @@ protected:
             oid.init();
             b.appendOID("_id", &oid);
             b.appendElements(o);
-            _collection->insertDocument(&_txn, b.obj(), nullOpDebug, false);
+            _collection->insertDocument(&_txn, b.obj(), false);
         } else {
-            _collection->insertDocument(&_txn, o, nullOpDebug, false);
+            _collection->insertDocument(&_txn, o, false);
         }
 
         wunit.commit();
     }
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 3b3c3531ec9..e81c45bdd94 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -351,16 +351,13 @@ public:
             db->dropCollection(&_txn, _ns);
             coll = db->createCollection(&_txn, _ns);
 
-            OpDebug* const nullOpDebug = nullptr;
             coll->insertDocument(&_txn,
                                  BSON("_id" << 1 << "a"
                                             << "dup"),
-                                 nullOpDebug,
                                  true);
             coll->insertDocument(&_txn,
                                  BSON("_id" << 2 << "a"
                                             << "dup"),
-                                 nullOpDebug,
                                  true);
             wunit.commit();
         }
@@ -397,16 +394,13 @@ public:
             db->dropCollection(&_txn, _ns);
             coll = db->createCollection(&_txn, _ns);
 
-            OpDebug* const nullOpDebug = nullptr;
             coll->insertDocument(&_txn,
                                  BSON("_id" << 1 << "a"
                                             << "dup"),
-                                 nullOpDebug,
                                  true);
             coll->insertDocument(&_txn,
                                  BSON("_id" << 2 << "a"
                                             << "dup"),
-                                 nullOpDebug,
                                  true);
             wunit.commit();
         }
@@ -442,16 +436,13 @@ public:
             db->dropCollection(&_txn, _ns);
             coll = db->createCollection(&_txn, _ns);
 
-            OpDebug* const nullOpDebug = nullptr;
             ASSERT_OK(coll->insertDocument(&_txn,
                                            BSON("_id" << 1 << "a"
                                                       << "dup"),
-                                           nullOpDebug,
                                            true));
             ASSERT_OK(coll->insertDocument(&_txn,
                                            BSON("_id" << 2 << "a"
                                                       << "dup"),
-                                           nullOpDebug,
                                            true));
             wunit.commit();
         }
@@ -497,9 +488,8 @@ public:
             coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
             // Insert some documents with enforceQuota=true.
             int32_t nDocs = 1000;
-            OpDebug* const nullOpDebug = nullptr;
             for (int32_t i = 0; i < nDocs; ++i) {
-                coll->insertDocument(&_txn, BSON("a" << i), nullOpDebug, true);
+                coll->insertDocument(&_txn, BSON("a" << i), true);
             }
             wunit.commit();
         }
@@ -530,9 +520,8 @@ public:
             coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
             // Insert some documents.
             int32_t nDocs = 1000;
-            OpDebug* const nullOpDebug = nullptr;
             for (int32_t i = 0; i < nDocs; ++i) {
-                coll->insertDocument(&_txn, BSON("a" << i), nullOpDebug, true);
+                coll->insertDocument(&_txn, BSON("a" << i), true);
             }
             wunit.commit();
         }
@@ -566,9 +555,8 @@ public:
             coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
             // Insert some documents.
             int32_t nDocs = 1000;
-            OpDebug* const nullOpDebug = nullptr;
             for (int32_t i = 0; i < nDocs; ++i) {
-                coll->insertDocument(&_txn, BSON("_id" << i), nullOpDebug, true);
+                coll->insertDocument(&_txn, BSON("_id" << i), true);
             }
             wunit.commit();
         }
@@ -602,9 +590,8 @@ public:
            coll->getIndexCatalog()->dropAllIndexes(&_txn, true);
             // Insert some documents.
             int32_t nDocs = 1000;
-            OpDebug* const nullOpDebug = nullptr;
             for (int32_t i = 0; i < nDocs; ++i) {
-                coll->insertDocument(&_txn, BSON("_id" << i), nullOpDebug, true);
+                coll->insertDocument(&_txn, BSON("_id" << i), true);
             }
             wunit.commit();
         }
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index 311e5a5e20d..943f0a124fd 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -76,14 +76,13 @@ public:
         BSONObj x = BSON("x" << 1);
         ASSERT(x["_id"].type() == 0);
         Collection* collection = _context.db()->getOrCreateCollection(&_txn, ns());
-        OpDebug* const nullOpDebug = nullptr;
-        ASSERT(!collection->insertDocument(&_txn, x, nullOpDebug, true).isOK());
+        ASSERT(!collection->insertDocument(&_txn, x, true).isOK());
 
         StatusWith<BSONObj> fixed = fixDocumentForInsert(x);
         ASSERT(fixed.isOK());
         x = fixed.getValue();
         ASSERT(x["_id"].type() == jstOID);
-        ASSERT(collection->insertDocument(&_txn, x, nullOpDebug, true).isOK());
+        ASSERT(collection->insertDocument(&_txn, x, true).isOK());
         wunit.commit();
     }
 };
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 865ee18dacc..be91a55fdf1 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -93,8 +93,7 @@ public:
 
         WriteUnitOfWork wuow(&_txn);
         const bool enforceQuota = false;
-        OpDebug* const nullOpDebug = nullptr;
-        ASSERT_OK(collection->insertDocument(&_txn, obj, nullOpDebug, enforceQuota));
+        ASSERT_OK(collection->insertDocument(&_txn, obj, enforceQuota));
         wuow.commit();
     }
 
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index f325004216f..de3d484dbaa 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -105,15 +105,13 @@ public:
 
     void insert(const BSONObj& doc) {
         WriteUnitOfWork wunit(&_txn);
-        OpDebug* const nullOpDebug = nullptr;
-        _coll->insertDocument(&_txn, doc, nullOpDebug, false);
+        _coll->insertDocument(&_txn, doc, false);
         wunit.commit();
     }
 
     void remove(const RecordId& recordId) {
         WriteUnitOfWork wunit(&_txn);
-        OpDebug* const nullOpDebug = nullptr;
-        _coll->deleteDocument(&_txn, recordId, nullOpDebug);
+        _coll->deleteDocument(&_txn, recordId);
         wunit.commit();
     }
 
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index 8b619d9dfce..12e8f33fb3c 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -64,8 +64,7 @@ public:
 
     void insert(const BSONObj& doc) {
         WriteUnitOfWork wunit(&_txn);
-        OpDebug* const nullOpDebug = nullptr;
-        ASSERT_OK(_coll->insertDocument(&_txn, doc, nullOpDebug, false));
+        ASSERT_OK(_coll->insertDocument(&_txn, doc, false));
         wunit.commit();
     }
 
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index fabf10ba05b..676da68abcd 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -459,11 +459,10 @@ public:
         // We should have read in the first 'firstRead' recordIds. Invalidate the first.
         exec->saveState();
-        OpDebug* const nullOpDebug = nullptr;
         set<RecordId>::iterator it = recordIds.begin();
         {
             WriteUnitOfWork wuow(&_txn);
-            coll->deleteDocument(&_txn, *it++, nullOpDebug);
+            coll->deleteDocument(&_txn, *it++);
             wuow.commit();
         }
         exec->restoreState();
@@ -479,7 +478,7 @@ public:
         while (it != recordIds.end()) {
             {
                 WriteUnitOfWork wuow(&_txn);
-                coll->deleteDocument(&_txn, *it++, nullOpDebug);
+                coll->deleteDocument(&_txn, *it++);
                 wuow.commit();
             }
         }
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 8cf6d28d1a5..49c11d6dd44 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -99,16 +99,15 @@ protected:
     void insert(const BSONObj& o) {
         WriteUnitOfWork wunit(&_txn);
-        OpDebug* const nullOpDebug = nullptr;
 
         if (o["_id"].eoo()) {
             BSONObjBuilder b;
             OID oid;
             oid.init();
             b.appendOID("_id", &oid);
             b.appendElements(o);
-            _collection->insertDocument(&_txn, b.obj(), nullOpDebug, false);
+            _collection->insertDocument(&_txn, b.obj(), false);
         } else {
-            _collection->insertDocument(&_txn, o, nullOpDebug, false);
+            _collection->insertDocument(&_txn, o, false);
         }
 
         wunit.commit();
     }
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index ec092669747..0becd7c9fba 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -249,10 +249,9 @@ protected:
             coll = db->createCollection(&_txn, ns());
         }
 
-        OpDebug* const nullOpDebug = nullptr;
         if (o.hasField("_id")) {
             _txn.setReplicatedWrites(false);
-            coll->insertDocument(&_txn, o, nullOpDebug, true);
+            coll->insertDocument(&_txn, o, true);
             _txn.setReplicatedWrites(true);
             wunit.commit();
             return;
@@ -264,7 +263,7 @@ protected:
         b.appendOID("_id", &id);
         b.appendElements(o);
         _txn.setReplicatedWrites(false);
-        coll->insertDocument(&_txn, b.obj(), nullOpDebug, true);
+        coll->insertDocument(&_txn, b.obj(), true);
         _txn.setReplicatedWrites(true);
         wunit.commit();
     }
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index f437cb68c64..41405295e14 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -88,8 +88,7 @@ Status truncateCollection(OperationContext* txn, const NamespaceString& nss) {
 void insertRecord(OperationContext* txn, const NamespaceString& nss, const BSONObj& data) {
     Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
-    OpDebug* const nullOpDebug = nullptr;
-    ASSERT_OK(coll->insertDocument(txn, data, nullOpDebug, false));
+    ASSERT_OK(coll->insertDocument(txn, data, false));
 }
 
 void assertOnlyRecord(OperationContext* txn, const NamespaceString& nss, const BSONObj& data) {
     Collection* coll = dbHolder().get(txn, nss.db())->getCollection(nss.ns());
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 51f540aa41c..fe82b1c5589 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -106,14 +106,13 @@ public:
     Collection* coll;
     RecordId id1;
     {
-        OpDebug* const nullOpDebug = nullptr;
         WriteUnitOfWork wunit(&_txn);
         ASSERT_OK(db->dropCollection(&_txn, _ns));
         coll = db->createCollection(&_txn, _ns);
 
-        ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1), nullOpDebug, true));
+        ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1), true));
         id1 = coll->getCursor(&_txn)->next()->id;
-        ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2), nullOpDebug, true));
+        ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2), true));
         wunit.commit();
     }
@@ -156,13 +155,12 @@ public:
     Collection* coll;
     RecordId id1;
     {
-        OpDebug* const nullOpDebug = nullptr;
         WriteUnitOfWork wunit(&_txn);
         ASSERT_OK(db->dropCollection(&_txn, _ns));
         coll = db->createCollection(&_txn, _ns);
 
-        ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), nullOpDebug, true));
+        ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 1 << "a" << 1), true));
         id1 = coll->getCursor(&_txn)->next()->id;
-        ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), nullOpDebug, true));
+        ASSERT_OK(coll->insertDocument(&_txn, BSON("_id" << 2 << "a" << 2), true));
         wunit.commit();
     }