author | Hari Khalsa <hkhalsa@10gen.com> | 2014-05-15 15:07:14 -0400
---|---|---
committer | Hari Khalsa <hkhalsa@10gen.com> | 2014-05-15 15:51:32 -0400
commit | a26f0197137e43e130ee7a8c5e42399f6ccd2005 (patch) |
tree | 03b1b1b2b865e7a654d9c7bdf973df56fc8df150 |
parent | 9628f0418502a97aff046e9ae28bebdab4d6b9cd (diff) |
download | mongo-a26f0197137e43e130ee7a8c5e42399f6ccd2005.tar.gz |
SERVER-13641 remove recovery unit passthroughs from operationcontext
29 files changed, 262 insertions, 294 deletions
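The commit is a mechanical caller migration: OperationContext used to forward durability calls to its RecoveryUnit through the passthrough methods deleted in operation_context.h below, and every call site now spells out the extra recoveryUnit() hop. A minimal sketch of the pattern, with simplified interfaces — the writing()-over-writingPtr() implementation shown is an assumption for illustration, not copied from the tree:

```cpp
// Sketch of the passthrough removal, assuming simplified interfaces.
#include <cstddef>

class RecoveryUnit {
public:
    virtual ~RecoveryUnit() {}

    // Declare intent to write 'len' bytes at 'data'; returns a writable pointer.
    virtual void* writingPtr(void* data, size_t len) = 0;
    virtual bool commitIfNeeded(bool force = false) = 0;

    // Typed convenience wrapper (assumed implementation, for illustration).
    template <typename T>
    T* writing(T* x) {
        return static_cast<T*>(writingPtr(x, sizeof(T)));
    }
};

class OperationContext {
public:
    virtual ~OperationContext() {}
    virtual RecoveryUnit* recoveryUnit() const = 0;

    // Deleted by this commit -- callers previously wrote txn->writing(x),
    // which simply forwarded to the recovery unit:
    //
    //     template <typename T>
    //     T* writing(T* x) { return recoveryUnit()->writing(x); }
    //
    //     bool commitIfNeeded(bool force = false) {
    //         return recoveryUnit()->commitIfNeeded(force);
    //     }
};

// The call-site rewrite applied across all 29 files:
//
//     *txn->writing(&e->myLoc) = loc;                  // before
//     *txn->recoveryUnit()->writing(&e->myLoc) = loc;  // after
```

Making the hop explicit keeps OperationContext a thin locator object and confines the MMAPv1 durability vocabulary (writing, writingPtr, commitIfNeeded, and friends) to RecoveryUnit, where pluggable storage engines can override it.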
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 245d34fa151..d2f4f5f7190 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -426,7 +426,7 @@ namespace mongo {
             // update in place
             int sz = objNew.objsize();
-            memcpy(txn->writingPtr(oldRecord->data(), sz), objNew.objdata(), sz);
+            memcpy(txn->recoveryUnit()->writingPtr(oldRecord->data(), sz), objNew.objdata(), sz);
             return StatusWith<DiskLoc>( oldLocation );
         }
@@ -449,7 +449,7 @@ namespace mongo {
         const mutablebson::DamageVector::const_iterator end = damages.end();
         for( ; where != end; ++where ) {
             const char* sourcePtr = damangeSource + where->sourceOffset;
-            void* targetPtr = txn->writingPtr(root + where->targetOffset, where->size);
+            void* targetPtr = txn->recoveryUnit()->writingPtr(root + where->targetOffset, where->size);
             std::memcpy(targetPtr, sourcePtr, where->size);
         }
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 8e1c28b7d82..37d82ac8550 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -553,7 +553,7 @@ namespace mongo {
                 systemIndexCollection, indexName, false);
             IndexDetails& indexDetails = details->idx(indexI);
-            *txn->writing(&indexDetails.info) = newIndexSpecLoc.getValue(); // XXX: dur
+            *txn->recoveryUnit()->writing(&indexDetails.info) = newIndexSpecLoc.getValue(); // XXX: dur
         }

         {
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 9453f0351a6..140b98f5bc4 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -283,7 +283,7 @@ namespace mongo {
             warning() << "Internal error while reading system.indexes collection";
         }

-        txn->writingInt(dfh->versionMinor) = PDFILE_VERSION_MINOR_24_AND_NEWER;
+        txn->recoveryUnit()->writingInt(dfh->versionMinor) = PDFILE_VERSION_MINOR_24_AND_NEWER;

         return Status::OK();
     }
@@ -444,8 +444,8 @@ namespace mongo {
             _collection->detailsWritable()->getNextIndexDetails( _txn, _collection );

         try {
-            *_txn->writing( &indexDetails.info ) = systemIndexesEntry.getValue();
-            *_txn->writing( &indexDetails.head ) = DiskLoc();
+            *_txn->recoveryUnit()->writing( &indexDetails.info ) = systemIndexesEntry.getValue();
+            *_txn->recoveryUnit()->writing( &indexDetails.head ) = DiskLoc();
         }
         catch ( DBException& e ) {
             log() << "got exception trying to assign loc to IndexDetails" << e;
@@ -455,7 +455,7 @@ namespace mongo {
         int before = _collection->detailsDeprecated()->_indexBuildsInProgress;
         try {
-            _txn->writingInt( _collection->detailsWritable()->_indexBuildsInProgress ) += 1;
+            _txn->recoveryUnit()->writingInt( _collection->detailsWritable()->_indexBuildsInProgress ) += 1;
         }
         catch ( DBException& e ) {
             log() << "got exception trying to incrementStats _indexBuildsInProgress: " << e;
@@ -559,8 +559,8 @@ namespace mongo {
             idxNo = nsd->getCompletedIndexCount();
         }

-        _txn->writingInt( nsd->_indexBuildsInProgress ) -= 1;
-        _txn->writingInt( nsd->_nIndexes ) += 1;
+        _txn->recoveryUnit()->writingInt( nsd->_indexBuildsInProgress ) -= 1;
+        _txn->recoveryUnit()->writingInt( nsd->_nIndexes ) += 1;

         _catalog->_collection->infoCache()->addedIndex();
@@ -980,13 +980,13 @@ namespace mongo {
             massert( 16631, "index does not have an 'expireAfterSeconds' field", false );
             break;
         case NumberInt:
-            *txn->writing(reinterpret_cast<int*>(nonConstPtr)) = newExpireSeconds;
+            *txn->recoveryUnit()->writing(reinterpret_cast<int*>(nonConstPtr)) = newExpireSeconds;
             break;
         case NumberDouble:
-            *txn->writing(reinterpret_cast<double*>(nonConstPtr)) = newExpireSeconds;
+            *txn->recoveryUnit()->writing(reinterpret_cast<double*>(nonConstPtr)) = newExpireSeconds;
             break;
         case NumberLong:
-            *txn->writing(reinterpret_cast<long long*>(nonConstPtr)) = newExpireSeconds;
+            *txn->recoveryUnit()->writing(reinterpret_cast<long long*>(nonConstPtr)) = newExpireSeconds;
             break;
         default:
             massert( 16632, "current 'expireAfterSeconds' is not a number", false );
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp
index ed84e9414fe..31436041f3d 100644
--- a/src/mongo/db/catalog/index_catalog_entry.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry.cpp
@@ -113,7 +113,7 @@ namespace mongo {
         NamespaceDetails* nsd = _collection->detailsWritable();
         int idxNo = _indexNo();
         IndexDetails& id = nsd->idx( idxNo );
-        *txn->writing(&id.head) = newHead;
+        *txn->recoveryUnit()->writing(&id.head) = newHead;
         _head = newHead;
     }
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 03b2efa4d4c..14df7c2f961 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -166,7 +166,7 @@ namespace mongo {
             if ( logForRepl )
                 logOp(txn, "i", to_collection, js);

-            txn->commitIfNeeded();
+            txn->recoveryUnit()->commitIfNeeded();

             RARELY if ( time( 0 ) - saveLast > 60 ) {
                 log() << numSeen << " objects cloned so far from collection " << from_collection;
@@ -252,7 +252,7 @@ namespace mongo {
                 if ( logForRepl )
                     logOp(txn, "i", to_collection, spec);

-                txn->commitIfNeeded();
+                txn->recoveryUnit()->commitIfNeeded();
             }
         }
@@ -328,7 +328,7 @@ namespace mongo {
             copy(txn, ctx.ctx(), temp.c_str(), temp.c_str(), true, logForRepl, false, true,
                 mayYield, mayBeInterrupted, BSON( "ns" << ns ));

-            txn->commitIfNeeded();
+            txn->recoveryUnit()->commitIfNeeded();
             return true;
         }
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 756569d806e..65cb87d165f 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -108,7 +108,7 @@ namespace mongo {
                 toCollection->insertDocument( txn, obj, true );
                 if ( logForReplication )
                     logOp( txn, "i", toNs.c_str(), obj );
-                txn->commitIfNeeded();
+                txn->recoveryUnit()->commitIfNeeded();
             }
         }
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 5ecd9dd52e9..5794d23d3ff 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -569,7 +569,7 @@ namespace mongo {
                     Lock::DBWrite lock( _config.outputOptions.finalNamespace );
                     BSONObj o = cursor->nextSafe();
                     Helpers::upsert( _txn, _config.outputOptions.finalNamespace , o );
-                    _txn->commitIfNeeded();
+                    _txn->recoveryUnit()->commitIfNeeded();
                     pm.hit();
                 }
                 _db.dropCollection( _config.tempNamespace );
@@ -611,7 +611,7 @@ namespace mongo {
                 else {
                     Helpers::upsert( _txn, _config.outputOptions.finalNamespace , temp );
                 }
-                _txn->commitIfNeeded();
+                _txn->recoveryUnit()->commitIfNeeded();
                 pm.hit();
             }
             pm.finished();
@@ -661,7 +661,7 @@ namespace mongo {
             coll->insertDocument( _txn, o, true );
             logOp( _txn, "i", _config.incLong.c_str(), o );

-            _txn->commitIfNeeded();
+            _txn->recoveryUnit()->commitIfNeeded();
         }

         State::State(OperationContext* txn, const Config& c) :
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index dd86b3eb476..8dfc371634e 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -1031,7 +1031,7 @@ namespace mongo {
         }
         else {
             logOp( txn, "i", insertNS.c_str(), docToInsert );
-            txn->commitIfNeeded();
+            txn->recoveryUnit()->commitIfNeeded();
             result->getStats().n = 1;
         }
     }
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 3e5aee43993..93f174b2bb5 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -311,7 +311,7 @@ namespace mongo {
                 }
             }

-            txn.writingInt(h->versionMinor) = 5;
+            txn.recoveryUnit()->writingInt(h->versionMinor) = 5;
             return;
         }
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index aff142187c6..a2fbdf8eaa1 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -840,7 +840,7 @@ namespace mongo {
         for (i=0; i<objs.size(); i++){
             try {
                 checkAndInsert(txn, ctx, ns, objs[i]);
-                txn->commitIfNeeded();
+                txn->recoveryUnit()->commitIfNeeded();
             } catch (const UserException&) {
                 if (!keepGoing || i == objs.size()-1){
                     globalOpCounters.incInsertInWriteLock(i);
@@ -977,7 +977,7 @@ namespace {
             verify( dbResponse.response );
             dbResponse.response->concat(); // can get rid of this if we make response handling smarter
             response = *dbResponse.response;
-            _txn->commitIfNeeded();
+            _txn->recoveryUnit()->commitIfNeeded();
             return true;
         }
@@ -987,7 +987,7 @@ namespace {
             lastError.startRequest( toSend, lastError._get() );
             DbResponse dbResponse;
             assembleResponse( _txn, toSend, dbResponse , _clientHost );
-            _txn->commitIfNeeded();
+            _txn->recoveryUnit()->commitIfNeeded();
         }

         auto_ptr<DBClientCursor> DBDirectClient::query(const string &ns, Query query, int nToReturn , int nToSkip ,
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index ac155fe0240..25da4ee3560 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -55,38 +55,6 @@ namespace mongo {
          */
        virtual RecoveryUnit* recoveryUnit() const = 0;

-        // XXX: migrate callers use the recoveryUnit() directly
-        template <typename T>
-        T* writing(T* x) {
-            return recoveryUnit()->writing(x);
-        }
-
-        int& writingInt(int& d) {
-            return recoveryUnit()->writingInt(d);
-        }
-
-        void syncDataAndTruncateJournal() {
-            recoveryUnit()->syncDataAndTruncateJournal();
-        }
-
-        void createdFile(const std::string& filename, unsigned long long len) {
-            recoveryUnit()->createdFile(filename, len);
-        }
-
-        void* writingPtr(void* data, size_t len) {
-            return recoveryUnit()->writingPtr(data, len);
-        }
-
-        bool isCommitNeeded() const {
-            return recoveryUnit()->isCommitNeeded();
-        }
-
-        bool commitIfNeeded(bool force = false) {
-            return recoveryUnit()->commitIfNeeded(force);
-        }
-        // XXX: migrate callers use the recoveryUnit() directly
-
         // --- operation level info? ---

         /**
diff --git a/src/mongo/db/ops/delete_executor.cpp b/src/mongo/db/ops/delete_executor.cpp
index 9c14c772bdc..f0ea6e36a41 100644
--- a/src/mongo/db/ops/delete_executor.cpp
+++ b/src/mongo/db/ops/delete_executor.cpp
@@ -167,7 +167,7 @@ namespace mongo {
             }

             if (!_request->isGod()) {
-                txn->commitIfNeeded();
+                txn->recoveryUnit()->commitIfNeeded();
             }

             if (debug && _request->isGod() && nDeleted == 100) {
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 07e292f6320..ba375883e7b 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -681,7 +681,7 @@ namespace mongo {
             }

             // Opportunity for journaling to write during the update.
-            txn->commitIfNeeded();
+            txn->recoveryUnit()->commitIfNeeded();
         }

         // TODO: Can this be simplified?
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 3b9170b5c56..bb06045ed8c 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -174,7 +174,7 @@ namespace mongo {
         //
         // RWLockRecursive::Exclusive lk(MongoFile::mmmutex);

-        txn->syncDataAndTruncateJournal();
+        txn->recoveryUnit()->syncDataAndTruncateJournal();

         Database::closeDatabase( name, db->path() );
         db = 0; // d is now deleted
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index aa69da2b021..0e5087c0b4c 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -244,7 +244,7 @@ namespace mongo {
                     << "db: " << _dbName << " path: " << _pathString;

                 try {
-                    _txn->syncDataAndTruncateJournal();
+                    _txn->recoveryUnit()->syncDataAndTruncateJournal();
                     MongoFile::flushAll(true); // need both in case journaling is disabled
                     {
                         Client::Context tempContext( _dbName, _pathString );
@@ -283,7 +283,7 @@ namespace mongo {
         BackgroundOperation::assertNoBgOpInProgForDb(dbName);

-        txn->syncDataAndTruncateJournal(); // Must be done before and after repair
+        txn->recoveryUnit()->syncDataAndTruncateJournal(); // Must be done before and after repair

         intmax_t totalSize = dbSize( dbName );
         intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath);
@@ -407,7 +407,7 @@ namespace mongo {
                 if ( !result.isOK() )
                     return result.getStatus();

-                txn->commitIfNeeded();
+                txn->recoveryUnit()->commitIfNeeded();
                 txn->checkForInterrupt(false);
             }
@@ -420,7 +420,7 @@ namespace mongo {
         }

-        txn->syncDataAndTruncateJournal();
+        txn->recoveryUnit()->syncDataAndTruncateJournal();
         MongoFile::flushAll(true); // need both in case journaling is disabled

         txn->checkForInterrupt(false);
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 0fa386adb00..5cc61b2de08 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -487,7 +487,7 @@ namespace mongo {
                     continue;
                 }

-                txn.commitIfNeeded();
+                txn.recoveryUnit()->commitIfNeeded();

                 /* keep an archive of items rolled back */
                 shared_ptr<Helpers::RemoveSaver>& rs = removeSavers[d.ns];
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 6071905e8f1..96e2207b3c6 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -112,7 +112,7 @@ namespace replset {
         // to suppress errors when replaying oplog entries.
         bool ok = !applyOperation_inlock(&txn, ctx.db(), op, true, convertUpdateToUpsert);
         opsAppliedStats.increment();
-        txn.commitIfNeeded();
+        txn.recoveryUnit()->commitIfNeeded();

         return ok;
     }
diff --git a/src/mongo/db/storage/data_file.cpp b/src/mongo/db/storage/data_file.cpp
index b6466fd5b71..68897cca547 100644
--- a/src/mongo/db/storage/data_file.cpp
+++ b/src/mongo/db/storage/data_file.cpp
@@ -170,8 +170,8 @@ namespace mongo {
         int offset = header()->unused.getOfs();

         DataFileHeader *h = header();
-        *txn->writing(&h->unused) = DiskLoc( fileNo, offset + size );
-        txn->writingInt(h->unusedLength) = h->unusedLength - size;
+        *txn->recoveryUnit()->writing(&h->unused) = DiskLoc( fileNo, offset + size );
+        txn->recoveryUnit()->writingInt(h->unusedLength) = h->unusedLength - size;

         return DiskLoc( fileNo, offset );
     }
@@ -200,9 +200,9 @@ namespace mongo {
                 }
             }

-            txn->createdFile(filename, filelength);
+            txn->recoveryUnit()->createdFile(filename, filelength);
             verify( HeaderSize == 8192 );
-            DataFileHeader *h = txn->writing(this);
+            DataFileHeader *h = txn->recoveryUnit()->writing(this);
             h->fileLength = filelength;
             h->version = PDFILE_VERSION;
             h->versionMinor = PDFILE_VERSION_MINOR_22_AND_OLDER; // All dbs start like this
@@ -221,8 +221,8 @@ namespace mongo {
         if ( freeListStart == minDiskLoc ) {
             // we are upgrading from 2.4 to 2.6
             invariant( freeListEnd == minDiskLoc ); // both start and end should be (0,0) or real
-            *txn->writing( &freeListStart ) = DiskLoc();
-            *txn->writing( &freeListEnd ) = DiskLoc();
+            *txn->recoveryUnit()->writing( &freeListStart ) = DiskLoc();
+            *txn->recoveryUnit()->writing( &freeListEnd ) = DiskLoc();
         }
     }
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 85ad027b39b..a4c4fe12b23 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -268,9 +268,9 @@ namespace mongo {
         Extent *e = getExtent( loc, false );
         verify( e );

-        *txn->writing(&e->magic) = Extent::extentSignature;
-        *txn->writing(&e->myLoc) = loc;
-        *txn->writing(&e->length) = size;
+        *txn->recoveryUnit()->writing(&e->magic) = Extent::extentSignature;
+        *txn->recoveryUnit()->writing(&e->myLoc) = loc;
+        *txn->recoveryUnit()->writing(&e->length) = size;

         return loc;
     }
@@ -389,9 +389,9 @@ namespace mongo {
         // remove from the free list
         if ( !best->xprev.isNull() )
-            *txn->writing(&getExtent( best->xprev )->xnext) = best->xnext;
+            *txn->recoveryUnit()->writing(&getExtent( best->xprev )->xnext) = best->xnext;
         if ( !best->xnext.isNull() )
-            *txn->writing(&getExtent( best->xnext )->xprev) = best->xprev;
+            *txn->recoveryUnit()->writing(&getExtent( best->xnext )->xprev) = best->xprev;
         if ( _getFreeListStart() == best->myLoc )
             _setFreeListStart( txn, best->xnext );
         if ( _getFreeListEnd() == best->myLoc )
@@ -425,10 +425,10 @@ namespace mongo {
     void MmapV1ExtentManager::freeExtent(OperationContext* txn, DiskLoc firstExt ) {
         Extent* e = getExtent( firstExt );
-        txn->writing( &e->xnext )->Null();
-        txn->writing( &e->xprev )->Null();
-        txn->writing( &e->firstRecord )->Null();
-        txn->writing( &e->lastRecord )->Null();
+        txn->recoveryUnit()->writing( &e->xnext )->Null();
+        txn->recoveryUnit()->writing( &e->xprev )->Null();
+        txn->recoveryUnit()->writing( &e->firstRecord )->Null();
+        txn->recoveryUnit()->writing( &e->lastRecord )->Null();

         if( _getFreeListStart().isNull() ) {
@@ -438,8 +438,8 @@ namespace mongo {
         else {
             DiskLoc a = _getFreeListStart();
             invariant( getExtent( a )->xprev.isNull() );
-            *txn->writing( &getExtent( a )->xprev ) = firstExt;
-            *txn->writing( &getExtent( firstExt )->xnext ) = a;
+            *txn->recoveryUnit()->writing( &getExtent( a )->xprev ) = firstExt;
+            *txn->recoveryUnit()->writing( &getExtent( firstExt )->xnext ) = a;
             _setFreeListStart( txn, firstExt );
         }
@@ -467,8 +467,8 @@ namespace mongo {
         else {
             DiskLoc a = _getFreeListStart();
             invariant( getExtent( a )->xprev.isNull() );
-            *txn->writing( &getExtent( a )->xprev ) = lastExt;
-            *txn->writing( &getExtent( lastExt )->xnext ) = a;
+            *txn->recoveryUnit()->writing( &getExtent( a )->xprev ) = lastExt;
+            *txn->recoveryUnit()->writing( &getExtent( lastExt )->xnext ) = a;
             _setFreeListStart( txn, firstExt );
         }
@@ -491,13 +491,13 @@ namespace mongo {
     void MmapV1ExtentManager::_setFreeListStart( OperationContext* txn, DiskLoc loc ) {
         invariant( !_files.empty() );
         DataFile* file = _files[0];
-        *txn->writing( &file->header()->freeListStart ) = loc;
+        *txn->recoveryUnit()->writing( &file->header()->freeListStart ) = loc;
     }

     void MmapV1ExtentManager::_setFreeListEnd( OperationContext* txn, DiskLoc loc ) {
         invariant( !_files.empty() );
         DataFile* file = _files[0];
-        *txn->writing( &file->header()->freeListEnd ) = loc;
+        *txn->recoveryUnit()->writing( &file->header()->freeListEnd ) = loc;
     }

     void MmapV1ExtentManager::freeListStats( int* numExtents, int64_t* totalFreeSize ) const {
diff --git a/src/mongo/db/structure/btree/btree_logic.cpp b/src/mongo/db/structure/btree/btree_logic.cpp
index ceb1e770db3..49b2f9fb464 100644
--- a/src/mongo/db/structure/btree/btree_logic.cpp
+++ b/src/mongo/db/structure/btree/btree_logic.cpp
@@ -43,20 +43,20 @@ namespace mongo {

     template <class BtreeLayout>
     typename BtreeLogic<BtreeLayout>::Builder*
-    BtreeLogic<BtreeLayout>::newBuilder(OperationContext* trans, bool dupsAllowed) {
-        return new Builder(this, trans, dupsAllowed);
+    BtreeLogic<BtreeLayout>::newBuilder(OperationContext* txn, bool dupsAllowed) {
+        return new Builder(this, txn, dupsAllowed);
     }

     template <class BtreeLayout>
     BtreeLogic<BtreeLayout>::Builder::Builder(BtreeLogic* logic,
-                                              OperationContext* trans,
+                                              OperationContext* txn,
                                               bool dupsAllowed)
         : _logic(logic),
           _dupsAllowed(dupsAllowed),
           _numAdded(0),
-          _trans(trans) {
+          _txn(txn) {

-        _first = _cur = _logic->addBucket(trans);
+        _first = _cur = _logic->addBucket(txn);
         _b = _getModifiableBucket(_cur);
         _committed = false;
     }
@@ -113,7 +113,7 @@ namespace mongo {

     template <class BtreeLayout>
     void BtreeLogic<BtreeLayout>::Builder::newBucket() {
-        DiskLoc newBucketLoc = _logic->addBucket(_trans);
+        DiskLoc newBucketLoc = _logic->addBucket(_txn);
         _b->parent = newBucketLoc;
         _cur = newBucketLoc;
         _b = _getModifiableBucket(_cur);
@@ -124,23 +124,23 @@ namespace mongo {
         for (;;) {
             if (_getBucket(loc)->parent.isNull()) {
                 // only 1 bucket at this level. we are done.
-                _logic->_headManager->setHead(_trans, loc);
+                _logic->_headManager->setHead(_txn, loc);
                 break;
             }

-            DiskLoc upLoc = _logic->addBucket(_trans);
+            DiskLoc upLoc = _logic->addBucket(_txn);
             DiskLoc upStart = upLoc;
             BucketType* up = _getModifiableBucket(upLoc);

             DiskLoc xloc = loc;
             while (!xloc.isNull()) {
-                if (_trans->commitIfNeeded()) {
+                if (_txn->recoveryUnit()->commitIfNeeded()) {
                     _b = _getModifiableBucket(_cur);
                     up = _getModifiableBucket(upLoc);
                 }

                 if (mayInterrupt) {
-                    _trans->checkForInterrupt();
+                    _txn->checkForInterrupt();
                 }

                 BucketType* x = _getModifiableBucket(xloc);
@@ -152,7 +152,7 @@ namespace mongo {
                 if (!_logic->_pushBack(up, r, k, keepLoc)) {
                     // current bucket full
-                    DiskLoc n = _logic->addBucket(_trans);
+                    DiskLoc n = _logic->addBucket(_txn);
                     up->parent = n;
                     upLoc = n;
                     up = _getModifiableBucket(upLoc);
@@ -168,7 +168,7 @@ namespace mongo {
                     DiskLoc ll = x->nextChild;
                     _getModifiableBucket(ll)->parent = upLoc;
                 }
-                _logic->deallocBucket(_trans, x, xloc);
+                _logic->deallocBucket(_txn, x, xloc);
                 }
                 xloc = nextLoc;
             }
@@ -180,7 +180,7 @@ namespace mongo {

     template <class BtreeLayout>
     void BtreeLogic<BtreeLayout>::Builder::mayCommitProgressDurably() {
-        if (_trans->commitIfNeeded()) {
+        if (_txn->recoveryUnit()->commitIfNeeded()) {
             _b = _getModifiableBucket(_cur);
         }
     }
@@ -188,7 +188,7 @@ namespace mongo {
     template <class BtreeLayout>
     typename BtreeLogic<BtreeLayout>::BucketType*
     BtreeLogic<BtreeLayout>::Builder::_getModifiableBucket(DiskLoc loc) {
-        return _logic->btreemod(_trans, _logic->getBucket(loc));
+        return _logic->btreemod(_txn, _logic->getBucket(loc));
     }

     template <class BtreeLayout>
@@ -241,8 +241,8 @@ namespace mongo {
     template <class BtreeLayout>
     typename BtreeLogic<BtreeLayout>::BucketType*
-    BtreeLogic<BtreeLayout>::btreemod(OperationContext* trans, BucketType* bucket) {
-        trans->writingPtr(bucket, BtreeLayout::BucketSize);
+    BtreeLogic<BtreeLayout>::btreemod(OperationContext* txn, BucketType* bucket) {
+        txn->recoveryUnit()->writingPtr(bucket, BtreeLayout::BucketSize);
         return bucket;
     }
@@ -426,7 +426,7 @@ namespace mongo {
      * Returns false if a split is required.
      */
     template <class BtreeLayout>
-    bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* trans,
+    bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
                                               BucketType* bucket,
                                               const DiskLoc bucketLoc,
                                               int& keypos,
@@ -437,7 +437,7 @@ namespace mongo {
         int bytesNeeded = key.dataSize() + sizeof(KeyHeaderType);
         if (bytesNeeded > bucket->emptySize) {
-            _pack(trans, bucket, bucketLoc, keypos);
+            _pack(txn, bucket, bucketLoc, keypos);
             if (bytesNeeded > bucket->emptySize) {
                 return false;
             }
@@ -451,7 +451,7 @@ namespace mongo {
             char* end = reinterpret_cast<char*>(&getKeyHeader(bucket, bucket->n + 1));

             // Declare that we will write to [k(keypos),k(n)]
-            trans->writingPtr(start, end - start);
+            txn->recoveryUnit()->writingPtr(start, end - start);
         }

         // e.g. for n==3, keypos==2
@@ -461,7 +461,7 @@ namespace mongo {
         }

         size_t writeLen = sizeof(bucket->emptySize) + sizeof(bucket->topSize) + sizeof(bucket->n);
-        trans->writingPtr(&bucket->emptySize, writeLen);
+        txn->recoveryUnit()->writingPtr(&bucket->emptySize, writeLen);
         bucket->emptySize -= sizeof(KeyHeaderType);
         bucket->n++;
@@ -471,7 +471,7 @@ namespace mongo {
         kn.recordLoc = recordLoc;
         kn.setKeyDataOfs((short) _alloc(bucket, key.dataSize()));
         char *p = dataAt(bucket, kn.keyDataOfs());
-        trans->writingPtr(p, key.dataSize());
+        txn->recoveryUnit()->writingPtr(p, key.dataSize());
         memcpy(p, key.data(), key.dataSize());
         return true;
     }
@@ -510,7 +510,7 @@ namespace mongo {
      * it.
      */
     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::_pack(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::_pack(OperationContext* txn,
                                         BucketType* bucket,
                                         const DiskLoc thisLoc,
                                         int &refPos) {
@@ -521,7 +521,7 @@ namespace mongo {
             return;
         }

-        _packReadyForMod(btreemod(trans, bucket), refPos);
+        _packReadyForMod(btreemod(txn, bucket), refPos);
     }

     /**
@@ -1256,7 +1256,7 @@ namespace mongo {
     }

     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::delBucket(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::delBucket(OperationContext* txn,
                                             BucketType* bucket,
                                             const DiskLoc bucketLoc) {
         invariant(bucketLoc != getRootLoc());
@@ -1265,8 +1265,8 @@ namespace mongo {
         BucketType* p = getBucket(bucket->parent);
         int parentIdx = indexInParent(bucket, bucketLoc);
-        *trans->writing(&childLocForPos(p, parentIdx)) = DiskLoc();
-        deallocBucket(trans, bucket, bucketLoc);
+        *txn->recoveryUnit()->writing(&childLocForPos(p, parentIdx)) = DiskLoc();
+        deallocBucket(txn, bucket, bucketLoc);
     }

     template <class BtreeLayout>
@@ -1336,7 +1336,7 @@ namespace mongo {
      * May delete the bucket 'bucket' rendering 'bucketLoc' invalid.
      */
     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::delKeyAtPos(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::delKeyAtPos(OperationContext* txn,
                                               BucketType* bucket,
                                               const DiskLoc bucketLoc,
                                               int p) {
@@ -1350,25 +1350,25 @@ namespace mongo {
                 // we don't delete the top bucket ever
             }
             else {
-                if (!mayBalanceWithNeighbors(trans, bucket, bucketLoc)) {
-                    // An empty bucket is only allowed as a transient state.  If
+                if (!mayBalanceWithNeighbors(txn, bucket, bucketLoc)) {
+                    // An empty bucket is only allowed as a txnient state.  If
                     // there are no neighbors to balance with, we delete ourself.
                     // This condition is only expected in legacy btrees.
-                    delBucket(trans, bucket, bucketLoc);
+                    delBucket(txn, bucket, bucketLoc);
                 }
             }
             return;
         }
-        deleteInternalKey(trans, bucket, bucketLoc, p);
+        deleteInternalKey(txn, bucket, bucketLoc, p);
         return;
     }

     if (left.isNull()) {
         _delKeyAtPos(bucket, p);
-        mayBalanceWithNeighbors(trans, bucket, bucketLoc);
+        mayBalanceWithNeighbors(txn, bucket, bucketLoc);
     }
     else {
-        deleteInternalKey(trans, bucket, bucketLoc, p);
+        deleteInternalKey(txn, bucket, bucketLoc, p);
     }
 }
@@ -1396,7 +1396,7 @@ namespace mongo {
      * legacy btree.
      */
     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* txn,
                                                     BucketType* bucket,
                                                     const DiskLoc bucketLoc,
                                                     int keypos) {
@@ -1422,32 +1422,32 @@ namespace mongo {
         // Because advanceLoc is a descendant of thisLoc, updating thisLoc will
         // not affect packing or keys of advanceLoc and kn will be stable
         // during the following setInternalKey()
-        setInternalKey(trans, bucket, bucketLoc, keypos, kn.recordLoc, kn.data,
+        setInternalKey(txn, bucket, bucketLoc, keypos, kn.recordLoc, kn.data,
                        childLocForPos(bucket, keypos),
                        childLocForPos(bucket, keypos + 1));
-        delKeyAtPos(trans, btreemod(trans, advanceBucket), advanceLoc, advanceKeyOfs);
+        delKeyAtPos(txn, btreemod(txn, advanceBucket), advanceLoc, advanceKeyOfs);
     }

     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::replaceWithNextChild(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::replaceWithNextChild(OperationContext* txn,
                                                        BucketType* bucket,
                                                        const DiskLoc bucketLoc) {
         invariant(bucket->n == 0 && !bucket->nextChild.isNull() );
         if (bucket->parent.isNull()) {
             invariant(getRootLoc() == bucketLoc);
-            _headManager->setHead(trans, bucket->nextChild);
+            _headManager->setHead(txn, bucket->nextChild);
         }
         else {
             BucketType* parentBucket = getBucket(bucket->parent);
             int bucketIndexInParent = indexInParent(bucket, bucketLoc);
-            *trans->writing(&childLocForPos(parentBucket, bucketIndexInParent)) =
+            *txn->recoveryUnit()->writing(&childLocForPos(parentBucket, bucketIndexInParent)) =
                 bucket->nextChild;
         }

-        *trans->writing(&getBucket(bucket->nextChild)->parent) = bucket->parent;
+        *txn->recoveryUnit()->writing(&getBucket(bucket->nextChild)->parent) = bucket->parent;
         _bucketDeletion->aboutToDeleteBucket(bucketLoc);
-        deallocBucket(trans, bucket, bucketLoc);
+        deallocBucket(txn, bucket, bucketLoc);
     }

     template <class BtreeLayout>
@@ -1539,15 +1539,15 @@ namespace mongo {
     }

     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::doMergeChildren(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::doMergeChildren(OperationContext* txn,
                                                   BucketType* bucket,
                                                   const DiskLoc bucketLoc,
                                                   int leftIndex) {
         DiskLoc leftNodeLoc = childLocForPos(bucket, leftIndex);
         DiskLoc rightNodeLoc = childLocForPos(bucket, leftIndex + 1);

-        BucketType* l = btreemod(trans, getBucket(leftNodeLoc));
-        BucketType* r = btreemod(trans, getBucket(rightNodeLoc));
+        BucketType* l = btreemod(txn, getBucket(leftNodeLoc));
+        BucketType* r = btreemod(txn, getBucket(rightNodeLoc));

         int pos = 0;
         _packReadyForMod(l, pos);
@@ -1565,8 +1565,8 @@ namespace mongo {
         }

         l->nextChild = r->nextChild;
-        fixParentPtrs(trans, l, leftNodeLoc, oldLNum);
-        delBucket(trans, r, rightNodeLoc);
+        fixParentPtrs(txn, l, leftNodeLoc, oldLNum);
+        delBucket(txn, r, rightNodeLoc);

         childLocForPos(bucket, leftIndex + 1) = leftNodeLoc;
         childLocForPos(bucket, leftIndex) = DiskLoc();
@@ -1577,10 +1577,10 @@ namespace mongo {
             //
             // TODO To ensure all leaves are of equal height, we should ensure this is only called
             // on the root.
-            replaceWithNextChild(trans, bucket, bucketLoc);
+            replaceWithNextChild(txn, bucket, bucketLoc);
         }
         else {
-            mayBalanceWithNeighbors(trans, bucket, bucketLoc);
+            mayBalanceWithNeighbors(txn, bucket, bucketLoc);
         }
     }
@@ -1609,7 +1609,7 @@ namespace mongo {
     }

     template <class BtreeLayout>
-    bool BtreeLogic<BtreeLayout>::tryBalanceChildren(OperationContext* trans,
+    bool BtreeLogic<BtreeLayout>::tryBalanceChildren(OperationContext* txn,
                                                      BucketType* bucket,
                                                      const DiskLoc bucketLoc,
                                                      int leftIndex) {
@@ -1620,12 +1620,12 @@ namespace mongo {
             return false;
         }

-        doBalanceChildren(trans, btreemod(trans, bucket), bucketLoc, leftIndex);
+        doBalanceChildren(txn, btreemod(txn, bucket), bucketLoc, leftIndex);
         return true;
     }

     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::doBalanceLeftToRight(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::doBalanceLeftToRight(OperationContext* txn,
                                                        BucketType* bucket,
                                                        const DiskLoc bucketLoc,
                                                        int leftIndex,
@@ -1650,14 +1650,14 @@ namespace mongo {
         FullKey leftIndexKN = getFullKey(bucket, leftIndex);
         setKey(r, rAdd - 1, leftIndexKN.recordLoc, leftIndexKN.data, l->nextChild);

-        fixParentPtrs(trans, r, rchild, 0, rAdd - 1);
+        fixParentPtrs(txn, r, rchild, 0, rAdd - 1);

         FullKey kn = getFullKey(l, split);
         l->nextChild = kn.prevChildBucket;

         // Because lchild is a descendant of thisLoc, updating thisLoc will not affect packing or
         // keys of lchild and kn will be stable during the following setInternalKey()
-        setInternalKey(trans, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
+        setInternalKey(txn, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);

         // lchild and rchild cannot be merged, so there must be >0 (actually more) keys to the left
         // of split.
@@ -1666,7 +1666,7 @@ namespace mongo {
     }

     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::doBalanceRightToLeft(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::doBalanceRightToLeft(OperationContext* txn,
                                                        BucketType* bucket,
                                                        const DiskLoc bucketLoc,
                                                        int leftIndex,
@@ -1696,11 +1696,11 @@ namespace mongo {
         FullKey kn = getFullKey(r, split - lN - 1);
         l->nextChild = kn.prevChildBucket;
         // Child lN was lchild's old nextChild, and don't need to fix that one.
-        fixParentPtrs(trans, l, lchild, lN + 1, l->n);
+        fixParentPtrs(txn, l, lchild, lN + 1, l->n);

         // Because rchild is a descendant of thisLoc, updating thisLoc will
         // not affect packing or keys of rchild and kn will be stable
         // during the following setInternalKey()
-        setInternalKey(trans, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
+        setInternalKey(txn, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
     }

     // lchild and rchild cannot be merged, so there must be >0 (actually more)
@@ -1710,7 +1710,7 @@ namespace mongo {
     }

     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::doBalanceChildren(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::doBalanceChildren(OperationContext* txn,
                                                     BucketType* bucket,
                                                     const DiskLoc bucketLoc,
                                                     int leftIndex) {
@@ -1719,10 +1719,10 @@ namespace mongo {
         DiskLoc rchild = childLocForPos(bucket, leftIndex + 1);

         int zeropos = 0;
-        BucketType* l = btreemod(trans, getBucket(lchild));
+        BucketType* l = btreemod(txn, getBucket(lchild));
         _packReadyForMod(l, zeropos);

-        BucketType* r = btreemod(trans, getBucket(rchild));
+        BucketType* r = btreemod(txn, getBucket(rchild));
         _packReadyForMod(r, zeropos);

         int split = rebalancedSeparatorPos(bucket, bucketLoc, leftIndex);
@@ -1731,15 +1731,15 @@ namespace mongo {
         // then we must actively balance.
         invariant(split != l->n);
         if (split < l->n) {
-            doBalanceLeftToRight(trans, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
+            doBalanceLeftToRight(txn, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
         }
         else {
-            doBalanceRightToLeft(trans, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
+            doBalanceRightToLeft(txn, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
         }
     }

     template <class BtreeLayout>
-    bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* trans,
+    bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* txn,
                                                           BucketType* bucket,
                                                           const DiskLoc bucketLoc) {
         if (bucket->parent.isNull()) {
@@ -1761,21 +1761,21 @@ namespace mongo {
         // Balance if possible on one side - we merge only if absolutely necessary to preserve btree
         // bucket utilization constraints since that's a more heavy duty operation (especially if we
         // must re-split later).
-        if (mayBalanceRight && tryBalanceChildren(trans, p, bucket->parent, parentIdx)) {
+        if (mayBalanceRight && tryBalanceChildren(txn, p, bucket->parent, parentIdx)) {
             return true;
         }

-        if (mayBalanceLeft && tryBalanceChildren(trans, p, bucket->parent, parentIdx - 1)) {
+        if (mayBalanceLeft && tryBalanceChildren(txn, p, bucket->parent, parentIdx - 1)) {
             return true;
         }

-        BucketType* pm = btreemod(trans, getBucket(bucket->parent));
+        BucketType* pm = btreemod(txn, getBucket(bucket->parent));
         if (mayBalanceRight) {
-            doMergeChildren(trans, pm, bucket->parent, parentIdx);
+            doMergeChildren(txn, pm, bucket->parent, parentIdx);
             return true;
         }
         else if (mayBalanceLeft) {
-            doMergeChildren(trans, pm, bucket->parent, parentIdx - 1);
+            doMergeChildren(txn, pm, bucket->parent, parentIdx - 1);
             return true;
         }

@@ -1783,7 +1783,7 @@ namespace mongo {
     }

     template <class BtreeLayout>
-    bool BtreeLogic<BtreeLayout>::unindex(OperationContext* trans,
+    bool BtreeLogic<BtreeLayout>::unindex(OperationContext* txn,
                                           const BSONObj& key,
                                           const DiskLoc& recordLoc) {
         int pos;
@@ -1791,8 +1791,8 @@ namespace mongo {
         KeyDataOwnedType ownedKey(key);
         DiskLoc loc = _locate(getRootLoc(), ownedKey, &pos, &found, recordLoc, 1);
         if (found) {
-            BucketType* bucket = btreemod(trans, getBucket(loc));
-            delKeyAtPos(trans, bucket, loc, pos);
+            BucketType* bucket = btreemod(txn, getBucket(loc));
+            delKeyAtPos(txn, bucket, loc, pos);
             assertValid(_indexName, getRoot(), _ordering);
         }
         return found;
@@ -1804,11 +1804,11 @@ namespace mongo {
     }

     template <class BtreeLayout>
-    inline void BtreeLogic<BtreeLayout>::fix(OperationContext* trans,
+    inline void BtreeLogic<BtreeLayout>::fix(OperationContext* txn,
                                              const DiskLoc bucketLoc,
                                              const DiskLoc child) {
         if (!child.isNull()) {
-            *trans->writing(&getBucket(child)->parent) = bucketLoc;
+            *txn->recoveryUnit()->writing(&getBucket(child)->parent) = bucketLoc;
         }
     }
@@ -1817,7 +1817,7 @@ namespace mongo {
      * Maybe get rid of parent ptrs?
      */
     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::fixParentPtrs(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::fixParentPtrs(OperationContext* txn,
                                                 BucketType* bucket,
                                                 const DiskLoc bucketLoc,
                                                 int firstIndex,
@@ -1830,12 +1830,12 @@ namespace mongo {
         }

         for (int i = firstIndex; i <= lastIndex; i++) {
-            fix(trans, bucketLoc, childLocForPos(bucket, i));
+            fix(txn, bucketLoc, childLocForPos(bucket, i));
         }
     }

     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::setInternalKey(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::setInternalKey(OperationContext* txn,
                                                  BucketType* bucket,
                                                  const DiskLoc bucketLoc,
                                                  int keypos,
@@ -1844,7 +1844,7 @@ namespace mongo {
                                                  const DiskLoc lchild,
                                                  const DiskLoc rchild) {
         childLocForPos(bucket, keypos).Null();

-        // This may leave the bucket empty (n == 0) which is ok only as a transient state.  In the
+        // This may leave the bucket empty (n == 0) which is ok only as a txnient state.  In the
         // instant case, the implementation of insertHere behaves correctly when n == 0 and as a
         // side effect increments n.
         _delKeyAtPos(bucket, keypos, true);
@@ -1855,7 +1855,7 @@ namespace mongo {
         // Just set temporarily - required to pass validation in insertHere()
         childLocForPos(bucket, keypos) = lchild;

-        insertHere(trans, bucketLoc, keypos, key, recordLoc, lchild, rchild);
+        insertHere(txn, bucketLoc, keypos, key, recordLoc, lchild, rchild);
     }

     /**
@@ -1869,7 +1869,7 @@ namespace mongo {
      * intent code in basicInsert().
      */
     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::insertHere(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::insertHere(OperationContext* txn,
                                              const DiskLoc bucketLoc,
                                              int pos,
                                              const KeyDataType& key,
@@ -1879,9 +1879,9 @@ namespace mongo {

         BucketType* bucket = getBucket(bucketLoc);

-        if (!basicInsert(trans, bucket, bucketLoc, pos, key, recordLoc)) {
+        if (!basicInsert(txn, bucket, bucketLoc, pos, key, recordLoc)) {
             // If basicInsert() fails, the bucket will be packed as required by split().
-            split(trans, btreemod(trans, bucket), bucketLoc, pos, recordLoc, key, leftChildLoc, rightChildLoc);
+            split(txn, btreemod(txn, bucket), bucketLoc, pos, recordLoc, key, leftChildLoc, rightChildLoc);
             return;
         }
@@ -1894,9 +1894,9 @@ namespace mongo {
             }
             kn->prevChildBucket = bucket->nextChild;
             invariant(kn->prevChildBucket == leftChildLoc);
-            *trans->writing(&bucket->nextChild) = rightChildLoc;
+            *txn->recoveryUnit()->writing(&bucket->nextChild) = rightChildLoc;
             if (!rightChildLoc.isNull()) {
-                *trans->writing(&getBucket(rightChildLoc)->parent) = bucketLoc;
+                *txn->recoveryUnit()->writing(&getBucket(rightChildLoc)->parent) = bucketLoc;
             }
         }
         else {
@@ -1909,13 +1909,13 @@ namespace mongo {
             // Intent declared in basicInsert()
             *const_cast<LocType*>(pc) = rightChildLoc;
             if (!rightChildLoc.isNull()) {
-                *trans->writing(&getBucket(rightChildLoc)->parent) = bucketLoc;
+                *txn->recoveryUnit()->writing(&getBucket(rightChildLoc)->parent) = bucketLoc;
             }
         }
     }

     template <class BtreeLayout>
-    void BtreeLogic<BtreeLayout>::split(OperationContext* trans,
+    void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
                                         BucketType* bucket,
                                         const DiskLoc bucketLoc,
                                         int keypos,
@@ -1925,8 +1925,8 @@ namespace mongo {
                                         const DiskLoc rchild) {
         int split = splitPos(bucket, keypos);
-        DiskLoc rLoc = addBucket(trans);
-        BucketType* r = btreemod(trans, getBucket(rLoc));
+        DiskLoc rLoc = addBucket(txn);
+        BucketType* r = btreemod(txn, getBucket(rLoc));

         for (int i = split + 1; i < bucket->n; i++) {
             FullKey kn = getFullKey(bucket, i);
@@ -1936,7 +1936,7 @@ namespace mongo {
         assertValid(_indexName, r, _ordering);

         r = NULL;
-        fixParentPtrs(trans, getBucket(rLoc), rLoc);
+        fixParentPtrs(txn, getBucket(rLoc), rLoc);

         FullKey splitkey = getFullKey(bucket, split);
         // splitkey key gets promoted, its children will be thisLoc (l) and rLoc (r)
@@ -1947,20 +1947,20 @@ namespace mongo {
         if (bucket->parent.isNull()) {
             // promote splitkey to a parent this->node make a new parent if we were the root
-            DiskLoc L = addBucket(trans);
-            BucketType* p = btreemod(trans, getBucket(L));
+            DiskLoc L = addBucket(txn);
+            BucketType* p = btreemod(txn, getBucket(L));
             pushBack(p, splitkey.recordLoc, splitkey.data, bucketLoc);
             p->nextChild = rLoc;
             assertValid(_indexName, p, _ordering);
             bucket->parent = L;
-            _headManager->setHead(trans, L);
-            *trans->writing(&getBucket(rLoc)->parent) = bucket->parent;
+            _headManager->setHead(txn, L);
+            *txn->recoveryUnit()->writing(&getBucket(rLoc)->parent) = bucket->parent;
         }
         else {
             // set this before calling _insert - if it splits it will do fixParent() logic and
             // change the value.
-            *trans->writing(&getBucket(rLoc)->parent) = bucket->parent;
-            _insert(trans,
+            *txn->recoveryUnit()->writing(&getBucket(rLoc)->parent) = bucket->parent;
+            _insert(txn,
                     getBucket(bucket->parent),
                     bucket->parent,
                     splitkey.data,
@@ -1976,12 +1976,12 @@ namespace mongo {

         // add our this->new key, there is room this->now
         if (keypos <= split) {
-            insertHere(trans, bucketLoc, newpos, key, recordLoc, lchild, rchild);
+            insertHere(txn, bucketLoc, newpos, key, recordLoc, lchild, rchild);
         }
         else {
             int kp = keypos - split - 1;
             invariant(kp >= 0);
-            insertHere(trans, rLoc, kp, key, recordLoc, lchild, rchild);
+            insertHere(txn, rLoc, kp, key, recordLoc, lchild, rchild);
         }
     }
@@ -1995,22 +1995,22 @@ namespace mongo {
     };

     template <class BtreeLayout>
-    Status BtreeLogic<BtreeLayout>::initAsEmpty(OperationContext* trans) {
+    Status BtreeLogic<BtreeLayout>::initAsEmpty(OperationContext* txn) {
         if (!_headManager->getHead().isNull()) {
             return Status(ErrorCodes::InternalError, "index already initialized");
         }

-        _headManager->setHead(trans, addBucket(trans));
+        _headManager->setHead(txn, addBucket(txn));
         return Status::OK();
     }

     template <class BtreeLayout>
-    DiskLoc BtreeLogic<BtreeLayout>::addBucket(OperationContext* trans) {
+    DiskLoc BtreeLogic<BtreeLayout>::addBucket(OperationContext* txn) {
         DummyDocWriter docWriter(BtreeLayout::BucketSize);
-        StatusWith<DiskLoc> loc = _recordStore->insertRecord(trans, &docWriter, 0);
+        StatusWith<DiskLoc> loc = _recordStore->insertRecord(txn, &docWriter, 0);
         // XXX: remove this(?) or turn into massert or sanely bubble it back up.
         uassertStatusOK(loc.getStatus());
-        BucketType* b = btreemod(trans, getBucket(loc.getValue()));
+        BucketType* b = btreemod(txn, getBucket(loc.getValue()));
         init(b);
         return loc.getValue();
     }
@@ -2200,7 +2200,7 @@ namespace mongo {
     }

     template <class BtreeLayout>
-    Status BtreeLogic<BtreeLayout>::insert(OperationContext* trans,
+    Status BtreeLogic<BtreeLayout>::insert(OperationContext* txn,
                                            const BSONObj& rawKey,
                                            const DiskLoc& value,
                                            bool dupsAllowed) {
@@ -2213,7 +2213,7 @@ namespace mongo {
             return Status(ErrorCodes::KeyTooLong, msg);
         }

-        Status status = _insert(trans,
+        Status status = _insert(txn,
                                 getRoot(),
                                 getRootLoc(),
                                 key,
@@ -2227,7 +2227,7 @@ namespace mongo {
     }

     template <class BtreeLayout>
-    Status BtreeLogic<BtreeLayout>::_insert(OperationContext* trans,
+    Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
                                             BucketType* bucket,
                                             const DiskLoc bucketLoc,
                                             const KeyDataType& key,
@@ -2250,7 +2250,7 @@ namespace mongo {
                 LOG(4) << "btree _insert: reusing unused key" << endl;
                 massert(17433, "_insert: reuse key but lchild is not null", leftChild.isNull());
                 massert(17434, "_insert: reuse key but rchild is not null", rightChild.isNull());
-                trans->writing(&header)->setUsed();
+                txn->recoveryUnit()->writing(&header)->setUsed();
                 return Status::OK();
             }
             return Status(ErrorCodes::UniqueIndexViolation, "FIXME");
@@ -2262,11 +2262,11 @@ namespace mongo {
         // promoting a split key.  These are the only two cases where _insert() is called
         // currently.
         if (childLoc.isNull() || !rightChild.isNull()) {
-            insertHere(trans, bucketLoc, pos, key, recordLoc, leftChild, rightChild);
+            insertHere(txn, bucketLoc, pos, key, recordLoc, leftChild, rightChild);
             return Status::OK();
         }
         else {
-            return _insert(trans,
+            return _insert(txn,
                            getBucket(childLoc),
                            childLoc,
                            key,
diff --git a/src/mongo/db/structure/btree/btree_logic.h b/src/mongo/db/structure/btree/btree_logic.h
index de36c8aa11c..872384e6f88 100644
--- a/src/mongo/db/structure/btree/btree_logic.h
+++ b/src/mongo/db/structure/btree/btree_logic.h
@@ -101,7 +101,7 @@ namespace mongo {
         private:
             friend class BtreeLogic;

-            Builder(BtreeLogic* logic, OperationContext* trans, bool dupsAllowed);
+            Builder(BtreeLogic* logic, OperationContext* txn, bool dupsAllowed);

             // Direct ports of functionality
             void newBucket();
@@ -124,18 +124,18 @@ namespace mongo {
             auto_ptr<KeyDataOwnedType> _keyLast;

             // Not owned.
-            OperationContext* _trans;
+            OperationContext* _txn;
         };

         /**
          * Caller owns the returned pointer.
          * 'this' must outlive the returned pointer.
          */
-        Builder* newBuilder(OperationContext* trans, bool dupsAllowed);
+        Builder* newBuilder(OperationContext* txn, bool dupsAllowed);

         Status dupKeyCheck(const BSONObj& key, const DiskLoc& loc) const;

-        Status insert(OperationContext* trans,
+        Status insert(OperationContext* txn,
                       const BSONObj& rawKey,
                       const DiskLoc& value,
                       bool dupsAllowed);
@@ -158,7 +158,7 @@ namespace mongo {

         bool exists(const KeyDataType& key) const;

-        bool unindex(OperationContext* trans,
+        bool unindex(OperationContext* txn,
                      const BSONObj& key,
                      const DiskLoc& recordLoc);
@@ -213,7 +213,7 @@ namespace mongo {
         /**
          * Returns OK if the index was uninitialized before, error status otherwise.
          */
-        Status initAsEmpty(OperationContext* trans);
+        Status initAsEmpty(OperationContext* txn);

     private:
         friend class BtreeLogic::Builder;
@@ -286,7 +286,7 @@ namespace mongo {

         static void setNotPacked(BucketType* bucket);

-        static BucketType* btreemod(OperationContext* trans, BucketType* bucket);
+        static BucketType* btreemod(OperationContext* txn, BucketType* bucket);

         static int splitPos(BucketType* bucket, int keypos);
@@ -312,7 +312,7 @@ namespace mongo {
         // information).
         //

-        bool basicInsert(OperationContext* trans,
+        bool basicInsert(OperationContext* txn,
                          BucketType* bucket,
                          const DiskLoc bucketLoc,
                          int& keypos,
@@ -321,7 +321,7 @@ namespace mongo {

         void dropFront(BucketType* bucket, int nDrop, int& refpos);

-        void _pack(OperationContext* trans, BucketType* bucket, const DiskLoc thisLoc, int &refPos);
+        void _pack(OperationContext* txn, BucketType* bucket, const DiskLoc thisLoc, int &refPos);

         void customLocate(DiskLoc* locInOut,
                           int* keyOfsInOut,
@@ -383,7 +383,7 @@ namespace mongo {
                           bool dumpBuckets,
                           unsigned depth);

-        DiskLoc addBucket(OperationContext* trans);
+        DiskLoc addBucket(OperationContext* txn);

         bool canMergeChildren(BucketType* bucket,
                               const DiskLoc bucketLoc,
@@ -398,7 +398,7 @@ namespace mongo {

         void truncateTo(BucketType* bucket, int N, int &refPos);

-        void split(OperationContext* trans,
+        void split(OperationContext* txn,
                    BucketType* bucket,
                    const DiskLoc bucketLoc,
                    int keypos,
@@ -407,7 +407,7 @@ namespace mongo {
                    const DiskLoc lchild,
                    const DiskLoc rchild);

-        Status _insert(OperationContext* trans,
+        Status _insert(OperationContext* txn,
                        BucketType* bucket,
                        const DiskLoc bucketLoc,
                        const KeyDataType& key,
@@ -417,7 +417,7 @@ namespace mongo {
                        const DiskLoc rightChild);

         // TODO take a BucketType*?
-        void insertHere(OperationContext* trans,
+        void insertHere(OperationContext* txn,
                         const DiskLoc bucketLoc,
                         int pos,
                         const KeyDataType& key,
@@ -427,7 +427,7 @@ namespace mongo {

         string dupKeyError(const KeyDataType& key) const;

-        void setInternalKey(OperationContext* trans,
+        void setInternalKey(OperationContext* txn,
                             BucketType* bucket,
                             const DiskLoc bucketLoc,
                             int keypos,
@@ -436,22 +436,22 @@ namespace mongo {
                             const DiskLoc lchild,
                             const DiskLoc rchild);

-        void fix(OperationContext* trans, const DiskLoc bucketLoc, const DiskLoc child);
+        void fix(OperationContext* txn, const DiskLoc bucketLoc, const DiskLoc child);

-        void fixParentPtrs(OperationContext* trans,
+        void fixParentPtrs(OperationContext* txn,
                            BucketType* bucket,
                            const DiskLoc bucketLoc,
                            int firstIndex = 0,
                            int lastIndex = -1);

-        bool mayBalanceWithNeighbors(OperationContext* trans, BucketType* bucket, const DiskLoc bucketLoc);
+        bool mayBalanceWithNeighbors(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc);

-        void doBalanceChildren(OperationContext* trans,
+        void doBalanceChildren(OperationContext* txn,
                                BucketType* bucket,
                                const DiskLoc bucketLoc,
                                int leftIndex);

-        void doBalanceLeftToRight(OperationContext* trans,
+        void doBalanceLeftToRight(OperationContext* txn,
                                   BucketType* bucket,
                                   const DiskLoc thisLoc,
                                   int leftIndex,
@@ -461,7 +461,7 @@ namespace mongo {
                                   BucketType* r,
                                   const DiskLoc rchild);

-        void doBalanceRightToLeft(OperationContext* trans,
+        void doBalanceRightToLeft(OperationContext* txn,
                                   BucketType* bucket,
                                   const DiskLoc bucketLoc,
                                   int leftIndex,
@@ -471,37 +471,37 @@ namespace mongo {
                                   BucketType* r,
                                   const DiskLoc rchild);

-        bool tryBalanceChildren(OperationContext* trans,
+        bool tryBalanceChildren(OperationContext* txn,
                                 BucketType* bucket,
                                 const DiskLoc bucketLoc,
                                 int leftIndex);

         int indexInParent(BucketType* bucket, const DiskLoc bucketLoc) const;

-        void doMergeChildren(OperationContext* trans,
+        void doMergeChildren(OperationContext* txn,
                              BucketType* bucket,
                              const DiskLoc bucketLoc,
                              int leftIndex);

-        void replaceWithNextChild(OperationContext* trans,
+        void replaceWithNextChild(OperationContext* txn,
                                   BucketType* bucket,
                                   const DiskLoc bucketLoc);

-        void deleteInternalKey(OperationContext* trans,
+        void deleteInternalKey(OperationContext* txn,
                                BucketType* bucket,
                                const DiskLoc bucketLoc,
                                int keypos);

-        void delKeyAtPos(OperationContext* trans,
+        void delKeyAtPos(OperationContext* txn,
                          BucketType* bucket,
                          const DiskLoc bucketLoc,
                          int p);

-        void delBucket(OperationContext* trans,
+        void delBucket(OperationContext* txn,
                        BucketType* bucket,
                        const DiskLoc bucketLoc);

-        void deallocBucket(OperationContext* trans,
+        void deallocBucket(OperationContext* txn,
                            BucketType* bucket,
                            const DiskLoc bucketLoc);
diff --git a/src/mongo/db/structure/catalog/hashtab.h b/src/mongo/db/structure/catalog/hashtab.h
index 66ecb5af9dc..13e24ea199d 100644
--- a/src/mongo/db/structure/catalog/hashtab.h
+++ b/src/mongo/db/structure/catalog/hashtab.h
@@ -127,7 +127,7 @@ namespace mongo {
             int i = _find(k, found);
             if ( i >= 0 && found ) {
                 Node* n = &nodes(i);
-                n = txn->writing(n);
+                n = txn->recoveryUnit()->writing(n);
                 n->k.kill();
                 n->setUnused();
             }
@@ -139,7 +139,7 @@ namespace mongo {
             int i = _find(k, found);
             if ( i < 0 )
                 return false;
-            Node* n = txn->writing( &nodes(i) );
+            Node* n = txn->recoveryUnit()->writing( &nodes(i) );
             if ( !found ) {
                 n->k = k;
                 n->hash = k.hash();
diff --git a/src/mongo/db/structure/catalog/namespace_details.cpp b/src/mongo/db/structure/catalog/namespace_details.cpp
index 8eb6132890e..e94f6118737 100644
--- a/src/mongo/db/structure/catalog/namespace_details.cpp
+++ b/src/mongo/db/structure/catalog/namespace_details.cpp
@@ -108,7 +108,7 @@ namespace mongo {
             long ofs = e->ofsFrom(this);
             if( i == 0 ) {
                 verify( _extraOffset == 0 );
-                *txn->writing(&_extraOffset) = ofs;
+                *txn->recoveryUnit()->writing(&_extraOffset) = ofs;
                 verify( extra() == e );
             }
             else {
@@ -130,7 +130,7 @@ namespace mongo {
                 return false;
             }

-            *txn->writing(&_multiKeyIndexBits) |= mask;
+            *txn->recoveryUnit()->writing(&_multiKeyIndexBits) |= mask;
         }
         else {
             // Shortcut if the bit is already set correctly
@@ -140,7 +140,7 @@ namespace mongo {
             // Invert mask: all 1's except a 0 at the ith bit
             mask = ~mask;
-            *txn->writing(&_multiKeyIndexBits) &= mask;
+            *txn->recoveryUnit()->writing(&_multiKeyIndexBits) &= mask;
         }

         return true;
@@ -243,14 +243,14 @@ namespace mongo {
     }

     NamespaceDetails* NamespaceDetails::writingWithoutExtra( OperationContext* txn ) {
-        return txn->writing( this );
+        return txn->recoveryUnit()->writing( this );
     }

     // XXX - this method should go away
     NamespaceDetails *NamespaceDetails::writingWithExtra( OperationContext* txn ) {
         for( Extra *e = extra(); e; e = e->next( this ) ) {
-            txn->writing( e );
+            txn->recoveryUnit()->writing( e );
         }
         return writingWithoutExtra( txn );
     }
@@ -288,7 +288,7 @@ namespace mongo {
     void NamespaceDetails::setLastExtentSize( OperationContext* txn, int newMax ) {
         if ( _lastExtentSize == newMax )
             return;
-        txn->writingInt(_lastExtentSize) = newMax;
+        txn->recoveryUnit()->writingInt(_lastExtentSize) = newMax;
     }

     void NamespaceDetails::incrementStats( OperationContext* txn,
@@ -296,7 +296,7 @@ namespace mongo {
                                            long long numRecordsIncrement ) {
         // durability todo : this could be a bit annoying / slow to record constantly
-        Stats* s = txn->writing( &_stats );
+        Stats* s = txn->recoveryUnit()->writing( &_stats );
         s->datasize += dataSizeIncrement;
         s->nrecords += numRecordsIncrement;
     }
@@ -304,49 +304,49 @@ namespace mongo {
     void NamespaceDetails::setStats( OperationContext* txn,
                                      long long dataSize,
                                      long long numRecords ) {
-        Stats* s = txn->writing( &_stats );
+        Stats* s = txn->recoveryUnit()->writing( &_stats );
         s->datasize = dataSize;
         s->nrecords = numRecords;
     }

     void NamespaceDetails::setFirstExtent( OperationContext* txn, const DiskLoc& loc ) {
-        *txn->writing( &_firstExtent ) = loc;
+        *txn->recoveryUnit()->writing( &_firstExtent ) = loc;
     }

     void NamespaceDetails::setLastExtent( OperationContext* txn, const DiskLoc& loc ) {
-        *txn->writing( &_lastExtent ) = loc;
+        *txn->recoveryUnit()->writing( &_lastExtent ) = loc;
     }

     void NamespaceDetails::setCapExtent( OperationContext* txn, const DiskLoc& loc ) {
-        *txn->writing( &_capExtent ) = loc;
+        *txn->recoveryUnit()->writing( &_capExtent ) = loc;
     }

     void NamespaceDetails::setCapFirstNewRecord( OperationContext* txn, const DiskLoc& loc ) {
-        *txn->writing( &_capFirstNewRecord ) = loc;
+        *txn->recoveryUnit()->writing( &_capFirstNewRecord ) = loc;
     }

     void NamespaceDetails::setFirstExtentInvalid( OperationContext* txn ) {
-        *txn->writing( &_firstExtent ) = DiskLoc().setInvalid();
+        *txn->recoveryUnit()->writing( &_firstExtent ) = DiskLoc().setInvalid();
     }

     void NamespaceDetails::setLastExtentInvalid( OperationContext* txn ) {
-        *txn->writing( &_lastExtent ) = DiskLoc().setInvalid();
+        *txn->recoveryUnit()->writing( &_lastExtent ) = DiskLoc().setInvalid();
     }

     void NamespaceDetails::setDeletedListEntry( OperationContext* txn,
                                                 int bucket, const DiskLoc& loc ) {
-        *txn->writing( &_deletedList[bucket] ) = loc;
+        *txn->recoveryUnit()->writing( &_deletedList[bucket] ) = loc;
     }

     bool NamespaceDetails::setUserFlag( OperationContext* txn, int flags ) {
         if ( ( _userFlags & flags ) == flags )
             return false;

-        txn->writingInt(_userFlags) |= flags;
+        txn->recoveryUnit()->writingInt(_userFlags) |= flags;
         return true;
     }
@@ -354,7 +354,7 @@ namespace mongo {
         if ( ( _userFlags & flags ) == 0 )
             return false;

-        txn->writingInt(_userFlags) &= ~flags;
+        txn->recoveryUnit()->writingInt(_userFlags) &= ~flags;
         return true;
     }
@@ -362,7 +362,7 @@ namespace mongo {
         if ( flags == _userFlags )
             return false;

-        txn->writingInt(_userFlags) = flags;
+        txn->recoveryUnit()->writingInt(_userFlags) = flags;
         return true;
     }
@@ -373,7 +373,7 @@ namespace mongo {
         if ( isCapped() )
             return;

-        *txn->writing(&_paddingFactor) = paddingFactor;
+        *txn->recoveryUnit()->writing(&_paddingFactor) = paddingFactor;
     }

     /* remove bit from a bit array - actually remove its slot, not a clear
@@ -411,8 +411,8 @@ namespace mongo {
         // flip main meta data
         IndexDetails temp = idx(a);
-        *txn->writing(&idx(a)) = idx(b);
-        *txn->writing(&idx(b)) = temp;
+        *txn->recoveryUnit()->writing(&idx(a)) = idx(b);
+        *txn->recoveryUnit()->writing(&idx(b)) = temp;

         // flip multi key bits
         bool tempMultikey = isMultikey(a);
@@ -422,7 +422,7 @@ namespace mongo {
     void NamespaceDetails::orphanDeletedList( OperationContext* txn ) {
         for( int i = 0; i < Buckets; i++ ) {
-            *txn->writing(&_deletedList[i]) = DiskLoc();
+            *txn->recoveryUnit()->writing(&_deletedList[i]) = DiskLoc();
         }
     }
@@ -440,7 +440,7 @@ namespace mongo {

     void NamespaceDetails::Extra::setNext( OperationContext* txn,
                                            long ofs ) {
-        *txn->writing(&_next) = ofs;
+        *txn->recoveryUnit()->writing(&_next) = ofs;
     }

     /* ------------------------------------------------------------------------- */
diff --git a/src/mongo/db/structure/catalog/namespace_index.cpp b/src/mongo/db/structure/catalog/namespace_index.cpp
index d7863519423..99c6a5403ac 100644
--- a/src/mongo/db/structure/catalog/namespace_index.cpp
+++ b/src/mongo/db/structure/catalog/namespace_index.cpp
@@ -169,7 +169,7 @@ namespace mongo {
             maybeMkdir();
             unsigned long long l = storageGlobalParams.lenForNewNsFiles;
             if ( _f.create(pathString, l, true) ) {
-                txn->createdFile(pathString, l); // always a new file
+                txn->recoveryUnit()->createdFile(pathString, l); // always a new file
                 len = l;
                 verify(len == storageGlobalParams.lenForNewNsFiles);
                 p = _f.getView();
@@ -178,7 +178,7 @@ namespace mongo {
                     // we do this so the durability system isn't mad at us for
                     // only initiating file and not doing a write
                     // grep for 17388
-                    txn->writingPtr( p, 5 ); // throw away
+                    txn->recoveryUnit()->writingPtr( p, 5 ); // throw away
                 }
             }
         }
diff --git a/src/mongo/db/structure/record_store_v1_base.cpp b/src/mongo/db/structure/record_store_v1_base.cpp
index 97a421de8d0..09c50f70e96 100644
--- a/src/mongo/db/structure/record_store_v1_base.cpp
+++ b/src/mongo/db/structure/record_store_v1_base.cpp
@@ -175,7 +175,7 @@ namespace mongo {
             ofs = newOfs;
         }

-        DeletedRecord* empty = txn->writing(drec(emptyLoc));
+        DeletedRecord* empty = txn->recoveryUnit()->writing(drec(emptyLoc));
         empty->lengthWithHeaders() = delRecLength;
         empty->extentOfs() = e->myLoc.getOfs();
         empty->nextDeleted().Null();
@@ -224,7 +224,7 @@ namespace mongo {
         Record *r = recordFor( loc.getValue() );
         fassert( 17319, r->lengthWithHeaders() >= lenWHdr );

-        r = reinterpret_cast<Record*>( txn->writingPtr(r, lenWHdr) );
+        r = reinterpret_cast<Record*>( txn->recoveryUnit()->writingPtr(r, lenWHdr) );
         doc->writeDocument( r->data() );

         _addRecordToRecListInExtent(txn, r, loc.getValue());
@@ -255,7 +255,7 @@ namespace mongo {
         fassert( 17210, r->lengthWithHeaders() >= lenWHdr );

         // copy the data
-        r = reinterpret_cast<Record*>( txn->writingPtr(r, lenWHdr) );
+        r = reinterpret_cast<Record*>( txn->recoveryUnit()->writingPtr(r, lenWHdr) );
         memcpy( r->data(), data, len );

         _addRecordToRecListInExtent(txn, r, loc.getValue());
@@ -275,19 +275,19 @@ namespace mongo {
             if ( todelete->prevOfs() != DiskLoc::NullOfs ) {
                 DiskLoc prev = getPrevRecordInExtent( dl );
                 Record* prevRecord = recordFor( prev );
-                txn->writingInt( prevRecord->nextOfs() ) = todelete->nextOfs();
+                txn->recoveryUnit()->writingInt( prevRecord->nextOfs() ) = todelete->nextOfs();
             }

             if ( todelete->nextOfs() != DiskLoc::NullOfs ) {
                 DiskLoc next = getNextRecord( dl );
                 Record* nextRecord = recordFor( next );
-                txn->writingInt( nextRecord->prevOfs() ) = todelete->prevOfs();
+                txn->recoveryUnit()->writingInt( nextRecord->prevOfs() ) = todelete->prevOfs();
             }
         }

         /* remove ourself from extent pointers */
         {
-            Extent *e = txn->writing( _getExtent( _getExtentLocForRecord( dl ) ) );
+            Extent *e = txn->recoveryUnit()->writing( _getExtent( _getExtentLocForRecord( dl ) ) );
             if ( e->firstRecord == dl ) {
                 if ( todelete->nextOfs() == DiskLoc::NullOfs )
                     e->firstRecord.Null();
@@ -312,13 +312,13 @@ namespace mongo {
                to this disk location.  so an incorrectly done remove would cause
                a lot of problems.
             */
-            memset( txn->writingPtr(todelete, todelete->lengthWithHeaders() ),
+            memset( txn->recoveryUnit()->writingPtr(todelete, todelete->lengthWithHeaders() ),
                     0, todelete->lengthWithHeaders() );
         }
         else {
             // this is defensive so we can detect if we are still using a location
             // that was deleted
-            memset(txn->writingPtr(todelete->data(), 4), 0xee, 4);
+            memset(txn->recoveryUnit()->writingPtr(todelete->data(), 4), 0xee, 4);
             addDeletedRec(txn, dl);
         }
     }
@@ -335,16 +335,16 @@ namespace mongo {
         dassert( recordFor(loc) == r );
         Extent *e = _getExtent( _getExtentLocForRecord( loc ) );
         if ( e->lastRecord.isNull() ) {
-            *txn->writing(&e->firstRecord) = loc;
-            *txn->writing(&e->lastRecord) = loc;
+            *txn->recoveryUnit()->writing(&e->firstRecord) = loc;
+            *txn->recoveryUnit()->writing(&e->lastRecord) = loc;
             r->prevOfs() = r->nextOfs() = DiskLoc::NullOfs;
         }
         else {
             Record *oldlast = recordFor(e->lastRecord);
             r->prevOfs() = e->lastRecord.getOfs();
             r->nextOfs() = DiskLoc::NullOfs;
-            txn->writingInt(oldlast->nextOfs()) = loc.getOfs();
-            *txn->writing(&e->lastRecord) = loc;
+            txn->recoveryUnit()->writingInt(oldlast->nextOfs()) = loc.getOfs();
+            *txn->recoveryUnit()->writing(&e->lastRecord) = loc;
         }
     }
@@ -359,12 +359,12 @@ namespace mongo {
         Extent *e = _extentManager->getExtent( eloc );
         invariant( e );

-        *txn->writing( &e->nsDiagnostic ) = _ns;
+        *txn->recoveryUnit()->writing( &e->nsDiagnostic ) = _ns;

-        txn->writing( &e->xnext )->Null();
-        txn->writing( &e->xprev )->Null();
-        txn->writing( &e->firstRecord )->Null();
-        txn->writing( &e->lastRecord )->Null();
+        txn->recoveryUnit()->writing( &e->xnext )->Null();
+        txn->recoveryUnit()->writing( &e->xprev )->Null();
+        txn->recoveryUnit()->writing( &e->firstRecord )->Null();
+        txn->recoveryUnit()->writing( &e->lastRecord )->Null();

         DiskLoc emptyLoc = _findFirstSpot( txn, eloc, e );
@@ -378,8 +378,8 @@ namespace mongo {
         }
         else {
             invariant( !_details->firstExtent().isNull() );
-            *txn->writing(&e->xprev) = _details->lastExtent();
-            *txn->writing(&_extentManager->getExtent(_details->lastExtent())->xnext) = eloc;
+            *txn->recoveryUnit()->writing(&e->xprev) = _details->lastExtent();
+            *txn->recoveryUnit()->writing(&_extentManager->getExtent(_details->lastExtent())->xnext) = eloc;
             _details->setLastExtent( txn, eloc );
         }
diff --git a/src/mongo/db/structure/record_store_v1_capped.cpp b/src/mongo/db/structure/record_store_v1_capped.cpp
index 9b1d4d3f3e4..d477fb5d49a 100644
--- a/src/mongo/db/structure/record_store_v1_capped.cpp
+++ b/src/mongo/db/structure/record_store_v1_capped.cpp
@@ -201,11 +201,11 @@ namespace mongo {
                 int left = regionlen - lenToAlloc;

                 /* split off some for further use. */
-                txn->writingInt(r->lengthWithHeaders()) = lenToAlloc;
+                txn->recoveryUnit()->writingInt(r->lengthWithHeaders()) = lenToAlloc;
                 DiskLoc newDelLoc = loc;
                 newDelLoc.inc(lenToAlloc);
                 DeletedRecord* newDel = drec( newDelLoc );
-                DeletedRecord* newDelW = txn->writing(newDel);
+                DeletedRecord* newDelW = txn->recoveryUnit()->writing(newDel);
                 newDelW->extentOfs() = r->extentOfs();
                 newDelW->lengthWithHeaders() = left;
                 newDelW->nextDeleted().Null();
@@ -239,8 +239,8 @@ namespace mongo {
              extLoc = ext->xnext ) {
             ext = _extentManager->getExtent(extLoc);

-            txn->writing( &ext->firstRecord )->Null();
-            txn->writing( &ext->lastRecord )->Null();
+            txn->recoveryUnit()->writing( &ext->firstRecord )->Null();
+            txn->recoveryUnit()->writing( &ext->lastRecord )->Null();

             addDeletedRec( txn, _findFirstSpot( txn, extLoc, ext ) );
         }
@@ -291,7 +291,7 @@ namespace mongo {
                  a.getOfs() + drec( a )->lengthWithHeaders() == b.getOfs() ) {
                 // a & b are adjacent.  merge.
-                txn->writingInt( drec(a)->lengthWithHeaders() ) += drec(b)->lengthWithHeaders();
+                txn->recoveryUnit()->writingInt( drec(a)->lengthWithHeaders() ) += drec(b)->lengthWithHeaders();
                 j++;
                 if ( j == drecs.end() ) {
                     DDD( "\t compact adddelrec2" );
@@ -319,7 +319,7 @@ namespace mongo {
         if ( cappedLastDelRecLastExtent().isNull() )
             setListOfAllDeletedRecords( txn, loc );
         else
-            *txn->writing( &drec(cappedLastDelRecLastExtent())->nextDeleted() ) = loc;
+            *txn->recoveryUnit()->writing( &drec(cappedLastDelRecLastExtent())->nextDeleted() ) = loc;
     }

     void CappedRecordStoreV1::cappedCheckMigrate(OperationContext* txn) {
@@ -333,7 +333,7 @@ namespace mongo {
                 continue;
             DiskLoc last = first;
             for (; !drec(last)->nextDeleted().isNull(); last = drec(last)->nextDeleted() );
-            *txn->writing(&drec(last)->nextDeleted()) = cappedListOfAllDeletedRecords();
+            *txn->recoveryUnit()->writing(&drec(last)->nextDeleted()) = cappedListOfAllDeletedRecords();
             setListOfAllDeletedRecords( txn, first );
             _details->setDeletedListEntry(txn, i, DiskLoc());
         }
@@ -406,8 +406,8 @@ namespace mongo {
             if ( prev.isNull() )
                 setListOfAllDeletedRecords( txn, drec(ret)->nextDeleted() );
             else
-                *txn->writing(&drec(prev)->nextDeleted()) = drec(ret)->nextDeleted();
-            *txn->writing(&drec(ret)->nextDeleted()) = DiskLoc().setInvalid(); // defensive.
+                *txn->recoveryUnit()->writing(&drec(prev)->nextDeleted()) = drec(ret)->nextDeleted();
+            *txn->recoveryUnit()->writing(&drec(ret)->nextDeleted()) = DiskLoc().setInvalid(); // defensive.
             invariant( drec(ret)->extentOfs() < ret.getOfs() );
         }
@@ -454,7 +454,7 @@ namespace mongo {
                 // 'end' has been found and removed, so break.
                 break;
             }
-            txn->commitIfNeeded();
+            txn->recoveryUnit()->commitIfNeeded();
             // 'curr' will point to the newest document in the collection.
DiskLoc curr = theCapExtent()->lastRecord; invariant( !curr.isNull() ); @@ -568,7 +568,7 @@ namespace mongo { } void CappedRecordStoreV1::addDeletedRec( OperationContext* txn, const DiskLoc& dloc ) { - DeletedRecord* d = txn->writing( drec( dloc ) ); + DeletedRecord* d = txn->recoveryUnit()->writing( drec( dloc ) ); DEBUGGING log() << "TEMP: add deleted rec " << dloc.toString() << ' ' << hex << d->extentOfs() << endl; if ( !cappedLastDelRecLastExtent().isValid() ) { @@ -580,7 +580,7 @@ namespace mongo { DiskLoc i = cappedListOfAllDeletedRecords(); for (; !drec(i)->nextDeleted().isNull(); i = drec(i)->nextDeleted() ) ; - *txn->writing(&drec(i)->nextDeleted()) = dloc; + *txn->recoveryUnit()->writing(&drec(i)->nextDeleted()) = dloc; } } else { diff --git a/src/mongo/db/structure/record_store_v1_simple.cpp b/src/mongo/db/structure/record_store_v1_simple.cpp index fa43a2087a7..a1a74872429 100644 --- a/src/mongo/db/structure/record_store_v1_simple.cpp +++ b/src/mongo/db/structure/record_store_v1_simple.cpp @@ -155,7 +155,7 @@ namespace mongo { // unlink ourself from the deleted list DeletedRecord *bmr = drec(bestmatch); if ( bestprev ) { - *txn->writing(bestprev) = bmr->nextDeleted(); + *txn->recoveryUnit()->writing(bestprev) = bmr->nextDeleted(); } else { // should be the front of a free-list @@ -163,7 +163,7 @@ namespace mongo { invariant( _details->deletedListEntry(myBucket) == bestmatch ); _details->setDeletedListEntry(txn, myBucket, bmr->nextDeleted()); } - *txn->writing(&bmr->nextDeleted()) = DiskLoc().setInvalid(); // defensive. + *txn->recoveryUnit()->writing(&bmr->nextDeleted()) = DiskLoc().setInvalid(); // defensive. invariant(bmr->extentOfs() < bestmatch.getOfs()); freelistIterations.increment( 1 + chain ); @@ -204,11 +204,11 @@ namespace mongo { } /* split off some for further use. 
*/ - txn->writingInt(r->lengthWithHeaders()) = lenToAlloc; + txn->recoveryUnit()->writingInt(r->lengthWithHeaders()) = lenToAlloc; DiskLoc newDelLoc = loc; newDelLoc.inc(lenToAlloc); DeletedRecord* newDel = drec(newDelLoc); - DeletedRecord* newDelW = txn->writing(newDel); + DeletedRecord* newDelW = txn->recoveryUnit()->writing(newDel); newDelW->extentOfs() = r->extentOfs(); newDelW->lengthWithHeaders() = left; newDelW->nextDeleted().Null(); @@ -270,7 +270,7 @@ namespace mongo { DEBUGGING log() << "TEMP: add deleted rec " << dloc.toString() << ' ' << hex << d->extentOfs() << endl; int b = bucket(d->lengthWithHeaders()); - *txn->writing(&d->nextDeleted()) = _details->deletedListEntry(b); + *txn->recoveryUnit()->writing(&d->nextDeleted()) = _details->deletedListEntry(b); _details->setDeletedListEntry(txn, b, dloc); } @@ -415,11 +415,11 @@ namespace mongo { // remove the old records (orphan them) periodically so our commit block doesn't get too large bool stopping = false; RARELY stopping = !txn->checkForInterruptNoAssert().isOK(); - if( stopping || txn->isCommitNeeded() ) { - *txn->writing(&e->firstRecord) = L; + if( stopping || txn->recoveryUnit()->isCommitNeeded() ) { + *txn->recoveryUnit()->writing(&e->firstRecord) = L; Record *r = recordFor(L); - txn->writingInt(r->prevOfs()) = DiskLoc::NullOfs; - txn->commitIfNeeded(); + txn->recoveryUnit()->writingInt(r->prevOfs()) = DiskLoc::NullOfs; + txn->recoveryUnit()->commitIfNeeded(); txn->checkForInterrupt(); } } @@ -429,10 +429,10 @@ namespace mongo { invariant( _details->lastExtent() != diskloc ); DiskLoc newFirst = e->xnext; _details->setFirstExtent( txn, newFirst ); - *txn->writing(&_extentManager->getExtent( newFirst )->xprev) = DiskLoc(); + *txn->recoveryUnit()->writing(&_extentManager->getExtent( newFirst )->xprev) = DiskLoc(); _extentManager->freeExtent( txn, diskloc ); - txn->commitIfNeeded(); + txn->recoveryUnit()->commitIfNeeded(); { double op = 1.0; @@ -452,7 +452,7 @@ namespace mongo { CompactStats* stats ) { // this is a big job, so might as well make things tidy before we start just to be nice. - txn->commitIfNeeded(); + txn->recoveryUnit()->commitIfNeeded(); list<DiskLoc> extents; for( DiskLoc extLocation = _details->firstExtent(); diff --git a/src/mongo/db/structure/record_store_v1_simple_test.cpp b/src/mongo/db/structure/record_store_v1_simple_test.cpp index 08a32c5d137..6d6e5ed8a29 100644 --- a/src/mongo/db/structure/record_store_v1_simple_test.cpp +++ b/src/mongo/db/structure/record_store_v1_simple_test.cpp @@ -228,7 +228,7 @@ namespace { ASSERT_GREATER_THAN_OR_EQUALS( rs->deletedRecordFor( deleted )->lengthWithHeaders(), newDeletedRecordSize ); DeletedRecord* dr = const_cast<DeletedRecord*>( rs->deletedRecordFor( deleted ) ); - txn->writingInt( dr->lengthWithHeaders() ) = newDeletedRecordSize; + txn->recoveryUnit()->writingInt( dr->lengthWithHeaders() ) = newDeletedRecordSize; // Re-insert the DeletedRecord into the deletedList bucket appropriate for its // new size. 
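Every hunk above applies the same mechanical rewrite: call sites stop going through the OperationContext durability passthroughs (writing, writingInt, writingPtr, isCommitNeeded, commitIfNeeded, createdFile) and reach the same methods through txn->recoveryUnit() instead. The sketch below shows the shape of that call site after the change; it is a simplified stand-in, not the real mongo classes — NoopRecoveryUnit and the trivial main are invented for illustration, and the writing/writingInt helpers are modeled on the calls the diff exercises.

    #include <cstddef>
    #include <iostream>

    // Simplified stand-in for the durability interface the diff targets.
    class RecoveryUnit {
    public:
        virtual ~RecoveryUnit() {}

        // Declare intent to write 'len' bytes at 'data'; a real implementation
        // journals the region before returning a mutable pointer into it.
        virtual void* writingPtr(void* data, std::size_t len) = 0;

        // Typed conveniences layered on writingPtr(), mirroring the helpers
        // used throughout the hunks above.
        template <typename T>
        T* writing(T* x) {
            return static_cast<T*>(writingPtr(x, sizeof(T)));
        }
        int& writingInt(int& d) {
            return *writing(&d);
        }
    };

    // No-durability implementation so the sketch runs standalone.
    class NoopRecoveryUnit : public RecoveryUnit {
    public:
        void* writingPtr(void* data, std::size_t) { return data; }
    };

    // After the change, OperationContext only exposes the unit; it no longer
    // forwards the writing*() calls itself.
    class OperationContext {
    public:
        explicit OperationContext(RecoveryUnit* ru) : _ru(ru) {}
        RecoveryUnit* recoveryUnit() { return _ru; }
    private:
        RecoveryUnit* _ru;
    };

    int main() {
        NoopRecoveryUnit ru;
        OperationContext txn(&ru);

        int lastExtentSize = 0;
        // The rewritten call-site shape used throughout the diff:
        //   old: txn->writingInt(lastExtentSize) = 8192;
        //   new: txn->recoveryUnit()->writingInt(lastExtentSize) = 8192;
        txn.recoveryUnit()->writingInt(lastExtentSize) = 8192;
        std::cout << lastExtentSize << std::endl;  // prints 8192
        return 0;
    }

The likely design point (inferred from the commit title, not stated in the diff) is that a write-intent declaration now names the component that owns it, so callers no longer depend on OperationContext forwarding a particular durability implementation.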
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index d57aef1877c..52e8eece333 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -133,7 +133,7 @@ namespace QueryStageCollectionScan {
             BSONObj o = b.done();
             int len = o.objsize();
             Extent *e = extentManager()->getExtent(ext);
-            e = getDur().writing(e);
+            e = _txn.recoveryUnit()->writing(e);
             int ofs;
             if ( e->lastRecord.isNull() ) {
                 ofs = ext.getOfs() + ( e->_extentData - (char *)e );
@@ -144,7 +144,7 @@ namespace QueryStageCollectionScan {
             }
             DiskLoc dl( ext.a(), ofs );
             Record *r = recordStore()->recordFor(dl);
-            r = (Record*) getDur().writingPtr(r, Record::HeaderSize + len);
+            r = (Record*) _txn.recoveryUnit()->writingPtr(r, Record::HeaderSize + len);
             r->lengthWithHeaders() = Record::HeaderSize + len;
             r->extentOfs() = e->myLoc.getOfs();
             r->nextOfs() = DiskLoc::NullOfs;
@@ -153,7 +153,7 @@ namespace QueryStageCollectionScan {
             if ( e->firstRecord.isNull() )
                 e->firstRecord = dl;
             else
-                getDur().writingInt(recordStore()->recordFor(e->lastRecord)->nextOfs()) = ofs;
+                _txn.recoveryUnit()->writingInt(recordStore()->recordFor(e->lastRecord)->nextOfs()) = ofs;
             e->lastRecord = dl;
             return dl;
         }
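The dbtests hunks differ slightly from the rest of the commit: the test fixture previously declared its writes through the process-global getDur() interface and now uses the recovery unit of its own OperationContext member (_txn), keeping the write intent scoped to one operation. A minimal sketch of that call-site pattern, using invented stand-in types (FakeRecoveryUnit, FakeOperationContext) rather than the real mongo classes:

    #include <cstddef>
    #include <cstring>
    #include <iostream>

    // Invented stand-ins; a real writingPtr() would journal the region first.
    struct FakeRecoveryUnit {
        void* writingPtr(void* data, std::size_t /*len*/) { return data; }
    };

    struct FakeOperationContext {
        FakeRecoveryUnit* recoveryUnit() { return &_ru; }
        FakeRecoveryUnit _ru;
    };

    // Mirrors the rewritten fixture: declare the record region writable via
    // the context's recovery unit before memcpy'ing the payload into it.
    void copyIntoRecord(FakeOperationContext& txn, char* target,
                        const char* src, std::size_t len) {
        void* writable = txn.recoveryUnit()->writingPtr(target, len);
        std::memcpy(writable, src, len);
    }

    int main() {
        FakeOperationContext txn;
        char record[16] = {};
        copyIntoRecord(txn, record, "hello", 6);
        std::cout << record << std::endl;  // prints: hello
        return 0;
    }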