summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorMathias Stearn <mathias@10gen.com>2014-04-28 20:55:18 -0400
committerMathias Stearn <mathias@10gen.com>2014-04-29 09:14:59 -0400
commitde5ea8fd8682d086de86636d9fff80720939790e (patch)
treebc1ad8171f539a97333df978fbae16aa53a490f9 /src
parenta16b3afd4f6760a8818da0888ebd330e92d381a2 (diff)
downloadmongo-de5ea8fd8682d086de86636d9fff80720939790e.tar.gz
SERVER-13643 Move Txn plumbing above Database and Collection
Diffstat (limited to 'src')
-rw-r--r--src/mongo/db/catalog/collection.cpp67
-rw-r--r--src/mongo/db/catalog/collection.h53
-rw-r--r--src/mongo/db/catalog/database.cpp91
-rw-r--r--src/mongo/db/catalog/database.h34
-rw-r--r--src/mongo/db/catalog/index_catalog.cpp18
-rw-r--r--src/mongo/db/catalog/index_create.cpp8
-rw-r--r--src/mongo/db/cloner.cpp15
-rw-r--r--src/mongo/db/commands/collection_to_capped.cpp26
-rw-r--r--src/mongo/db/commands/compact.cpp4
-rw-r--r--src/mongo/db/commands/create_indexes.cpp6
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp8
-rw-r--r--src/mongo/db/commands/mr.cpp17
-rw-r--r--src/mongo/db/commands/rename_collection.cpp18
-rw-r--r--src/mongo/db/commands/test_commands.cpp12
-rw-r--r--src/mongo/db/commands/write_commands/batch_executor.cpp16
-rw-r--r--src/mongo/db/db.cpp4
-rw-r--r--src/mongo/db/dbcommands.cpp17
-rw-r--r--src/mongo/db/dbhelpers.cpp18
-rw-r--r--src/mongo/db/index/btree_based_bulk_access_method.cpp4
-rw-r--r--src/mongo/db/instance.cpp36
-rw-r--r--src/mongo/db/introspect.cpp24
-rw-r--r--src/mongo/db/introspect.h6
-rw-r--r--src/mongo/db/ops/delete.cpp9
-rw-r--r--src/mongo/db/ops/delete.h5
-rw-r--r--src/mongo/db/ops/delete_executor.cpp4
-rw-r--r--src/mongo/db/ops/delete_executor.h3
-rw-r--r--src/mongo/db/ops/update.cpp18
-rw-r--r--src/mongo/db/ops/update.h6
-rw-r--r--src/mongo/db/ops/update_executor.cpp5
-rw-r--r--src/mongo/db/ops/update_executor.h3
-rw-r--r--src/mongo/db/pdfile.cpp5
-rw-r--r--src/mongo/db/pdfile.h4
-rw-r--r--src/mongo/db/repair_database.cpp8
-rw-r--r--src/mongo/db/repl/master_slave.cpp4
-rw-r--r--src/mongo/db/repl/oplog.cpp22
-rw-r--r--src/mongo/db/repl/rs.cpp4
-rw-r--r--src/mongo/db/repl/rs_initialsync.cpp4
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp18
-rw-r--r--src/mongo/db/repl/rs_sync.cpp4
-rw-r--r--src/mongo/db/repl/sync.cpp4
-rw-r--r--src/mongo/db/structure/btree/btree_logic.cpp4
-rw-r--r--src/mongo/db/structure/collection_compact.cpp5
-rw-r--r--src/mongo/db/structure/record_store_v1_capped.cpp4
-rw-r--r--src/mongo/db/ttl.cpp4
-rw-r--r--src/mongo/dbtests/counttests.cpp12
-rw-r--r--src/mongo/dbtests/extsorttests.cpp7
-rw-r--r--src/mongo/dbtests/indexcatalogtests.cpp7
-rw-r--r--src/mongo/dbtests/indexupdatetests.cpp26
-rw-r--r--src/mongo/dbtests/namespacetests.cpp65
-rw-r--r--src/mongo/dbtests/oplogstarttests.cpp6
-rw-r--r--src/mongo/dbtests/pdfiletests.cpp10
-rw-r--r--src/mongo/dbtests/query_stage_and.cpp49
-rw-r--r--src/mongo/dbtests/query_stage_collscan.cpp6
-rw-r--r--src/mongo/dbtests/query_stage_fetch.cpp16
-rw-r--r--src/mongo/dbtests/query_stage_keep.cpp4
-rw-r--r--src/mongo/dbtests/query_stage_merge_sort.cpp22
-rw-r--r--src/mongo/dbtests/query_stage_sort.cpp16
-rw-r--r--src/mongo/dbtests/querytests.cpp20
-rw-r--r--src/mongo/dbtests/queryutiltests.cpp6
-rw-r--r--src/mongo/dbtests/replsettests.cpp27
-rw-r--r--src/mongo/dbtests/repltests.cpp27
-rw-r--r--src/mongo/s/d_migrate.cpp6
62 files changed, 624 insertions, 357 deletions
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 8d40633297c..5f14fa9e610 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/dbhelpers.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/ops/update.h"
+#include "mongo/db/storage/transaction.h"
#include "mongo/db/structure/catalog/namespace_details.h"
#include "mongo/db/structure/catalog/namespace_details_rsv1_metadata.h"
#include "mongo/db/structure/record_store_v1_capped.h"
@@ -47,7 +48,6 @@
#include "mongo/db/repl/rs.h"
#include "mongo/db/storage/extent.h"
#include "mongo/db/storage/extent_manager.h"
-#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/auth/user_document_parser.h" // XXX-ANDY
@@ -74,14 +74,14 @@ namespace mongo {
// ----
- Collection::Collection( const StringData& fullNS,
+ Collection::Collection( TransactionExperiment* txn,
+ const StringData& fullNS,
NamespaceDetails* details,
Database* database )
: _ns( fullNS ),
_infoCache( this ),
_indexCatalog( this, details ),
_cursorCache( fullNS ) {
- DurTransaction txn[1];
_details = details;
_database = database;
@@ -167,8 +167,9 @@ namespace mongo {
return BSONObj( rec->accessed()->data() );
}
- StatusWith<DiskLoc> Collection::insertDocument( const DocWriter* doc, bool enforceQuota ) {
- DurTransaction txn[1];
+ StatusWith<DiskLoc> Collection::insertDocument( TransactionExperiment* txn,
+ const DocWriter* doc,
+ bool enforceQuota ) {
verify( _indexCatalog.numIndexesTotal() == 0 ); // eventually can implement, just not done
StatusWith<DiskLoc> loc = _recordStore->insertRecord( txn,
@@ -182,7 +183,9 @@ namespace mongo {
return StatusWith<DiskLoc>( loc );
}
- StatusWith<DiskLoc> Collection::insertDocument( const BSONObj& docToInsert, bool enforceQuota ) {
+ StatusWith<DiskLoc> Collection::insertDocument( TransactionExperiment* txn,
+ const BSONObj& docToInsert,
+ bool enforceQuota ) {
if ( _indexCatalog.findIdIndex() ) {
if ( docToInsert["_id"].eoo() ) {
return StatusWith<DiskLoc>( ErrorCodes::InternalError,
@@ -198,7 +201,7 @@ namespace mongo {
return StatusWith<DiskLoc>( ret );
}
- StatusWith<DiskLoc> status = _insertDocument( docToInsert, enforceQuota );
+ StatusWith<DiskLoc> status = _insertDocument( txn, docToInsert, enforceQuota );
if ( status.isOK() ) {
_details->paddingFits();
}
@@ -206,9 +209,9 @@ namespace mongo {
return status;
}
- StatusWith<DiskLoc> Collection::insertDocument( const BSONObj& doc,
+ StatusWith<DiskLoc> Collection::insertDocument( TransactionExperiment* txn,
+ const BSONObj& doc,
MultiIndexBlock& indexBlock ) {
- DurTransaction txn[1];
StatusWith<DiskLoc> loc = _recordStore->insertRecord( txn,
doc.objdata(),
doc.objsize(),
@@ -229,14 +232,14 @@ namespace mongo {
}
- StatusWith<DiskLoc> Collection::_insertDocument( const BSONObj& docToInsert,
+ StatusWith<DiskLoc> Collection::_insertDocument( TransactionExperiment* txn,
+ const BSONObj& docToInsert,
bool enforceQuota ) {
// TODO: for now, capped logic lives inside NamespaceDetails, which is hidden
// under the RecordStore, this feels broken since that should be a
// collection access method probably
- DurTransaction txn[1];
StatusWith<DiskLoc> loc = _recordStore->insertRecord( txn,
docToInsert.objdata(),
docToInsert.objsize(),
@@ -266,9 +269,11 @@ namespace mongo {
return loc;
}
- void Collection::deleteDocument( const DiskLoc& loc, bool cappedOK, bool noWarn,
+ void Collection::deleteDocument( TransactionExperiment* txn,
+ const DiskLoc& loc,
+ bool cappedOK,
+ bool noWarn,
BSONObj* deletedId ) {
- DurTransaction txn[1];
if ( _details->isCapped() && !cappedOK ) {
log() << "failing remove on a capped ns " << _ns << endl;
uasserted( 10089, "cannot remove from a capped collection" );
@@ -297,11 +302,11 @@ namespace mongo {
Counter64 moveCounter;
ServerStatusMetricField<Counter64> moveCounterDisplay( "record.moves", &moveCounter );
- StatusWith<DiskLoc> Collection::updateDocument( const DiskLoc& oldLocation,
+ StatusWith<DiskLoc> Collection::updateDocument( TransactionExperiment* txn,
+ const DiskLoc& oldLocation,
const BSONObj& objNew,
bool enforceQuota,
OpDebug* debug ) {
- DurTransaction txn[1];
Record* oldRecord = _recordStore->recordFor( oldLocation );
BSONObj objOld( oldRecord->accessed()->data() );
@@ -368,7 +373,7 @@ namespace mongo {
debug->nmoved += 1;
}
- StatusWith<DiskLoc> loc = _insertDocument( objNew, enforceQuota );
+ StatusWith<DiskLoc> loc = _insertDocument( txn, objNew, enforceQuota );
if ( loc.isOK() ) {
// insert successful, now lets deallocate the old location
@@ -412,7 +417,8 @@ namespace mongo {
return StatusWith<DiskLoc>( oldLocation );
}
- Status Collection::updateDocumentWithDamages( const DiskLoc& loc,
+ Status Collection::updateDocumentWithDamages( TransactionExperiment* txn,
+ const DiskLoc& loc,
const char* damangeSource,
const mutablebson::DamageVector& damages ) {
@@ -429,7 +435,7 @@ namespace mongo {
const mutablebson::DamageVector::const_iterator end = damages.end();
for( ; where != end; ++where ) {
const char* sourcePtr = damangeSource + where->sourceOffset;
- void* targetPtr = getDur().writingPtr(root + where->targetOffset, where->size);
+ void* targetPtr = txn->writingPtr(root + where->targetOffset, where->size);
std::memcpy(targetPtr, sourcePtr, where->size);
}
@@ -474,8 +480,7 @@ namespace mongo {
return &_database->getExtentManager();
}
- void Collection::increaseStorageSize( int size, bool enforceQuota ) {
- DurTransaction txn[1];
+ void Collection::increaseStorageSize(TransactionExperiment* txn, int size, bool enforceQuota) {
_recordStore->increaseStorageSize(txn, size, enforceQuota ? largestFileNumberInQuota() : 0);
}
@@ -511,8 +516,7 @@ namespace mongo {
* 3) truncate record store
* 4) re-write indexes
*/
- Status Collection::truncate() {
- DurTransaction txn[1];
+ Status Collection::truncate(TransactionExperiment* txn) {
massert( 17445, "index build in progress", _indexCatalog.numIndexesInProgress() == 0 );
// 1) store index specs
@@ -547,8 +551,9 @@ namespace mongo {
return Status::OK();
}
- void Collection::temp_cappedTruncateAfter( DiskLoc end, bool inclusive) {
- DurTransaction txn[1];
+ void Collection::temp_cappedTruncateAfter(TransactionExperiment* txn,
+ DiskLoc end,
+ bool inclusive) {
invariant( isCapped() );
reinterpret_cast<CappedRecordStoreV1*>(
_recordStore.get())->temp_cappedTruncateAfter( txn, end, inclusive );
@@ -615,25 +620,25 @@ namespace mongo {
return _details->isUserFlagSet( flag );
}
- bool Collection::setUserFlag( int flag ) {
+ bool Collection::setUserFlag( TransactionExperiment* txn, int flag ) {
if ( !_details->setUserFlag( flag ) )
return false;
- _syncUserFlags();
+ _syncUserFlags(txn);
return true;
}
- bool Collection::clearUserFlag( int flag ) {
+ bool Collection::clearUserFlag( TransactionExperiment* txn, int flag ) {
if ( !_details->clearUserFlag( flag ) )
return false;
- _syncUserFlags();
+ _syncUserFlags(txn);
return true;
}
- void Collection::_syncUserFlags() {
+ void Collection::_syncUserFlags(TransactionExperiment* txn) {
if ( _ns.coll() == "system.namespaces" )
return;
string system_namespaces = _ns.getSisterNS( "system.namespaces" );
- Collection* coll = _database->getCollection( system_namespaces );
+ Collection* coll = _database->getCollection( txn, system_namespaces );
DiskLoc oldLocation = Helpers::findOne( coll, BSON( "name" << _ns.ns() ), false );
fassert( 17247, !oldLocation.isNull() );
@@ -645,7 +650,7 @@ namespace mongo {
BSON( "options.flags" <<
_details->userFlags() ) ) );
- StatusWith<DiskLoc> loc = coll->updateDocument( oldLocation, newEntry, false, NULL );
+ StatusWith<DiskLoc> loc = coll->updateDocument( txn, oldLocation, newEntry, false, NULL );
if ( !loc.isOK() ) {
// TODO: should this be an fassert?
error() << "syncUserFlags failed! "
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 7a9306e5e15..be9d1f0e386 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -50,6 +50,7 @@ namespace mongo {
class NamespaceDetails;
class IndexCatalog;
class MultiIndexBlock;
+ class TransactionExperiment;
class RecordIterator;
class FlatIterator;
@@ -108,9 +109,10 @@ namespace mongo {
*/
class Collection {
public:
- Collection( const StringData& fullNS,
- NamespaceDetails* details,
- Database* database );
+ Collection( TransactionExperiment* txn,
+ const StringData& fullNS,
+ NamespaceDetails* details,
+ Database* database );
~Collection();
@@ -159,7 +161,8 @@ namespace mongo {
*/
int64_t countTableScan( const MatchExpression* expression );
- void deleteDocument( const DiskLoc& loc,
+ void deleteDocument( TransactionExperiment* txn,
+ const DiskLoc& loc,
bool cappedOK = false,
bool noWarn = false,
BSONObj* deletedId = 0 );
@@ -168,11 +171,17 @@ namespace mongo {
* this does NOT modify the doc before inserting
* i.e. will not add an _id field for documents that are missing it
*/
- StatusWith<DiskLoc> insertDocument( const BSONObj& doc, bool enforceQuota );
+ StatusWith<DiskLoc> insertDocument( TransactionExperiment* txn,
+ const BSONObj& doc,
+ bool enforceQuota );
- StatusWith<DiskLoc> insertDocument( const DocWriter* doc, bool enforceQuota );
+ StatusWith<DiskLoc> insertDocument( TransactionExperiment* txn,
+ const DocWriter* doc,
+ bool enforceQuota );
- StatusWith<DiskLoc> insertDocument( const BSONObj& doc, MultiIndexBlock& indexBlock );
+ StatusWith<DiskLoc> insertDocument( TransactionExperiment* txn,
+ const BSONObj& doc,
+ MultiIndexBlock& indexBlock );
/**
* updates the document @ oldLocation with newDoc
@@ -180,7 +189,8 @@ namespace mongo {
* if not, it is moved
* @return the post update location of the doc (may or may not be the same as oldLocation)
*/
- StatusWith<DiskLoc> updateDocument( const DiskLoc& oldLocation,
+ StatusWith<DiskLoc> updateDocument( TransactionExperiment* txn,
+ const DiskLoc& oldLocation,
const BSONObj& newDoc,
bool enforceQuota,
OpDebug* debug );
@@ -188,7 +198,8 @@ namespace mongo {
/**
* right now not allowed to modify indexes
*/
- Status updateDocumentWithDamages( const DiskLoc& loc,
+ Status updateDocumentWithDamages( TransactionExperiment* txn,
+ const DiskLoc& loc,
const char* damangeSource,
const mutablebson::DamageVector& damages );
@@ -197,14 +208,14 @@ namespace mongo {
// -----------
- StatusWith<CompactStats> compact( const CompactOptions* options );
+ StatusWith<CompactStats> compact(TransactionExperiment* txn, const CompactOptions* options);
/**
* removes all documents as fast as possible
* indexes before and after will be the same
* as will other characteristics
*/
- Status truncate();
+ Status truncate(TransactionExperiment* txn);
/**
* @param full - does more checks
@@ -223,7 +234,7 @@ namespace mongo {
* @param inclusive - Truncate 'end' as well iff true
* XXX: this will go away soon, just needed to move for now
*/
- void temp_cappedTruncateAfter( DiskLoc end, bool inclusive );
+ void temp_cappedTruncateAfter( TransactionExperiment* txn, DiskLoc end, bool inclusive );
// -----------
@@ -232,7 +243,7 @@ namespace mongo {
// this will add a new extent the collection
// the new extent will be returned
// it will have been added to the linked list already
- void increaseStorageSize( int size, bool enforceQuota );
+ void increaseStorageSize( TransactionExperiment* txn, int size, bool enforceQuota );
//
// Stats
@@ -253,8 +264,8 @@ namespace mongo {
// TODO(erh) - below till next mark are suspect
bool isUserFlagSet( int flag ) const;
- bool setUserFlag( int flag );
- bool clearUserFlag( int flag );
+ bool setUserFlag( TransactionExperiment* txn, int flag );
+ bool clearUserFlag( TransactionExperiment* txn, int flag );
void setMaxCappedDocs( long long max );
// --- end suspect things
@@ -265,14 +276,18 @@ namespace mongo {
* - some user error checks
* - adjust padding
*/
- StatusWith<DiskLoc> _insertDocument( const BSONObj& doc,
+ StatusWith<DiskLoc> _insertDocument( TransactionExperiment* txn,
+ const BSONObj& doc,
bool enforceQuota );
- void _compactExtent(const DiskLoc diskloc, int extentNumber,
+ void _compactExtent(TransactionExperiment* txn,
+ const DiskLoc diskloc,
+ int extentNumber,
MultiIndexBlock& indexesToInsertTo,
- const CompactOptions* compactOptions, CompactStats* stats );
+ const CompactOptions* compactOptions,
+ CompactStats* stats );
- void _syncUserFlags(); // TODO: this is bizarre, should go away
+ void _syncUserFlags(TransactionExperiment* txn); // TODO: this is bizarre, should go away
// @return 0 for inf., otherwise a number of files
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 6b544726b26..1c59bf5e363 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -51,6 +51,7 @@
#include "mongo/db/storage/data_file.h"
#include "mongo/db/storage/extent.h"
#include "mongo/db/storage/extent_manager.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/storage_options.h"
#include "mongo/db/structure/catalog/namespace_details.h"
#include "mongo/db/catalog/collection.h"
@@ -306,7 +307,7 @@ namespace mongo {
}
}
- void Database::clearTmpCollections() {
+ void Database::clearTmpCollections(TransactionExperiment* txn) {
Lock::assertWriteLocked( _name );
@@ -315,7 +316,7 @@ namespace mongo {
// would corrupt the cursor.
vector<string> toDelete;
{
- Collection* coll = getCollection( _namespacesName );
+ Collection* coll = getCollection( txn, _namespacesName );
if ( coll ) {
scoped_ptr<RecordIterator> it( coll->getIterator() );
DiskLoc next;
@@ -352,7 +353,7 @@ namespace mongo {
void Database::flushFiles( bool sync ) { return _extentManager->flushFiles( sync ); }
- bool Database::setProfilingLevel( int newLevel , string& errmsg ) {
+ bool Database::setProfilingLevel( TransactionExperiment* txn, int newLevel , string& errmsg ) {
if ( _profile == newLevel )
return true;
@@ -366,18 +367,18 @@ namespace mongo {
return true;
}
- if (!getOrCreateProfileCollection(this, true, &errmsg))
+ if (!getOrCreateProfileCollection(txn, this, true, &errmsg))
return false;
_profile = newLevel;
return true;
}
- Status Database::dropCollection( const StringData& fullns ) {
+ Status Database::dropCollection( TransactionExperiment* txn, const StringData& fullns ) {
LOG(1) << "dropCollection: " << fullns << endl;
massertNamespaceNotIndex( fullns, "dropCollection" );
- Collection* collection = getCollection( fullns );
+ Collection* collection = getCollection( txn, fullns );
if ( !collection ) {
// collection doesn't exist
return Status::OK();
@@ -424,7 +425,7 @@ namespace mongo {
Top::global.collectionDropped( fullns );
- Status s = _dropNS( fullns );
+ Status s = _dropNS( txn, fullns );
_clearCollectionCache( fullns ); // we want to do this always
@@ -466,6 +467,11 @@ namespace mongo {
}
Collection* Database::getCollection( const StringData& ns ) {
+ DurTransaction txn; // TODO remove once we require reads to have transactions
+ return getCollection(&txn, ns);
+ }
+
+ Collection* Database::getCollection( TransactionExperiment* txn, const StringData& ns ) {
verify( _name == nsToDatabaseSubstring( ns ) );
scoped_lock lk( _collectionLock );
@@ -491,18 +497,20 @@ namespace mongo {
return NULL;
}
- Collection* c = new Collection( ns, details, this );
+ Collection* c = new Collection( txn, ns, details, this );
_collections[ns] = c;
return c;
}
- Status Database::renameCollection( const StringData& fromNS, const StringData& toNS,
+ Status Database::renameCollection( TransactionExperiment* txn,
+ const StringData& fromNS,
+ const StringData& toNS,
bool stayTemp ) {
// move data namespace
- Status s = _renameSingleNamespace( fromNS, toNS, stayTemp );
+ Status s = _renameSingleNamespace( txn, fromNS, toNS, stayTemp );
if ( !s.isOK() )
return s;
@@ -513,7 +521,7 @@ namespace mongo {
// move index namespaces
BSONObj oldIndexSpec;
- while( Helpers::findOne( getCollection( _indexesName ),
+ while( Helpers::findOne( getCollection( txn, _indexesName ),
BSON( "ns" << fromNS ),
oldIndexSpec ) ) {
oldIndexSpec = oldIndexSpec.getOwned();
@@ -532,10 +540,10 @@ namespace mongo {
newIndexSpec = b.obj();
}
- Collection* systemIndexCollection = getCollection( _indexesName );
+ Collection* systemIndexCollection = getCollection( txn, _indexesName );
StatusWith<DiskLoc> newIndexSpecLoc =
- systemIndexCollection->insertDocument( newIndexSpec, false );
+ systemIndexCollection->insertDocument( txn, newIndexSpec, false );
if ( !newIndexSpecLoc.isOK() )
return newIndexSpecLoc.getStatus();
@@ -545,7 +553,7 @@ namespace mongo {
// fix IndexDetails pointer
int indexI = details->_catalogFindIndexByName( indexName );
IndexDetails& indexDetails = details->idx(indexI);
- *getDur().writing(&indexDetails.info) = newIndexSpecLoc.getValue(); // XXX: dur
+ *txn->writing(&indexDetails.info) = newIndexSpecLoc.getValue(); // XXX: dur
}
{
@@ -553,12 +561,12 @@ namespace mongo {
string oldIndexNs = IndexDescriptor::makeIndexNamespace( fromNS, indexName );
string newIndexNs = IndexDescriptor::makeIndexNamespace( toNS, indexName );
- Status s = _renameSingleNamespace( oldIndexNs, newIndexNs, false );
+ Status s = _renameSingleNamespace( txn, oldIndexNs, newIndexNs, false );
if ( !s.isOK() )
return s;
}
- deleteObjects( _indexesName, oldIndexSpec, true, false, true );
+ deleteObjects( txn, _indexesName, oldIndexSpec, true, false, true );
}
Top::global.collectionDropped( fromNS.toString() );
@@ -566,7 +574,9 @@ namespace mongo {
return Status::OK();
}
- Status Database::_renameSingleNamespace( const StringData& fromNS, const StringData& toNS,
+ Status Database::_renameSingleNamespace( TransactionExperiment* txn,
+ const StringData& fromNS,
+ const StringData& toNS,
bool stayTemp ) {
// TODO: make it so we dont't need to do this
@@ -621,7 +631,7 @@ namespace mongo {
{
BSONObj oldSpec;
- if ( !Helpers::findOne( getCollection( _namespacesName ),
+ if ( !Helpers::findOne( getCollection( txn, _namespacesName ),
BSON( "name" << fromNS ),
oldSpec ) )
return Status( ErrorCodes::InternalError, "can't find system.namespaces entry" );
@@ -641,17 +651,21 @@ namespace mongo {
newSpec = b.obj();
}
- _addNamespaceToCatalog( toNSString, newSpec.isEmpty() ? 0 : &newSpec );
+ _addNamespaceToCatalog( txn, toNSString, newSpec.isEmpty() ? 0 : &newSpec );
- deleteObjects( _namespacesName, BSON( "name" << fromNS ), false, false, true );
+ deleteObjects( txn, _namespacesName, BSON( "name" << fromNS ), false, false, true );
return Status::OK();
}
Collection* Database::getOrCreateCollection( const StringData& ns ) {
- Collection* c = getCollection( ns );
+ DurTransaction txn; // TODO remove once we require reads to have transactions
+ return getOrCreateCollection(&txn, ns);
+ }
+ Collection* Database::getOrCreateCollection(TransactionExperiment* txn, const StringData& ns) {
+ Collection* c = getCollection( txn, ns );
if ( !c ) {
- c = createCollection( ns );
+ c = createCollection( txn, ns );
}
return c;
}
@@ -666,7 +680,8 @@ namespace mongo {
}
}
- Collection* Database::createCollection( const StringData& ns,
+ Collection* Database::createCollection( TransactionExperiment* txn,
+ const StringData& ns,
const CollectionOptions& options,
bool allocateDefaultSpace,
bool createIdIndex ) {
@@ -695,18 +710,18 @@ namespace mongo {
_namespaceIndex.add_ns( ns, DiskLoc(), options.capped );
BSONObj optionsAsBSON = options.toBSON();
- _addNamespaceToCatalog( ns, &optionsAsBSON );
+ _addNamespaceToCatalog( txn, ns, &optionsAsBSON );
- Collection* collection = getCollection( ns );
+ Collection* collection = getCollection( txn, ns );
massert( 17400, "_namespaceIndex.add_ns failed?", collection );
// allocation strategy set explicitly in flags or by server-wide default
if ( !options.capped ) {
if ( options.flagsSet ) {
- collection->setUserFlag( options.flags );
+ collection->setUserFlag( txn, options.flags );
}
else if ( newCollectionsUsePowerOf2Sizes ) {
- collection->setUserFlag( NamespaceDetails::Flag_UsePowerOf2Sizes );
+ collection->setUserFlag( txn, NamespaceDetails::Flag_UsePowerOf2Sizes );
}
}
@@ -717,14 +732,14 @@ namespace mongo {
if ( options.initialNumExtents > 0 ) {
int size = _massageExtentSize( options.cappedSize );
for ( int i = 0; i < options.initialNumExtents; i++ ) {
- collection->increaseStorageSize( size, false );
+ collection->increaseStorageSize( txn, size, false );
}
}
else if ( !options.initialExtentSizes.empty() ) {
for ( size_t i = 0; i < options.initialExtentSizes.size(); i++ ) {
int size = options.initialExtentSizes[i];
size = _massageExtentSize( size );
- collection->increaseStorageSize( size, false );
+ collection->increaseStorageSize( txn, size, false );
}
}
else if ( options.capped ) {
@@ -732,11 +747,11 @@ namespace mongo {
while ( collection->storageSize() < options.cappedSize ) {
int sz = _massageExtentSize( options.cappedSize - collection->storageSize() );
sz &= 0xffffff00;
- collection->increaseStorageSize( sz, true );
+ collection->increaseStorageSize( txn, sz, true );
}
}
else {
- collection->increaseStorageSize( Extent::initialSize( 128 ), false );
+ collection->increaseStorageSize( txn, Extent::initialSize( 128 ), false );
}
}
@@ -758,7 +773,9 @@ namespace mongo {
}
- void Database::_addNamespaceToCatalog( const StringData& ns, const BSONObj* options ) {
+ void Database::_addNamespaceToCatalog( TransactionExperiment* txn,
+ const StringData& ns,
+ const BSONObj* options ) {
LOG(1) << "Database::_addNamespaceToCatalog ns: " << ns << endl;
if ( nsToCollectionSubstring( ns ) == "system.namespaces" ) {
// system.namespaces holds all the others, so it is not explicitly listed in the catalog.
@@ -771,14 +788,14 @@ namespace mongo {
b.append("options", *options);
BSONObj obj = b.done();
- Collection* collection = getCollection( _namespacesName );
+ Collection* collection = getCollection( txn, _namespacesName );
if ( !collection )
- collection = createCollection( _namespacesName );
- StatusWith<DiskLoc> loc = collection->insertDocument( obj, false );
+ collection = createCollection( txn, _namespacesName );
+ StatusWith<DiskLoc> loc = collection->insertDocument( txn, obj, false );
uassertStatusOK( loc.getStatus() );
}
- Status Database::_dropNS( const StringData& ns ) {
+ Status Database::_dropNS( TransactionExperiment* txn, const StringData& ns ) {
NamespaceDetails* d = _namespaceIndex.details( ns );
if ( !d )
@@ -790,7 +807,7 @@ namespace mongo {
{
// remove from the system catalog
BSONObj cond = BSON( "name" << ns ); // { name: "colltodropname" }
- deleteObjects( _namespacesName, cond, false, false, true);
+ deleteObjects( txn, _namespacesName, cond, false, false, true);
}
// free extents
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index 4ff05a209db..b40717c4d16 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -44,6 +44,7 @@ namespace mongo {
class DataFile;
class IndexCatalog;
class NamespaceDetails;
+ class TransactionExperiment;
struct CollectionOptions {
CollectionOptions() {
@@ -109,7 +110,7 @@ namespace mongo {
const string& name() const { return _name; }
const string& path() const { return _path; }
- void clearTmpCollections();
+ void clearTmpCollections(TransactionExperiment* txn);
/**
* tries to make sure that this hasn't been deleted
@@ -130,7 +131,7 @@ namespace mongo {
/**
* @return true if success. false if bad level or error creating profile ns
*/
- bool setProfilingLevel( int newLevel , string& errmsg );
+ bool setProfilingLevel( TransactionExperiment* txn, int newLevel , string& errmsg );
void flushFiles( bool sync );
@@ -157,23 +158,36 @@ namespace mongo {
ExtentManager& getExtentManager() { return *_extentManager; }
const ExtentManager& getExtentManager() const { return *_extentManager; }
- Status dropCollection( const StringData& fullns );
+ Status dropCollection( TransactionExperiment* txn, const StringData& fullns );
- Collection* createCollection( const StringData& ns,
+ Collection* createCollection( TransactionExperiment* txn,
+ const StringData& ns,
const CollectionOptions& options = CollectionOptions(),
bool allocateSpace = true,
bool createDefaultIndexes = true );
/**
* @param ns - this is fully qualified, which is maybe not ideal ???
+ * The methods without a transaction are deprecated.
+ * TODO remove deprecated method once we require reads to have Transaction objects.
*/
Collection* getCollection( const StringData& ns );
Collection* getCollection( const NamespaceString& ns ) { return getCollection( ns.ns() ); }
+ Collection* getCollection( TransactionExperiment* txn, const StringData& ns );
+
+ Collection* getCollection( TransactionExperiment* txn, const NamespaceString& ns ) {
+ return getCollection( txn, ns.ns() );
+ }
+
Collection* getOrCreateCollection( const StringData& ns );
+ Collection* getOrCreateCollection( TransactionExperiment* txn, const StringData& ns );
- Status renameCollection( const StringData& fromNS, const StringData& toNS, bool stayTemp );
+ Status renameCollection( TransactionExperiment* txn,
+ const StringData& fromNS,
+ const StringData& toNS,
+ bool stayTemp );
/**
* @return name of an existing database with same text name but different
@@ -193,7 +207,9 @@ namespace mongo {
~Database(); // closes files and other cleanup see below.
- void _addNamespaceToCatalog( const StringData& ns, const BSONObj* options );
+ void _addNamespaceToCatalog( TransactionExperiment* txn,
+ const StringData& ns,
+ const BSONObj* options );
/**
@@ -202,7 +218,7 @@ namespace mongo {
* removes from NamespaceIndex
* NOT RIGHT NOW, removes cache entry in Database TODO?
*/
- Status _dropNS( const StringData& ns );
+ Status _dropNS( TransactionExperiment* txn, const StringData& ns );
/**
* @throws DatabaseDifferCaseCode if the name is a duplicate based on
@@ -212,7 +228,9 @@ namespace mongo {
void openAllFiles();
- Status _renameSingleNamespace( const StringData& fromNS, const StringData& toNS,
+ Status _renameSingleNamespace( TransactionExperiment* txn,
+ const StringData& fromNS,
+ const StringData& toNS,
bool stayTemp );
const string _name; // "alleyinsider"
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 2c907eb735d..a6d89533018 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -126,7 +126,7 @@ namespace mongo {
}
IndexCatalogEntry* IndexCatalog::_setupInMemoryStructures( IndexDescriptor* descriptor ) {
- DurTransaction txn[1];
+ DurTransaction txn; //XXX
auto_ptr<IndexDescriptor> descriptorCleanup( descriptor );
NamespaceDetails* indexMetadata =
@@ -136,7 +136,7 @@ namespace mongo {
str::stream() << "no NamespaceDetails for index: " << descriptor->toString(),
indexMetadata );
- auto_ptr<RecordStore> recordStore( new SimpleRecordStoreV1( txn,
+ auto_ptr<RecordStore> recordStore( new SimpleRecordStoreV1( &txn,
descriptor->indexNamespace(),
new NamespaceDetailsRSV1MetaData( indexMetadata ),
_collection->getExtentManager(),
@@ -166,7 +166,7 @@ namespace mongo {
requirePowerOf2 = true;
if ( requirePowerOf2 ) {
- _collection->setUserFlag(NamespaceDetails::Flag_UsePowerOf2Sizes);
+ _collection->setUserFlag(&txn, NamespaceDetails::Flag_UsePowerOf2Sizes);
}
}
@@ -430,7 +430,8 @@ namespace mongo {
Collection* systemIndexes = db->getOrCreateCollection( db->_indexesName );
invariant( systemIndexes );
- StatusWith<DiskLoc> systemIndexesEntry = systemIndexes->insertDocument( _spec, false );
+ DurTransaction txn; //XXX
+ StatusWith<DiskLoc> systemIndexesEntry = systemIndexes->insertDocument( &txn, _spec, false );
if ( !systemIndexesEntry.isOK() )
return systemIndexesEntry.getStatus();
@@ -468,7 +469,7 @@ namespace mongo {
nsi.add_ns( descriptor->indexNamespace(), DiskLoc(), false );
// 4) system.namespaces entry index ns
- db->_addNamespaceToCatalog( descriptor->indexNamespace(), NULL );
+ db->_addNamespaceToCatalog( &txn, descriptor->indexNamespace(), NULL );
/// ---------- setup in memory structures ----------------
@@ -894,7 +895,8 @@ namespace mongo {
invariant( _details->_catalogFindIndexByName( indexName, true ) == idxNo );
// data + system.namespacesa
- Status status = _collection->_database->_dropNS( indexNamespace );
+ DurTransaction txn; // XXX
+ Status status = _collection->_database->_dropNS( &txn, indexNamespace );
if ( status.code() == ErrorCodes::NamespaceNotFound ) {
// this is ok, as we may be partially through index creation
}
@@ -917,7 +919,9 @@ namespace mongo {
b.append( "ns", _collection->ns() );
b.append( "name", indexName );
BSONObj cond = b.obj(); // e.g.: { name: "ts_1", ns: "foo.coll" }
- return static_cast<int>( deleteObjects( _collection->_database->_indexesName,
+ DurTransaction txn; // XXX
+ return static_cast<int>( deleteObjects( &txn,
+ _collection->_database->_indexesName,
cond,
false,
false,
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index f71b107e984..2cd3bc1172f 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/repl/is_master.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/rs.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/structure/catalog/index_details.h"
#include "mongo/db/structure/catalog/namespace_details.h"
#include "mongo/util/processinfo.h"
@@ -136,7 +137,8 @@ namespace mongo {
bool runnerEOF = runner->isEOF();
runner->saveState();
BSONObj toDelete;
- collection->deleteDocument( loc, false, true, &toDelete );
+ DurTransaction txn; // XXX
+ collection->deleteDocument( &txn, loc, false, true, &toDelete );
logOp( "d", ns.c_str(), toDelete );
if (!runner->restoreState()) {
@@ -278,7 +280,9 @@ namespace mongo {
for( set<DiskLoc>::const_iterator i = dupsToDrop.begin(); i != dupsToDrop.end(); ++i ) {
BSONObj toDelete;
- collection->deleteDocument( *i,
+ DurTransaction txn; // XXX
+ collection->deleteDocument( &txn,
+ *i,
false /* cappedOk */,
true /* noWarn */,
&toDelete );
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 82918653d79..9e782f42ce9 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -52,6 +52,7 @@
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/oplogreader.h"
#include "mongo/db/pdfile.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/storage_options.h"
namespace mongo {
@@ -125,6 +126,7 @@ namespace mongo {
void operator()( DBClientCursorBatchIterator &i ) {
Lock::GlobalWrite lk;
+ DurTransaction txn;
context.relocked();
bool createdCollection = false;
@@ -153,7 +155,7 @@ namespace mongo {
<< to_collection << "]",
!createdCollection );
createdCollection = true;
- collection = context.db()->createCollection( to_collection );
+ collection = context.db()->createCollection( &txn, to_collection );
verify( collection );
}
}
@@ -180,7 +182,7 @@ namespace mongo {
verify(nsToCollectionSubstring(from_collection) != "system.indexes");
- StatusWith<DiskLoc> loc = collection->insertDocument( js, true );
+ StatusWith<DiskLoc> loc = collection->insertDocument( &txn, js, true );
if ( !loc.isOK() ) {
error() << "error: exception cloning object in " << from_collection
<< ' ' << loc.toString() << " obj:" << js;
@@ -251,7 +253,8 @@ namespace mongo {
string ns = spec["ns"].String(); // this was fixed when pulled off network
Collection* collection = f.context.db()->getCollection( ns );
if ( !collection ) {
- collection = f.context.db()->createCollection( ns );
+ DurTransaction txn; // XXX
+ collection = f.context.db()->createCollection( &txn, ns );
verify( collection );
}
@@ -309,12 +312,13 @@ namespace mongo {
bool logForRepl) {
Client::WriteContext ctx(ns);
+ DurTransaction txn; // XXX
// config
string temp = ctx.ctx().db()->name() + ".system.namespaces";
BSONObj config = _conn->findOne(temp , BSON("name" << ns));
if (config["options"].isABSONObj()) {
- Status status = userCreateNS(ctx.ctx().db(), ns, config["options"].Obj(), logForRepl, 0);
+ Status status = userCreateNS(&txn, ctx.ctx().db(), ns, config["options"].Obj(), logForRepl, 0);
if ( !status.isOK() ) {
errmsg = status.toString();
return false;
@@ -345,6 +349,7 @@ namespace mongo {
bool Cloner::go(Client::Context& context,
const string& masterHost, const CloneOptions& opts, set<string>* clonedColls,
string& errmsg, int* errCode) {
+ DurTransaction txn; // XXX
if ( errCode ) {
*errCode = 0;
}
@@ -468,7 +473,7 @@ namespace mongo {
{
/* we defer building id index for performance - building it in batch is much faster */
- userCreateNS(context.db(), to_name, options, opts.logForRepl, false);
+ userCreateNS(&txn, context.db(), to_name, options, opts.logForRepl, false);
}
LOG(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
Query q;
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 7df06e876ed..67eaa4751ff 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -36,10 +36,12 @@
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/query/new_find.h"
#include "mongo/db/repl/oplog.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
namespace mongo {
- Status cloneCollectionAsCapped( Database* db,
+ Status cloneCollectionAsCapped( TransactionExperiment* txn,
+ Database* db,
const string& shortFrom,
const string& shortTo,
double size,
@@ -49,7 +51,7 @@ namespace mongo {
string fromNs = db->name() + "." + shortFrom;
string toNs = db->name() + "." + shortTo;
- Collection* fromCollection = db->getCollection( fromNs );
+ Collection* fromCollection = db->getCollection( txn, fromNs );
if ( !fromCollection )
return Status( ErrorCodes::NamespaceNotFound,
str::stream() << "source collection " << fromNs << " does not exist" );
@@ -66,12 +68,12 @@ namespace mongo {
if ( temp )
spec.appendBool( "temp", true );
- Status status = userCreateNS( ctx.db(), toNs, spec.done(), logForReplication );
+ Status status = userCreateNS( txn, ctx.db(), toNs, spec.done(), logForReplication );
if ( !status.isOK() )
return status;
}
- Collection* toCollection = db->getCollection( toNs );
+ Collection* toCollection = db->getCollection( txn, toNs );
invariant( toCollection ); // we created above
// how much data to ignore because it won't fit anyway
@@ -93,7 +95,7 @@ namespace mongo {
case Runner::RUNNER_EOF:
return Status::OK();
case Runner::RUNNER_DEAD:
- db->dropCollection( toNs );
+ db->dropCollection( txn, toNs );
return Status( ErrorCodes::InternalError, "runner turned dead while iterating" );
case Runner::RUNNER_ERROR:
return Status( ErrorCodes::InternalError, "runner error while iterating" );
@@ -103,7 +105,7 @@ namespace mongo {
continue;
}
- toCollection->insertDocument( obj, true );
+ toCollection->insertDocument( txn, obj, true );
if ( logForReplication )
logOp( "i", toNs.c_str(), obj );
getDur().commitIfNeeded();
@@ -153,8 +155,9 @@ namespace mongo {
Lock::DBWrite dbXLock(dbname);
Client::Context ctx(dbname);
+ DurTransaction txn;
- Status status = cloneCollectionAsCapped( ctx.db(), from, to, size, temp, true );
+ Status status = cloneCollectionAsCapped( &txn, ctx.db(), from, to, size, temp, true );
return appendCommandStatus( result, status );
}
} cmdCloneCollectionAsCapped;
@@ -203,6 +206,7 @@ namespace mongo {
//
Lock::GlobalWrite globalWriteLock;
Client::Context ctx(dbname);
+ DurTransaction txn;
Database* db = ctx.db();
@@ -222,23 +226,23 @@ namespace mongo {
string longTmpName = str::stream() << dbname << "." << shortTmpName;
if ( db->getCollection( longTmpName ) ) {
- Status status = db->dropCollection( longTmpName );
+ Status status = db->dropCollection( &txn, longTmpName );
if ( !status.isOK() )
return appendCommandStatus( result, status );
}
- Status status = cloneCollectionAsCapped( db, shortSource, shortTmpName, size, true, false );
+ Status status = cloneCollectionAsCapped( &txn, db, shortSource, shortTmpName, size, true, false );
if ( !status.isOK() )
return appendCommandStatus( result, status );
verify( db->getCollection( longTmpName ) );
- status = db->dropCollection( longSource );
+ status = db->dropCollection( &txn, longSource );
if ( !status.isOK() )
return appendCommandStatus( result, status );
- status = db->renameCollection( longTmpName, longSource, false );
+ status = db->renameCollection( &txn, longTmpName, longSource, false );
return appendCommandStatus( result, status );
}
} cmdConvertToCapped;
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index ee71d38ba01..3526c67dd8f 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/kill_current_op.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
namespace mongo {
@@ -145,6 +146,7 @@ namespace mongo {
Lock::DBWrite lk(ns.ns());
BackgroundOperation::assertNoBgOpInProgForNs(ns.ns());
Client::Context ctx(ns);
+ DurTransaction txn;
Collection* collection = ctx.db()->getCollection(ns.ns());
if( ! collection ) {
@@ -161,7 +163,7 @@ namespace mongo {
std::vector<BSONObj> indexesInProg = stopIndexBuilds(ctx.db(), cmdObj);
- StatusWith<CompactStats> status = collection->compact( &compactOptions );
+ StatusWith<CompactStats> status = collection->compact( &txn, &compactOptions );
if ( !status.isOK() )
return appendCommandStatus( result, status.getStatus() );
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index c69a0157c13..4c5bc195c1c 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/ops/insert.h"
#include "mongo/db/repl/oplog.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/s/d_logic.h"
#include "mongo/s/shard_key_pattern.h"
@@ -167,12 +168,13 @@ namespace mongo {
Client::WriteContext writeContext( ns.ns(),
storageGlobalParams.dbpath,
false /* doVersion */ );
+ DurTransaction txn;
Database* db = writeContext.ctx().db();
- Collection* collection = db->getCollection( ns.ns() );
+ Collection* collection = db->getCollection( &txn, ns.ns() );
result.appendBool( "createdCollectionAutomatically", collection == NULL );
if ( !collection ) {
- collection = db->createCollection( ns.ns() );
+ collection = db->createCollection( &txn, ns.ns() );
invariant( collection );
}
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 072bb2d8000..880d3a65f08 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/ops/update_lifecycle_impl.h"
#include "mongo/db/queryutil.h"
#include "mongo/db/query/get_runner.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
namespace mongo {
@@ -139,7 +140,8 @@ namespace mongo {
Lock::DBWrite lk( ns );
Client::Context cx( ns );
- Collection* collection = cx.db()->getCollection( ns );
+ DurTransaction txn;
+ Collection* collection = cx.db()->getCollection( &txn, ns );
BSONObj doc;
bool found = false;
@@ -225,7 +227,7 @@ namespace mongo {
if ( remove ) {
_appendHelper( result , doc , found , fields );
if ( found ) {
- deleteObjects( ns , queryModified , true , true );
+ deleteObjects( &txn, ns , queryModified , true , true );
BSONObjBuilder le( result.subobjStart( "lastErrorObject" ) );
le.appendNumber( "n" , 1 );
le.done();
@@ -255,7 +257,7 @@ namespace mongo {
// the shard version below, but for now no
UpdateLifecycleImpl updateLifecycle(false, requestNs);
request.setLifecycle(&updateLifecycle);
- UpdateResult res = mongo::update(request, &cc().curop()->debug());
+ UpdateResult res = mongo::update(&txn, request, &cc().curop()->debug());
if ( !collection ) {
// collection created by an upsert
collection = cx.db()->getCollection( ns );
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 1f72e6648a2..32035de83c6 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -48,6 +48,7 @@
#include "mongo/db/repl/is_master.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/range_preserver.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/storage_options.h"
#include "mongo/scripting/engine.h"
#include "mongo/s/collection_metadata.h"
@@ -340,12 +341,13 @@ namespace mongo {
if (_useIncremental) {
// Create the inc collection and make sure we have index on "0" key.
Client::WriteContext incCtx( _config.incLong );
- Collection* incColl = incCtx.ctx().db()->getCollection( _config.incLong );
+ DurTransaction txn;
+ Collection* incColl = incCtx.ctx().db()->getCollection( &txn, _config.incLong );
if ( !incColl ) {
CollectionOptions options;
options.setNoIdIndex();
options.temp = true;
- incColl = incCtx.ctx().db()->createCollection( _config.incLong, options );
+ incColl = incCtx.ctx().db()->createCollection( &txn, _config.incLong, options );
// Log the createCollection operation.
BSONObjBuilder b;
@@ -400,11 +402,12 @@ namespace mongo {
{
// create temp collection and insert the indexes from temporary storage
Client::WriteContext tempCtx( _config.tempNamespace );
- Collection* tempColl = tempCtx.ctx().db()->getCollection( _config.tempNamespace );
+ DurTransaction txn;
+ Collection* tempColl = tempCtx.ctx().db()->getCollection( &txn, _config.tempNamespace );
if ( !tempColl ) {
CollectionOptions options;
options.temp = true;
- tempColl = tempCtx.ctx().db()->createCollection( _config.tempNamespace, options );
+ tempColl = tempCtx.ctx().db()->createCollection( &txn, _config.tempNamespace, options );
// Log the createCollection operation.
BSONObjBuilder b;
@@ -626,6 +629,7 @@ namespace mongo {
verify( _onDisk );
Client::WriteContext ctx( ns );
+ DurTransaction txn;
Collection* coll = ctx.ctx().db()->getCollection( ns );
if ( !coll )
uasserted(13630, str::stream() << "attempted to insert into nonexistent" <<
@@ -641,7 +645,7 @@ namespace mongo {
b.appendElements(o);
BSONObj bo = b.obj();
- coll->insertDocument( bo, true );
+ coll->insertDocument( &txn, bo, true );
logOp( "i", ns.c_str(), bo );
}
@@ -652,13 +656,14 @@ namespace mongo {
verify( _onDisk );
Client::WriteContext ctx( _config.incLong );
+ DurTransaction txn;
Collection* coll = ctx.ctx().db()->getCollection( _config.incLong );
if ( !coll )
uasserted(13631, str::stream() << "attempted to insert into nonexistent"
" collection during a mr operation." <<
" collection expected: " << _config.incLong );
- coll->insertDocument( o, true );
+ coll->insertDocument( &txn, o, true );
logOp( "i", _config.incLong.c_str(), o );
getDur().commitIfNeeded();
}
diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp
index e7cc87a2a13..8c239e7e414 100644
--- a/src/mongo/db/commands/rename_collection.cpp
+++ b/src/mongo/db/commands/rename_collection.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/instance.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/ops/insert.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
namespace mongo {
@@ -127,6 +128,7 @@ namespace mongo {
std::vector<BSONObj> indexesInProg;
Lock::GlobalWrite globalWriteLock;
+ DurTransaction txn;
{
Client::Context srcCtx( source );
@@ -182,7 +184,7 @@ namespace mongo {
return false;
}
- Status s = ctx.db()->dropCollection( target );
+ Status s = ctx.db()->dropCollection( &txn, target );
if ( !s.isOK() ) {
errmsg = s.toString();
restoreIndexBuildsOnSource( indexesInProg, source );
@@ -193,7 +195,7 @@ namespace mongo {
// If we are renaming in the same database, just
// rename the namespace and we're done.
if ( sourceDB == targetDB ) {
- Status s = ctx.db()->renameCollection( source, target,
+ Status s = ctx.db()->renameCollection( &txn, source, target,
cmdObj["stayTemp"].trueValue() );
if ( !s.isOK() ) {
errmsg = s.toString();
@@ -214,13 +216,13 @@ namespace mongo {
options.cappedSize = size;
options.setNoIdIndex();
- targetColl = ctx.db()->createCollection( target, options );
+ targetColl = ctx.db()->createCollection( &txn, target, options );
}
else {
CollectionOptions options;
options.setNoIdIndex();
// No logOp necessary because the entire renameCollection command is one logOp.
- targetColl = ctx.db()->createCollection( target, options );
+ targetColl = ctx.db()->createCollection( &txn, target, options );
}
if ( !targetColl ) {
errmsg = "Failed to create target collection.";
@@ -252,7 +254,7 @@ namespace mongo {
if ( !targetColl )
targetColl = ctx.db()->getCollection( target );
// No logOp necessary because the entire renameCollection command is one logOp.
- Status s = targetColl->insertDocument( o, true ).getStatus();
+ Status s = targetColl->insertDocument( &txn, o, true ).getStatus();
if ( !s.isOK() ) {
insertSuccessful = false;
errmsg = s.toString();
@@ -264,7 +266,7 @@ namespace mongo {
// If inserts were unsuccessful, drop the target collection and return false.
if ( !insertSuccessful ) {
Client::Context ctx( target );
- Status s = ctx.db()->dropCollection( target );
+ Status s = ctx.db()->dropCollection( &txn, target );
if ( !s.isOK() )
errmsg = s.toString();
restoreIndexBuildsOnSource( indexesInProg, source );
@@ -318,7 +320,7 @@ namespace mongo {
// If indexes were unsuccessful, drop the target collection and return false.
if ( !indexSuccessful ) {
- Status s = ctx.db()->dropCollection( target );
+ Status s = ctx.db()->dropCollection( &txn, target );
if ( !s.isOK() )
errmsg = s.toString();
restoreIndexBuildsOnSource( indexesInProg, source );
@@ -329,7 +331,7 @@ namespace mongo {
// Drop the source collection.
{
Client::Context srcCtx( source );
- Status s = srcCtx.db()->dropCollection( source );
+ Status s = srcCtx.db()->dropCollection( &txn, source );
if ( !s.isOK() ) {
errmsg = s.toString();
restoreIndexBuildsOnSource( indexesInProg, source );
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index a3560d6c15d..97cde83d144 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/kill_current_op.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/structure/catalog/namespace_details.h"
namespace mongo {
@@ -64,16 +65,17 @@ namespace mongo {
Lock::DBWrite lk(ns);
Client::Context ctx( ns );
+ DurTransaction txn;
Database* db = ctx.db();
Collection* collection = db->getCollection( ns );
if ( !collection ) {
- collection = db->createCollection( ns );
+ collection = db->createCollection( &txn, ns );
if ( !collection ) {
errmsg = "could not create collection";
return false;
}
}
- StatusWith<DiskLoc> res = collection->insertDocument( obj, false );
+ StatusWith<DiskLoc> res = collection->insertDocument( &txn, obj, false );
return appendCommandStatus( result, res.getStatus() );
}
};
@@ -142,6 +144,7 @@ namespace mongo {
bool inc = cmdObj.getBoolField( "inc" ); // inclusive range?
Client::WriteContext ctx( nss.ns() );
+ DurTransaction txn;
Collection* collection = ctx.ctx().db()->getCollection( nss.ns() );
massert( 13417, "captrunc collection not found or empty", collection);
@@ -154,7 +157,7 @@ namespace mongo {
Runner::RunnerState state = runner->getNext(NULL, &end);
massert( 13418, "captrunc invalid n", Runner::RUNNER_ADVANCED == state);
}
- collection->temp_cappedTruncateAfter( end, inc );
+ collection->temp_cappedTruncateAfter( &txn, end, inc );
return true;
}
};
@@ -187,13 +190,14 @@ namespace mongo {
NamespaceString nss( dbname, coll );
Client::WriteContext ctx( nss.ns() );
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* collection = db->getCollection( nss.ns() );
massert( 13429, "emptycapped no such collection", collection );
std::vector<BSONObj> indexes = stopIndexBuilds(db, cmdObj);
- Status status = collection->truncate();
+ Status status = collection->truncate(&txn);
if ( !status.isOK() )
return appendCommandStatus( result, status );
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index b40772e2848..f3257976c1f 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -50,6 +50,7 @@
#include "mongo/db/repl/rs.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/stats/counters.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/write_concern.h"
#include "mongo/s/collection_metadata.h"
#include "mongo/s/d_logic.h"
@@ -598,7 +599,8 @@ namespace mongo {
}
if ( currentOp->shouldDBProfile( executionTime ) ) {
- profile( *client, currentOp->getOp(), *currentOp );
+ DurTransaction txn;
+ profile( &txn, *client, currentOp->getOp(), *currentOp );
}
}
@@ -924,7 +926,8 @@ namespace mongo {
_collection = database->getCollection(request->getTargetingNS());
if (!_collection) {
// Implicitly create if it doesn't exist
- _collection = database->createCollection(request->getTargetingNS());
+ DurTransaction txn;
+ _collection = database->createCollection(&txn, request->getTargetingNS());
if (!_collection) {
result->setError(
toWriteError(Status(ErrorCodes::InternalError,
@@ -1041,7 +1044,8 @@ namespace mongo {
Lock::assertWriteLocked( insertNS );
- StatusWith<DiskLoc> status = collection->insertDocument( docToInsert, true );
+ DurTransaction txn;
+ StatusWith<DiskLoc> status = collection->insertDocument( &txn, docToInsert, true );
if ( !status.isOK() ) {
result->setError(toWriteError(status.getStatus()));
@@ -1111,9 +1115,10 @@ namespace mongo {
Client::Context ctx( nsString.ns(),
storageGlobalParams.dbpath,
false /* don't check version */ );
+ DurTransaction txn;
try {
- UpdateResult res = executor.execute();
+ UpdateResult res = executor.execute(&txn);
const long long numDocsModified = res.numDocsModified;
const long long numMatched = res.numMatched;
@@ -1173,9 +1178,10 @@ namespace mongo {
Client::Context writeContext( nss.ns(),
storageGlobalParams.dbpath,
false /* don't check version */);
+ DurTransaction txn;
try {
- result->getStats().n = executor.execute();
+ result->getStats().n = executor.execute(&txn);
}
catch ( const DBException& ex ) {
status = ex.toStatus();
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 72cb43050ce..f98e36c847e 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -74,6 +74,7 @@
#include "mongo/db/stats/snapshots.h"
#include "mongo/db/storage/data_file.h"
#include "mongo/db/storage/extent_manager.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/storage_options.h"
#include "mongo/db/ttl.h"
#include "mongo/platform/process_id.h"
@@ -365,6 +366,7 @@ namespace mongo {
LOG(1) << "enter repairDatabases (to check pdfile version #)" << endl;
Lock::GlobalWrite lk;
+ DurTransaction txn;
vector< string > dbNames;
getDatabaseNames( dbNames );
for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
@@ -381,7 +383,7 @@ namespace mongo {
}
if (shouldClearNonLocalTmpCollections || dbName == "local")
- ctx.db()->clearTmpCollections();
+ ctx.db()->clearTmpCollections(&txn);
if (!h->isCurrentVersion() || mongodGlobalParams.repair) {
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 2fb41d3a976..99bfaa0e0c3 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -68,6 +68,7 @@
#include "mongo/db/repl/is_master.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/storage/extent_manager.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/structure/catalog/namespace_details.h"
#include "mongo/db/write_concern.h"
#include "mongo/s/d_logic.h"
@@ -357,6 +358,7 @@ namespace mongo {
//
Lock::DBWrite dbXLock(dbname);
Client::Context ctx(dbname);
+ DurTransaction txn;
BSONElement e = cmdObj.firstElement();
result.append("was", ctx.db()->getProfilingLevel());
@@ -368,7 +370,7 @@ namespace mongo {
if ( p == -1 )
ok = true;
else if ( p >= 0 && p <= 2 ) {
- ok = ctx.db()->setProfilingLevel( p , errmsg );
+ ok = ctx.db()->setProfilingLevel( &txn, p , errmsg );
}
BSONElement slow = cmdObj["slowms"];
@@ -483,9 +485,10 @@ namespace mongo {
Lock::DBWrite dbXLock(dbname);
Client::Context ctx(nsToDrop);
+ DurTransaction txn;
Database* db = ctx.db();
- Collection* coll = db->getCollection( nsToDrop );
+ Collection* coll = db->getCollection( &txn, nsToDrop );
// If collection does not exist, short circuit and return.
if ( !coll ) {
errmsg = "ns not found";
@@ -499,7 +502,7 @@ namespace mongo {
result.append( "ns", nsToDrop );
result.append( "nIndexesWas", numIndexes );
- Status s = db->dropCollection( nsToDrop );
+ Status s = db->dropCollection( &txn, nsToDrop );
if ( s.isOK() )
return true;
@@ -648,10 +651,11 @@ namespace mongo {
Lock::DBWrite dbXLock(dbname);
Client::Context ctx(ns);
+ DurTransaction txn;
// Create collection.
return appendCommandStatus( result,
- userCreateNS(ctx.db(), ns.c_str(), options, !fromRepl) );
+ userCreateNS(&txn, ctx.db(), ns.c_str(), options, !fromRepl) );
}
} cmdCreate;
@@ -1211,6 +1215,7 @@ namespace mongo {
Lock::DBWrite dbXLock(dbname);
Client::Context ctx( ns );
+ DurTransaction txn;
Collection* coll = ctx.db()->getCollection( ns );
if ( !coll ) {
@@ -1236,9 +1241,9 @@ namespace mongo {
result.appendBool( "usePowerOf2Sizes_old", oldPowerOf2 );
if ( newPowerOf2 )
- coll->setUserFlag( NamespaceDetails::Flag_UsePowerOf2Sizes );
+ coll->setUserFlag( &txn, NamespaceDetails::Flag_UsePowerOf2Sizes );
else
- coll->clearUserFlag( NamespaceDetails::Flag_UsePowerOf2Sizes );
+ coll->clearUserFlag( &txn, NamespaceDetails::Flag_UsePowerOf2Sizes );
result.appendBool( "usePowerOf2Sizes_new", newPowerOf2 );
}
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 97bf5e70866..39434dc84af 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -52,6 +52,7 @@
#include "mongo/db/query/query_planner.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/write_concern.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/storage_options.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/s/d_logic.h"
@@ -217,6 +218,7 @@ namespace mongo {
OpDebug debug;
Client::Context context(ns);
+ DurTransaction txn; // XXX
const NamespaceString requestNs(ns);
UpdateRequest request(requestNs);
@@ -229,12 +231,13 @@ namespace mongo {
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
- update(request, &debug);
+ update(&txn, request, &debug);
}
void Helpers::putSingleton(const char *ns, BSONObj obj) {
OpDebug debug;
Client::Context context(ns);
+ DurTransaction txn; // XXX
const NamespaceString requestNs(ns);
UpdateRequest request(requestNs);
@@ -245,7 +248,7 @@ namespace mongo {
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
- update(request, &debug);
+ update(&txn, request, &debug);
context.getClient()->curop()->done();
}
@@ -253,6 +256,7 @@ namespace mongo {
void Helpers::putSingletonGod(const char *ns, BSONObj obj, bool logTheOp) {
OpDebug debug;
Client::Context context(ns);
+ DurTransaction txn; // XXX
const NamespaceString requestNs(ns);
UpdateRequest request(requestNs);
@@ -262,7 +266,7 @@ namespace mongo {
request.setUpsert();
request.setUpdateOpLog(logTheOp);
- update(request, &debug);
+ update(&txn, request, &debug);
context.getClient()->curop()->done();
}
@@ -352,7 +356,8 @@ namespace mongo {
// Scoping for write lock.
{
Client::WriteContext ctx(ns);
- Collection* collection = ctx.ctx().db()->getCollection( ns );
+ DurTransaction txn;
+ Collection* collection = ctx.ctx().db()->getCollection( &txn, ns );
if ( !collection )
break;
@@ -424,7 +429,7 @@ namespace mongo {
callback->goingToDelete( obj );
logOp("d", ns.c_str(), obj["_id"].wrap(), 0, 0, fromMigrate);
- collection->deleteDocument( rloc );
+ collection->deleteDocument( &txn, rloc );
numDeleted++;
}
@@ -544,7 +549,8 @@ namespace mongo {
void Helpers::emptyCollection(const char *ns) {
Client::Context context(ns);
- deleteObjects(ns, BSONObj(), false);
+ DurTransaction txn; // XXX
+ deleteObjects(&txn, ns, BSONObj(), false);
}
Helpers::RemoveSaver::RemoveSaver( const string& a , const string& b , const string& why)
diff --git a/src/mongo/db/index/btree_based_bulk_access_method.cpp b/src/mongo/db/index/btree_based_bulk_access_method.cpp
index 2725a967c19..d4471b2fc7a 100644
--- a/src/mongo/db/index/btree_based_bulk_access_method.cpp
+++ b/src/mongo/db/index/btree_based_bulk_access_method.cpp
@@ -132,12 +132,12 @@ namespace mongo {
}
Status BtreeBasedBulkAccessMethod::commit(set<DiskLoc>* dupsToDrop, CurOp* op, bool mayInterrupt) {
- DurTransaction txn[1];
+ DurTransaction txn; // XXX
DiskLoc oldHead = _real->_btreeState->head();
// XXX: do we expect the tree to be empty but have a head set? Looks like so from old code.
invariant(!oldHead.isNull());
_real->_btreeState->setHead(DiskLoc());
- _real->_btreeState->recordStore()->deleteRecord(txn, oldHead);
+ _real->_btreeState->recordStore()->deleteRecord(&txn, oldHead);
if (_isMultiKey) {
_real->_btreeState->setMultikey();
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index d05fcd2674b..71a3fe9dbc2 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -55,6 +55,7 @@
#include "mongo/db/storage/mmap_v1/dur_commitjob.h"
#include "mongo/db/storage/mmap_v1/dur_journal.h"
#include "mongo/db/storage/mmap_v1/dur_recover.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/instance.h"
#include "mongo/db/introspect.h"
#include "mongo/db/jsobjmanipulator.h"
@@ -516,7 +517,8 @@ namespace mongo {
LOG(1) << "note: not profiling because doing fsync+lock" << endl;
}
else {
- profile(c, op, currentOp);
+ DurTransaction txn;
+ profile(&txn, c, op, currentOp);
}
}
@@ -621,8 +623,9 @@ namespace mongo {
return;
Client::Context ctx( ns );
+ DurTransaction txn;
- UpdateResult res = executor.execute();
+ UpdateResult res = executor.execute(&txn);
// for getlasterror
lastError.getSafe()->recordUpdate( res.existing , res.numMatched , res.upserted );
@@ -663,8 +666,9 @@ namespace mongo {
return;
Client::Context ctx(ns);
+ DurTransaction txn;
- long long n = executor.execute();
+ long long n = executor.execute(&txn);
lastError.getSafe()->recordDelete( n );
op.debug().ndeleted = n;
break;
@@ -802,7 +806,11 @@ namespace mongo {
return ok;
}
- void checkAndInsert(Client::Context& ctx, const char *ns, /*modifies*/BSONObj& js) {
+ void checkAndInsert(TransactionExperiment* txn,
+ Client::Context& ctx,
+ const char *ns,
+ /*modifies*/BSONObj& js) {
+
if ( nsToCollectionSubstring( ns ) == "system.indexes" ) {
string targetNS = js["ns"].String();
uassertStatusOK( userAllowedWriteNS( targetNS ) );
@@ -810,7 +818,7 @@ namespace mongo {
Collection* collection = ctx.db()->getCollection( targetNS );
if ( !collection ) {
// implicitly create
- collection = ctx.db()->createCollection( targetNS );
+ collection = ctx.db()->createCollection( txn, targetNS );
verify( collection );
}
@@ -838,20 +846,25 @@ namespace mongo {
Collection* collection = ctx.db()->getCollection( ns );
if ( !collection ) {
- collection = ctx.db()->createCollection( ns );
+ collection = ctx.db()->createCollection( txn, ns );
verify( collection );
}
- StatusWith<DiskLoc> status = collection->insertDocument( js, true );
+ StatusWith<DiskLoc> status = collection->insertDocument( txn, js, true );
uassertStatusOK( status.getStatus() );
logOp("i", ns, js);
}
- NOINLINE_DECL void insertMulti(Client::Context& ctx, bool keepGoing, const char *ns, vector<BSONObj>& objs, CurOp& op) {
+ NOINLINE_DECL void insertMulti(TransactionExperiment* txn,
+ Client::Context& ctx,
+ bool keepGoing,
+ const char *ns,
+ vector<BSONObj>& objs,
+ CurOp& op) {
size_t i;
for (i=0; i<objs.size(); i++){
try {
- checkAndInsert(ctx, ns, objs[i]);
+ checkAndInsert(txn, ctx, ns, objs[i]);
getDur().commitIfNeeded();
} catch (const UserException&) {
if (!keepGoing || i == objs.size()-1){
@@ -904,12 +917,13 @@ namespace mongo {
return;
Client::Context ctx(ns);
+ DurTransaction txn;
if (multi.size() > 1) {
const bool keepGoing = d.reservedField() & InsertOption_ContinueOnError;
- insertMulti(ctx, keepGoing, ns, multi, op);
+ insertMulti(&txn, ctx, keepGoing, ns, multi, op);
} else {
- checkAndInsert(ctx, ns, multi[0]);
+ checkAndInsert(&txn, ctx, ns, multi[0]);
globalOpCounters.incInsertInWriteLock(1);
op.debug().ninserted = 1;
}
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 4f18011d2fb..77f7835d39e 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -79,8 +79,11 @@ namespace {
}
} // namespace
- static void _profile(const Client& c, Database* db,
- CurOp& currentOp, BufBuilder& profileBufBuilder) {
+ static void _profile(TransactionExperiment* txn,
+ const Client& c,
+ Database* db,
+ CurOp& currentOp,
+ BufBuilder& profileBufBuilder) {
dassert( db );
// build object
@@ -120,13 +123,13 @@ namespace {
// write: not replicated
// get or create the profiling collection
- Collection* profileCollection = getOrCreateProfileCollection(db);
+ Collection* profileCollection = getOrCreateProfileCollection(txn, db);
if ( profileCollection ) {
- profileCollection->insertDocument( p, false );
+ profileCollection->insertDocument( txn, p, false );
}
}
- void profile(const Client& c, int op, CurOp& currentOp) {
+ void profile(TransactionExperiment* txn, const Client& c, int op, CurOp& currentOp) {
// initialize with 1kb to start, to avoid realloc later
// doing this outside the dblock to improve performance
BufBuilder profileBufBuilder(1024);
@@ -137,7 +140,7 @@ namespace {
Lock::DBWrite lk( currentOp.getNS() );
if (dbHolder()._isLoaded(nsToDatabase(currentOp.getNS()), storageGlobalParams.dbpath)) {
Client::Context cx(currentOp.getNS(), storageGlobalParams.dbpath, false);
- _profile(c, cx.db(),
+ _profile(txn, c, cx.db(),
currentOp, profileBufBuilder);
}
else {
@@ -152,10 +155,13 @@ namespace {
}
}
- Collection* getOrCreateProfileCollection(Database *db, bool force, string* errmsg ) {
+ Collection* getOrCreateProfileCollection(TransactionExperiment* txn,
+ Database *db,
+ bool force,
+ string* errmsg ) {
fassert(16372, db);
const char* profileName = db->getProfilingNS();
- Collection* collection = db->getCollection( profileName );
+ Collection* collection = db->getCollection( txn, profileName );
if ( collection ) {
if ( !collection->isCapped() ) {
@@ -187,7 +193,7 @@ namespace {
collectionOptions.capped = true;
collectionOptions.cappedSize = 1024 * 1024;
- collection = db->createCollection( profileName, collectionOptions );
+ collection = db->createCollection( txn, profileName, collectionOptions );
invariant( collection );
return collection;
}
diff --git a/src/mongo/db/introspect.h b/src/mongo/db/introspect.h
index 50b98656bc2..f45d9796b34 100644
--- a/src/mongo/db/introspect.h
+++ b/src/mongo/db/introspect.h
@@ -40,12 +40,13 @@ namespace mongo {
class Collection;
class Database;
+ class TransactionExperiment;
/* --- profiling --------------------------------------------
do when database->profile is set
*/
- void profile(const Client& c, int op, CurOp& currentOp);
+ void profile(TransactionExperiment* txn, const Client& c, int op, CurOp& currentOp);
/**
* Get (or create) the profile collection
@@ -54,7 +55,8 @@ namespace mongo {
* @param force Always create the collection if it does not exist
* @return Collection for the newly created collection, or NULL on error
**/
- Collection* getOrCreateProfileCollection(Database *db,
+ Collection* getOrCreateProfileCollection(TransactionExperiment* txn,
+ Database *db,
bool force = false,
std::string* errmsg = NULL);
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index cbc4c392140..9401fc908a3 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -38,7 +38,12 @@ namespace mongo {
justOne: stop after 1 match
god: allow access to system namespaces, and don't yield
*/
- long long deleteObjects(const StringData& ns, BSONObj pattern, bool justOne, bool logop, bool god) {
+ long long deleteObjects(TransactionExperiment* txn,
+ const StringData& ns,
+ BSONObj pattern,
+ bool justOne,
+ bool logop,
+ bool god) {
NamespaceString nsString(ns);
DeleteRequest request(nsString);
request.setQuery(pattern);
@@ -46,7 +51,7 @@ namespace mongo {
request.setUpdateOpLog(logop);
request.setGod(god);
DeleteExecutor executor(&request);
- return executor.execute();
+ return executor.execute(txn);
}
} // namespace mongo
diff --git a/src/mongo/db/ops/delete.h b/src/mongo/db/ops/delete.h
index c1ebe4f2211..a7472637f2d 100644
--- a/src/mongo/db/ops/delete.h
+++ b/src/mongo/db/ops/delete.h
@@ -34,8 +34,11 @@
namespace mongo {
+ class TransactionExperiment;
+
// If justOne is true, deletedId is set to the id of the deleted object.
- long long deleteObjects(const StringData& ns,
+ long long deleteObjects(TransactionExperiment* txn,
+ const StringData& ns,
BSONObj pattern,
bool justOne,
bool logop = false,
diff --git a/src/mongo/db/ops/delete_executor.cpp b/src/mongo/db/ops/delete_executor.cpp
index a7007e8df2c..24b1a6a112c 100644
--- a/src/mongo/db/ops/delete_executor.cpp
+++ b/src/mongo/db/ops/delete_executor.cpp
@@ -81,7 +81,7 @@ namespace mongo {
return status;
}
- long long DeleteExecutor::execute() {
+ long long DeleteExecutor::execute(TransactionExperiment* txn) {
uassertStatusOK(prepare());
uassert(17417,
mongoutils::str::stream() <<
@@ -165,7 +165,7 @@ namespace mongo {
// TODO: do we want to buffer docs and delete them in a group rather than
// saving/restoring state repeatedly?
runner->saveState();
- collection->deleteDocument(rloc, false, false, logop ? &toDelete : NULL );
+ collection->deleteDocument(txn, rloc, false, false, logop ? &toDelete : NULL );
runner->restoreState();
nDeleted++;
diff --git a/src/mongo/db/ops/delete_executor.h b/src/mongo/db/ops/delete_executor.h
index 9e32ba25044..4f998f61fc8 100644
--- a/src/mongo/db/ops/delete_executor.h
+++ b/src/mongo/db/ops/delete_executor.h
@@ -37,6 +37,7 @@ namespace mongo {
class CanonicalQuery;
class DeleteRequest;
+ class TransactionExperiment;
/**
* Implementation of the processing of a delete operation in a mongod.
@@ -89,7 +90,7 @@ namespace mongo {
*
* Returns the number of documents deleted.
*/
- long long execute();
+ long long execute(TransactionExperiment* txn);
private:
/// Unowned pointer to the request object that this executor will process.
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index f690cc019aa..63bfe547d32 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -51,6 +51,7 @@
#include "mongo/db/queryutil.h"
#include "mongo/db/repl/is_master.h"
#include "mongo/db/repl/oplog.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/storage/record.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/platform/unordered_set.h"
@@ -480,13 +481,16 @@ namespace mongo {
}
} // namespace
- UpdateResult update(const UpdateRequest& request, OpDebug* opDebug) {
+ UpdateResult update(TransactionExperiment* txn,
+ const UpdateRequest& request,
+ OpDebug* opDebug) {
UpdateExecutor executor(&request, opDebug);
- return executor.execute();
+ return executor.execute(txn);
}
UpdateResult update(
+ TransactionExperiment* txn,
const UpdateRequest& request,
OpDebug* opDebug,
UpdateDriver* driver,
@@ -719,7 +723,7 @@ namespace mongo {
// If a set of modifiers were all no-ops, we are still 'in place', but there is
// no work to do, in which case we want to consider the object unchanged.
if (!damages.empty() ) {
- collection->updateDocumentWithDamages( loc, source, damages );
+ collection->updateDocumentWithDamages( txn, loc, source, damages );
docWasModified = true;
opDebug->fastmod = true;
}
@@ -734,7 +738,8 @@ namespace mongo {
str::stream() << "Resulting document after update is larger than "
<< BSONObjMaxUserSize,
newObj.objsize() <= BSONObjMaxUserSize);
- StatusWith<DiskLoc> res = collection->updateDocument(loc,
+ StatusWith<DiskLoc> res = collection->updateDocument(txn,
+ loc,
newObj,
true,
opDebug);
@@ -860,7 +865,7 @@ namespace mongo {
Database* db = cc().getContext()->db();
collection = db->getCollection(request.getNamespaceString().ns());
if (!collection) {
- collection = db->createCollection(request.getNamespaceString().ns());
+ collection = db->createCollection(txn, request.getNamespaceString().ns());
}
}
@@ -870,7 +875,8 @@ namespace mongo {
str::stream() << "Document to upsert is larger than " << BSONObjMaxUserSize,
newObj.objsize() <= BSONObjMaxUserSize);
- StatusWith<DiskLoc> newLoc = collection->insertDocument(newObj,
+ StatusWith<DiskLoc> newLoc = collection->insertDocument(txn,
+ newObj,
!request.isGod() /*enforceQuota*/);
uassertStatusOK(newLoc.getStatus());
if (request.shouldCallLogOp()) {
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
index 34d290cfe85..89550f674cd 100644
--- a/src/mongo/db/ops/update.h
+++ b/src/mongo/db/ops/update.h
@@ -38,6 +38,7 @@
namespace mongo {
class CanonicalQuery;
+ class TransactionExperiment;
class UpdateDriver;
/**
@@ -45,7 +46,7 @@ namespace mongo {
*
* Caller must hold the appropriate database locks.
*/
- UpdateResult update(const UpdateRequest& request, OpDebug* opDebug);
+ UpdateResult update(TransactionExperiment* txn, const UpdateRequest& request, OpDebug* opDebug);
/**
* Execute the update described by "request", using the given already-parsed
@@ -55,7 +56,8 @@ namespace mongo {
*
* TODO: Move this into a private method of UpdateExecutor.
*/
- UpdateResult update(const UpdateRequest& request,
+ UpdateResult update(TransactionExperiment* txn,
+ const UpdateRequest& request,
OpDebug* opDebug,
UpdateDriver* driver,
CanonicalQuery* cq);
diff --git a/src/mongo/db/ops/update_executor.cpp b/src/mongo/db/ops/update_executor.cpp
index 5ae9d7fbd24..9abd39b6016 100644
--- a/src/mongo/db/ops/update_executor.cpp
+++ b/src/mongo/db/ops/update_executor.cpp
@@ -62,9 +62,10 @@ namespace mongo {
return Status::OK();
}
- UpdateResult UpdateExecutor::execute() {
+ UpdateResult UpdateExecutor::execute(TransactionExperiment* txn) {
uassertStatusOK(prepare());
- return update(*_request,
+ return update(txn,
+ *_request,
_opDebug,
&_driver,
_canonicalQuery.release());
diff --git a/src/mongo/db/ops/update_executor.h b/src/mongo/db/ops/update_executor.h
index eabfbb18519..8c168202db7 100644
--- a/src/mongo/db/ops/update_executor.h
+++ b/src/mongo/db/ops/update_executor.h
@@ -39,6 +39,7 @@ namespace mongo {
class CanonicalQuery;
class OpDebug;
+ class TransactionExperiment;
class UpdateRequest;
/**
@@ -90,7 +91,7 @@ namespace mongo {
* Execute an update. Requires the caller to hold the database lock on the
* appropriate resources for the request.
*/
- UpdateResult execute();
+ UpdateResult execute(TransactionExperiment* txn);
private:
/**
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index e02008ae18c..93e8367f698 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -105,7 +105,8 @@ namespace mongo {
* @param createDefaultIndexes - if false, defers id (and other) index creation.
* @return true if successful
*/
- Status userCreateNS( Database* db,
+ Status userCreateNS( TransactionExperiment* txn,
+ Database* db,
const StringData& ns,
BSONObj options,
bool logForReplication,
@@ -130,7 +131,7 @@ namespace mongo {
if ( !status.isOK() )
return status;
- invariant( db->createCollection( ns, collectionOptions, true, createDefaultIndexes ) );
+ invariant( db->createCollection( txn, ns, collectionOptions, true, createDefaultIndexes ) );
if ( logForReplication ) {
if ( options.getField( "create" ).eoo() ) {
diff --git a/src/mongo/db/pdfile.h b/src/mongo/db/pdfile.h
index 0d347d09e64..6ae7f8a7e23 100644
--- a/src/mongo/db/pdfile.h
+++ b/src/mongo/db/pdfile.h
@@ -45,10 +45,12 @@
namespace mongo {
class Database;
+ class TransactionExperiment;
void dropDatabase(Database* db);
- Status userCreateNS( Database* db,
+ Status userCreateNS( TransactionExperiment* txn,
+ Database* db,
const StringData& ns,
BSONObj options,
bool logForReplication,
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 31db42f7fa3..7eaec654d91 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/cloner.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/kill_current_op.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/util/file.h"
#include "mongo/util/file_allocator.h"
@@ -273,6 +274,7 @@ namespace mongo {
Status repairDatabase( string dbName,
bool preserveClonedFilesOnFailure,
bool backupOriginalFiles ) {
+ DurTransaction txn; // XXX
scoped_ptr<RepairFileDeleter> repairFileDeleter;
doingRepair dr;
dbName = nsToDatabase( dbName );
@@ -364,11 +366,11 @@ namespace mongo {
Collection* tempCollection = NULL;
{
Client::Context tempContext( ns, tempDatabase );
- tempCollection = tempDatabase->createCollection( ns, options, true, false );
+ tempCollection = tempDatabase->createCollection( &txn, ns, options, true, false );
}
Client::Context readContext( ns, originalDatabase );
- Collection* originalCollection = originalDatabase->getCollection( ns );
+ Collection* originalCollection = originalDatabase->getCollection( &txn, ns );
invariant( originalCollection );
// data
@@ -400,7 +402,7 @@ namespace mongo {
BSONObj doc = originalCollection->docFor( loc );
Client::Context tempContext( ns, tempDatabase );
- StatusWith<DiskLoc> result = tempCollection->insertDocument( doc, indexBlock );
+ StatusWith<DiskLoc> result = tempCollection->insertDocument( &txn, doc, indexBlock );
if ( !result.isOK() )
return result.getStatus();
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 0b8f9d0d599..ad215b9854f 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -53,6 +53,7 @@
#include "mongo/db/repl/repl_settings.h" // replSettings
#include "mongo/db/repl/rs.h" // replLocalAuth()
#include "mongo/db/server_parameters.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/storage_options.h"
namespace mongo {
@@ -193,6 +194,7 @@ namespace mongo {
{
OpDebug debug;
Client::Context ctx("local.sources");
+ DurTransaction txn;
const NamespaceString requestNs("local.sources");
UpdateRequest request(requestNs);
@@ -201,7 +203,7 @@ namespace mongo {
request.setUpdates(o);
request.setUpsert();
- UpdateResult res = update(request, &debug);
+ UpdateResult res = update(&txn, request, &debug);
verify( ! res.modifiers );
verify( res.numMatched == 1 );
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 29c3491a6cc..4ec5c9a2df1 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -54,6 +54,7 @@
#include "mongo/db/repl/rs.h"
#include "mongo/db/repl/write_concern.h"
#include "mongo/db/stats/counters.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/storage_options.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/s/d_logic.h"
@@ -93,6 +94,7 @@ namespace mongo {
*/
void _logOpObjRS(const BSONObj& op) {
Lock::DBWrite lk("local");
+ DurTransaction txn; //XXX should be part of parent txn
const OpTime ts = op["ts"]._opTime();
long long h = op["h"].numberLong();
@@ -108,7 +110,7 @@ namespace mongo {
localOplogRSCollection);
}
Client::Context ctx(rsoplog, localDB);
- checkOplogInsert( localOplogRSCollection->insertDocument( op, false ) );
+ checkOplogInsert( localOplogRSCollection->insertDocument( &txn, op, false ) );
/* todo: now() has code to handle clock skew. but if the skew server to server is large it will get unhappy.
this code (or code in now() maybe) should be improved.
@@ -257,8 +259,9 @@ namespace mongo {
}
Client::Context ctx(rsoplog, localDB);
+ DurTransaction txn; // XXX
OplogDocWriter writer( partial, obj );
- checkOplogInsert( localOplogRSCollection->insertDocument( &writer, false ) );
+ checkOplogInsert( localOplogRSCollection->insertDocument( &txn, &writer, false ) );
/* todo: now() has code to handle clock skew. but if the skew server to server is large it will get unhappy.
this code (or code in now() maybe) should be improved.
@@ -328,8 +331,9 @@ namespace mongo {
}
Client::Context ctx(logNS , localDB);
+ DurTransaction txn; //XXX should be part of parent txn
OplogDocWriter writer( partial, obj );
- checkOplogInsert( localOplogMainCollection->insertDocument( &writer, false ) );
+ checkOplogInsert( localOplogMainCollection->insertDocument( &txn, &writer, false ) );
context.getClient()->setLastOp( ts );
}
@@ -393,6 +397,7 @@ namespace mongo {
ns = rsoplog;
Client::Context ctx(ns);
+ DurTransaction txn; // XXX
Collection* collection = ctx.db()->getCollection( ns );
if ( collection ) {
@@ -453,7 +458,7 @@ namespace mongo {
options.cappedSize = sz;
options.autoIndexId = CollectionOptions::NO;
- invariant( ctx.db()->createCollection( ns, options ) );
+ invariant( ctx.db()->createCollection( &txn, ns, options ) );
if( !rs )
logOp( "n", "", BSONObj() );
@@ -469,6 +474,7 @@ namespace mongo {
*/
bool applyOperation_inlock(Database* db, const BSONObj& op,
bool fromRepl, bool convertUpdateToUpsert) {
+ DurTransaction txn; //XXX should be part of parent txn
LOG(3) << "applying op: " << op << endl;
bool failedUpdate = false;
@@ -553,7 +559,7 @@ namespace mongo {
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
- update(request, &debug);
+ update(&txn, request, &debug);
if( t.millis() >= 2 ) {
RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
@@ -582,7 +588,7 @@ namespace mongo {
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
- update(request, &debug);
+ update(&txn, request, &debug);
}
}
}
@@ -609,7 +615,7 @@ namespace mongo {
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
- UpdateResult ur = update(request, &debug);
+ UpdateResult ur = update(&txn, request, &debug);
if( ur.numMatched == 0 ) {
if( ur.modifiers ) {
@@ -650,7 +656,7 @@ namespace mongo {
else if ( *opType == 'd' ) {
opCounters->gotDelete();
if ( opType[1] == 0 )
- deleteObjects(ns, o, /*justOne*/ valueB);
+ deleteObjects(&txn, ns, o, /*justOne*/ valueB);
else
verify( opType[1] == 'b' ); // "db" advertisement
}
diff --git a/src/mongo/db/repl/rs.cpp b/src/mongo/db/repl/rs.cpp
index fdaf0668111..05623463a1d 100644
--- a/src/mongo/db/repl/rs.cpp
+++ b/src/mongo/db/repl/rs.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/repl/repl_start.h"
#include "mongo/db/repl/rs.h"
#include "mongo/db/server_parameters.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/platform/bits.h"
#include "mongo/s/d_logic.h"
#include "mongo/util/net/sock.h"
@@ -119,6 +120,7 @@ namespace {
void dropAllTempCollections() {
vector<string> dbNames;
getDatabaseNames(dbNames);
+ DurTransaction txn;
for (vector<string>::const_iterator it = dbNames.begin(); it != dbNames.end(); ++it) {
// The local db is special because it isn't replicated. It is cleared at startup even on
// replica set members.
@@ -126,7 +128,7 @@ namespace {
continue;
Client::Context ctx(*it);
- ctx.db()->clearTmpCollections();
+ ctx.db()->clearTmpCollections(&txn);
}
}
}
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 86d42d3b7a0..76c39c88d9a 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -42,6 +42,7 @@
#include "mongo/bson/optime.h"
#include "mongo/db/repl/repl_settings.h" // replSettings
#include "mongo/db/repl/initial_sync.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/structure/catalog/namespace_details.h"
#include "mongo/util/mongoutils/str.h"
@@ -127,6 +128,7 @@ namespace mongo {
static void emptyOplog() {
Client::WriteContext ctx(rsoplog);
+ DurTransaction txn;
Collection* collection = ctx.ctx().db()->getCollection(rsoplog);
// temp
@@ -134,7 +136,7 @@ namespace mongo {
return; // already empty, ok.
LOG(1) << "replSet empty oplog" << rsLog;
- uassertStatusOK( collection->truncate() );
+ uassertStatusOK( collection->truncate(&txn) );
}
const Member* ReplSetImpl::getMemberToSyncTo() {
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 3a22f7a07bb..529ea548462 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/rs.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/structure/catalog/namespace_details.h"
/* Scenarios
@@ -336,6 +337,7 @@ namespace mongo {
void ReplSetImpl::syncFixUp(HowToFixUp& h, OplogReader& r) {
DBClientConnection *them = r.conn();
+ DurTransaction txn;
// fetch all first so we needn't handle interruption in a fancy way
@@ -407,7 +409,7 @@ namespace mongo {
Client::Context c(ns);
{
- c.db()->dropCollection(ns);
+ c.db()->dropCollection(&txn, ns);
{
string errmsg;
dbtemprelease r;
@@ -458,7 +460,7 @@ namespace mongo {
Client::Context c(*i);
try {
log() << "replSet rollback drop: " << *i << rsLog;
- c.db()->dropCollection(*i);
+ c.db()->dropCollection(&txn, *i);
}
catch(...) {
log() << "replset rollback error dropping collection " << *i << rsLog;
@@ -524,12 +526,12 @@ namespace mongo {
//would be faster but requires index: DiskLoc loc = Helpers::findById(nsd, pattern);
if( !loc.isNull() ) {
try {
- collection->temp_cappedTruncateAfter(loc, true);
+ collection->temp_cappedTruncateAfter(&txn, loc, true);
}
catch(DBException& e) {
if( e.getCode() == 13415 ) {
// hack: need to just make cappedTruncate do this...
- uassertStatusOK( collection->truncate() );
+ uassertStatusOK( collection->truncate(&txn ) );
}
else {
throw;
@@ -544,7 +546,7 @@ namespace mongo {
else {
try {
deletes++;
- deleteObjects(d.ns, pattern, /*justone*/true, /*logop*/false, /*god*/true);
+ deleteObjects(&txn, d.ns, pattern, /*justone*/true, /*logop*/false, /*god*/true);
}
catch(...) {
log() << "replSet error rollback delete failed ns:" << d.ns << rsLog;
@@ -558,7 +560,7 @@ namespace mongo {
if( o.isEmpty() ) {
// we should drop
try {
- c.db()->dropCollection(d.ns);
+ c.db()->dropCollection(&txn, d.ns);
}
catch(...) {
log() << "replset error rolling back collection " << d.ns << rsLog;
@@ -587,7 +589,7 @@ namespace mongo {
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
- update(request, &debug);
+ update(&txn, request, &debug);
}
}
@@ -606,7 +608,7 @@ namespace mongo {
// clean up oplog
LOG(2) << "replSet rollback truncate oplog after " << h.commonPoint.toStringPretty() << rsLog;
// todo: fatal error if this throws?
- oplogCollection->temp_cappedTruncateAfter(h.commonPointOurDiskloc, false);
+ oplogCollection->temp_cappedTruncateAfter(&txn, h.commonPointOurDiskloc, false);
Status status = getGlobalAuthorizationManager()->initialize();
if (!status.isOK()) {
diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp
index 2d4b652ffe4..4c2cc6b5f1c 100644
--- a/src/mongo/db/repl/rs_sync.cpp
+++ b/src/mongo/db/repl/rs_sync.cpp
@@ -52,6 +52,7 @@
#include "mongo/db/repl/sync_tail.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/stats/timer_stats.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/storage_options.h"
#include "mongo/util/fail_point_service.h"
@@ -230,7 +231,8 @@ namespace mongo {
changeState(MemberState::RS_RECOVERING);
Client::Context ctx("local");
- ctx.db()->dropCollection("local.oplog.rs");
+ DurTransaction txn;
+ ctx.db()->dropCollection(&txn, "local.oplog.rs");
{
boost::unique_lock<boost::mutex> lock(theReplSet->initialSyncMutex);
theReplSet->initialSyncRequested = true;
diff --git a/src/mongo/db/repl/sync.cpp b/src/mongo/db/repl/sync.cpp
index 87380803105..9a4b5ad8073 100644
--- a/src/mongo/db/repl/sync.cpp
+++ b/src/mongo/db/repl/sync.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/structure/catalog/namespace_details.h"
#include "mongo/db/pdfile.h"
#include "mongo/db/repl/oplogreader.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -109,6 +110,7 @@ namespace mongo {
// should already have write lock
const char *ns = o.getStringField("ns");
Client::Context ctx(ns);
+ DurTransaction txn;
// we don't have the object yet, which is possible on initial sync. get it.
log() << "replication info adding missing object" << endl; // rare enough we can log
@@ -125,7 +127,7 @@ namespace mongo {
else {
Collection* collection = ctx.db()->getOrCreateCollection( ns );
verify( collection ); // should never happen
- StatusWith<DiskLoc> result = collection->insertDocument( missingObj, true );
+ StatusWith<DiskLoc> result = collection->insertDocument( &txn, missingObj, true );
uassert(15917,
str::stream() << "failed to insert missing doc: " << result.toString(),
result.isOK() );
diff --git a/src/mongo/db/structure/btree/btree_logic.cpp b/src/mongo/db/structure/btree/btree_logic.cpp
index c06016dd74a..cb3149b5c66 100644
--- a/src/mongo/db/structure/btree/btree_logic.cpp
+++ b/src/mongo/db/structure/btree/btree_logic.cpp
@@ -1277,10 +1277,10 @@ namespace mongo {
template <class BtreeLayout>
void BtreeLogic<BtreeLayout>::deallocBucket(BucketType* bucket, const DiskLoc bucketLoc) {
- DurTransaction txn[1];
+ DurTransaction txn; // XXX
bucket->n = BtreeLayout::INVALID_N_SENTINEL;
bucket->parent.Null();
- _recordStore->deleteRecord(txn, bucketLoc);
+ _recordStore->deleteRecord(&txn, bucketLoc);
}
template <class BtreeLayout>
diff --git a/src/mongo/db/structure/collection_compact.cpp b/src/mongo/db/structure/collection_compact.cpp
index e1f20e272a3..58a018bcb73 100644
--- a/src/mongo/db/structure/collection_compact.cpp
+++ b/src/mongo/db/structure/collection_compact.cpp
@@ -101,9 +101,8 @@ namespace mongo {
}
- StatusWith<CompactStats> Collection::compact( const CompactOptions* compactOptions ) {
- DurTransaction txn[1];
-
+ StatusWith<CompactStats> Collection::compact( TransactionExperiment* txn,
+ const CompactOptions* compactOptions ) {
if ( !_recordStore->compactSupported() )
return StatusWith<CompactStats>( ErrorCodes::BadValue,
str::stream() <<
diff --git a/src/mongo/db/structure/record_store_v1_capped.cpp b/src/mongo/db/structure/record_store_v1_capped.cpp
index 10484496b6a..8f455395144 100644
--- a/src/mongo/db/structure/record_store_v1_capped.cpp
+++ b/src/mongo/db/structure/record_store_v1_capped.cpp
@@ -164,7 +164,7 @@ namespace mongo {
}
DiskLoc fr = theCapExtent()->firstRecord;
- _collection->deleteDocument( fr, true );
+ _collection->deleteDocument( txn, fr, true );
compact(txn);
if( ++passes > maxPasses ) {
StringBuilder sb;
@@ -474,7 +474,7 @@ namespace mongo {
// Delete the newest record, and coalesce the new deleted
// record with existing deleted records.
- _collection->deleteDocument( curr, true );
+ _collection->deleteDocument( txn, curr, true );
compact(txn);
// This is the case where we have not yet had to remove any
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 87217718c57..30b16e9a14d 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/ops/delete.h"
#include "mongo/db/repl/is_master.h"
#include "mongo/db/server_parameters.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/structure/catalog/namespace_details.h"
#include "mongo/util/background.h"
@@ -115,6 +116,7 @@ namespace mongo {
{
string ns = idx["ns"].String();
Client::WriteContext ctx( ns );
+ DurTransaction txn;
Collection* collection = ctx.ctx().db()->getCollection( ns );
if ( !collection ) {
// collection was dropped
@@ -133,7 +135,7 @@ namespace mongo {
continue;
}
- n = deleteObjects( ns , query , false , true );
+ n = deleteObjects( &txn, ns , query , false , true );
ttlDeletedDocuments.increment( n );
}
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index e8881b8a972..103eb50e503 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -34,6 +34,7 @@
#include "mongo/db/json.h"
#include "mongo/db/ops/count.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/dbtests/dbtests.h"
@@ -44,20 +45,21 @@ namespace CountTests {
Client::Context _context;
Database* _database;
Collection* _collection;
+ DurTransaction _txn;
public:
Base() : lk(ns()), _context( ns() ) {
_database = _context.db();
_collection = _database->getCollection( ns() );
if ( _collection ) {
- _database->dropCollection( ns() );
+ _database->dropCollection( &_txn, ns() );
}
- _collection = _database->createCollection( ns() );
+ _collection = _database->createCollection( &_txn, ns() );
addIndex( fromjson( "{\"a\":1}" ) );
}
~Base() {
try {
- uassertStatusOK( _database->dropCollection( ns() ) );
+ uassertStatusOK( _database->dropCollection( &_txn, ns() ) );
}
catch ( ... ) {
FAIL( "Exception while cleaning up collection" );
@@ -86,10 +88,10 @@ namespace CountTests {
oid.init();
b.appendOID( "_id", &oid );
b.appendElements( o );
- _collection->insertDocument( b.obj(), false );
+ _collection->insertDocument( &_txn, b.obj(), false );
}
else {
- _collection->insertDocument( o, false );
+ _collection->insertDocument( &_txn, o, false );
}
}
static BSONObj countCommand( const BSONObj &query ) {
diff --git a/src/mongo/dbtests/extsorttests.cpp b/src/mongo/dbtests/extsorttests.cpp
index aec478aadc4..1ebf364421f 100644
--- a/src/mongo/dbtests/extsorttests.cpp
+++ b/src/mongo/dbtests/extsorttests.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/catalog/database.h"
#include "mongo/db/extsort.h"
#include "mongo/db/index/btree_based_bulk_access_method.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/platform/cstdint.h"
@@ -310,6 +311,7 @@ namespace ExtSortTests {
_client.createCollection( _ns );
// Take a write lock.
Client::WriteContext ctx( _ns );
+ DurTransaction txn;
Collection* coll = ctx.ctx().db()->getCollection( _ns );
// Do a write to ensure the implementation will interrupt sort() even after a write has
// occurred.
@@ -317,7 +319,7 @@ namespace ExtSortTests {
OID id;
id.init();
b.appendOID( "_id", &id );
- coll->insertDocument( b.obj(), true );
+ coll->insertDocument( &txn, b.obj(), true );
// Create a sorter with a max file size of only 10k, to trigger a file flush after a
// relatively small number of inserts.
auto_ptr<ExternalSortComparison> cmp(BtreeBasedBulkAccessMethod::getComparison(0,
@@ -358,6 +360,7 @@ namespace ExtSortTests {
_client.createCollection( _ns );
// Take a write lock.
Client::WriteContext ctx( _ns );
+ DurTransaction txn;
Collection* coll = ctx.ctx().db()->getCollection( _ns );
// Do a write to ensure the implementation will interrupt sort() even after a write has
// occurred.
@@ -365,7 +368,7 @@ namespace ExtSortTests {
OID id;
id.init();
b.appendOID( "_id", &id );
- coll->insertDocument( b.obj(), true );
+ coll->insertDocument( &txn, b.obj(), true );
// Create a sorter.
BSONObjExternalSorter sorter(_aFirstSort);
// Add keys to the sorter.
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index 02ad7ee79d8..71ad6be6197 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -20,6 +20,7 @@
#include "mongo/db/db.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/dbtests/dbtests.h"
@@ -31,14 +32,16 @@ namespace IndexCatalogTests {
public:
IndexIteratorTests() {
Client::WriteContext ctx(_ns);
+ DurTransaction txn;
_db = ctx.ctx().db();
- _coll = _db->createCollection(_ns);
+ _coll = _db->createCollection(&txn, _ns);
_catalog = _coll->getIndexCatalog();
}
~IndexIteratorTests() {
Client::WriteContext ctx(_ns);
- _db->dropCollection(_ns);
+ DurTransaction txn;
+ _db->dropCollection(&txn, _ns);
}
void run() {
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index fa5b119922c..84131927ba6 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -34,6 +34,7 @@
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/kill_current_op.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/platform/cstdint.h"
#include "mongo/dbtests/dbtests.h"
@@ -89,6 +90,7 @@ namespace IndexUpdateTests {
}
#endif
Client::WriteContext _ctx;
+ DurTransaction _txn;
};
/** addKeysToPhaseOne() adds keys from a collection's documents to an external sorter. */
@@ -313,14 +315,14 @@ namespace IndexUpdateTests {
void run() {
// Create a new collection.
Database* db = _ctx.ctx().db();
- db->dropCollection( _ns );
- Collection* coll = db->createCollection( _ns );
+ db->dropCollection( &_txn, _ns );
+ Collection* coll = db->createCollection( &_txn, _ns );
// Drop all indexes including id index.
coll->getIndexCatalog()->dropAllIndexes( true );
// Insert some documents with enforceQuota=true.
int32_t nDocs = 1000;
for( int32_t i = 0; i < nDocs; ++i ) {
- coll->insertDocument( BSON( "a" << i ), true );
+ coll->insertDocument( &_txn, BSON( "a" << i ), true );
}
// Initialize curop.
cc().curop()->reset();
@@ -343,13 +345,13 @@ namespace IndexUpdateTests {
void run() {
// Create a new collection.
Database* db = _ctx.ctx().db();
- db->dropCollection( _ns );
- Collection* coll = db->createCollection( _ns );
+ db->dropCollection( &_txn, _ns );
+ Collection* coll = db->createCollection( &_txn, _ns );
coll->getIndexCatalog()->dropAllIndexes( true );
// Insert some documents.
int32_t nDocs = 1000;
for( int32_t i = 0; i < nDocs; ++i ) {
- coll->insertDocument( BSON( "a" << i ), true );
+ coll->insertDocument( &_txn, BSON( "a" << i ), true );
}
// Initialize curop.
cc().curop()->reset();
@@ -372,16 +374,16 @@ namespace IndexUpdateTests {
void run() {
// Recreate the collection as capped, without an _id index.
Database* db = _ctx.ctx().db();
- db->dropCollection( _ns );
+ db->dropCollection( &_txn, _ns );
CollectionOptions options;
options.capped = true;
options.cappedSize = 10 * 1024;
- Collection* coll = db->createCollection( _ns, options );
+ Collection* coll = db->createCollection( &_txn, _ns, options );
coll->getIndexCatalog()->dropAllIndexes( true );
// Insert some documents.
int32_t nDocs = 1000;
for( int32_t i = 0; i < nDocs; ++i ) {
- coll->insertDocument( BSON( "_id" << i ), true );
+ coll->insertDocument( &_txn, BSON( "_id" << i ), true );
}
// Initialize curop.
cc().curop()->reset();
@@ -406,16 +408,16 @@ namespace IndexUpdateTests {
void run() {
// Recreate the collection as capped, without an _id index.
Database* db = _ctx.ctx().db();
- db->dropCollection( _ns );
+ db->dropCollection( &_txn, _ns );
CollectionOptions options;
options.capped = true;
options.cappedSize = 10 * 1024;
- Collection* coll = db->createCollection( _ns, options );
+ Collection* coll = db->createCollection( &_txn, _ns, options );
coll->getIndexCatalog()->dropAllIndexes( true );
// Insert some documents.
int32_t nDocs = 1000;
for( int32_t i = 0; i < nDocs; ++i ) {
- coll->insertDocument( BSON( "_id" << i ), true );
+ coll->insertDocument( &_txn, BSON( "_id" << i ), true );
}
// Initialize curop.
cc().curop()->reset();
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index cc5cc892fd6..97cdeb2c63d 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -132,14 +132,16 @@ namespace NamespaceTests {
public:
Base( const char *ns = "unittests.NamespaceDetailsTests" ) : ns_( ns ) , _context( ns ) {}
virtual ~Base() {
+ DurTransaction txn;
if ( !nsd() )
return;
- _context.db()->dropCollection( ns() );
+ _context.db()->dropCollection( &txn, ns() );
}
protected:
void create() {
Lock::GlobalWrite lk;
- ASSERT( userCreateNS( db(), ns(), fromjson( spec() ), false ).isOK() );
+ DurTransaction txn;
+ ASSERT( userCreateNS( &txn, db(), ns(), fromjson( spec() ), false ).isOK() );
}
virtual string spec() const {
return "{\"capped\":true,\"size\":512,\"$nExtents\":1}";
@@ -274,9 +276,10 @@ namespace NamespaceTests {
class SingleAlloc : public Base {
public:
void run() {
+ DurTransaction txn;
create();
BSONObj b = bigObj();
- ASSERT( collection()->insertDocument( b, true ).isOK() );
+ ASSERT( collection()->insertDocument( &txn, b, true ).isOK() );
ASSERT_EQUALS( 1, nRecords() );
}
};
@@ -284,6 +287,7 @@ namespace NamespaceTests {
class Realloc : public Base {
public:
void run() {
+ DurTransaction txn;
create();
const int N = 20;
@@ -291,7 +295,7 @@ namespace NamespaceTests {
DiskLoc l[ N ];
for ( int i = 0; i < N; ++i ) {
BSONObj b = bigObj();
- StatusWith<DiskLoc> status = collection()->insertDocument( b, true );
+ StatusWith<DiskLoc> status = collection()->insertDocument( &txn, b, true );
ASSERT( status.isOK() );
l[ i ] = status.getValue();
ASSERT( !l[ i ].isNull() );
@@ -306,12 +310,13 @@ namespace NamespaceTests {
class TwoExtent : public Base {
public:
void run() {
+ DurTransaction txn;
create();
ASSERT_EQUALS( 2, nExtents() );
DiskLoc l[ 8 ];
for ( int i = 0; i < 8; ++i ) {
- StatusWith<DiskLoc> status = collection()->insertDocument( bigObj(), true );
+ StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bigObj(), true );
ASSERT( status.isOK() );
l[ i ] = status.getValue();
ASSERT( !l[ i ].isNull() );
@@ -326,7 +331,7 @@ namespace NamespaceTests {
bob.appendOID( "_id", NULL, true );
bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096
BSONObj bigger = bob.done();
- StatusWith<DiskLoc> status = collection()->insertDocument( bigger, false );
+ StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bigger, false );
ASSERT( !status.isOK() );
ASSERT_EQUALS( 0, nRecords() );
}
@@ -547,18 +552,18 @@ namespace NamespaceTests {
class AllocQuantized : public Base {
public:
void run() {
- DurTransaction txn[1];
+ DurTransaction txn;
string myns = (string)ns() + "AllocQuantized";
db()->namespaceIndex().add_ns( myns, DiskLoc(), false );
- SimpleRecordStoreV1 rs( txn,
+ SimpleRecordStoreV1 rs( &txn,
myns,
new NamespaceDetailsRSV1MetaData( db()->namespaceIndex().details( myns ) ),
&db()->getExtentManager(),
false );
BSONObj obj = docForRecordSize( 300 );
- StatusWith<DiskLoc> result = rs.insertRecord( txn, obj.objdata(), obj.objsize(), 0 );
+ StatusWith<DiskLoc> result = rs.insertRecord( &txn, obj.objdata(), obj.objsize(), 0 );
ASSERT( result.isOK() );
// The length of the allocated record is quantized.
@@ -571,12 +576,13 @@ namespace NamespaceTests {
class AllocCappedNotQuantized : public Base {
public:
void run() {
+ DurTransaction txn;
create();
ASSERT( nsd()->isCapped() );
ASSERT( !nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
StatusWith<DiskLoc> result =
- collection()->insertDocument( docForRecordSize( 300 ), false );
+ collection()->insertDocument( &txn, docForRecordSize( 300 ), false );
ASSERT( result.isOK() );
Record* record = collection()->getRecordStore()->recordFor( result.getValue() );
// Check that no quantization is performed.
@@ -592,18 +598,18 @@ namespace NamespaceTests {
class AllocIndexNamespaceNotQuantized : public Base {
public:
void run() {
- DurTransaction txn[1];
+ DurTransaction txn;
string myns = (string)ns() + "AllocIndexNamespaceNotQuantized";
db()->namespaceIndex().add_ns( myns, DiskLoc(), false );
- SimpleRecordStoreV1 rs( txn,
+ SimpleRecordStoreV1 rs( &txn,
myns + ".$x",
new NamespaceDetailsRSV1MetaData( db()->namespaceIndex().details( myns ) ),
&db()->getExtentManager(),
false );
BSONObj obj = docForRecordSize( 300 );
- StatusWith<DiskLoc> result = rs.insertRecord(txn, obj.objdata(), obj.objsize(), 0 );
+ StatusWith<DiskLoc> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), 0 );
ASSERT( result.isOK() );
// The length of the allocated record is not quantized.
@@ -616,18 +622,18 @@ namespace NamespaceTests {
class AllocIndexNamespaceSlightlyQuantized : public Base {
public:
void run() {
- DurTransaction txn[1];
+ DurTransaction txn;
string myns = (string)ns() + "AllocIndexNamespaceNotQuantized";
db()->namespaceIndex().add_ns( myns, DiskLoc(), false );
- SimpleRecordStoreV1 rs( txn,
+ SimpleRecordStoreV1 rs( &txn,
myns + ".$x",
new NamespaceDetailsRSV1MetaData( db()->namespaceIndex().details( myns ) ),
&db()->getExtentManager(),
true );
BSONObj obj = docForRecordSize( 298 );
- StatusWith<DiskLoc> result = rs.insertRecord( txn, obj.objdata(), obj.objsize(), 0 );
+ StatusWith<DiskLoc> result = rs.insertRecord( &txn, obj.objdata(), obj.objsize(), 0 );
ASSERT( result.isOK() );
ASSERT_EQUALS( 300, rs.recordFor( result.getValue() )->lengthWithHeaders() );
@@ -638,10 +644,12 @@ namespace NamespaceTests {
class AllocUseNonQuantizedDeletedRecord : public Base {
public:
void run() {
+ DurTransaction txn;
create();
cookDeletedList( 310 );
- StatusWith<DiskLoc> actualLocation = collection()->insertDocument( docForRecordSize(300),
+ StatusWith<DiskLoc> actualLocation = collection()->insertDocument( &txn,
+ docForRecordSize(300),
false );
ASSERT( actualLocation.isOK() );
Record* rec = collection()->getRecordStore()->recordFor( actualLocation.getValue() );
@@ -657,10 +665,12 @@ namespace NamespaceTests {
class AllocExactSizeNonQuantizedDeletedRecord : public Base {
public:
void run() {
+ DurTransaction txn;
create();
cookDeletedList( 300 );
- StatusWith<DiskLoc> actualLocation = collection()->insertDocument( docForRecordSize(300),
+ StatusWith<DiskLoc> actualLocation = collection()->insertDocument( &txn,
+ docForRecordSize(300),
false );
ASSERT( actualLocation.isOK() );
Record* rec = collection()->getRecordStore()->recordFor( actualLocation.getValue() );
@@ -678,10 +688,12 @@ namespace NamespaceTests {
class AllocQuantizedWithExtra : public Base {
public:
void run() {
+ DurTransaction txn;
create();
cookDeletedList( 343 );
- StatusWith<DiskLoc> actualLocation = collection()->insertDocument( docForRecordSize(300),
+ StatusWith<DiskLoc> actualLocation = collection()->insertDocument( &txn,
+ docForRecordSize(300),
false );
ASSERT( actualLocation.isOK() );
Record* rec = collection()->getRecordStore()->recordFor( actualLocation.getValue() );
@@ -699,13 +711,15 @@ namespace NamespaceTests {
class AllocQuantizedWithoutExtra : public Base {
public:
void run() {
+ DurTransaction txn;
create();
cookDeletedList( 344 );
const RecordStore* rs = collection()->getRecordStore();
// The returned record is quantized from 300 to 320.
- StatusWith<DiskLoc> actualLocation = collection()->insertDocument( docForRecordSize(300),
+ StatusWith<DiskLoc> actualLocation = collection()->insertDocument( &txn,
+ docForRecordSize(300),
false );
ASSERT( actualLocation.isOK() );
Record* rec = rs->recordFor( actualLocation.getValue() );
@@ -725,10 +739,12 @@ namespace NamespaceTests {
class AllocNotQuantizedNearDeletedSize : public Base {
public:
void run() {
+ DurTransaction txn;
create();
cookDeletedList( 344 );
- StatusWith<DiskLoc> actualLocation = collection()->insertDocument( docForRecordSize(319),
+ StatusWith<DiskLoc> actualLocation = collection()->insertDocument( &txn,
+ docForRecordSize(319),
false );
ASSERT( actualLocation.isOK() );
Record* rec = collection()->getRecordStore()->recordFor( actualLocation.getValue() );
@@ -749,6 +765,7 @@ namespace NamespaceTests {
return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
}
void pass(int p) {
+ DurTransaction txn;
create();
ASSERT_EQUALS( 2, nExtents() );
@@ -761,7 +778,7 @@ namespace NamespaceTests {
//DiskLoc l[ 8 ];
for ( int i = 0; i < N; ++i ) {
BSONObj bb = bigObj();
- StatusWith<DiskLoc> status = collection()->insertDocument( bb, true );
+ StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bb, true );
ASSERT( status.isOK() );
DiskLoc a = status.getValue();
if( T == i )
@@ -790,7 +807,7 @@ namespace NamespaceTests {
ASSERT( first != last ) ;
}
- collection()->temp_cappedTruncateAfter(truncAt, false);
+ collection()->temp_cappedTruncateAfter(&txn, truncAt, false);
ASSERT_EQUALS( collection()->numRecords() , 28u );
{
@@ -816,7 +833,7 @@ namespace NamespaceTests {
bob.appendOID("_id", 0, true);
bob.append( "a", string( MinExtentSize + 300, 'a' ) );
BSONObj bigger = bob.done();
- StatusWith<DiskLoc> status = collection()->insertDocument( bigger, true );
+ StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bigger, true );
ASSERT( !status.isOK() );
ASSERT_EQUALS( 0, nRecords() );
}
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index 517ab307f09..c28103e30b0 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -30,6 +30,7 @@
#include "mongo/db/query/internal_runner.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/repl_settings.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/util/fail_point_service.h"
@@ -40,9 +41,10 @@ namespace OplogStartTests {
class Base {
public:
Base() : _context(ns()) {
- Collection* c = _context.db()->getCollection(ns());
+ DurTransaction txn;
+ Collection* c = _context.db()->getCollection(&txn, ns());
if (!c) {
- c = _context.db()->createCollection(ns());
+ c = _context.db()->createCollection(&txn, ns());
}
c->getIndexCatalog()->ensureHaveIdIndex();
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index d4a00be89f7..a3bc2ad237f 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/storage/data_file.h"
#include "mongo/db/storage/extent.h"
#include "mongo/db/storage/extent_manager.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/dbtests/dbtests.h"
namespace PdfileTests {
@@ -51,7 +52,7 @@ namespace PdfileTests {
virtual ~Base() {
if ( !collection() )
return;
- _context.db()->dropCollection( ns() );
+ _context.db()->dropCollection( &_txn, ns() );
}
protected:
const char *ns() {
@@ -63,6 +64,7 @@ namespace PdfileTests {
Lock::GlobalWrite lk_;
Client::Context _context;
+ DurTransaction _txn;
};
class InsertNoId : public Base {
@@ -70,15 +72,15 @@ namespace PdfileTests {
void run() {
BSONObj x = BSON( "x" << 1 );
ASSERT( x["_id"].type() == 0 );
- Collection* collection = _context.db()->getOrCreateCollection( ns() );
- StatusWith<DiskLoc> dl = collection->insertDocument( x, true );
+ Collection* collection = _context.db()->getOrCreateCollection( &_txn, ns() );
+ StatusWith<DiskLoc> dl = collection->insertDocument( &_txn, x, true );
ASSERT( !dl.isOK() );
StatusWith<BSONObj> fixed = fixDocumentForInsert( x );
ASSERT( fixed.isOK() );
x = fixed.getValue();
ASSERT( x["_id"].type() == jstOID );
- dl = collection->insertDocument( x, true );
+ dl = collection->insertDocument( &_txn, x, true );
ASSERT( dl.isOK() );
}
};
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 46689927bec..8f59112c0a8 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/pdfile.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/util/mongoutils/str.h"
@@ -126,10 +127,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -227,10 +229,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -312,10 +315,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -360,10 +364,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
// Generate large keys for {foo: 1, big: 1} index.
@@ -411,10 +416,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
// Generate large keys for {baz: 1, big: 1} index.
@@ -461,10 +467,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -521,10 +528,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
// Generate large keys for {bar: 1, big: 1} index.
@@ -579,10 +587,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -637,10 +646,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
for (int i = 0; i < 10; ++i) {
@@ -684,10 +694,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -739,10 +750,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
// Insert a bunch of data
@@ -855,10 +867,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
// Insert a bunch of data
@@ -907,10 +920,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
@@ -951,10 +965,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -998,10 +1013,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -1042,10 +1058,11 @@ namespace QueryStageAnd {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
for (int i = 0; i < 50; ++i) {
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index dbb450714da..7bdfca3ba3f 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/storage/extent.h"
#include "mongo/db/storage/extent_manager.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/structure/catalog/namespace_details.h"
#include "mongo/db/structure/record_store.h"
#include "mongo/dbtests/dbtests.h"
@@ -56,7 +57,7 @@ namespace QueryStageCollectionScan {
QueryStageCollectionScanCappedBase() : _context(ns()) { }
virtual ~QueryStageCollectionScanCappedBase() {
- _context.db()->dropCollection( ns() );
+ _context.db()->dropCollection( &_txn, ns() );
}
void run() {
@@ -64,7 +65,7 @@ namespace QueryStageCollectionScan {
stringstream spec;
spec << "{\"capped\":true,\"size\":2000,\"$nExtents\":" << nExtents() << "}";
- ASSERT( userCreateNS( db(), ns(), fromjson( spec.str() ), false ).isOK() );
+ ASSERT( userCreateNS( &_txn, db(), ns(), fromjson( spec.str() ), false ).isOK() );
// Tell the test to add data/extents/etc.
insertTestData();
@@ -172,6 +173,7 @@ namespace QueryStageCollectionScan {
Lock::GlobalWrite lk_;
Client::Context _context;
+ DurTransaction _txn;
};
class QueryStageCollscanEmpty : public QueryStageCollectionScanCappedBase {
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index 568f28a301e..4eea0e115ad 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/pdfile.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/util/fail_point.h"
@@ -90,10 +91,11 @@ namespace QueryStageFetch {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
WorkingSet ws;
@@ -161,10 +163,11 @@ namespace QueryStageFetch {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
WorkingSet ws;
@@ -226,10 +229,11 @@ namespace QueryStageFetch {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
WorkingSet ws;
@@ -298,10 +302,11 @@ namespace QueryStageFetch {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
WorkingSet ws;
@@ -362,10 +367,11 @@ namespace QueryStageFetch {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
WorkingSet ws;
diff --git a/src/mongo/dbtests/query_stage_keep.cpp b/src/mongo/dbtests/query_stage_keep.cpp
index 46a990146f1..43c9fc941ac 100644
--- a/src/mongo/dbtests/query_stage_keep.cpp
+++ b/src/mongo/dbtests/query_stage_keep.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/pdfile.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/util/fail_point.h"
@@ -104,10 +105,11 @@ namespace QueryStageKeep {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
WorkingSet ws;
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 97bee896f03..0a58310d0ca 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/instance.h"
#include "mongo/db/json.h"
#include "mongo/db/query/plan_executor.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/dbtests/dbtests.h"
@@ -106,10 +107,11 @@ namespace QueryStageMergeSortTests {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
const int N = 50;
@@ -169,10 +171,11 @@ namespace QueryStageMergeSortTests {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
const int N = 50;
@@ -231,10 +234,11 @@ namespace QueryStageMergeSortTests {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
const int N = 50;
@@ -294,10 +298,11 @@ namespace QueryStageMergeSortTests {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
const int N = 50;
@@ -358,10 +363,11 @@ namespace QueryStageMergeSortTests {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
const int N = 50;
@@ -420,10 +426,11 @@ namespace QueryStageMergeSortTests {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
WorkingSet* ws = new WorkingSet();
@@ -472,10 +479,11 @@ namespace QueryStageMergeSortTests {
public:
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
WorkingSet ws;
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index d8e5d2fc340..c8499a28976 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/instance.h"
#include "mongo/db/json.h"
#include "mongo/db/query/plan_executor.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/dbtests/dbtests.h"
@@ -180,10 +181,11 @@ namespace QueryStageSortTests {
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
fillData();
@@ -198,10 +200,11 @@ namespace QueryStageSortTests {
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
fillData();
@@ -225,10 +228,11 @@ namespace QueryStageSortTests {
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
fillData();
@@ -243,10 +247,11 @@ namespace QueryStageSortTests {
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
fillData();
@@ -332,10 +337,11 @@ namespace QueryStageSortTests {
void run() {
Client::WriteContext ctx(ns());
+ DurTransaction txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
WorkingSet* ws = new WorkingSet();
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index c8634a6c6b1..96f83c61913 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/query/new_find.h"
#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/util/timer.h"
@@ -56,19 +57,20 @@ namespace QueryTests {
Client::Context _context;
Database* _database;
Collection* _collection;
+ DurTransaction _txn;
public:
Base() : _context( ns() ) {
_database = _context.db();
_collection = _database->getCollection( ns() );
if ( _collection ) {
- _database->dropCollection( ns() );
+ _database->dropCollection( &_txn, ns() );
}
- _collection = _database->createCollection( ns() );
+ _collection = _database->createCollection( &_txn, ns() );
addIndex( fromjson( "{\"a\":1}" ) );
}
~Base() {
try {
- uassertStatusOK( _database->dropCollection( ns() ) );
+ uassertStatusOK( _database->dropCollection( &_txn, ns() ) );
}
catch ( ... ) {
FAIL( "Exception while cleaning up collection" );
@@ -97,10 +99,10 @@ namespace QueryTests {
oid.init();
b.appendOID( "_id", &oid );
b.appendElements( o );
- _collection->insertDocument( b.obj(), false );
+ _collection->insertDocument( &_txn, b.obj(), false );
}
else {
- _collection->insertDocument( o, false );
+ _collection->insertDocument( &_txn, o, false );
}
}
};
@@ -154,13 +156,14 @@ namespace QueryTests {
// an empty object (one might be allowed inside a reserved namespace at some point).
Lock::GlobalWrite lk;
Client::Context ctx( "unittests.querytests" );
+ DurTransaction txn;
Database* db = ctx.db();
if ( db->getCollection( ns() ) ) {
_collection = NULL;
- db->dropCollection( ns() );
+ db->dropCollection( &txn, ns() );
}
- _collection = db->createCollection( ns(), CollectionOptions(), true, false );
+ _collection = db->createCollection( &txn, ns(), CollectionOptions(), true, false );
ASSERT( _collection );
DBDirectClient cl;
@@ -1169,9 +1172,10 @@ namespace QueryTests {
string err;
Client::WriteContext ctx( "unittests" );
+ DurTransaction txn;
// note that extents are always at least 4KB now - so this will get rounded up a bit.
- ASSERT( userCreateNS( ctx.ctx().db(), ns(),
+ ASSERT( userCreateNS( &txn, ctx.ctx().db(), ns(),
fromjson( "{ capped : true, size : 2000 }" ), false ).isOK() );
for ( int i=0; i<200; i++ ) {
insertNext();
diff --git a/src/mongo/dbtests/queryutiltests.cpp b/src/mongo/dbtests/queryutiltests.cpp
index 8b75aa21b41..95818b0c383 100644
--- a/src/mongo/dbtests/queryutiltests.cpp
+++ b/src/mongo/dbtests/queryutiltests.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/json.h"
#include "mongo/db/pdfile.h"
#include "mongo/db/queryutil.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/db/structure/catalog/namespace_details.h"
#include "mongo/dbtests/dbtests.h"
@@ -1585,14 +1586,15 @@ namespace QueryUtilTests {
class IndexBase {
Lock::DBWrite _lk;
Client::Context _ctx;
+ DurTransaction _txn;
public:
IndexBase() : _lk(ns()), _ctx( ns() ) , indexNum_( 0 ) {
- userCreateNS( _ctx.db(), ns(), BSONObj(), false );
+ userCreateNS( &_txn, _ctx.db(), ns(), BSONObj(), false );
}
~IndexBase() {
if ( !nsd() )
return;
- _ctx.db()->dropCollection( ns() );
+ _ctx.db()->dropCollection( &_txn, ns() );
}
protected:
static const char *ns() { return "unittests.FieldRangeSetPairTests"; }
diff --git a/src/mongo/dbtests/replsettests.cpp b/src/mongo/dbtests/replsettests.cpp
index 04250dd8264..8e3de83391c 100644
--- a/src/mongo/dbtests/replsettests.cpp
+++ b/src/mongo/dbtests/replsettests.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/repl_settings.h" // replSettings
#include "mongo/db/repl/rs.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/util/time_support.h"
@@ -148,14 +149,15 @@ namespace ReplSetTests {
static void insert( const BSONObj &o, bool god = false ) {
Lock::DBWrite lk(ns());
Client::Context ctx(ns());
+ DurTransaction txn;
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(ns());
+ coll = db->createCollection(&txn, ns());
}
if (o.hasField("_id")) {
- coll->insertDocument(o, true);
+ coll->insertDocument(&txn, o, true);
return;
}
@@ -164,7 +166,7 @@ namespace ReplSetTests {
id.init();
b.appendOID("_id", &id);
b.appendElements(o);
- coll->insertDocument(b.obj(), true);
+ coll->insertDocument(&txn, b.obj(), true);
}
BSONObj findOne( const BSONObj &query = BSONObj() ) const {
@@ -173,13 +175,15 @@ namespace ReplSetTests {
void drop() {
Client::WriteContext c(ns());
+ DurTransaction txn;
+
Database* db = c.ctx().db();
if ( db->getCollection( ns() ) == NULL ) {
return;
}
- db->dropCollection(ns());
+ db->dropCollection(&txn, ns());
}
static void setup() {
replSettings.replSet = "foo";
@@ -316,14 +320,16 @@ namespace ReplSetTests {
void create() {
Client::Context c(_cappedNs);
- ASSERT( userCreateNS( c.db(), _cappedNs, fromjson( spec() ), false ).isOK() );
+ DurTransaction txn;
+ ASSERT( userCreateNS( &txn, c.db(), _cappedNs, fromjson( spec() ), false ).isOK() );
}
void dropCapped() {
Client::Context c(_cappedNs);
+ DurTransaction txn;
Database* db = c.db();
- if ( db->getCollection( _cappedNs ) ) {
- db->dropCollection( _cappedNs );
+ if ( db->getCollection( &txn, _cappedNs ) ) {
+ db->dropCollection( &txn, _cappedNs );
}
}
@@ -389,14 +395,15 @@ namespace ReplSetTests {
void insert() {
Client::Context ctx(cappedNs());
+ DurTransaction txn;
Database* db = ctx.db();
- Collection* coll = db->getCollection(cappedNs());
+ Collection* coll = db->getCollection(&txn, cappedNs());
if (!coll) {
- coll = db->createCollection(cappedNs());
+ coll = db->createCollection(&txn, cappedNs());
}
BSONObj o = BSON(GENOID << "x" << 456);
- DiskLoc loc = coll->insertDocument(o, true).getValue();
+ DiskLoc loc = coll->insertDocument(&txn, o, true).getValue();
verify(!loc.isNull());
}
public:
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index b5ce577fdc5..c7a51d65238 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/repl/rs.h"
#include "mongo/db/ops/update.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/dbtests/dbtests.h"
@@ -55,6 +56,7 @@ namespace ReplTests {
class Base {
Lock::GlobalWrite lk;
Client::Context _context;
+ mutable DurTransaction _txn;
public:
Base() : _context( ns() ) {
oldRepl();
@@ -65,7 +67,7 @@ namespace ReplTests {
Collection* c = _context.db()->getCollection( ns() );
if ( ! c ) {
- c = _context.db()->createCollection( ns() );
+ c = _context.db()->createCollection( &_txn, ns() );
}
c->getIndexCatalog()->ensureHaveIdIndex();
}
@@ -114,9 +116,9 @@ namespace ReplTests {
Lock::GlobalWrite lk;
Client::Context ctx( ns() );
Database* db = ctx.db();
- Collection* coll = db->getCollection( ns() );
+ Collection* coll = db->getCollection( &_txn, ns() );
if ( !coll ) {
- coll = db->createCollection( ns() );
+ coll = db->createCollection( &_txn, ns() );
}
int count = 0;
@@ -131,10 +133,11 @@ namespace ReplTests {
static int opCount() {
Lock::GlobalWrite lk;
Client::Context ctx( cllNS() );
+ DurTransaction txn;
Database* db = ctx.db();
Collection* coll = db->getCollection( cllNS() );
if ( !coll ) {
- coll = db->createCollection( cllNS() );
+ coll = db->createCollection( &txn, cllNS() );
}
int count = 0;
@@ -148,6 +151,7 @@ namespace ReplTests {
}
static void applyAllOperations() {
Lock::GlobalWrite lk;
+ DurTransaction txn;
vector< BSONObj > ops;
{
Client::Context ctx( cllNS() );
@@ -179,10 +183,11 @@ namespace ReplTests {
static void printAll( const char *ns ) {
Lock::GlobalWrite lk;
Client::Context ctx( ns );
+ DurTransaction txn;
Database* db = ctx.db();
Collection* coll = db->getCollection( ns );
if ( !coll ) {
- coll = db->createCollection( ns );
+ coll = db->createCollection( &txn, ns );
}
RecordIterator* it = coll->getIterator( DiskLoc(), false,
@@ -198,10 +203,11 @@ namespace ReplTests {
static void deleteAll( const char *ns ) {
Lock::GlobalWrite lk;
Client::Context ctx( ns );
+ DurTransaction txn;
Database* db = ctx.db();
Collection* coll = db->getCollection( ns );
if ( !coll ) {
- coll = db->createCollection( ns );
+ coll = db->createCollection( &txn, ns );
}
vector< DiskLoc > toDelete;
@@ -212,20 +218,21 @@ namespace ReplTests {
}
delete it;
for( vector< DiskLoc >::iterator i = toDelete.begin(); i != toDelete.end(); ++i ) {
- coll->deleteDocument( *i, true );
+ coll->deleteDocument( &txn, *i, true );
}
}
static void insert( const BSONObj &o ) {
Lock::GlobalWrite lk;
Client::Context ctx( ns() );
+ DurTransaction txn;
Database* db = ctx.db();
Collection* coll = db->getCollection( ns() );
if ( !coll ) {
- coll = db->createCollection( ns() );
+ coll = db->createCollection( &txn, ns() );
}
if ( o.hasField( "_id" ) ) {
- coll->insertDocument( o, true );
+ coll->insertDocument( &txn, o, true );
return;
}
@@ -234,7 +241,7 @@ namespace ReplTests {
id.init();
b.appendOID( "_id", &id );
b.appendElements( o );
- coll->insertDocument( b.obj(), true );
+ coll->insertDocument( &txn, b.obj(), true );
}
static BSONObj wid( const char *json ) {
class BSONObjBuilder b;
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 8f30bd0a287..2b2a4721123 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -66,6 +66,7 @@
#include "mongo/db/repl/rs.h"
#include "mongo/db/repl/rs_config.h"
#include "mongo/db/repl/write_concern.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/logger/ramlog.h"
#include "mongo/s/chunk.h"
#include "mongo/s/chunk_version.h"
@@ -1644,6 +1645,7 @@ namespace mongo {
{
// 0. copy system.namespaces entry if collection doesn't already exist
Client::WriteContext ctx( ns );
+ DurTransaction txn;
// Only copy if ns doesn't already exist
Database* db = ctx.ctx().db();
Collection* collection = db->getCollection( ns );
@@ -1652,14 +1654,14 @@ namespace mongo {
string system_namespaces = nsToDatabase(ns) + ".system.namespaces";
BSONObj entry = conn->findOne( system_namespaces, BSON( "name" << ns ) );
if ( entry["options"].isABSONObj() ) {
- Status status = userCreateNS( db, ns, entry["options"].Obj(), true, 0 );
+ Status status = userCreateNS( &txn, db, ns, entry["options"].Obj(), true, 0 );
if ( !status.isOK() ) {
warning() << "failed to create collection [" << ns << "] "
<< " with options: " << status;
}
}
else {
- db->createCollection( ns );
+ db->createCollection( &txn, ns );
}
}
}