summary | refs | log | tree | commitdiff
diff options
context:
space:
mode:
-rw-r--r-- src/mongo/db/catalog/collection_compact.cpp                |  2
-rw-r--r-- src/mongo/db/catalog/database.cpp                          |  2
-rw-r--r-- src/mongo/db/catalog/index_create.cpp                      |  6
-rw-r--r-- src/mongo/db/client.cpp                                    |  6
-rw-r--r-- src/mongo/db/cloner.cpp                                    | 16
-rw-r--r-- src/mongo/db/commands/apply_ops.cpp                        |  2
-rw-r--r-- src/mongo/db/commands/clone.cpp                            |  2
-rw-r--r-- src/mongo/db/commands/collection_to_capped.cpp             |  4
-rw-r--r-- src/mongo/db/commands/compact.cpp                          |  2
-rw-r--r-- src/mongo/db/commands/copydb.cpp                           |  2
-rw-r--r-- src/mongo/db/commands/cpuprofile.cpp                       |  2
-rw-r--r-- src/mongo/db/commands/create_indexes.cpp                   |  4
-rw-r--r-- src/mongo/db/commands/drop_indexes.cpp                     |  6
-rw-r--r-- src/mongo/db/commands/find_and_modify.cpp                  |  4
-rw-r--r-- src/mongo/db/commands/mr.cpp                               |  6
-rw-r--r-- src/mongo/db/commands/rename_collection.cpp                | 10
-rw-r--r-- src/mongo/db/commands/test_commands.cpp                    |  2
-rw-r--r-- src/mongo/db/commands/write_commands/batch_executor.cpp    |  6
-rw-r--r-- src/mongo/db/db.cpp                                        |  2
-rw-r--r-- src/mongo/db/dbcommands.cpp                                | 10
-rw-r--r-- src/mongo/db/dbhelpers.cpp                                 |  2
-rw-r--r-- src/mongo/db/exec/delete.cpp                               |  2
-rw-r--r-- src/mongo/db/exec/update.cpp                               |  4
-rw-r--r-- src/mongo/db/index/btree_based_bulk_access_method.cpp      |  4
-rw-r--r-- src/mongo/db/index_builder.cpp                             |  4
-rw-r--r-- src/mongo/db/index_rebuilder.cpp                           |  4
-rw-r--r-- src/mongo/db/instance.cpp                                  |  6
-rw-r--r-- src/mongo/db/introspect.cpp                                |  2
-rw-r--r-- src/mongo/db/operation_context.h                           | 15
-rw-r--r-- src/mongo/db/repl/oplog.cpp                                |  8
-rw-r--r-- src/mongo/db/repl/repl_set_impl.cpp                        |  6
-rw-r--r-- src/mongo/db/repl/resync.cpp                               |  2
-rw-r--r-- src/mongo/db/repl/rs_initialsync.cpp                       |  2
-rw-r--r-- src/mongo/db/repl/sync.cpp                                 |  2
-rw-r--r-- src/mongo/db/repl/sync_tail.cpp                            |  4
-rw-r--r-- src/mongo/db/storage/mmap_v1/repair_database.cpp           |  6
-rw-r--r-- src/mongo/db/storage/recovery_unit.h                       | 11
-rw-r--r-- src/mongo/db/storage/rocks/rocks_engine_test.cpp           |  6
-rw-r--r-- src/mongo/db/storage/rocks/rocks_record_store_test.cpp     | 36
-rw-r--r-- src/mongo/db/storage/rocks/rocks_sorted_data_impl_test.cpp | 58
-rw-r--r-- src/mongo/dbtests/basictests.cpp                           |  2
-rw-r--r-- src/mongo/dbtests/counttests.cpp                           |  2
-rw-r--r-- src/mongo/dbtests/dbhelper_tests.cpp                       |  2
-rw-r--r-- src/mongo/dbtests/indexupdatetests.cpp                     |  6
-rw-r--r-- src/mongo/dbtests/namespacetests.cpp                       |  8
-rw-r--r-- src/mongo/dbtests/oplogstarttests.cpp                      |  2
-rw-r--r-- src/mongo/dbtests/pdfiletests.cpp                          |  2
-rw-r--r-- src/mongo/dbtests/querytests.cpp                           | 10
-rw-r--r-- src/mongo/dbtests/repltests.cpp                            | 12
-rw-r--r-- src/mongo/s/d_migrate.cpp                                  |  2
50 files changed, 166 insertions(+), 162 deletions(-)
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index bd8527b5e62..b1d0594ee2e 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -162,7 +162,7 @@ namespace mongo {
return StatusWith<CompactStats>( status );
{
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
indexer.commit();
wunit.commit();
}
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 3418a5d3592..136f42631d6 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -541,7 +541,7 @@ namespace mongo {
for( vector<string>::iterator i = n.begin(); i != n.end(); i++ ) {
if( *i != "local" ) {
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, *i);
dropDatabase(txn, ctx.db());
wunit.commit();
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index de75707458f..86bed4182fe 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -80,7 +80,7 @@ namespace mongo {
return;
try {
- WriteUnitOfWork wunit(_txn->recoveryUnit());
+ WriteUnitOfWork wunit(_txn);
// This cleans up all index builds. Because that may need to write, it is done inside
// of a WUOW. Nothing inside this block can fail, and it is made fatal if it does.
for (size_t i = 0; i < _indexes.size(); i++) {
@@ -111,7 +111,7 @@ namespace mongo {
}
Status MultiIndexBlock::init(const std::vector<BSONObj>& indexSpecs) {
- WriteUnitOfWork wunit(_txn->recoveryUnit());
+ WriteUnitOfWork wunit(_txn);
const string& ns = _collection->ns().ns();
Status status = _collection->getIndexCatalog()->checkUnfinished();
@@ -209,7 +209,7 @@ namespace mongo {
while (PlanExecutor::ADVANCED == exec->getNext(&objToIndex, &loc)) {
{
bool shouldCommitWUnit = true;
- WriteUnitOfWork wunit(_txn->recoveryUnit());
+ WriteUnitOfWork wunit(_txn);
Status ret = insert(objToIndex, loc);
if (!ret.isOK()) {
if (dupsOut && ret.code() == ErrorCodes::DuplicateKey) {
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index d626766de21..f1a503debbc 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -210,7 +210,7 @@ namespace mongo {
<< "_DEBUG ReadContext db wasn't open, will try to open " << ns << endl;
if (txn->lockState()->isW()) {
// write locked already
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
DEV RARELY log(LogComponent::kStorage)
<< "write locked on ReadContext construction " << ns << endl;
_c.reset(new Context(txn, ns, doVersion));
@@ -220,7 +220,7 @@ namespace mongo {
_lk.reset(0);
{
Lock::GlobalWrite w(txn->lockState());
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Context c(txn, ns, doVersion);
wunit.commit();
}
@@ -242,7 +242,7 @@ namespace mongo {
Client::WriteContext::WriteContext(
OperationContext* opCtx, const std::string& ns, bool doVersion)
: _lk(opCtx->lockState(), ns),
- _wunit(opCtx->recoveryUnit()),
+ _wunit(opCtx),
_c(opCtx, ns, doVersion) {
}
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index d4cd38a2c8e..5c4fe93d63a 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -124,7 +124,7 @@ namespace mongo {
<< "collection dropped during clone ["
<< to_collection.ns() << "]",
!createdCollection );
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
createdCollection = true;
collection = db->createCollection( txn, to_collection.ns() );
verify( collection );
@@ -153,7 +153,7 @@ namespace mongo {
}
++numSeen;
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
BSONObj js = tmp;
@@ -255,7 +255,7 @@ namespace mongo {
Collection* collection = db->getCollection( txn, to_collection );
if ( !collection ) {
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
collection = db->createCollection( txn, to_collection.ns() );
invariant(collection);
wunit.commit();
@@ -277,7 +277,7 @@ namespace mongo {
uassertStatusOK(indexer.init(indexesToBuild));
uassertStatusOK(indexer.insertAllDocumentsInCollection());
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
indexer.commit();
if (logForRepl) {
for (vector<BSONObj>::const_iterator it = indexesToBuild.begin();
@@ -309,7 +309,7 @@ namespace mongo {
string temp = dbName + ".system.namespaces";
BSONObj config = _conn->findOne(temp , BSON("name" << ns));
if (config["options"].isABSONObj()) {
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Status status = userCreateNS(txn, db, ns, config["options"].Obj(), logForRepl, 0);
if ( !status.isOK() ) {
errmsg = status.toString();
@@ -477,7 +477,7 @@ namespace mongo {
Database* db;
{
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
// Copy releases the lock, so we need to re-load the database. This should
// probably throw if the database has changed in between, but for now preserve
// the existing behaviour.
@@ -533,7 +533,7 @@ namespace mongo {
uassertStatusOK(indexer.insertAllDocumentsInCollection(&dups));
for (set<DiskLoc>::const_iterator it = dups.begin(); it != dups.end(); ++it) {
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
BSONObj id;
c->deleteDocument(txn, *it, true, true, opts.logForRepl ? &id : NULL);
@@ -546,7 +546,7 @@ namespace mongo {
log() << "index build dropped: " << dups.size() << " dups";
}
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
indexer.commit();
if (opts.logForRepl) {
repl::logOp(txn,
diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp
index bd69352c350..6de22bd8d99 100644
--- a/src/mongo/db/commands/apply_ops.cpp
+++ b/src/mongo/db/commands/apply_ops.cpp
@@ -84,7 +84,7 @@ namespace mongo {
// SERVER-4328 todo : is global ok or does this take a long time? i believe multiple
// ns used so locking individually requires more analysis
Lock::GlobalWrite globalWriteLock(txn->lockState());
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
DBDirectClient db(txn);
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index cefe28ba570..ca5e1d79127 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -118,7 +118,7 @@ namespace mongo {
Lock::DBWrite dbXLock(txn->lockState(), dbname);
// SERVER-14085: This unit of work should go away and be put in the individual ops
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Cloner cloner;
bool rval = cloner.go(txn, dbname, from, opts, &clonedColls, errmsg);
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 7fb73bead4c..1e014162e20 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -159,7 +159,7 @@ namespace mongo {
}
Lock::DBWrite dbXLock(txn->lockState(), dbname);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, dbname);
Status status = cloneCollectionAsCapped( txn, ctx.db(), from, to, size, temp, true );
@@ -216,7 +216,7 @@ namespace mongo {
// calls renamecollection which does a global lock, so we must too:
//
Lock::GlobalWrite globalWriteLock(txn->lockState());
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, dbname);
Database* db = ctx.db();
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index 0ec15bcc8c3..b0d99c9a9b8 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -146,7 +146,7 @@ namespace mongo {
Lock::DBWrite lk(txn->lockState(), ns.ns());
// SERVER-14085: The following will have to go as we push down WOUW
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
BackgroundOperation::assertNoBgOpInProgForNs(ns.ns());
Client::Context ctx(txn, ns);
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index 597cbf62d93..a994e9cd6c8 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -188,7 +188,7 @@ namespace mongo {
scoped_ptr<Lock::ScopedLock> lk( fromSelf ?
static_cast<Lock::ScopedLock*>(new Lock::GlobalWrite(txn->lockState())) :
static_cast<Lock::ScopedLock*>(new Lock::DBWrite(txn->lockState(), todb)));
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
if (!cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg )) {
return false;
}
diff --git a/src/mongo/db/commands/cpuprofile.cpp b/src/mongo/db/commands/cpuprofile.cpp
index 3c96323fad9..748d8e08a76 100644
--- a/src/mongo/db/commands/cpuprofile.cpp
+++ b/src/mongo/db/commands/cpuprofile.cpp
@@ -155,7 +155,7 @@ namespace mongo {
BSONObjBuilder &result,
bool fromRepl ) {
Lock::DBWrite dbXLock(txn->lockState(), db);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, db);
::ProfilerStop();
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 2b4e5c56c06..3b9af7bc48d 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -141,7 +141,7 @@ namespace mongo {
Collection* collection = db->getCollection( txn, ns.ns() );
result.appendBool( "createdCollectionAutomatically", collection == NULL );
if ( !collection ) {
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
collection = db->createCollection( txn, ns.ns() );
invariant( collection );
wunit.commit();
@@ -181,7 +181,7 @@ namespace mongo {
uassertStatusOK(indexer.insertAllDocumentsInCollection());
{
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
indexer.commit();
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 57d13401372..51484b2f39a 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -104,7 +104,7 @@ namespace mongo {
CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") { }
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& anObjBuilder, bool fromRepl) {
Lock::DBWrite dbXLock(txn->lockState(), dbname);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
bool ok = wrappedRun(txn, dbname, jsobj, errmsg, anObjBuilder);
if (!ok) {
return false;
@@ -274,7 +274,7 @@ namespace mongo {
result.appendNumber( "nIndexesWas", all.size() );
{
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
if ( !s.isOK() ) {
errmsg = "dropIndexes failed";
@@ -296,7 +296,7 @@ namespace mongo {
return appendCommandStatus( result, status );
{
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
indexer.commit();
wunit.commit();
}
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 9d84859b5e3..3af81204952 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -137,7 +137,7 @@ namespace mongo {
string& errmsg) {
Lock::DBWrite lk(txn->lockState(), ns);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context cx(txn, ns);
Collection* collection = cx.db()->getCollection( txn, ns );
@@ -335,7 +335,7 @@ namespace mongo {
}
Lock::DBWrite dbXLock(txn->lockState(), dbname);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns);
BSONObj out = db.findOne(ns, q, fields);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index acc5ab3f94c..342c79da15f 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -585,7 +585,7 @@ namespace mongo {
auto_ptr<DBClientCursor> cursor = _db.query( _config.tempNamespace , BSONObj() );
while ( cursor->more() ) {
Lock::DBWrite lock(_txn->lockState(), _config.outputOptions.finalNamespace);
- WriteUnitOfWork wunit(_txn->recoveryUnit());
+ WriteUnitOfWork wunit(_txn);
BSONObj o = cursor->nextSafe();
Helpers::upsert( _txn, _config.outputOptions.finalNamespace , o );
_txn->recoveryUnit()->commitIfNeeded();
@@ -605,7 +605,7 @@ namespace mongo {
auto_ptr<DBClientCursor> cursor = _db.query( _config.tempNamespace , BSONObj() );
while ( cursor->more() ) {
Lock::GlobalWrite lock(txn->lockState()); // TODO(erh) why global?
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
BSONObj temp = cursor->nextSafe();
BSONObj old;
@@ -1106,7 +1106,7 @@ namespace mongo {
return;
Lock::DBWrite kl(_txn->lockState(), _config.incLong);
- WriteUnitOfWork wunit(_txn->recoveryUnit());
+ WriteUnitOfWork wunit(_txn);
for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); i++ ) {
BSONList& all = i->second;
diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp
index fb981a83dff..872f115377c 100644
--- a/src/mongo/db/commands/rename_collection.cpp
+++ b/src/mongo/db/commands/rename_collection.cpp
@@ -95,7 +95,7 @@ namespace mongo {
}
static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
if (db->dropCollection(txn, collName).isOK()) {
// ignoring failure case
wunit.commit();
@@ -181,7 +181,7 @@ namespace mongo {
Database* const targetDB = dbHolder().getOrCreate(txn, nsToDatabase(target), unused);
{
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
// Check if the target namespace exists and if dropTarget is true.
// If target exists and dropTarget is not true, return false.
@@ -235,7 +235,7 @@ namespace mongo {
options.cappedSize = sourceColl->getRecordStore()->storageSize(txn);
}
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
// No logOp necessary because the entire renameCollection command is one logOp.
targetColl = targetDB->createCollection(txn, target, options);
@@ -277,7 +277,7 @@ namespace mongo {
const BSONObj obj = sourceColl->docFor(sourceIt->getNext());
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
// No logOp necessary because the entire renameCollection command is one logOp.
Status status = targetColl->insertDocument(txn, obj, &indexer, true).getStatus();
if (!status.isOK())
@@ -292,7 +292,7 @@ namespace mongo {
{
// Getting here means we successfully built the target copy. We now remove the
// source collection and finalize the rename.
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Status status = sourceDB->dropCollection(txn, source);
if (!status.isOK())
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 7583929a3eb..e3ca97d5545 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -65,7 +65,7 @@ namespace mongo {
BSONObj obj = cmdObj[ "obj" ].embeddedObjectUserCheck();
Lock::DBWrite lk(txn->lockState(), ns);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns );
Database* db = ctx.db();
Collection* collection = db->getCollection( txn, ns );
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index ce0eea74e78..e73e06efcb6 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -932,7 +932,7 @@ namespace mongo {
dassert(database);
_collection = database->getCollection(txn, request->getTargetingNS());
if (!_collection) {
- WriteUnitOfWork wunit (txn->recoveryUnit());
+ WriteUnitOfWork wunit (txn);
// Implicitly create if it doesn't exist
_collection = database->createCollection(txn, request->getTargetingNS());
if (!_collection) {
@@ -1039,7 +1039,7 @@ namespace mongo {
txn->lockState()->assertWriteLocked( insertNS );
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
StatusWith<DiskLoc> status = collection->insertDocument( txn, docToInsert, true );
if ( !status.isOK() ) {
@@ -1087,7 +1087,7 @@ namespace mongo {
return;
}
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
indexer.commit();
repl::logOp( txn, "i", indexNS.c_str(), indexDesc );
result->getStats().n = 1;
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 0bfd4b06997..a801bb63f69 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -332,7 +332,7 @@ namespace mongo {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
vector< string > dbNames;
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 4949d79bdff..8b8301053fd 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -187,7 +187,7 @@ namespace mongo {
// and that may need a global lock.
Lock::GlobalWrite lk(txn->lockState());
Client::Context context(txn, dbname);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
log() << "dropDatabase " << dbname << " starting" << endl;
@@ -338,7 +338,7 @@ namespace mongo {
// in the local database.
//
Lock::DBWrite dbXLock(txn->lockState(), dbname);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, dbname);
BSONElement e = cmdObj.firstElement();
@@ -445,7 +445,7 @@ namespace mongo {
}
Lock::DBWrite dbXLock(txn->lockState(), dbname);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, nsToDrop);
Database* db = ctx.db();
@@ -545,7 +545,7 @@ namespace mongo {
options.hasField("$nExtents"));
Lock::DBWrite dbXLock(txn->lockState(), dbname);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns);
// Create collection.
@@ -972,7 +972,7 @@ namespace mongo {
const string ns = dbname + "." + jsobj.firstElement().valuestr();
Lock::DBWrite dbXLock(txn->lockState(), dbname);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns );
Collection* coll = ctx.db()->getCollection( txn, ns );
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index aa9fbb07b31..baccd4ab7df 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -87,7 +87,7 @@ namespace mongo {
uassertStatusOK(indexer.insertAllDocumentsInCollection());
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
indexer.commit();
wunit.commit();
}
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index fda4b8ba779..19de5a7a742 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -91,7 +91,7 @@ namespace mongo {
BSONObj deletedDoc;
- WriteUnitOfWork wunit(_txn->recoveryUnit());
+ WriteUnitOfWork wunit(_txn);
// TODO: Do we want to buffer docs and delete them in a group rather than
// saving/restoring state repeatedly?
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index 5f12324fffe..637ad9ce010 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -488,7 +488,7 @@ namespace mongo {
}
}
- WriteUnitOfWork wunit(request->getOpCtx()->recoveryUnit());
+ WriteUnitOfWork wunit(request->getOpCtx());
// Save state before making changes
saveState();
@@ -637,7 +637,7 @@ namespace mongo {
str::stream() << "Document to upsert is larger than " << BSONObjMaxUserSize,
newObj.objsize() <= BSONObjMaxUserSize);
- WriteUnitOfWork wunit(request->getOpCtx()->recoveryUnit());
+ WriteUnitOfWork wunit(request->getOpCtx());
// Only create the collection if the doc will be inserted.
if (!_collection) {
_collection = _db->getCollection(request->getOpCtx(),
diff --git a/src/mongo/db/index/btree_based_bulk_access_method.cpp b/src/mongo/db/index/btree_based_bulk_access_method.cpp
index 03372363011..1a31b744012 100644
--- a/src/mongo/db/index/btree_based_bulk_access_method.cpp
+++ b/src/mongo/db/index/btree_based_bulk_access_method.cpp
@@ -130,7 +130,7 @@ namespace mongo {
scoped_ptr<SortedDataBuilderInterface> builder;
{
- WriteUnitOfWork wunit(_txn->recoveryUnit());
+ WriteUnitOfWork wunit(_txn);
if (_isMultiKey) {
_real->_btreeState->setMultikey( _txn );
@@ -144,7 +144,7 @@ namespace mongo {
if (mayInterrupt)
_txn->checkForInterrupt(/*heedMutex*/ false);
- WriteUnitOfWork wunit(_txn->recoveryUnit());
+ WriteUnitOfWork wunit(_txn);
// Get the next datum and add it to the builder.
BSONObjExternalSorter::Data d = i->next();
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index d2ba234e5ee..219a10a25b8 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -94,7 +94,7 @@ namespace mongo {
Collection* c = db->getCollection( txn, ns );
if ( !c ) {
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
c = db->getOrCreateCollection( txn, ns );
verify(c);
wunit.commit();
@@ -119,7 +119,7 @@ namespace mongo {
status = indexer.insertAllDocumentsInCollection();
if (status.isOK()) {
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
indexer.commit();
wunit.commit();
}
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 0445bdc50c6..39d9544a30c 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -83,7 +83,7 @@ namespace {
MultiIndexBlock indexer(txn, collection);
{
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
vector<BSONObj> indexesToBuild = indexCatalog->getAndClearUnfinishedIndexes(txn);
// The indexes have now been removed from system.indexes, so the only record is
@@ -120,7 +120,7 @@ namespace {
try {
uassertStatusOK(indexer.insertAllDocumentsInCollection());
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
indexer.commit();
wunit.commit();
}
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index bb77e2af469..ef37863a96b 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -795,7 +795,7 @@ namespace mongo {
Collection* collection = ctx.db()->getCollection( txn, targetNS );
if ( !collection ) {
// implicitly create
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
collection = ctx.db()->createCollection( txn, targetNS );
verify( collection );
wunit.commit();
@@ -820,7 +820,7 @@ namespace mongo {
uassertStatusOK(status);
uassertStatusOK(indexer.insertAllDocumentsInCollection());
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
indexer.commit();
repl::logOp(txn, "i", ns, js);
wunit.commit();
@@ -833,7 +833,7 @@ namespace mongo {
if ( !fixed.getValue().isEmpty() )
js = fixed.getValue();
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Collection* collection = ctx.db()->getCollection( txn, ns );
if ( !collection ) {
collection = ctx.db()->createCollection( txn, ns );
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 4e6cf821604..c2f11b34e6d 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -140,7 +140,7 @@ namespace {
Lock::DBWrite lk(txn->lockState(), currentOp.getNS() );
if (dbHolder().get(txn, nsToDatabase(currentOp.getNS())) != NULL) {
// We are ok with the profiling happening in a different WUOW from the actual op.
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context cx(txn, currentOp.getNS(), false);
_profile(txn, c, cx.db(), currentOp, profileBufBuilder);
wunit.commit();
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index 8ca67393a7d..c452f3d777b 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -127,4 +127,19 @@ namespace mongo {
OperationContext() { }
};
+ class WriteUnitOfWork {
+ MONGO_DISALLOW_COPYING(WriteUnitOfWork);
+ public:
+ WriteUnitOfWork(OperationContext* txn)
+ : _txn(txn) {
+ _txn->recoveryUnit()->beginUnitOfWork();
+ }
+
+ ~WriteUnitOfWork(){ _txn->recoveryUnit()->endUnitOfWork(); }
+
+ void commit() { _txn->recoveryUnit()->commitUnitOfWork(); }
+
+ OperationContext* const _txn;
+ };
+
} // namespace mongo
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 5a288d6da18..8baef920496 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -123,7 +123,7 @@ namespace repl {
Lock::DBWrite lk(txn->lockState(), "local");
// XXX soon this needs to be part of an outer WUOW not its own.
// We can't do this yet due to locking limitations.
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
const OpTime ts = op["ts"]._opTime();
long long h = op["h"].numberLong();
@@ -244,7 +244,7 @@ namespace repl {
bool *bb,
bool fromMigrate ) {
Lock::DBWrite lk1(txn->lockState(), "local");
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
if ( strncmp(ns, "local.", 6) == 0 ) {
if ( strncmp(ns, "local.slaves", 12) == 0 )
@@ -336,7 +336,7 @@ namespace repl {
bool *bb,
bool fromMigrate ) {
Lock::DBWrite lk(txn->lockState(), "local");
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
static BufBuilder bufbuilder(8*1024); // todo there is likely a mutex on this constructor
if ( strncmp(ns, "local.", 6) == 0 ) {
@@ -520,7 +520,7 @@ namespace repl {
options.cappedSize = sz;
options.autoIndexId = CollectionOptions::NO;
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
invariant(ctx.db()->createCollection(txn, ns, options));
if( !rs )
logOp(txn, "n", "", BSONObj() );
diff --git a/src/mongo/db/repl/repl_set_impl.cpp b/src/mongo/db/repl/repl_set_impl.cpp
index afdbf06b622..d07cea3a15b 100644
--- a/src/mongo/db/repl/repl_set_impl.cpp
+++ b/src/mongo/db/repl/repl_set_impl.cpp
@@ -871,14 +871,14 @@ namespace {
void ReplSetImpl::clearInitialSyncFlag(OperationContext* txn) {
Lock::DBWrite lk(txn->lockState(), "local");
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Helpers::putSingleton(txn, "local.replset.minvalid", BSON("$unset" << _initialSyncFlag));
wunit.commit();
}
void ReplSetImpl::setInitialSyncFlag(OperationContext* txn) {
Lock::DBWrite lk(txn->lockState(), "local");
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Helpers::putSingleton(txn, "local.replset.minvalid", BSON("$set" << _initialSyncFlag));
wunit.commit();
}
@@ -900,7 +900,7 @@ namespace {
subobj.done();
Lock::DBWrite lk(txn->lockState(), "local");
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Helpers::putSingleton(txn, "local.replset.minvalid", builder.obj());
wunit.commit();
}
diff --git a/src/mongo/db/repl/resync.cpp b/src/mongo/db/repl/resync.cpp
index b020b00e92f..35f70fde320 100644
--- a/src/mongo/db/repl/resync.cpp
+++ b/src/mongo/db/repl/resync.cpp
@@ -68,7 +68,7 @@ namespace repl {
const std::string ns = parseNs(dbname, cmdObj);
Lock::GlobalWrite globalWriteLock(txn->lockState());
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns);
if (getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
if (!theReplSet) {
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index bbdb3585ec1..73539b5cc8d 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -121,7 +121,7 @@ namespace repl {
// Make database stable
Lock::DBWrite dbWrite(txn->lockState(), db);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
if (!cloner.go(txn, db, master, options, NULL, err, &errCode)) {
sethbmsg(str::stream() << "initial sync: error while "
diff --git a/src/mongo/db/repl/sync.cpp b/src/mongo/db/repl/sync.cpp
index 9999774d1d7..7444b4f4526 100644
--- a/src/mongo/db/repl/sync.cpp
+++ b/src/mongo/db/repl/sync.cpp
@@ -129,7 +129,7 @@ namespace repl {
return false;
}
else {
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
Collection* collection = ctx.db()->getOrCreateCollection(txn, ns);
invariant(collection != NULL); // should never happen
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index cfbbefe16a7..c8250b35545 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -111,7 +111,7 @@ namespace repl {
}
Client::Context ctx(txn, ns);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
ctx.getClient()->curop()->reset();
// For non-initial-sync, we convert updates to upserts
// to suppress errors when replaying oplog entries.
@@ -488,7 +488,7 @@ namespace repl {
{
OperationContextImpl txn; // XXX?
Lock::DBWrite lk(txn.lockState(), "local");
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
while (!ops->empty()) {
const BSONObj& op = ops->front();
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index d1e9364876f..8b548edac4a 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -373,7 +373,7 @@ namespace mongo {
Collection* tempCollection = NULL;
{
Client::Context tempContext(txn, ns, tempDatabase );
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
tempCollection = tempDatabase->createCollection(txn, ns, options, true, false);
wunit.commit();
}
@@ -412,7 +412,7 @@ namespace mongo {
Client::Context tempContext(txn, ns, tempDatabase);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
StatusWith<DiskLoc> result = tempCollection->insertDocument(txn,
doc,
&indexer,
@@ -430,7 +430,7 @@ namespace mongo {
{
Client::Context tempContext(txn, ns, tempDatabase);
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
indexer.commit();
wunit.commit();
}
diff --git a/src/mongo/db/storage/recovery_unit.h b/src/mongo/db/storage/recovery_unit.h
index b80d134bec0..5279cf26b96 100644
--- a/src/mongo/db/storage/recovery_unit.h
+++ b/src/mongo/db/storage/recovery_unit.h
@@ -166,15 +166,4 @@ namespace mongo {
RecoveryUnit() { }
};
- class WriteUnitOfWork {
- MONGO_DISALLOW_COPYING(WriteUnitOfWork);
- public:
- WriteUnitOfWork(RecoveryUnit* ru) : _ru(ru) { _ru->beginUnitOfWork(); }
- ~WriteUnitOfWork(){ _ru->endUnitOfWork(); }
-
- void commit() { _ru->commitUnitOfWork(); }
-
- RecoveryUnit* const _ru;
- };
-
} // namespace mongo
diff --git a/src/mongo/db/storage/rocks/rocks_engine_test.cpp b/src/mongo/db/storage/rocks/rocks_engine_test.cpp
index 624e2b3a1f6..92f1a8f0eae 100644
--- a/src/mongo/db/storage/rocks/rocks_engine_test.cpp
+++ b/src/mongo/db/storage/rocks/rocks_engine_test.cpp
@@ -88,7 +88,7 @@ namespace mongo {
MyOperationContext opCtx( &engine );
DiskLoc loc;
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs->insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
@@ -252,7 +252,7 @@ namespace mongo {
{
MyOperationContext opCtx( &engine );
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
Status status = engine.createCollection( &opCtx,
"test.foo",
CollectionOptions() );
@@ -266,7 +266,7 @@ namespace mongo {
MyOperationContext opCtx( &engine );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs->insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
diff --git a/src/mongo/db/storage/rocks/rocks_record_store_test.cpp b/src/mongo/db/storage/rocks/rocks_record_store_test.cpp
index d40c38b2d03..77c9757ac93 100644
--- a/src/mongo/db/storage/rocks/rocks_record_store_test.cpp
+++ b/src/mongo/db/storage/rocks/rocks_record_store_test.cpp
@@ -162,7 +162,7 @@ namespace mongo {
MyOperationContext opCtx( db.get() );
DiskLoc loc;
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
@@ -194,7 +194,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs.insertRecord(&opCtx,
s.c_str(),
s.size() + 1,
@@ -212,7 +212,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
rs.deleteRecord( &opCtx, loc );
ASSERT_EQUALS( 0, rs.numRecords() );
@@ -236,7 +236,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs.insertRecord( &opCtx,
s1.c_str(),
s1.size() + 1,
@@ -251,7 +251,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs.updateRecord( &opCtx,
loc,
s2.c_str(),
@@ -283,7 +283,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs.insertRecord( &opCtx,
s1.c_str(),
s1.size() + 1,
@@ -298,7 +298,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
const char* damageSource = "222";
mutablebson::DamageVector dv;
dv.push_back( mutablebson::DamageEvent() );
@@ -346,7 +346,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> result = rs1.insertRecord( &opCtx, "a", 2, -1 );
ASSERT_OK( result.getStatus() );
@@ -379,7 +379,7 @@ namespace mongo {
MyOperationContext opCtx( db.get() );
DiskLoc loc;
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
@@ -412,7 +412,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, origStr.c_str(),
origStr.size() + 1, -1 );
ASSERT_OK( res.getStatus() );
@@ -435,7 +435,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs.updateRecord( &opCtx, loc, newStr.c_str(),
newStr.size() + 1, -1, NULL );
ASSERT_OK( res.getStatus() );
@@ -457,7 +457,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
rs.deleteRecord( &opCtx, loc );
}
}
@@ -493,7 +493,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s1.c_str(), s1.size() + 1, -1 );
ASSERT_OK( res.getStatus() );
loc1 = res.getValue();
@@ -553,7 +553,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s1.c_str(), s1.size() +1, -1 );
ASSERT_OK( res.getStatus() );
loc1 = res.getValue();
@@ -606,7 +606,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
ASSERT_OK( res.getStatus() );
res = rs.insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
@@ -615,7 +615,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
Status stat = rs.truncate( &opCtx );
ASSERT_OK( stat );
@@ -626,7 +626,7 @@ namespace mongo {
// Test that truncate does not fail on an empty collection
{
MyOperationContext opCtx( db.get() );
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
Status stat = rs.truncate( &opCtx );
ASSERT_OK( stat );
@@ -652,7 +652,7 @@ namespace mongo {
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
ASSERT_OK( res.getStatus() );
loc = res.getValue();
diff --git a/src/mongo/db/storage/rocks/rocks_sorted_data_impl_test.cpp b/src/mongo/db/storage/rocks/rocks_sorted_data_impl_test.cpp
index a3ab8e1f8f4..03b21c26563 100644
--- a/src/mongo/db/storage/rocks/rocks_sorted_data_impl_test.cpp
+++ b/src/mongo/db/storage/rocks/rocks_sorted_data_impl_test.cpp
@@ -90,7 +90,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT( !sortedData.unindex( &opCtx, key, loc ) );
uow.commit();
}
@@ -99,7 +99,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
Status res = sortedData.insert( &opCtx, key, loc, true );
ASSERT_OK( res );
uow.commit();
@@ -109,7 +109,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT( sortedData.unindex( &opCtx, key, loc ) );
uow.commit();
}
@@ -118,7 +118,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
sortedData.unindex( &opCtx, key, loc );
uow.commit();
}
@@ -147,7 +147,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
Status res = sortedData.insert( &opCtx, key, loc, true );
ASSERT_OK( res );
uow.commit();
@@ -174,7 +174,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
@@ -224,7 +224,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
@@ -253,7 +253,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
uow.commit();
@@ -268,7 +268,7 @@ namespace mongo {
// insert some more stuff
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
@@ -297,7 +297,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
@@ -351,7 +351,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
@@ -395,7 +395,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
@@ -418,7 +418,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK(
sortedData.insert( &opCtx, BSON( "" << 4 ), DiskLoc(1,4), true ) );
uow.commit();
@@ -447,7 +447,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
@@ -470,7 +470,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT( sortedData.unindex( &opCtx, BSON( "" << 1 ), DiskLoc(1,1) ) );
uow.commit();
}
@@ -495,7 +495,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
@@ -518,7 +518,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT( sortedData.unindex( &opCtx, BSON( "" << 3 ), DiskLoc(1,3) ) );
uow.commit();
}
@@ -562,7 +562,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
Status res = sortedData.insert( &opCtx, key, loc, true );
ASSERT_OK( res );
uow.commit();
@@ -591,7 +591,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "a" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "a" << 3 ), DiskLoc(1,1), true ) );
@@ -620,7 +620,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
@@ -674,7 +674,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
@@ -718,7 +718,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 3 ), DiskLoc(1,3), true ) );
@@ -741,7 +741,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK(
sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
uow.commit();
@@ -775,7 +775,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
@@ -799,7 +799,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT( sortedData.unindex( &opCtx, BSON( "" << 3 ), DiskLoc(1,3) ) );
uow.commit();
}
@@ -825,7 +825,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
@@ -849,7 +849,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT( sortedData.unindex( &opCtx, BSON( "" << 1 ), DiskLoc(1,1) ) );
uow.commit();
}
@@ -874,7 +874,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 1 ), DiskLoc(1,1), true ) );
ASSERT_OK( sortedData.insert( &opCtx, BSON( "" << 2 ), DiskLoc(1,2), true ) );
@@ -898,7 +898,7 @@ namespace mongo {
{
MyOperationContext opCtx( db.get() );
{
- WriteUnitOfWork uow( opCtx.recoveryUnit() );
+ WriteUnitOfWork uow( &opCtx );
ASSERT( sortedData.unindex( &opCtx, BSON( "" << 1 ), DiskLoc(1,1) ) );
uow.commit();
}
diff --git a/src/mongo/dbtests/basictests.cpp b/src/mongo/dbtests/basictests.cpp
index fc8ec367167..70094651edf 100644
--- a/src/mongo/dbtests/basictests.cpp
+++ b/src/mongo/dbtests/basictests.cpp
@@ -379,7 +379,7 @@ namespace BasicTests {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
Database db( &txn, "dbtests_basictests_ownsns", NULL );
wunit.commit();
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index b18fb31af79..71cb99539d0 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -43,7 +43,7 @@ namespace CountTests {
class Base {
public:
- Base() : lk(_txn.lockState(), ns()), _wunit(_txn.recoveryUnit()), _context(&_txn, ns()) {
+ Base() : lk(_txn.lockState(), ns()), _wunit(&_txn), _context(&_txn, ns()) {
_database = _context.db();
_collection = _database->getCollection( &_txn, ns() );
if ( _collection ) {
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 5877cfc85a2..5509ff18822 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -62,7 +62,7 @@ namespace mongo {
{
// Remove _id range [_min, _max).
Lock::DBWrite lk(txn.lockState(), ns);
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
Client::Context ctx(&txn, ns );
KeyRange range( ns,
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 3e4d4566b25..6698efe287f 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -101,7 +101,7 @@ namespace IndexUpdateTests {
uassertStatusOK(indexer.init(key));
uassertStatusOK(indexer.insertAllDocumentsInCollection());
- WriteUnitOfWork wunit(_txn.recoveryUnit());
+ WriteUnitOfWork wunit(&_txn);
indexer.commit();
wunit.commit();
}
@@ -360,7 +360,7 @@ namespace IndexUpdateTests {
ASSERT_OK(indexer.init(spec));
ASSERT_OK(indexer.insertAllDocumentsInCollection());
- WriteUnitOfWork wunit(_txn.recoveryUnit());
+ WriteUnitOfWork wunit(&_txn);
indexer.commit();
wunit.commit();
}
@@ -589,7 +589,7 @@ namespace IndexUpdateTests {
class HelpersEnsureIndexInterruptDisallowed : public IndexBuildBase {
public:
void run() {
- WriteUnitOfWork wunit (_txn.recoveryUnit());
+ WriteUnitOfWork wunit (&_txn);
// Insert some documents.
int32_t nDocs = 1000;
for( int32_t i = 0; i < nDocs; ++i ) {
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 6c5b444e68b..72a27859efa 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -532,7 +532,7 @@ namespace NamespaceTests {
Collection* committedColl;
{
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
ASSERT_FALSE(db->getCollection(&txn, committedName));
committedColl = db->createCollection(&txn, committedName);
ASSERT_EQUALS(db->getCollection(&txn, committedName), committedColl);
@@ -542,7 +542,7 @@ namespace NamespaceTests {
ASSERT_EQUALS(db->getCollection(&txn, committedName), committedColl);
{
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
ASSERT_FALSE(db->getCollection(&txn, rolledBackName));
Collection* rolledBackColl = db->createCollection(&txn, rolledBackName);
ASSERT_EQUALS(db->getCollection(&txn, rolledBackName), rolledBackColl);
@@ -574,7 +574,7 @@ namespace NamespaceTests {
ASSERT(justCreated);
{
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
ASSERT_FALSE(db->getCollection(&txn, droppedName));
Collection* droppedColl;
droppedColl = db->createCollection(&txn, droppedName);
@@ -587,7 +587,7 @@ namespace NamespaceTests {
ASSERT_FALSE(db->getCollection(&txn, droppedName));
{
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
ASSERT_FALSE(db->getCollection(&txn, rolledBackName));
Collection* rolledBackColl = db->createCollection(&txn, rolledBackName);
wunit.commit();
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index d23a85456a1..f2d42598895 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -36,7 +36,7 @@ namespace OplogStartTests {
class Base {
public:
Base() : _lk(_txn.lockState()),
- _wunit(_txn.recoveryUnit()),
+ _wunit(&_txn),
_context(&_txn, ns()),
_client(&_txn) {
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index aa6026d3d40..5f154456423 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -48,7 +48,7 @@ namespace PdfileTests {
class Base {
public:
Base() : _lk(_txn.lockState()),
- _wunit(_txn.recoveryUnit()),
+ _wunit(&_txn),
_context(&_txn, ns()) {
}
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index e23f524f7ae..fe5b356d2bb 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -65,7 +65,7 @@ namespace QueryTests {
public:
Base() : _lk(_txn.lockState()),
- _wunit(_txn.recoveryUnit()),
+ _wunit(&_txn),
_context(&_txn, ns()) {
_database = _context.db();
_collection = _database->getCollection( &_txn, ns() );
@@ -240,7 +240,7 @@ namespace QueryTests {
{
// Check internal server handoff to getmore.
Lock::DBWrite lk(_txn.lockState(), ns);
- WriteUnitOfWork wunit(_txn.recoveryUnit());
+ WriteUnitOfWork wunit(&_txn);
Client::Context ctx(&_txn, ns );
ClientCursorPin clientCursor( ctx.db()->getCollection(&_txn, ns), cursorId );
// pq doesn't exist if it's a runner inside of the clientcursor.
@@ -589,7 +589,7 @@ namespace QueryTests {
void run() {
const char *ns = "unittests.querytests.OplogReplaySlaveReadTill";
Lock::DBWrite lk(_txn.lockState(), ns);
- WriteUnitOfWork wunit(_txn.recoveryUnit());
+ WriteUnitOfWork wunit(&_txn);
Client::Context ctx(&_txn, ns );
BSONObj info;
@@ -1055,7 +1055,7 @@ namespace QueryTests {
void run() {
Lock::GlobalWrite lk(_txn.lockState());
Client::Context ctx(&_txn, "unittests.DirectLocking");
- WriteUnitOfWork wunit(_txn.recoveryUnit());
+ WriteUnitOfWork wunit(&_txn);
_client.remove( "a.b", BSONObj() );
wunit.commit();
ASSERT_EQUALS( "unittests", ctx.db()->name() );
@@ -1424,7 +1424,7 @@ namespace QueryTests {
CollectionInternalBase( const char *nsLeaf ) :
CollectionBase( nsLeaf ),
_lk(_txn.lockState(), ns() ),
- _wunit( _txn.recoveryUnit() ),
+ _wunit( &_txn ),
_ctx(&_txn, ns()) {
}
~CollectionInternalBase() {
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 0c385d51db4..95afe2d49f8 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -64,7 +64,7 @@ namespace ReplTests {
mutable DBDirectClient _client;
public:
- Base() : _wunit( _txn.recoveryUnit()),
+ Base() : _wunit(&_txn),
_client(&_txn) {
ReplSettings replSettings;
@@ -151,7 +151,7 @@ namespace ReplTests {
static int opCount() {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
Client::Context ctx(&txn, cllNS() );
Database* db = ctx.db();
@@ -173,7 +173,7 @@ namespace ReplTests {
static void applyAllOperations() {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
vector< BSONObj > ops;
{
Client::Context ctx(&txn, cllNS() );
@@ -206,7 +206,7 @@ namespace ReplTests {
static void printAll( const char *ns ) {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
Client::Context ctx(&txn, ns );
Database* db = ctx.db();
@@ -230,7 +230,7 @@ namespace ReplTests {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
Client::Context ctx(&txn, ns );
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
Database* db = ctx.db();
Collection* coll = db->getCollection( &txn, ns );
if ( !coll ) {
@@ -253,7 +253,7 @@ namespace ReplTests {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
Client::Context ctx(&txn, ns() );
- WriteUnitOfWork wunit(txn.recoveryUnit());
+ WriteUnitOfWork wunit(&txn);
Database* db = ctx.db();
Collection* coll = db->getCollection( &txn, ns() );
if ( !coll ) {
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 99ee08c6e65..a5c2719232a 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1787,7 +1787,7 @@ namespace mongo {
return;
}
- WriteUnitOfWork wunit(txn->recoveryUnit());
+ WriteUnitOfWork wunit(txn);
indexer.commit();
for (size_t i = 0; i < indexSpecs.size(); i++) {