summaryrefslogtreecommitdiff
path: root/src/mongo
diff options
context:
space:
mode:
authorMathias Stearn <mathias@10gen.com>2014-04-29 11:58:58 -0400
committerMathias Stearn <mathias@10gen.com>2014-04-30 10:48:16 -0400
commit6d0f155cdd27b019b2f1ccc4b6b7e8e2c8271b54 (patch)
tree6dc5450045211c727ba375ca8ccf1b4121c10c82 /src/mongo
parent694c36dbe7de51db9396696c2b89944e1a7dd001 (diff)
downloadmongo-6d0f155cdd27b019b2f1ccc4b6b7e8e2c8271b54.tar.gz
SERVER-13643 Plumb TransactionExperiment through logOp
Diffstat (limited to 'src/mongo')
-rw-r--r--src/mongo/db/catalog/index_create.cpp4
-rw-r--r--src/mongo/db/cloner.cpp7
-rw-r--r--src/mongo/db/commands/apply_ops.cpp6
-rw-r--r--src/mongo/db/commands/collection_to_capped.cpp2
-rw-r--r--src/mongo/db/commands/create_indexes.cpp2
-rw-r--r--src/mongo/db/commands/mr.cpp12
-rw-r--r--src/mongo/db/commands/write_commands/batch_executor.cpp5
-rw-r--r--src/mongo/db/dbcommands.cpp3
-rw-r--r--src/mongo/db/dbhelpers.cpp2
-rw-r--r--src/mongo/db/instance.cpp4
-rw-r--r--src/mongo/db/ops/delete_executor.cpp2
-rw-r--r--src/mongo/db/ops/update.cpp4
-rw-r--r--src/mongo/db/pdfile.cpp2
-rw-r--r--src/mongo/db/repl/master_slave.cpp11
-rw-r--r--src/mongo/db/repl/master_slave.h3
-rw-r--r--src/mongo/db/repl/oplog.cpp89
-rw-r--r--src/mongo/db/repl/oplog.h13
-rw-r--r--src/mongo/db/repl/rs_config.cpp6
-rw-r--r--src/mongo/db/repl/sync_tail.cpp4
-rw-r--r--src/mongo/dbtests/replsettests.cpp3
-rw-r--r--src/mongo/dbtests/repltests.cpp2
-rw-r--r--src/mongo/s/d_migrate.cpp5
22 files changed, 121 insertions, 70 deletions
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index e4cd89af29f..0b488e469c1 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -141,7 +141,7 @@ namespace mongo {
runner->saveState();
BSONObj toDelete;
collection->deleteDocument( &txn, loc, false, true, &toDelete );
- logOp( "d", ns.c_str(), toDelete );
+ logOp( &txn, "d", ns.c_str(), toDelete );
if (!runner->restoreState()) {
// Runner got killed somehow. This probably shouldn't happen.
@@ -290,7 +290,7 @@ namespace mongo {
true /* noWarn */,
&toDelete );
if ( isMaster( ns.c_str() ) ) {
- logOp( "d", ns.c_str(), toDelete );
+ logOp( &txn, "d", ns.c_str(), toDelete );
}
getDur().commitIfNeeded();
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 9e782f42ce9..be4bc6118f9 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -189,7 +189,7 @@ namespace mongo {
}
uassertStatusOK( loc.getStatus() );
if ( logForRepl )
- logOp("i", to_collection, js);
+ logOp(&txn, "i", to_collection, js);
getDur().commitIfNeeded();
@@ -222,6 +222,8 @@ namespace mongo {
bool logForRepl, bool masterSameProcess, bool slaveOk, bool mayYield,
bool mayBeInterrupted, Query query) {
+ DurTransaction txn; // XXX
+
list<BSONObj> indexesToBuild;
LOG(2) << "\t\tcloning collection " << from_collection << " to " << to_collection << " on " << _conn->getServerAddress() << " with filter " << query.toString() << endl;
@@ -253,7 +255,6 @@ namespace mongo {
string ns = spec["ns"].String(); // this was fixed when pulled off network
Collection* collection = f.context.db()->getCollection( ns );
if ( !collection ) {
- DurTransaction txn; // XXX
collection = f.context.db()->createCollection( &txn, ns );
verify( collection );
}
@@ -269,7 +270,7 @@ namespace mongo {
}
if ( logForRepl )
- logOp("i", to_collection, spec);
+ logOp(&txn, "i", to_collection, spec);
getDur().commitIfNeeded();
diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp
index 15eed3a5678..6c9f944df8e 100644
--- a/src/mongo/db/commands/apply_ops.cpp
+++ b/src/mongo/db/commands/apply_ops.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/instance.h"
#include "mongo/db/matcher.h"
#include "mongo/db/repl/oplog.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
namespace mongo {
class ApplyOpsCmd : public Command {
@@ -83,6 +84,7 @@ namespace mongo {
// SERVER-4328 todo : is global ok or does this take a long time? i believe multiple
// ns used so locking individually requires more analysis
Lock::GlobalWrite globalWriteLock;
+ DurTransaction txn;
// Preconditions check reads the database state, so needs to be done locked
if ( cmdObj["preCondition"].type() == Array ) {
@@ -128,7 +130,7 @@ namespace mongo {
invariant(Lock::nested());
Client::Context ctx(ns);
- bool failed = applyOperation_inlock(ctx.db(), temp, false, alwaysUpsert);
+ bool failed = applyOperation_inlock(&txn, ctx.db(), temp, false, alwaysUpsert);
ab.append(!failed);
if ( failed )
errors++;
@@ -159,7 +161,7 @@ namespace mongo {
}
}
- logOp("c", tempNS.c_str(), cmdBuilder.done());
+ logOp(&txn, "c", tempNS.c_str(), cmdBuilder.done());
}
return errors == 0;
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 67eaa4751ff..7b77f3615e1 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -107,7 +107,7 @@ namespace mongo {
toCollection->insertDocument( txn, obj, true );
if ( logForReplication )
- logOp( "i", toNs.c_str(), obj );
+ logOp( txn, "i", toNs.c_str(), obj );
getDur().commitIfNeeded();
}
}
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 4c5bc195c1c..50d3514e0cb 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -206,7 +206,7 @@ namespace mongo {
if ( !fromRepl ) {
std::string systemIndexes = ns.getSystemIndexesCollection();
- logOp( "i", systemIndexes.c_str(), spec );
+ logOp( &txn, "i", systemIndexes.c_str(), spec );
}
}
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 32035de83c6..c69b8c63b16 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -354,7 +354,7 @@ namespace mongo {
b.append( "create", nsToCollectionSubstring( _config.incLong ));
b.appendElements( options.toBSON() );
string logNs = nsToDatabase( _config.incLong ) + ".$cmd";
- logOp( "c", logNs.c_str(), b.obj() );
+ logOp( &txn, "c", logNs.c_str(), b.obj() );
}
BSONObj indexSpec = BSON( "key" << BSON( "0" << 1 ) << "ns" << _config.incLong
@@ -362,7 +362,7 @@ namespace mongo {
Status status = incColl->getIndexCatalog()->createIndex( indexSpec, false );
// Log the createIndex operation.
string logNs = nsToDatabase( _config.incLong ) + ".system.indexes";
- logOp( "i", logNs.c_str(), indexSpec );
+ logOp( &txn, "i", logNs.c_str(), indexSpec );
if ( !status.isOK() ) {
uasserted( 17305 , str::stream() << "createIndex failed for mr incLong ns: " <<
_config.incLong << " err: " << status.code() );
@@ -414,7 +414,7 @@ namespace mongo {
b.append( "create", nsToCollectionSubstring( _config.tempNamespace ));
b.appendElements( options.toBSON() );
string logNs = nsToDatabase( _config.tempNamespace ) + ".$cmd";
- logOp( "c", logNs.c_str(), b.obj() );
+ logOp( &txn, "c", logNs.c_str(), b.obj() );
}
for ( vector<BSONObj>::iterator it = indexesToInsert.begin();
@@ -422,7 +422,7 @@ namespace mongo {
tempColl->getIndexCatalog()->createIndex( *it, false );
// Log the createIndex operation.
string logNs = nsToDatabase( _config.tempNamespace ) + ".system.indexes";
- logOp( "i", logNs.c_str(), *it );
+ logOp( &txn, "i", logNs.c_str(), *it );
}
}
@@ -646,7 +646,7 @@ namespace mongo {
BSONObj bo = b.obj();
coll->insertDocument( &txn, bo, true );
- logOp( "i", ns.c_str(), bo );
+ logOp( &txn, "i", ns.c_str(), bo );
}
/**
@@ -664,7 +664,7 @@ namespace mongo {
" collection expected: " << _config.incLong );
coll->insertDocument( &txn, o, true );
- logOp( "i", _config.incLong.c_str(), o );
+ logOp( &txn, "i", _config.incLong.c_str(), o );
getDur().commitIfNeeded();
}
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 843ad07f995..8e4acb7f00a 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -1023,7 +1023,7 @@ namespace mongo {
result->setError(toWriteError(status.getStatus()));
}
else {
- logOp( "i", insertNS.c_str(), docToInsert );
+ logOp( &txn, "i", insertNS.c_str(), docToInsert );
getDur().commitIfNeeded();
result->getStats().n = 1;
}
@@ -1038,6 +1038,7 @@ namespace mongo {
static void singleCreateIndex( const BSONObj& indexDesc,
Collection* collection,
WriteOpResult* result ) {
+ DurTransaction txn;
const string indexNS = collection->ns().getSystemIndexesCollection();
@@ -1052,7 +1053,7 @@ namespace mongo {
result->setError(toWriteError(status));
}
else {
- logOp( "i", indexNS.c_str(), indexDesc );
+ logOp( &txn, "i", indexNS.c_str(), indexDesc );
result->getStats().n = 1;
}
}
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index ed0ff4a3015..a0ad3cec77b 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -1645,7 +1645,8 @@ namespace mongo {
retval = _execCommand(c, dbname, cmdObj, queryOptions, errmsg, result, fromRepl);
if ( retval && c->logTheOp() && ! fromRepl ) {
- logOp("c", cmdns, cmdObj);
+ DurTransaction txn; // XXX
+ logOp(&txn, "c", cmdns, cmdObj);
}
appendCommandStatus(result, retval, errmsg);
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 8a502fe8adb..eab2ba0864e 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -430,7 +430,7 @@ namespace mongo {
if ( callback )
callback->goingToDelete( obj );
- logOp("d", ns.c_str(), obj["_id"].wrap(), 0, 0, fromMigrate);
+ logOp(&txn, "d", ns.c_str(), obj["_id"].wrap(), 0, 0, fromMigrate);
collection->deleteDocument( &txn, rloc );
numDeleted++;
}
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 9ae661da723..f46f749cd9b 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -824,7 +824,7 @@ namespace mongo {
return;
uassertStatusOK( status );
- logOp( "i", ns, js );
+ logOp( txn, "i", ns, js );
return;
}
@@ -841,7 +841,7 @@ namespace mongo {
StatusWith<DiskLoc> status = collection->insertDocument( txn, js, true );
uassertStatusOK( status.getStatus() );
- logOp("i", ns, js);
+ logOp(txn, "i", ns, js);
}
NOINLINE_DECL void insertMulti(TransactionExperiment* txn,
diff --git a/src/mongo/db/ops/delete_executor.cpp b/src/mongo/db/ops/delete_executor.cpp
index 24b1a6a112c..2ac14ac7afa 100644
--- a/src/mongo/db/ops/delete_executor.cpp
+++ b/src/mongo/db/ops/delete_executor.cpp
@@ -176,7 +176,7 @@ namespace mongo {
}
else {
bool replJustOne = true;
- logOp("d", ns.ns().c_str(), toDelete, 0, &replJustOne);
+ logOp(txn, "d", ns.ns().c_str(), toDelete, 0, &replJustOne);
}
}
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 04b7e26a073..0d04aa63918 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -763,7 +763,7 @@ namespace mongo {
// Call logOp if requested.
if (request.shouldCallLogOp() && !logObj.isEmpty()) {
BSONObj idQuery = driver->makeOplogEntryQuery(newObj, request.isMulti());
- logOp("u", nsString.ns().c_str(), logObj , &idQuery,
+ logOp(txn, "u", nsString.ns().c_str(), logObj , &idQuery,
NULL, request.isFromMigration(), &newObj);
}
@@ -878,7 +878,7 @@ namespace mongo {
!request.isGod() /*enforceQuota*/);
uassertStatusOK(newLoc.getStatus());
if (request.shouldCallLogOp()) {
- logOp("i", nsString.ns().c_str(), newObj,
+ logOp(txn, "i", nsString.ns().c_str(), newObj,
NULL, NULL, request.isFromMigration(), &newObj);
}
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 93e8367f698..02e8dfbddf5 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -141,7 +141,7 @@ namespace mongo {
options = b.obj();
}
string logNs = nsToDatabase(ns) + ".$cmd";
- logOp("c", logNs.c_str(), options);
+ logOp(txn, "c", logNs.c_str(), options);
}
return Status::OK();
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index ad215b9854f..c4e9b351b31 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -499,15 +499,15 @@ namespace mongo {
return true;
}
- void ReplSource::applyOperation(Database* db, const BSONObj& op) {
+ void ReplSource::applyOperation(TransactionExperiment* txn, Database* db, const BSONObj& op) {
try {
- bool failedUpdate = applyOperation_inlock( db, op );
+ bool failedUpdate = applyOperation_inlock( txn, db, op );
if (failedUpdate) {
Sync sync(hostName);
if (sync.shouldRetry(op)) {
uassert(15914,
"Failure retrying initial sync update",
- !applyOperation_inlock(db, op));
+ !applyOperation_inlock(txn, db, op));
}
}
}
@@ -605,6 +605,7 @@ namespace mongo {
}
Client::Context ctx( ns );
+ DurTransaction txn;
ctx.getClient()->curop()->reset();
bool empty = ctx.db()->isEmpty();
@@ -615,7 +616,7 @@ namespace mongo {
// always apply admin command command
// this is a bit hacky -- the semantics of replication/commands aren't well specified
if ( strcmp( clientName, "admin" ) == 0 && *op.getStringField( "op" ) == 'c' ) {
- applyOperation( ctx.db(), op );
+ applyOperation( &txn, ctx.db(), op );
return;
}
@@ -644,7 +645,7 @@ namespace mongo {
save();
}
else {
- applyOperation( ctx.db(), op );
+ applyOperation( &txn, ctx.db(), op );
addDbNextPass.erase( clientName );
}
}
diff --git a/src/mongo/db/repl/master_slave.h b/src/mongo/db/repl/master_slave.h
index 24279627ab2..97255cbecde 100644
--- a/src/mongo/db/repl/master_slave.h
+++ b/src/mongo/db/repl/master_slave.h
@@ -42,6 +42,7 @@
namespace mongo {
class Database;
+ class TransactionExperiment;
// Main entry point for master/slave at startup time.
void startMasterSlave();
@@ -119,7 +120,7 @@ namespace mongo {
public:
OplogReader oplogReader;
- void applyOperation(Database* db, const BSONObj& op);
+ void applyOperation(TransactionExperiment* txn, Database* db, const BSONObj& op);
string hostName; // ip addr or hostname plus optionally, ":<port>"
string _sourceName; // a logical source name.
string sourceName() const { return _sourceName.empty() ? "main" : _sourceName; }
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 4ec5c9a2df1..e828b46056c 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -85,7 +85,14 @@ namespace mongo {
result.isOK() );
}
- static void _logOpUninitialized(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb, bool fromMigrate ) {
+ static void _logOpUninitialized(TransactionExperiment* txn,
+ const char *opstr,
+ const char *ns,
+ const char *logNS,
+ const BSONObj& obj,
+ BSONObj *o2,
+ bool *bb,
+ bool fromMigrate ) {
uassert(13288, "replSet error write op to db before replSet initialized", str::startsWith(ns, "local.") || *opstr == 'n');
}
@@ -94,7 +101,7 @@ namespace mongo {
*/
void _logOpObjRS(const BSONObj& op) {
Lock::DBWrite lk("local");
- DurTransaction txn; //XXX should be part of parent txn
+ DurTransaction txn;
const OpTime ts = op["ts"]._opTime();
long long h = op["h"].numberLong();
@@ -104,7 +111,7 @@ namespace mongo {
Client::Context ctx(rsoplog, storageGlobalParams.dbpath);
localDB = ctx.db();
verify( localDB );
- localOplogRSCollection = localDB->getCollection( rsoplog );
+ localOplogRSCollection = localDB->getCollection( &txn, rsoplog );
massert(13389,
"local.oplog.rs missing. did you drop it? if so restart server",
localOplogRSCollection);
@@ -203,7 +210,14 @@ namespace mongo {
// on every logop call.
static BufBuilder logopbufbuilder(8*1024);
static const int OPLOG_VERSION = 2;
- static void _logOpRS(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb, bool fromMigrate ) {
+ static void _logOpRS(TransactionExperiment* txn,
+ const char *opstr,
+ const char *ns,
+ const char *logNS,
+ const BSONObj& obj,
+ BSONObj *o2,
+ bool *bb,
+ bool fromMigrate ) {
Lock::DBWrite lk1("local");
if ( strncmp(ns, "local.", 6) == 0 ) {
@@ -254,14 +268,13 @@ namespace mongo {
Client::Context ctx(rsoplog, storageGlobalParams.dbpath);
localDB = ctx.db();
verify( localDB );
- localOplogRSCollection = localDB->getCollection( rsoplog );
+ localOplogRSCollection = localDB->getCollection( txn, rsoplog );
massert(13347, "local.oplog.rs missing. did you drop it? if so restart server", localOplogRSCollection);
}
Client::Context ctx(rsoplog, localDB);
- DurTransaction txn; // XXX
OplogDocWriter writer( partial, obj );
- checkOplogInsert( localOplogRSCollection->insertDocument( &txn, &writer, false ) );
+ checkOplogInsert( localOplogRSCollection->insertDocument( txn, &writer, false ) );
/* todo: now() has code to handle clock skew. but if the skew server to server is large it will get unhappy.
this code (or code in now() maybe) should be improved.
@@ -285,7 +298,14 @@ namespace mongo {
}
- static void _logOpOld(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb, bool fromMigrate ) {
+ static void _logOpOld(TransactionExperiment* txn,
+ const char *opstr,
+ const char *ns,
+ const char *logNS,
+ const BSONObj& obj,
+ BSONObj *o2,
+ bool *bb,
+ bool fromMigrate ) {
Lock::DBWrite lk("local");
static BufBuilder bufbuilder(8*1024); // todo there is likely a mutex on this constructor
@@ -326,19 +346,25 @@ namespace mongo {
Client::Context ctx(logNS, storageGlobalParams.dbpath);
localDB = ctx.db();
verify( localDB );
- localOplogMainCollection = localDB->getCollection(logNS);
+ localOplogMainCollection = localDB->getCollection(txn, logNS);
verify( localOplogMainCollection );
}
Client::Context ctx(logNS , localDB);
- DurTransaction txn; //XXX should be part of parent txn
OplogDocWriter writer( partial, obj );
- checkOplogInsert( localOplogMainCollection->insertDocument( &txn, &writer, false ) );
+ checkOplogInsert( localOplogMainCollection->insertDocument( txn, &writer, false ) );
context.getClient()->setLastOp( ts );
}
- static void (*_logOp)(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb, bool fromMigrate ) = _logOpOld;
+ static void (*_logOp)(TransactionExperiment* txn,
+ const char *opstr,
+ const char *ns,
+ const char *logNS,
+ const BSONObj& obj,
+ BSONObj *o2,
+ bool *bb,
+ bool fromMigrate ) = _logOpOld;
void newReplUp() {
replSettings.master = true;
_logOp = _logOpRS;
@@ -350,13 +376,15 @@ namespace mongo {
void oldRepl() { _logOp = _logOpOld; }
void logKeepalive() {
- _logOp("n", "", 0, BSONObj(), 0, 0, false);
+ DurTransaction txn;
+ _logOp(&txn, "n", "", 0, BSONObj(), 0, 0, false);
}
void logOpComment(const BSONObj& obj) {
- _logOp("n", "", 0, obj, 0, 0, false);
+ DurTransaction txn;
+ _logOp(&txn, "n", "", 0, obj, 0, 0, false);
}
- void logOpInitiate(const BSONObj& obj) {
- _logOpRS("n", "", 0, obj, 0, 0, false);
+ void logOpInitiate(TransactionExperiment* txn, const BSONObj& obj) {
+ _logOpRS(txn, "n", "", 0, obj, 0, 0, false);
}
/*@ @param opstr:
@@ -366,7 +394,8 @@ namespace mongo {
d delete / remove
u update
*/
- void logOp(const char* opstr,
+ void logOp(TransactionExperiment* txn,
+ const char* opstr,
const char* ns,
const BSONObj& obj,
BSONObj* patt,
@@ -374,7 +403,7 @@ namespace mongo {
bool fromMigrate,
const BSONObj* fullObj) {
if ( replSettings.master ) {
- _logOp(opstr, ns, 0, obj, patt, b, fromMigrate);
+ _logOp(txn, opstr, ns, 0, obj, patt, b, fromMigrate);
}
logOpForSharding(opstr, ns, obj, patt, fullObj, fromMigrate);
@@ -397,8 +426,8 @@ namespace mongo {
ns = rsoplog;
Client::Context ctx(ns);
- DurTransaction txn; // XXX
- Collection* collection = ctx.db()->getCollection( ns );
+ DurTransaction txn;
+ Collection* collection = ctx.db()->getCollection( &txn, ns );
if ( collection ) {
@@ -460,7 +489,7 @@ namespace mongo {
invariant( ctx.db()->createCollection( &txn, ns, options ) );
if( !rs )
- logOp( "n", "", BSONObj() );
+ logOp( &txn, "n", "", BSONObj() );
/* sync here so we don't get any surprising lag later when we try to sync */
MemoryMappedFile::flushAll(true);
@@ -472,9 +501,11 @@ namespace mongo {
/** @param fromRepl false if from ApplyOpsCmd
@return true if was and update should have happened and the document DNE. see replset initial sync code.
*/
- bool applyOperation_inlock(Database* db, const BSONObj& op,
- bool fromRepl, bool convertUpdateToUpsert) {
- DurTransaction txn; //XXX should be part of parent txn
+ bool applyOperation_inlock(TransactionExperiment* txn,
+ Database* db,
+ const BSONObj& op,
+ bool fromRepl,
+ bool convertUpdateToUpsert) {
LOG(3) << "applying op: " << op << endl;
bool failedUpdate = false;
@@ -503,7 +534,7 @@ namespace mongo {
Lock::assertWriteLocked(ns);
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( txn, ns );
IndexCatalog* indexCatalog = collection == NULL ? NULL : collection->getIndexCatalog();
// operation type -- see logOp() comments for types
@@ -559,7 +590,7 @@ namespace mongo {
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
- update(&txn, request, &debug);
+ update(txn, request, &debug);
if( t.millis() >= 2 ) {
RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
@@ -588,7 +619,7 @@ namespace mongo {
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
- update(&txn, request, &debug);
+ update(txn, request, &debug);
}
}
}
@@ -615,7 +646,7 @@ namespace mongo {
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
- UpdateResult ur = update(&txn, request, &debug);
+ UpdateResult ur = update(txn, request, &debug);
if( ur.numMatched == 0 ) {
if( ur.modifiers ) {
@@ -656,7 +687,7 @@ namespace mongo {
else if ( *opType == 'd' ) {
opCounters->gotDelete();
if ( opType[1] == 0 )
- deleteObjects(&txn, ns, o, /*justOne*/ valueB);
+ deleteObjects(txn, ns, o, /*justOne*/ valueB);
else
verify( opType[1] == 'b' ); // "db" advertisement
}
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index bf32e1e062c..119579ee3bf 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -32,6 +32,7 @@ namespace mongo {
class BSONObj;
class Database;
+ class TransactionExperiment;
// These functions redefine the function for logOp(),
// for either master/slave or replica sets.
@@ -66,8 +67,13 @@ namespace mongo {
See _logOp() in oplog.cpp for more details.
*/
- void logOp( const char *opstr, const char *ns, const BSONObj& obj,
- BSONObj *patt = NULL, bool *b = NULL, bool fromMigrate = false,
+ void logOp( TransactionExperiment* txn,
+ const char *opstr,
+ const char *ns,
+ const BSONObj& obj,
+ BSONObj *patt = NULL,
+ bool *b = NULL,
+ bool fromMigrate = false,
const BSONObj* fullObj = NULL );
// Log an empty no-op operation to the local oplog
@@ -90,7 +96,8 @@ namespace mongo {
* @param convertUpdateToUpsert convert some updates to upserts for idempotency reasons
* Returns if the op was an update that could not be applied (true on failure)
*/
- bool applyOperation_inlock(Database* db,
+ bool applyOperation_inlock(TransactionExperiment* txn,
+ Database* db,
const BSONObj& op,
bool fromRepl = true,
bool convertUpdateToUpsert = false);
diff --git a/src/mongo/db/repl/rs_config.cpp b/src/mongo/db/repl/rs_config.cpp
index c0e4c88d348..1ab66cca9c2 100644
--- a/src/mongo/db/repl/rs_config.cpp
+++ b/src/mongo/db/repl/rs_config.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/repl_settings.h" // replSettings
#include "mongo/db/repl/rs.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/text.h"
@@ -50,7 +51,7 @@ namespace mongo {
const int ReplSetConfig::DEFAULT_HB_TIMEOUT = 10;
static AtomicUInt _warnedAboutVotes = 0;
- void logOpInitiate(const bo&);
+ void logOpInitiate(TransactionExperiment* txn, const bo&);
void assertOnlyHas(BSONObj o, const set<string>& fields) {
BSONObj::iterator i(o);
@@ -81,6 +82,7 @@ namespace mongo {
<< newConfigBSON << rsLog;
{
Client::WriteContext cx( rsConfigNs );
+ DurTransaction txn;
//theReplSet->lastOpTimeWritten = ??;
//rather than above, do a logOp()? probably
@@ -88,7 +90,7 @@ namespace mongo {
newConfigBSON,
false/*logOp=false; local db so would work regardless...*/);
if( !comment.isEmpty() && (!theReplSet || theReplSet->isPrimary()) )
- logOpInitiate(comment);
+ logOpInitiate(&txn, comment);
}
log() << "replSet saveConfigLocally done" << rsLog;
}
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 4e8ac1d96d7..61f9bb6390e 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/repl/bgsync.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/stats/timer_stats.h"
+#include "mongo/db/storage/mmap_v1/dur_transaction.h"
#include "mongo/util/fail_point_service.h"
namespace mongo {
@@ -105,10 +106,11 @@ namespace replset {
}
Client::Context ctx(ns, storageGlobalParams.dbpath);
+ DurTransaction txn;
ctx.getClient()->curop()->reset();
// For non-initial-sync, we convert updates to upserts
// to suppress errors when replaying oplog entries.
- bool ok = !applyOperation_inlock(ctx.db(), op, true, convertUpdateToUpsert);
+ bool ok = !applyOperation_inlock(&txn, ctx.db(), op, true, convertUpdateToUpsert);
opsAppliedStats.increment();
getDur().commitIfNeeded();
diff --git a/src/mongo/dbtests/replsettests.cpp b/src/mongo/dbtests/replsettests.cpp
index 6ab400350b5..c17e1e98427 100644
--- a/src/mongo/dbtests/replsettests.cpp
+++ b/src/mongo/dbtests/replsettests.cpp
@@ -367,8 +367,9 @@ namespace ReplSetTests {
// returns true on success, false on failure
bool apply(const BSONObj& op) {
Client::Context ctx( _cappedNs );
+ DurTransaction txn;
// in an annoying twist of api, returns true on failure
- return !applyOperation_inlock(ctx.db(), op, true);
+ return !applyOperation_inlock(&txn, ctx.db(), op, true);
}
void run() {
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 46152ad3555..f5e69b0d81f 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -176,7 +176,7 @@ namespace ReplTests {
if ( 0 ) {
mongo::unittest::log() << "op: " << *i << endl;
}
- a.applyOperation( ctx.db(), *i );
+ a.applyOperation( &txn, ctx.db(), *i );
}
}
}
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 050f35ee211..72017467ced 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1658,8 +1658,9 @@ namespace mongo {
for ( unsigned i=0; i<all.size(); i++ ) {
BSONObj idx = all[i];
Client::WriteContext ctx( ns );
+ DurTransaction txn;
Database* db = ctx.ctx().db();
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection( &txn, ns );
if ( !collection ) {
errmsg = str::stream() << "collection dropped during migration: " << ns;
warning() << errmsg;
@@ -1678,7 +1679,7 @@ namespace mongo {
}
// make sure to create index on secondaries as well
- logOp( "i", db->getSystemIndexesName().c_str(), idx,
+ logOp( &txn, "i", db->getSystemIndexesName().c_str(), idx,
NULL, NULL, true /* fromMigrate */ );
}