-rw-r--r--  src/mongo/db/catalog/database.cpp  16
-rw-r--r--  src/mongo/db/cloner.cpp  86
-rw-r--r--  src/mongo/db/concurrency/write_conflict_exception.h  2
-rw-r--r--  src/mongo/db/ops/update.cpp  36
-rw-r--r--  src/mongo/db/repl/bgsync.cpp  19
-rw-r--r--  src/mongo/db/repl/minvalid.cpp  68
-rw-r--r--  src/mongo/db/repl/oplog.cpp  17
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp  48
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp  13
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp  12
-rw-r--r--  src/mongo/db/repl/sync.cpp  59
11 files changed, 218 insertions, 158 deletions
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 49727f6f709..a4428eee435 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -558,10 +558,18 @@ namespace mongo {
for (vector<string>::iterator i = n.begin(); i != n.end(); i++) {
if (*i != "local") {
- Database* db = dbHolder().get(txn, *i);
- invariant(db);
-
- dropDatabase(txn, db);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ Database* db = dbHolder().get(txn, *i);
+ // This is needed since dropDatabase can't be rolled back.
+ // This can safely be replaced by "invariant(db); dropDatabase(txn, db);" once that is fixed.
+ if (db == nullptr) {
+ log() << "database disappeared after listDatabases but before drop: " << *i;
+ } else {
+ dropDatabase(txn, db);
+ }
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn,
+ "dropAllDatabasesExceptLocal",
+ *i);
}
}
}
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 88fdec2ece3..eb1975b417c 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -49,6 +49,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/commands/copydb.h"
#include "mongo/db/commands/rename_collection.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/service_context.h"
@@ -147,11 +148,12 @@ namespace mongo {
<< "collection dropped during clone ["
<< to_collection.ns() << "]",
!createdCollection );
- WriteUnitOfWork wunit(txn);
- collection = db->createCollection(txn, to_collection.ns(), CollectionOptions());
- verify(collection);
-
- wunit.commit();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ collection = db->createCollection(txn, to_collection.ns(), CollectionOptions());
+ verify(collection);
+ wunit.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", to_collection.ns());
}
while( i.moreInCurrentBatch() ) {
@@ -217,19 +219,18 @@ namespace mongo {
}
++numSeen;
- WriteUnitOfWork wunit(txn);
-
- BSONObj js = tmp;
-
- StatusWith<RecordId> loc = collection->insertDocument( txn, js, true );
- if ( !loc.isOK() ) {
- error() << "error: exception cloning object in " << from_collection
- << ' ' << loc.getStatus() << " obj:" << js;
- }
- uassertStatusOK( loc.getStatus() );
-
- wunit.commit();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ BSONObj doc = tmp;
+ StatusWith<RecordId> loc = collection->insertDocument( txn, doc, true );
+ if ( !loc.isOK() ) {
+ error() << "error: exception cloning object in " << from_collection
+ << ' ' << loc.getStatus() << " obj:" << doc;
+ }
+ uassertStatusOK( loc.getStatus() );
+ wunit.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "cloner insert", to_collection.ns());
RARELY if ( time( 0 ) - saveLast > 60 ) {
log() << numSeen << " objects cloned so far from collection " << from_collection;
saveLast = time( 0 );
@@ -325,10 +326,12 @@ namespace mongo {
Collection* collection = db->getCollection( to_collection );
if ( !collection ) {
- WriteUnitOfWork wunit(txn);
- collection = db->createCollection(txn, to_collection.ns(), CollectionOptions());
- invariant(collection);
- wunit.commit();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ collection = db->createCollection(txn, to_collection.ns(), CollectionOptions());
+ invariant(collection);
+ wunit.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", to_collection.ns());
}
// TODO pass the MultiIndexBlock when inserting into the collection rather than building the
@@ -389,13 +392,15 @@ namespace mongo {
invariant(collList.size() <= 1);
BSONObj col = collList.front();
if (col["options"].isABSONObj()) {
- WriteUnitOfWork wunit(txn);
- Status status = userCreateNS(txn, db, ns, col["options"].Obj(), 0);
- if ( !status.isOK() ) {
- errmsg = status.toString();
- return false;
- }
- wunit.commit();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ Status status = userCreateNS(txn, db, ns, col["options"].Obj(), 0);
+ if ( !status.isOK() ) {
+ errmsg = status.toString();
+ return false;
+ }
+ wunit.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createUser", ns);
}
}
@@ -569,19 +574,20 @@ namespace mongo {
Database* db = dbHolder().openDb(txn, toDBName);
{
- WriteUnitOfWork wunit(txn);
-
- // we defer building id index for performance - building it in batch is much
- // faster
- Status createStatus = userCreateNS(txn, db, to_name.ns(), options, false);
- if ( !createStatus.isOK() ) {
- errmsg = str::stream() << "failed to create collection \""
- << to_name.ns() << "\": "
- << createStatus.reason();
- return false;
- }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
- wunit.commit();
+ // we defer building id index for performance - building it in batch is much
+ // faster
+ Status createStatus = userCreateNS(txn, db, to_name.ns(), options, false);
+ if ( !createStatus.isOK() ) {
+ errmsg = str::stream() << "failed to create collection \""
+ << to_name.ns() << "\": "
+ << createStatus.reason();
+ return false;
+ }
+ wunit.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createUser", to_name.ns());
}
LOG(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
diff --git a/src/mongo/db/concurrency/write_conflict_exception.h b/src/mongo/db/concurrency/write_conflict_exception.h
index 9d923da003c..ee91721b847 100644
--- a/src/mongo/db/concurrency/write_conflict_exception.h
+++ b/src/mongo/db/concurrency/write_conflict_exception.h
@@ -45,7 +45,7 @@
continue; \
} \
break; \
- } while (true); } while (false)
+ } while (true); } while (false);
namespace mongo {
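For readers unfamiliar with the macro pair being applied throughout this patch, the BEGIN/END macros wrap their body in a retry loop: a WriteConflictException thrown by the storage engine abandons the current attempt and re-runs the body, while any other outcome exits the loop. The END macro takes the operation context plus an operation label and namespace string so retries can be attributed in the logs. The following standalone C++ sketch illustrates the general retry-on-conflict idea only; writeConflictRetry and the local WriteConflictException struct are hypothetical stand-ins, not MongoDB's actual macro expansion.

    // Illustrative sketch of the write-conflict retry pattern (hypothetical helper,
    // not MongoDB's macro definition).
    #include <exception>
    #include <iostream>

    struct WriteConflictException : std::exception {};  // stand-in for mongo::WriteConflictException

    template <typename Body>
    void writeConflictRetry(const char* opName, const char* ns, Body body) {
        for (int attempt = 0;; ++attempt) {
            try {
                body();   // the unit of work; it commits before returning
                return;
            }
            catch (const WriteConflictException&) {
                // Log and retry. A real implementation would also back off and
                // abandon the storage-engine snapshot before the next attempt.
                std::cerr << "write conflict in " << opName << " on " << ns
                          << " (attempt " << attempt << "), retrying" << std::endl;
            }
        }
    }

In the diffs below, the same shape appears at each call site: open the retry loop, take the locks, do the write inside a WriteUnitOfWork, commit, and let the END macro catch the conflict and retry.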
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index ffdc244e42c..ab2a48a72fb 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/exec/update.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/op_observer.h"
@@ -75,23 +76,24 @@ namespace mongo {
locker->isLockHeldForMode(ResourceId(RESOURCE_DATABASE, nsString.db()),
MODE_X));
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), nsString.db(), MODE_X);
-
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(nsString.db());
-
- if (userInitiatedWritesAndNotPrimary) {
- uassertStatusOK(Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while creating collection " << nsString.ns()
- << " during upsert"));
- }
-
- WriteUnitOfWork wuow(txn);
- collection = db->createCollection(txn, nsString.ns(), CollectionOptions());
- invariant(collection);
-
- wuow.commit();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lk(txn->lockState(), nsString.db(), MODE_X);
+
+ const bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(
+ nsString.db());
+
+ if (userInitiatedWritesAndNotPrimary) {
+ uassertStatusOK(Status(ErrorCodes::NotMaster, str::stream()
+ << "Not primary while creating collection " << nsString.ns()
+ << " during upsert"));
+ }
+ WriteUnitOfWork wuow(txn);
+ collection = db->createCollection(txn, nsString.ns(), CollectionOptions());
+ invariant(collection);
+ wuow.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", nsString.ns());
}
// Parse the update, get an executor for it, run the executor, get stats out.
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index b1343272d52..b02514e4f24 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/client.h"
#include "mongo/db/commands/fsync.h"
#include "mongo/db/commands/server_status_metric.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/repl/oplog.h"
@@ -493,14 +494,16 @@ namespace {
long long BackgroundSync::_readLastAppliedHash(OperationContext* txn) {
BSONObj oplogEntry;
try {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), "local", MODE_X);
- bool success = Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry);
- if (!success) {
- // This can happen when we are to do an initial sync. lastHash will be set
- // after the initial sync is complete.
- return 0;
- }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lk(txn->lockState(), "local", MODE_X);
+ bool success = Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry);
+ if (!success) {
+ // This can happen when we are to do an initial sync. lastHash will be set
+ // after the initial sync is complete.
+ return 0;
+ }
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "readLastAppliedHash", rsOplogName);
}
catch (const DBException& ex) {
severe() << "Problem reading " << rsOplogName << ": " << ex.toStatus();
diff --git a/src/mongo/db/repl/minvalid.cpp b/src/mongo/db/repl/minvalid.cpp
index c648dcd5fa7..8eee71c3d87 100644
--- a/src/mongo/db/repl/minvalid.cpp
+++ b/src/mongo/db/repl/minvalid.cpp
@@ -34,6 +34,7 @@
#include "mongo/bson/timestamp.h"
#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context.h"
@@ -51,49 +52,60 @@ namespace {
// Writes
void clearInitialSyncFlag(OperationContext* txn) {
- ScopedTransaction transaction(txn, MODE_IX);
- // TODO: Investigate correctness of taking MODE_IX for DB/Collection locks
- Lock::DBLock dblk(txn->lockState(), "local", MODE_X);
- Helpers::putSingleton(txn, minvalidNS, BSON("$unset" << initialSyncFlag));
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ // TODO: Investigate correctness of taking MODE_IX for DB/Collection locks
+ Lock::DBLock dblk(txn->lockState(), "local", MODE_X);
+ Helpers::putSingleton(txn, minvalidNS, BSON("$unset" << initialSyncFlag));
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "clearInitialSyncFlags", minvalidNS);
+
}
void setInitialSyncFlag(OperationContext* txn) {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dblk(txn->lockState(), "local", MODE_X);
- Helpers::putSingleton(txn, minvalidNS, BSON("$set" << initialSyncFlag));
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dblk(txn->lockState(), "local", MODE_X);
+ Helpers::putSingleton(txn, minvalidNS, BSON("$set" << initialSyncFlag));
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "setInitialSyncFlags", minvalidNS);
}
void setMinValid(OperationContext* ctx, Timestamp ts) {
- ScopedTransaction transaction(ctx, MODE_IX);
- Lock::DBLock dblk(ctx->lockState(), "local", MODE_X);
- Helpers::putSingleton(ctx, minvalidNS, BSON("$set" << BSON("ts" << ts)));
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(ctx, MODE_IX);
+ Lock::DBLock dblk(ctx->lockState(), "local", MODE_X);
+ Helpers::putSingleton(ctx, minvalidNS, BSON("$set" << BSON("ts" << ts)));
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(ctx, "setMinValid", minvalidNS);
}
// Reads
bool getInitialSyncFlag() {
OperationContextImpl txn;
- ScopedTransaction transaction(&txn, MODE_IS);
- Lock::DBLock dblk(txn.lockState(), "local", MODE_IS);
- Lock::CollectionLock lk(txn.lockState(), minvalidNS, MODE_IS);
- BSONObj mv;
- bool found = Helpers::getSingleton( &txn, minvalidNS, mv);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(&txn, MODE_IS);
+ Lock::DBLock dblk(txn.lockState(), "local", MODE_IS);
+ Lock::CollectionLock lk(txn.lockState(), minvalidNS, MODE_IS);
+ BSONObj mv;
+ bool found = Helpers::getSingleton( &txn, minvalidNS, mv);
- if (found) {
- return mv[initialSyncFlagString].trueValue();
- }
- return false;
+ if (found) {
+ return mv[initialSyncFlagString].trueValue();
+ }
+ return false;
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(&txn, "getInitialSyncFlags", minvalidNS);
}
Timestamp getMinValid(OperationContext* txn) {
- ScopedTransaction transaction(txn, MODE_IS);
- Lock::DBLock dblk(txn->lockState(), "local", MODE_IS);
- Lock::CollectionLock lk(txn->lockState(), minvalidNS, MODE_IS);
- BSONObj mv;
- bool found = Helpers::getSingleton(txn, minvalidNS, mv);
- if (found) {
- return mv["ts"].timestamp();
- }
- return Timestamp();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IS);
+ Lock::DBLock dblk(txn->lockState(), "local", MODE_IS);
+ Lock::CollectionLock lk(txn->lockState(), minvalidNS, MODE_IS);
+ BSONObj mv;
+ bool found = Helpers::getSingleton(txn, minvalidNS, mv);
+ if (found) {
+ return mv["ts"].timestamp();
+ }
+ return Timestamp();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "getMinValid", minvalidNS);
}
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 3dfa275a1a7..7032cf1994d 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -321,10 +321,11 @@ namespace {
Timestamp writeOpsToOplog(OperationContext* txn, const std::deque<BSONObj>& ops) {
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- Timestamp lastOptime = replCoord->getMyLastOptime();
- invariant(!ops.empty());
+ Timestamp lastOptime;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ lastOptime = replCoord->getMyLastOptime();
+ invariant(!ops.empty());
ScopedTransaction transaction(txn, MODE_IX);
Lock::DBLock lk(txn->lockState(), "local", MODE_X);
@@ -435,11 +436,13 @@ namespace {
options.cappedSize = sz;
options.autoIndexId = CollectionOptions::NO;
- WriteUnitOfWork uow( txn );
- invariant(ctx.db()->createCollection(txn, _oplogCollectionName, options));
- if( !rs )
- getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, BSONObj());
- uow.commit();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork uow( txn );
+ invariant(ctx.db()->createCollection(txn, _oplogCollectionName, options));
+ if( !rs )
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, BSONObj());
+ uow.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", _oplogCollectionName);
/* sync here so we don't get any surprising lag later when we try to sync */
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 6a83bba6d50..5e3f498fdeb 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/service_context.h"
#include "mongo/db/jsobj.h"
@@ -113,12 +114,14 @@ namespace {
void ReplicationCoordinatorExternalStateImpl::initiateOplog(OperationContext* txn) {
createOplog(txn);
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWrite(txn->lockState());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction scopedXact(txn, MODE_X);
+ Lock::GlobalWrite globalWrite(txn->lockState());
- WriteUnitOfWork wuow(txn);
- getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, BSON("msg" << "initiating set"));
- wuow.commit();
+ WriteUnitOfWork wuow(txn);
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, BSON("msg" << "initiating set"));
+ wuow.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "initiate oplog entry", "local.oplog.rs");
}
void ReplicationCoordinatorExternalStateImpl::forwardSlaveProgress() {
@@ -134,6 +137,7 @@ namespace {
BSONObj me;
// local.me is an identifier for a server for getLastError w:2+
+ // TODO: handle WriteConflictExceptions below
if (!Helpers::getSingleton(txn, meCollectionName, me) ||
!me.hasField("host") ||
me["host"].String() != myname) {
@@ -158,14 +162,18 @@ namespace {
StatusWith<BSONObj> ReplicationCoordinatorExternalStateImpl::loadLocalConfigDocument(
OperationContext* txn) {
try {
- BSONObj config;
- if (!Helpers::getSingleton(txn, configCollectionName, config)) {
- return StatusWith<BSONObj>(
- ErrorCodes::NoMatchingDocument,
- str::stream() << "Did not find replica set configuration document in " <<
- configCollectionName);
- }
- return StatusWith<BSONObj>(config);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ BSONObj config;
+ if (!Helpers::getSingleton(txn, configCollectionName, config)) {
+ return StatusWith<BSONObj>(
+ ErrorCodes::NoMatchingDocument,
+ str::stream() << "Did not find replica set configuration document in "
+ << configCollectionName);
+ }
+ return StatusWith<BSONObj>(config);
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn,
+ "load replica set config",
+ configCollectionName);
}
catch (const DBException& ex) {
return StatusWith<BSONObj>(ex.toStatus());
@@ -176,14 +184,19 @@ namespace {
OperationContext* txn,
const BSONObj& config) {
try {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbWriteLock(txn->lockState(), configDatabaseName, MODE_X);
- Helpers::putSingleton(txn, configCollectionName, config);
- return Status::OK();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbWriteLock(txn->lockState(), configDatabaseName, MODE_X);
+ Helpers::putSingleton(txn, configCollectionName, config);
+ return Status::OK();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn,
+ "save replica set config",
+ configCollectionName);
}
catch (const DBException& ex) {
return ex.toStatus();
}
+
}
void ReplicationCoordinatorExternalStateImpl::setGlobalTimestamp(const Timestamp& newTime) {
@@ -193,6 +206,7 @@ namespace {
StatusWith<Timestamp> ReplicationCoordinatorExternalStateImpl::loadLastOpTime(
OperationContext* txn) {
+ // TODO: handle WriteConflictExceptions below
try {
BSONObj oplogEntry;
if (!Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry)) {
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index d85bef6b746..e39a574a650 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/client.h"
#include "mongo/db/cloner.h"
#include "mongo/db/db_raii.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/service_context.h"
#include "mongo/db/op_observer.h"
@@ -90,10 +91,12 @@ namespace {
// Truncate the oplog in case there was a prior initial sync that failed.
Collection* collection = autoDb.getDb()->getCollection(rsOplogName);
fassert(28565, collection);
- WriteUnitOfWork wunit(txn);
- Status status = collection->truncate(txn);
- fassert(28564, status);
- wunit.commit();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ Status status = collection->truncate(txn);
+ fassert(28564, status);
+ wunit.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "truncate", collection->ns().ns());
}
/**
@@ -448,13 +451,11 @@ namespace {
// Initial sync is now complete. Flag this by setting minValid to the last thing
// we synced.
- WriteUnitOfWork wunit(&txn);
setMinValid(&txn, lastOpTimeWritten);
// Clear the initial sync flag.
clearInitialSyncFlag(&txn);
BackgroundSync::get()->setInitialSyncRequestedFlag(false);
- wunit.commit();
}
// If we just cloned & there were no ops applied, we still want the primary to know where
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index ed92efd5785..039541841ac 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/cloner.h"
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/ops/delete.h"
@@ -616,9 +617,14 @@ namespace {
catch (DBException& e) {
if (e.getCode() == 13415) {
// hack: need to just make cappedTruncate do this...
- WriteUnitOfWork wunit(txn);
- uassertStatusOK(collection->truncate(txn));
- wunit.commit();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ uassertStatusOK(collection->truncate(txn));
+ wunit.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
+ txn,
+ "truncate",
+ collection->ns().ns());
}
else {
throw e;
diff --git a/src/mongo/db/repl/sync.cpp b/src/mongo/db/repl/sync.cpp
index 1f5cef2f773..7ed10780dda 100644
--- a/src/mongo/db/repl/sync.cpp
+++ b/src/mongo/db/repl/sync.cpp
@@ -37,8 +37,11 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/client.h"
+#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/record_id.h"
#include "mongo/db/repl/oplogreader.h"
#include "mongo/util/assert_util.h"
@@ -115,41 +118,43 @@ namespace repl {
bool Sync::shouldRetry(OperationContext* txn, const BSONObj& o) {
const NamespaceString nss(o.getStringField("ns"));
- // Take an X lock on the database in order to preclude other modifications. Also, the
- // database might not exist yet, so create it.
- AutoGetOrCreateDb autoDb(txn, nss.db(), MODE_X);
- Database* const db = autoDb.getDb();
- // we don't have the object yet, which is possible on initial sync. get it.
- log() << "adding missing object" << endl; // rare enough we can log
- BSONObj missingObj = getMissingDoc(txn, db, o);
- if( missingObj.isEmpty() ) {
- log() << "missing object not found on source. presumably deleted later in oplog" << endl;
- log() << "o2: " << o.getObjectField("o2").toString() << endl;
- log() << "o firstfield: " << o.getObjectField("o").firstElementFieldName() << endl;
-
- return false;
- }
- else {
- WriteUnitOfWork wunit(txn);
- Collection* const collection = db->getOrCreateCollection(txn, nss.toString());
- invariant(collection);
- StatusWith<RecordId> result = collection->insertDocument(txn, missingObj, true);
- uassert(15917,
- str::stream() << "failed to insert missing doc: "
- << result.getStatus().toString(),
- result.isOK() );
- LOG(1) << "inserted missing doc: " << missingObj.toString() << endl;
- wunit.commit();
- return true;
- }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ // Take an X lock on the database in order to preclude other modifications.
+ // Also, the database might not exist yet, so create it.
+ AutoGetOrCreateDb autoDb(txn, nss.db(), MODE_X);
+ Database* const db = autoDb.getDb();
+ // we don't have the object yet, which is possible on initial sync. get it.
+ log() << "adding missing object" << endl; // rare enough we can log
+ BSONObj missingObj = getMissingDoc(txn, db, o);
+ if( missingObj.isEmpty() ) {
+ log() << "missing object not found on source."
+ " presumably deleted later in oplog";
+ log() << "o2: " << o.getObjectField("o2").toString();
+ log() << "o firstfield: " << o.getObjectField("o").firstElementFieldName();
+ return false;
+ }
+ else {
+ WriteUnitOfWork wunit(txn);
+ Collection* const coll = db->getOrCreateCollection(txn, nss.toString());
+ invariant(coll);
+ StatusWith<RecordId> result = coll->insertDocument(txn, missingObj, true);
+ uassert(15917,
+ str::stream() << "failed to insert missing doc: "
+ << result.getStatus().toString(),
+ result.isOK() );
+ LOG(1) << "inserted missing doc: " << missingObj.toString() << endl;
+ wunit.commit();
+ return true;
+ }
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "InsertRetry", nss.ns());
}
} // namespace repl