author    Eric Milkie <milkie@10gen.com>    2012-09-28 14:54:44 -0400
committer Eric Milkie <milkie@10gen.com>    2012-10-02 17:45:53 -0400
commit    1e852c70e5ad7907ec97142d27278a21324ae544 (patch)
tree      6dad6acf284ced79c832eb918f7a36abd43f3318
parent    2ac26a3c808772c639ee0932527431aee1f4d91e (diff)
download  mongo-1e852c70e5ad7907ec97142d27278a21324ae544.tar.gz
SERVER-6671 suppress unique index enforcement during STARTUP2 or RECOVERING secondary states
Operations involving unique indexes may be replayed during recovery or initial sync, so unique key violations must be ignored in those states. This is safe because, by the time we reach the end of the oplog stream and are finally caught up, all violations should have resolved: the secondary should be an exact copy of the primary.
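To make the gating rule concrete, here is a minimal, self-contained C++ sketch. It is not MongoDB source: the MemberState enum, the currentState variable, and the toy UniqueIndex are illustrative stand-ins for theReplSet->state() and a real unique index. It shows the idea the patch implements: duplicate keys are tolerated only while the node is in STARTUP2 or RECOVERING, and enforced again once it is caught up.

// Sketch only: models the state-dependent suppression of unique-key checks.
#include <iostream>
#include <set>
#include <stdexcept>
#include <string>

enum class MemberState { STARTUP2, RECOVERING, SECONDARY, PRIMARY };

// Stand-in for querying the replica set member's current state.
MemberState currentState = MemberState::SECONDARY;

bool ignoreUniqueIndexes() {
    // Unique-key enforcement is suspended only while replaying the oplog
    // during initial sync (STARTUP2) or recovery (RECOVERING).
    return currentState == MemberState::STARTUP2 ||
           currentState == MemberState::RECOVERING;
}

// Toy unique index: throws on a duplicate key unless enforcement is suppressed.
struct UniqueIndex {
    std::set<std::string> keys;
    void insert(const std::string& key) {
        bool inserted = keys.insert(key).second;
        if (!inserted && !ignoreUniqueIndexes()) {
            throw std::runtime_error("E11000 duplicate key error");
        }
    }
};

int main() {
    UniqueIndex idx;
    idx.insert("a");

    currentState = MemberState::RECOVERING;
    idx.insert("a");  // replayed oplog entry: tolerated while recovering
    std::cout << "duplicate tolerated during recovery" << std::endl;

    currentState = MemberState::SECONDARY;
    try {
        idx.insert("a");  // caught up: enforcement is back on
    } catch (const std::exception& e) {
        std::cout << "enforced once caught up: " << e.what() << std::endl;
    }
    return 0;
}

Once the member transitions to SECONDARY or PRIMARY, ignoreUniqueIndexes() returns false and the normal uassert path fires again, so the enforcement gap does not leak into steady-state replication.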
-rw-r--r--  src/mongo/db/index.cpp         | 11
-rw-r--r--  src/mongo/db/index.h           |  7
-rw-r--r--  src/mongo/db/index_update.cpp  |  3
-rw-r--r--  src/mongo/db/pdfile.cpp        |  5
-rw-r--r--  src/mongo/db/repl/rs.h         | 12
-rw-r--r--  src/mongo/db/repl/rs_sync.cpp  |  7
6 files changed, 30 insertions(+), 15 deletions(-)
diff --git a/src/mongo/db/index.cpp b/src/mongo/db/index.cpp
index 6787befd8fa..583a2a8afcc 100644
--- a/src/mongo/db/index.cpp
+++ b/src/mongo/db/index.cpp
@@ -434,4 +434,15 @@ namespace mongo {
         _init();
     }
 
+    void IndexChanges::dupCheck(IndexDetails& idx, DiskLoc curObjLoc) {
+        if (added.empty() ||
+            !idx.unique() ||
+            ignoreUniqueIndexes()) {
+            return;
+        }
+        const Ordering ordering = Ordering::make(idx.keyPattern());
+
+        // "E11001 duplicate key on update"
+        idx.idxInterface().uassertIfDups(idx, added, idx.head, curObjLoc, ordering);
+    }
 }
diff --git a/src/mongo/db/index.h b/src/mongo/db/index.h
index 4942af17daf..b4e093da12b 100644
--- a/src/mongo/db/index.h
+++ b/src/mongo/db/index.h
@@ -249,12 +249,7 @@ namespace mongo {
         /** @curObjLoc - the object we want to add's location. if it is already in the
             index, that is allowed here (for bg indexing case).
         */
-        void dupCheck(IndexDetails& idx, DiskLoc curObjLoc) {
-            if( added.empty() || !idx.unique() )
-                return;
-            const Ordering ordering = Ordering::make(idx.keyPattern());
-            idx.idxInterface().uassertIfDups(idx, added, idx.head, curObjLoc, ordering); // "E11001 duplicate key on update"
-        }
+        void dupCheck(IndexDetails& idx, DiskLoc curObjLoc);
     };
 
     class NamespaceDetails;
diff --git a/src/mongo/db/index_update.cpp b/src/mongo/db/index_update.cpp
index fcb36f3ba46..b4471230fc1 100644
--- a/src/mongo/db/index_update.cpp
+++ b/src/mongo/db/index_update.cpp
@@ -28,6 +28,7 @@
 #include "mongo/db/namespace_details.h"
 #include "mongo/db/pdfile_private.h"
 #include "mongo/db/replutil.h"
+#include "mongo/db/repl/rs.h"
 #include "mongo/util/processinfo.h"
 #include "mongo/util/startup_test.h"
 
@@ -126,7 +127,7 @@ namespace mongo {
         BSONObjSet keys;
         for ( int i = 0; i < n; i++ ) {
             // this call throws on unique constraint violation. we haven't done any writes yet so that is fine.
-            fetchIndexInserters(/*out*/keys, inserter, d, i, obj, loc);
+            fetchIndexInserters(/*out*/keys, inserter, d, i, obj, loc, ignoreUniqueIndexes());
             if( keys.size() > 1 ) {
                 multi.push_back(i);
                 multiKeys.push_back(BSONObjSet());
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 64e86613296..3c64200f398 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -1451,7 +1451,10 @@ namespace mongo {
 
         // If the collection is capped, check if the new object will violate a unique index
        // constraint before allocating space.
-        if ( d->nIndexes && d->isCapped() && !god ) {
+        if (d->nIndexes &&
+            d->isCapped() &&
+            !god &&
+            !ignoreUniqueIndexes()) {
             checkNoIndexConflicts( d, BSONObj( reinterpret_cast<const char *>( obuf ) ) );
         }
 
diff --git a/src/mongo/db/repl/rs.h b/src/mongo/db/repl/rs.h
index b23b491dd50..6e45726ce88 100644
--- a/src/mongo/db/repl/rs.h
+++ b/src/mongo/db/repl/rs.h
@@ -670,4 +670,16 @@ namespace mongo {
         _hbinfo.health = 1.0;
     }
 
+    inline bool ignoreUniqueIndexes() {
+        if (theReplSet) {
+            // see SERVER-6671
+            MemberState ms = theReplSet->state();
+            if ((ms == MemberState::RS_STARTUP2) ||
+                (ms == MemberState::RS_RECOVERING)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
 }
diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp
index ce1d61656e7..92b21c7ad85 100644
--- a/src/mongo/db/repl/rs_sync.cpp
+++ b/src/mongo/db/repl/rs_sync.cpp
@@ -145,13 +145,6 @@ namespace replset {
                 }
             }
             catch (DBException& e) {
-                // Skip duplicate key exceptions.
-                // These are relatively common on initial sync: if a document is inserted
-                // early in the clone step, the insert will be replayed but the document
-                // will probably already have been cloned over.
-                if( e.getCode() == 11000 || e.getCode() == 11001 || e.getCode() == 12582) {
-                    return; // ignore
-                }
                 error() << "exception: " << e.what() << " on: " << it->toString() << endl;
                 fassertFailed(16361);
             }
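For context on the hunk above: a minimal sketch, under stated assumptions (applyOp, logAndAbort, and the sample error text are hypothetical stand-ins, not the MongoDB applier), of the shape the error handling takes after this change. Duplicate-key error codes are no longer filtered in the applier, because the index layer already tolerates them while the member is in STARTUP2 or RECOVERING; any exception that still escapes is treated as fatal.

#include <cstdlib>
#include <iostream>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for applying a single oplog entry; throws on failure.
void applyOp(const std::string& op) {
    if (op == "dup")
        throw std::runtime_error("E11000 duplicate key error");
}

// Hypothetical stand-in for fassertFailed(): log the failure and abort.
[[noreturn]] void logAndAbort(int msgid, const std::string& what) {
    std::cerr << "fatal assertion " << msgid << ": " << what << std::endl;
    std::abort();
}

void applyOne(const std::string& op) {
    try {
        applyOp(op);
    } catch (const std::exception& e) {
        // No special-casing of duplicate-key error codes here anymore; the
        // index layer suppresses them during STARTUP2/RECOVERING, so anything
        // reaching this point is unexpected and fatal.
        logAndAbort(16361, e.what());
    }
}

int main() {
    applyOne("ok");   // applies cleanly
    applyOne("dup");  // simulated duplicate on a caught-up node: aborts
    return 0;
}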