author     Andy Schwerin <schwerin@mongodb.com>  2015-12-11 13:01:36 -0500
committer  Andy Schwerin <schwerin@mongodb.com>  2015-12-11 13:14:33 -0500
commit     0e0977a87c4ec30954b3071a9f437aaa7a8151e5 (patch)
tree       3360c2b714617564ffa26c9247530bb3689f1254
parent     0546be88fea63554f847423d40ccaaa0b8902c84 (diff)
download   mongo-0e0977a87c4ec30954b3071a9f437aaa7a8151e5.tar.gz
SERVER-21382 In chunk migration, ignore out-of-range deletes on the donor shard.
-rw-r--r--  src/mongo/db/cloner.cpp       |  9
-rw-r--r--  src/mongo/db/dbhelpers.cpp    |  3
-rw-r--r--  src/mongo/db/exec/delete.cpp  | 17
-rw-r--r--  src/mongo/db/repl/oplog.cpp   | 40
-rw-r--r--  src/mongo/db/repl/oplog.h     | 18
-rw-r--r--  src/mongo/s/d_migrate.cpp     | 15
-rw-r--r--  src/mongo/s/d_state.h         |  5
7 files changed, 88 insertions(+), 19 deletions(-)
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index dff8e496473..fdabfbf85ac 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -673,7 +673,14 @@ bool Cloner::go(OperationContext* txn,
c->deleteDocument(txn, *it, true, true, opts.logForRepl ? &id : NULL);
if (opts.logForRepl)
- repl::logOp(txn, "d", c->ns().ns().c_str(), id);
+ repl::logDeleteOp(txn,
+ c->ns().ns().c_str(),
+ id,
+ false, // fromMigrate
+ false); // isInMigratingChunk; must be false as
+ // this code path is only used during initial
+ // sync, and a node cannot be donating a chunk
+ // and doing an initial sync simultaneously.
wunit.commit();
}
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 21e4a049162..b99ebf1f9ff 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -449,10 +449,11 @@ long long Helpers::removeRange(OperationContext* txn,
if (callback)
callback->goingToDelete(obj);
+ const auto inMigratingRange = isInMigratingChunk(ns, obj);
BSONObj deletedId;
collection->deleteDocument(txn, rloc, false, false, &deletedId);
// The above throws on failure, and so is not logged
- repl::logOp(txn, "d", ns.c_str(), deletedId, 0, 0, fromMigrate);
+ repl::logDeleteOp(txn, ns.c_str(), deletedId, fromMigrate, inMigratingRange);
wuow.commit();
numDeleted++;
}
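Note on the dbhelpers.cpp hunk above: the migrating-range test needs the full document, so it is evaluated before deleteDocument() removes it, and only the cached result is consumed when the oplog entry is written. A minimal sketch of that ordering (not part of the patch; "collection", "rloc", and "obj" stand in for the locals of Helpers::removeRange):

    // Evaluate the shard-key range test while the document still exists.
    const bool inMigratingRange = isInMigratingChunk(ns, obj);

    BSONObj deletedId;
    collection->deleteDocument(txn, rloc, false, false, &deletedId);

    // Only the _id survives the delete, so the cached result is what gets logged.
    repl::logDeleteOp(txn, ns.c_str(), deletedId, fromMigrate, inMigratingRange);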
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index ec0fcc009fb..ac6a57e9076 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/s/d_state.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -119,6 +120,9 @@ PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
}
}
+ const bool inMigratingRange =
+ isInMigratingChunk(_collection->ns().ns(), member->obj.value());
+
_ws->free(id);
BSONObj deletedDoc;
@@ -151,14 +155,11 @@ PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
log() << "Deleted object without id in collection " << _collection->ns()
<< ", not logging.";
} else {
- bool replJustOne = true;
- repl::logOp(_txn,
- "d",
- _collection->ns().ns().c_str(),
- deletedDoc,
- 0,
- &replJustOne,
- _params.fromMigrate);
+ repl::logDeleteOp(_txn,
+ _collection->ns().ns().c_str(),
+ deletedDoc,
+ _params.fromMigrate,
+ inMigratingRange);
}
}
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 1d131d0af1e..5e6e78418f1 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -347,13 +347,14 @@ void logOpInitiate(OperationContext* txn, const BSONObj& obj) {
d delete / remove
u update
*/
-void logOp(OperationContext* txn,
- const char* opstr,
- const char* ns,
- const BSONObj& obj,
- BSONObj* patt,
- bool* b,
- bool fromMigrate) {
+static void logOpInternal(OperationContext* txn,
+ const char* opstr,
+ const char* ns,
+ const BSONObj& obj,
+ BSONObj* patt,
+ bool* b,
+ bool fromMigrate,
+ bool isDeleteInMigratingChunk) {
if (getGlobalReplicationCoordinator()->isReplEnabled()) {
_logOp(txn, opstr, ns, 0, obj, patt, b, fromMigrate);
}
@@ -363,13 +364,36 @@ void logOp(OperationContext* txn,
// rollback-safe logOp listeners
//
getGlobalAuthorizationManager()->logOp(txn, opstr, ns, obj, patt, b);
- logOpForSharding(txn, opstr, ns, obj, patt, fromMigrate);
+ // Only a delete of a document outside the migrating chunk (or a write that
+ // comes from migration cleanup itself) is hidden from the migration machinery.
+ const bool isDelete = (opstr[0] == 'd' && opstr[1] == '\0');
+ logOpForSharding(
+     txn, opstr, ns, obj, patt, fromMigrate || (isDelete && !isDeleteInMigratingChunk));
logOpForDbHash(txn, ns);
if (strstr(ns, ".system.js")) {
Scope::storedFuncMod(txn);
}
}
+void logOp(OperationContext* txn,
+ const char* opstr,
+ const char* ns,
+ const BSONObj& obj,
+ BSONObj* patt,
+ bool* b,
+ bool fromMigrate) {
+ if (MONGO_unlikely(opstr[0] == 'd' && opstr[1] == '\0')) {
+ severe() << "logOp called with opstr == 'd'; use logDeleteOp instead";
+ invariant(*opstr != 'd');
+ }
+ logOpInternal(txn, opstr, ns, obj, patt, b, fromMigrate, false);
+}
+
+void logDeleteOp(OperationContext* txn,
+ const char* ns,
+ const BSONObj& idDoc,
+ bool fromMigrate,
+ bool isInMigratingChunk) {
+ bool justOne = true;
+ logOpInternal(txn, "d", ns, idDoc, NULL, &justOne, fromMigrate, isInMigratingChunk);
+}
+
OpTime writeOpsToOplog(OperationContext* txn, const std::deque<BSONObj>& ops) {
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
OpTime lastOptime;
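For reference, the flag that logOpInternal forwards to logOpForSharding reduces to the decision below (a sketch mirroring the rewritten line above, not additional code from the patch):

    // notInActiveChunk == true means the migration machinery ignores the write.
    //   fromMigrate == true                 -> cleanup from a migration, never transferred
    //   delete outside the migrating chunk  -> the recipient never owned the document, skip
    //   delete inside the migrating chunk   -> must be forwarded to the recipient
    const bool notInActiveChunk = fromMigrate || (isDelete && !isDeleteInMigratingChunk);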
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index f6f7bc3c82a..8f9038deaa1 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -62,7 +62,6 @@ static const int OPLOG_VERSION = 2;
@param opstr
"i" insert
"u" update
- "d" delete
"c" db cmd
"n" no-op
"db" declares presence of a database (ns is set to the db name + '.')
@@ -71,6 +70,8 @@ static const int OPLOG_VERSION = 2;
the object itself. In that case, we provide also 'fullObj' which is the
image of the object _after_ the mutation logged here was applied.
+ Deletes are logged using logDeleteOp, below.
+
See _logOp() in oplog.cpp for more details.
*/
void logOp(OperationContext* txn,
@@ -81,6 +82,21 @@ void logOp(OperationContext* txn,
bool* b = NULL,
bool fromMigrate = false);
+/**
+ * Log a single document delete to the local oplog.
+ *
+ * "ns" is the fully qualified collection name.
+ * "idDoc" is a document containing the primary key (_id) for the deleted document.
+ * "fromMigrate" is as in "logOp".
+ * "isInMigratingChunk" should be set to the value that isInMigratingChunk() would have returned on
+ * the deleted document, before it was deleted.
+ */
+void logDeleteOp(OperationContext* txn,
+ const char* ns,
+ const BSONObj& idDoc,
+ bool fromMigrate,
+ bool isInMigratingChunk);
+
// Log an empty no-op operation to the local oplog
void logKeepalive(OperationContext* txn);
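A minimal caller sketch for logDeleteOp, assuming a delete path that still has the full document in hand ("txn", "coll", and "doc" are hypothetical locals, not identifiers from the patch):

    // Decide whether the document belongs to the chunk currently being donated
    // *before* deleting it; afterwards only the _id is available.
    const bool inChunk = isInMigratingChunk(coll->ns().ns(), doc);

    // ... delete "doc" from the collection ...

    repl::logDeleteOp(txn,
                      coll->ns().ns().c_str(),
                      doc["_id"].wrap(),              // {_id: <value>}
                      /*fromMigrate=*/false,
                      /*isInMigratingChunk=*/inChunk);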
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 8ec1b1e219c..3c089f01593 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -388,6 +388,17 @@ public:
txn->recoveryUnit()->registerChange(new LogOpForShardingHandler(this, idObj, op));
}
+ bool isInMigratingChunk(StringData ns, const BSONObj& doc) {
+ // Callers must hold a lock that keeps the migration state (_active, _ns,
+ // _min, _max, _shardKeyPattern) stable while it is examined here.
+ if (!_active) {
+ return false;
+ }
+ if (ns != _ns) {
+ return false;
+ }
+ return isInRange(doc, _min, _max, _shardKeyPattern);
+ }
+
/**
* Insert items from docIdList to a new array with the given fieldName in the given
* builder. If explode is true, the inserted object will be the full version of the
@@ -902,6 +913,10 @@ void logOpForSharding(OperationContext* txn,
migrateFromStatus.logOp(txn, opstr, ns, obj, patt, notInActiveChunk);
}
+bool isInMigratingChunk(StringData ns, const BSONObj& doc) {
+ return migrateFromStatus.isInMigratingChunk(ns, doc);
+}
+
class TransferModsCommand : public ChunkCommandHelper {
public:
void help(stringstream& h) const {
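To illustrate the semantics of the new helper (assumed values, not taken from the patch): with a shard key pattern of { x: 1 } and an active migration of the range [{ x: 10 }, { x: 20 }) out of collection "test.c", the checks above give:

    isInMigratingChunk("test.c", BSON("_id" << 1 << "x" << 15));  // true: inside [10, 20)
    isInMigratingChunk("test.c", BSON("_id" << 2 << "x" << 25));  // false: outside the range
    isInMigratingChunk("test.d", BSON("_id" << 3 << "x" << 15));  // false: different namespace
    // Any document maps to false while no migration is active (_active == false).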
diff --git a/src/mongo/s/d_state.h b/src/mongo/s/d_state.h
index d798d17deb7..4843d900567 100644
--- a/src/mongo/s/d_state.h
+++ b/src/mongo/s/d_state.h
@@ -381,4 +381,9 @@ void logOpForSharding(OperationContext* txn,
const BSONObj& obj,
BSONObj* patt,
bool forMigrateCleanup);
+
+/**
+ * Returns true if "doc" in collection "ns" is part of a chunk that is migrating from this node.
+ */
+bool isInMigratingChunk(StringData ns, const BSONObj& doc);
}