author     Andy Schwerin <schwerin@mongodb.com>   2015-12-11 13:17:28 -0500
committer  Andy Schwerin <schwerin@mongodb.com>   2015-12-11 13:17:28 -0500
commit     a0fe6521116f44557523c37b2b99866d41985629 (patch)
tree       e7890d260401b0972a52bb9bf2311c74ffb46fe3 /src/mongo
parent     0e0977a87c4ec30954b3071a9f437aaa7a8151e5 (diff)
download   mongo-a0fe6521116f44557523c37b2b99866d41985629.tar.gz
Revert "SERVER-21382 In chunk migration out-of-range deletes on the donor shard."
This reverts commit 0e0977a87c4ec30954b3071a9f437aaa7a8151e5.
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/cloner.cpp       |  9
-rw-r--r--  src/mongo/db/dbhelpers.cpp    |  3
-rw-r--r--  src/mongo/db/exec/delete.cpp  | 17
-rw-r--r--  src/mongo/db/repl/oplog.cpp   | 40
-rw-r--r--  src/mongo/db/repl/oplog.h     | 18
-rw-r--r--  src/mongo/s/d_migrate.cpp     | 15
-rw-r--r--  src/mongo/s/d_state.h         |  5
7 files changed, 19 insertions(+), 88 deletions(-)
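For context, this revert swaps delete logging back from the dedicated logDeleteOp() wrapper to the general logOp() entry point with opstr "d". Below is a minimal, self-contained sketch of the two call shapes involved; OperationContext and BSONObj here are illustrative stubs, not the real MongoDB types, and the bodies only print what they would log.

// A minimal sketch of the two delete-logging call shapes this revert switches
// between. Stub types stand in for the real MongoDB headers.
#include <iostream>
#include <string>

struct OperationContext {};            // stand-in for mongo::OperationContext
struct BSONObj { std::string json; };  // stand-in for mongo::BSONObj

// General entry point restored by the revert: deletes go through logOp() with
// opstr == "d", the same path as inserts ("i") and updates ("u").
void logOp(OperationContext* txn,
           const char* opstr,
           const char* ns,
           const BSONObj& obj,
           BSONObj* patt = nullptr,
           bool* b = nullptr,
           bool fromMigrate = false) {
    std::cout << "logOp '" << opstr << "' on " << ns << ": " << obj.json << "\n";
}

// Delete-specific wrapper removed by the revert; it carried an extra
// isInMigratingChunk flag so the donor shard could tell in-range deletes apart.
void logDeleteOp(OperationContext* txn,
                 const char* ns,
                 const BSONObj& idDoc,
                 bool fromMigrate,
                 bool isInMigratingChunk) {
    bool justOne = true;
    logOp(txn, "d", ns, idDoc, nullptr, &justOne, fromMigrate);
}

int main() {
    OperationContext txn;
    BSONObj id{"{ _id: 1 }"};
    logDeleteOp(&txn, "test.coll", id, /*fromMigrate=*/false, /*isInMigratingChunk=*/false);
    logOp(&txn, "d", "test.coll", id);  // call shape after the revert
    return 0;
}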
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index fdabfbf85ac..dff8e496473 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -673,14 +673,7 @@ bool Cloner::go(OperationContext* txn,
c->deleteDocument(txn, *it, true, true, opts.logForRepl ? &id : NULL);
if (opts.logForRepl)
- repl::logDeleteOp(txn,
- c->ns().ns().c_str(),
- id,
- false, // fromMigrate
- false); // isInMigratingChunk; must be false as
- // this code path is only used during initial
- // sync, and a node cannot be donating a chunk
- // and doing an initial sync simultaneously.
+ repl::logOp(txn, "d", c->ns().ns().c_str(), id);
wunit.commit();
}
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index b99ebf1f9ff..21e4a049162 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -449,11 +449,10 @@ long long Helpers::removeRange(OperationContext* txn,
if (callback)
callback->goingToDelete(obj);
- const auto inMigratingRange = isInMigratingChunk(ns, obj);
BSONObj deletedId;
collection->deleteDocument(txn, rloc, false, false, &deletedId);
// The above throws on failure, and so is not logged
- repl::logDeleteOp(txn, ns.c_str(), deletedId, fromMigrate, inMigratingRange);
+ repl::logOp(txn, "d", ns.c_str(), deletedId, 0, 0, fromMigrate);
wuow.commit();
numDeleted++;
}
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index ac6a57e9076..ec0fcc009fb 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -39,7 +39,6 @@
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/s/d_state.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -120,9 +119,6 @@ PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
}
}
- const bool inMigratingRange =
- isInMigratingChunk(_collection->ns().ns(), member->obj.value());
-
_ws->free(id);
BSONObj deletedDoc;
@@ -155,11 +151,14 @@ PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
log() << "Deleted object without id in collection " << _collection->ns()
<< ", not logging.";
} else {
- repl::logDeleteOp(_txn,
- _collection->ns().ns().c_str(),
- deletedDoc,
- _params.fromMigrate,
- inMigratingRange);
+ bool replJustOne = true;
+ repl::logOp(_txn,
+ "d",
+ _collection->ns().ns().c_str(),
+ deletedDoc,
+ 0,
+ &replJustOne,
+ _params.fromMigrate);
}
}
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 5e6e78418f1..1d131d0af1e 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -347,14 +347,13 @@ void logOpInitiate(OperationContext* txn, const BSONObj& obj) {
d delete / remove
u update
*/
-static void logOpInternal(OperationContext* txn,
- const char* opstr,
- const char* ns,
- const BSONObj& obj,
- BSONObj* patt,
- bool* b,
- bool fromMigrate,
- bool isDeleteInMigratingChunk) {
+void logOp(OperationContext* txn,
+ const char* opstr,
+ const char* ns,
+ const BSONObj& obj,
+ BSONObj* patt,
+ bool* b,
+ bool fromMigrate) {
if (getGlobalReplicationCoordinator()->isReplEnabled()) {
_logOp(txn, opstr, ns, 0, obj, patt, b, fromMigrate);
}
@@ -364,36 +363,13 @@ static void logOpInternal(OperationContext* txn,
// rollback-safe logOp listeners
//
getGlobalAuthorizationManager()->logOp(txn, opstr, ns, obj, patt, b);
- logOpForSharding(txn, opstr, ns, obj, patt, fromMigrate || !isDeleteInMigratingChunk);
+ logOpForSharding(txn, opstr, ns, obj, patt, fromMigrate);
logOpForDbHash(txn, ns);
if (strstr(ns, ".system.js")) {
Scope::storedFuncMod(txn);
}
}
-void logOp(OperationContext* txn,
- const char* opstr,
- const char* ns,
- const BSONObj& obj,
- BSONObj* patt,
- bool* b,
- bool fromMigrate) {
- if (MONGO_unlikely(opstr[0] == 'd' && opstr[1] == '\0')) {
- severe() << "logOp called with opstr == 'd'; use logDeleteOp instead";
- invariant(*opstr != 'd');
- }
- logOpInternal(txn, opstr, ns, obj, patt, b, fromMigrate, false);
-}
-
-void logDeleteOp(OperationContext* txn,
- const char* ns,
- const BSONObj& idDoc,
- bool fromMigrate,
- bool isInMigratingChunk) {
- bool justOne = true;
- logOpInternal(txn, "d", ns, idDoc, NULL, &justOne, fromMigrate, isInMigratingChunk);
-}
-
OpTime writeOpsToOplog(OperationContext* txn, const std::deque<BSONObj>& ops) {
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
OpTime lastOptime;
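The functional difference in oplog.cpp is the flag handed to logOpForSharding(): before the revert the donor passed fromMigrate || !isDeleteInMigratingChunk, after it passes fromMigrate alone. The following stand-alone sketch illustrates that flow; types and signatures are simplified stubs, and the parameter names mirror the diff while the bodies are illustrative only.

// Sketch only: how the flag reaching the sharding hook changes with the revert.
#include <iostream>

struct OperationContext {};
struct BSONObj {};

void logOpForSharding(OperationContext*, const char* opstr, const char* ns,
                      const BSONObj&, BSONObj*, bool notInActiveChunk) {
    std::cout << opstr << " on " << ns
              << (notInActiveChunk ? ": ignored by migration\n"
                                   : ": offered to the migration transfer log\n");
}

// Pre-revert behaviour: a delete outside the migrating chunk is marked as
// "not in the active chunk" and skipped by the migration machinery.
void logOpInternal_preRevert(OperationContext* txn, const char* opstr, const char* ns,
                             const BSONObj& obj, BSONObj* patt,
                             bool fromMigrate, bool isDeleteInMigratingChunk) {
    logOpForSharding(txn, opstr, ns, obj, patt, fromMigrate || !isDeleteInMigratingChunk);
}

// Post-revert behaviour: only fromMigrate (migration cleanup) suppresses the
// transfer; every other delete is considered by the migration source.
void logOp_postRevert(OperationContext* txn, const char* opstr, const char* ns,
                      const BSONObj& obj, BSONObj* patt, bool fromMigrate) {
    logOpForSharding(txn, opstr, ns, obj, patt, fromMigrate);
}

int main() {
    OperationContext txn;
    BSONObj obj;
    // Same out-of-range delete, before and after the revert:
    logOpInternal_preRevert(&txn, "d", "test.coll", obj, nullptr, false, false);
    logOp_postRevert(&txn, "d", "test.coll", obj, nullptr, false);
    return 0;
}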
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index 8f9038deaa1..f6f7bc3c82a 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -62,6 +62,7 @@ static const int OPLOG_VERSION = 2;
@param opstr
"i" insert
"u" update
+ "d" delete
"c" db cmd
"n" no-op
"db" declares presence of a database (ns is set to the db name + '.')
@@ -70,8 +71,6 @@ static const int OPLOG_VERSION = 2;
the object itself. In that case, we provide also 'fullObj' which is the
image of the object _after_ the mutation logged here was applied.
- Deletes are logged using logDeleteOp, below.
-
See _logOp() in oplog.cpp for more details.
*/
void logOp(OperationContext* txn,
@@ -82,21 +81,6 @@ void logOp(OperationContext* txn,
bool* b = NULL,
bool fromMigrate = false);
-/**
- * Log a single document delete to the local oplog.
- *
- * "ns" is the fully qualified collection name.
- * "idDoc" is a document containing the primary key (_id) for the deleted document.
- * "fromMigrate" is as in "logOp".
- * "isInMigratingChunk" should be set to the value that isInMigratingChunk() would have returned on
- * the deleted document, before it was deleted.
- */
-void logDeleteOp(OperationContext* txn,
- const char* ns,
- const BSONObj& idDoc,
- bool fromMigrate,
- bool isInMigratingChunk);
-
// Log an empty no-op operation to the local oplog
void logKeepalive(OperationContext* txn);
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 3c089f01593..8ec1b1e219c 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -388,17 +388,6 @@ public:
txn->recoveryUnit()->registerChange(new LogOpForShardingHandler(this, idObj, op));
}
- bool isInMigratingChunk(StringData ns, const BSONObj& doc) {
- dassert(txn->lockState()->isWriteLocked()); // Must have Global IX.
- if (!_active) {
- return false;
- }
- if (ns != _ns) {
- return false;
- }
- return isInRange(doc, _min, _max, _shardKeyPattern);
- }
-
/**
* Insert items from docIdList to a new array with the given fieldName in the given
* builder. If explode is true, the inserted object will be the full version of the
@@ -913,10 +902,6 @@ void logOpForSharding(OperationContext* txn,
migrateFromStatus.logOp(txn, opstr, ns, obj, patt, notInActiveChunk);
}
-bool isInMigratingChunk(StringData ns, const BSONObj& doc) {
- return migrateFromStatus.isInMigratingChunk(ns, doc);
-}
-
class TransferModsCommand : public ChunkCommandHelper {
public:
void help(stringstream& h) const {
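The removed MigrateFromStatus::isInMigratingChunk() reduced to a shard-key range test via isInRange(doc, _min, _max, _shardKeyPattern). Below is a deliberately simplified, self-contained sketch of that kind of check; std::map documents and integer key values stand in for BSON, whereas the real isInRange operates on BSONObj shard keys.

// Hedged sketch of a shard-key range membership test: extract the shard-key
// fields from the document and check min <= key < max (lexicographically).
#include <iostream>
#include <map>
#include <string>
#include <vector>

using Doc = std::map<std::string, int>;  // stand-in for a BSON document with int fields

// Pull the shard-key fields named in 'keyPattern' out of 'doc', in order.
std::vector<int> extractKey(const Doc& doc, const std::vector<std::string>& keyPattern) {
    std::vector<int> key;
    for (const auto& field : keyPattern)
        key.push_back(doc.at(field));
    return key;
}

// True when min <= key(doc) < max, i.e. the document lies in the migrating range.
bool isInRange(const Doc& doc, const Doc& min, const Doc& max,
               const std::vector<std::string>& keyPattern) {
    const auto k = extractKey(doc, keyPattern);
    return extractKey(min, keyPattern) <= k && k < extractKey(max, keyPattern);
}

int main() {
    const std::vector<std::string> shardKey{"x"};
    const Doc min{{"x", 0}}, max{{"x", 100}};
    std::cout << std::boolalpha
              << isInRange(Doc{{"x", 42}}, min, max, shardKey) << "\n"    // true
              << isInRange(Doc{{"x", 150}}, min, max, shardKey) << "\n";  // false
    return 0;
}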
diff --git a/src/mongo/s/d_state.h b/src/mongo/s/d_state.h
index 4843d900567..d798d17deb7 100644
--- a/src/mongo/s/d_state.h
+++ b/src/mongo/s/d_state.h
@@ -381,9 +381,4 @@ void logOpForSharding(OperationContext* txn,
const BSONObj& obj,
BSONObj* patt,
bool forMigrateCleanup);
-
-/**
- * Returns true if "doc" in collection "ns" is part of a chunk that is migrating from this node.
- */
-bool isInMigratingChunk(StringData ns, const BSONObj& doc);
}