author     Louis Williams <louis.williams@mongodb.com>    2018-02-05 11:26:04 -0500
committer  Louis Williams <louis.williams@mongodb.com>    2018-02-12 10:42:48 -0500
commit     4b2f7438ec3494cdf5f4f996bc1ac955a0698f40 (patch)
tree       89e81377db85bc90f55b2029e258a04ce721a753
parent     b7a81ef8a47b6f375d014c15382646947a8f8db4 (diff)
SERVER-28594 non-atomic applyOps should log each operation individually
(cherry picked from commit 975804ed16ed446e32e7e73643188c9276686311)
-rw-r--r--  jstests/noPassthrough/non_atomic_apply_ops_logging.js      62
-rw-r--r--  jstests/replsets/libs/apply_ops_concurrent_non_atomic.js    9
-rw-r--r--  src/mongo/db/catalog/apply_ops.cpp                         266
-rw-r--r--  src/mongo/db/repl/oplog.cpp                                107
-rw-r--r--  src/mongo/db/repl/oplog.h                                    9
5 files changed, 265 insertions(+), 188 deletions(-)
diff --git a/jstests/noPassthrough/non_atomic_apply_ops_logging.js b/jstests/noPassthrough/non_atomic_apply_ops_logging.js
new file mode 100644
index 00000000000..f93e9d38189
--- /dev/null
+++ b/jstests/noPassthrough/non_atomic_apply_ops_logging.js
@@ -0,0 +1,62 @@
+// SERVER-28594 Ensure non-atomic ops are individually logged in applyOps
+// and atomic ops are collectively logged in applyOps.
+(function() {
+ "use strict";
+
+ let rst = new ReplSetTest({nodes: 1});
+ rst.startSet();
+ rst.initiate();
+
+ let primary = rst.getPrimary();
+ let testDB = primary.getDB("test");
+ let oplogColl = primary.getDB("local").oplog.rs;
+ let testCollName = "testColl";
+ let rerenamedCollName = "rerenamedColl";
+
+ testDB.runCommand({drop: testCollName});
+ testDB.runCommand({drop: rerenamedCollName});
+ assert.commandWorked(testDB.runCommand({create: testCollName}));
+ let testColl = testDB[testCollName];
+
+ // Ensure atomic apply ops logging only produces one oplog entry
+ // per call to apply ops and does not log individual operations
+ // separately.
+ assert.commandWorked(testDB.runCommand({
+ applyOps: [
+ {op: "i", ns: testColl.getFullName(), o: {_id: 1, a: "foo"}},
+ {op: "i", ns: testColl.getFullName(), o: {_id: 2, a: "bar"}}
+ ]
+ }));
+ assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
+ assert.eq(oplogColl.find({"op": "i"}).count(), 0);
+ // Ensure non-atomic apply ops logging produces an oplog entry for
+ // each operation in the apply ops call and no record of applyOps
+ // appears for these operations.
+ assert.commandWorked(testDB.runCommand({
+ applyOps: [
+ {
+ op: "c",
+ ns: "test.$cmd",
+ o: {
+ renameCollection: "test.testColl",
+ to: "test.renamedColl",
+ stayTemp: false,
+ dropTarget: false
+ }
+ },
+ {
+ op: "c",
+ ns: "test.$cmd",
+ o: {
+ renameCollection: "test.renamedColl",
+ to: "test." + rerenamedCollName,
+ stayTemp: false,
+ dropTarget: false
+ }
+ }
+ ]
+ }));
+ assert.eq(oplogColl.find({"o.renameCollection": {"$exists": true}}).count(), 2);
+ assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
+ rst.stopSet();
+})();
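Before rst.stopSet() above, the two assertions can be made concrete by inspecting the oplog directly. A minimal shell sketch (assuming a connection to the test's primary; generated fields such as timestamps are omitted from the comments):

    let oplog = db.getSiblingDB("local").oplog.rs;
    // Atomic batch: a single wrapping applyOps command entry; the two inserts do not
    // appear as standalone 'i' entries.
    printjson(oplog.find({"o.applyOps": {$exists: true}}).toArray());
    // Non-atomic batch: each renameCollection is logged as its own 'c' entry on
    // "test.$cmd", and no additional applyOps entry is written for the batch.
    printjson(oplog.find({"o.renameCollection": {$exists: true}}).toArray());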
diff --git a/jstests/replsets/libs/apply_ops_concurrent_non_atomic.js b/jstests/replsets/libs/apply_ops_concurrent_non_atomic.js
index 7e36cfa6677..5e170378de2 100644
--- a/jstests/replsets/libs/apply_ops_concurrent_non_atomic.js
+++ b/jstests/replsets/libs/apply_ops_concurrent_non_atomic.js
@@ -127,14 +127,11 @@ var ApplyOpsConcurrentNonAtomicTest = function(options) {
/**
* Returns number of insert operations reported by serverStatus.
- * Depending on the server version, applyOps may increment either 'opcounters' or
- * 'opcountersRepl':
- * since 3.6: 'opcounters.insert'
- * 3.4 and older: 'opcountersRepl.insert'
+ * In 3.4 and earlier, applyOps incremented 'opcountersRepl.insert' rather than 'opcounters.insert'.
+ * Non-atomic ops are now replicated as they are applied and count toward the global op counters.
*/
function getInsertOpCount(serverStatus) {
- return (serverStatus.version.substr(0, 3) === "3.4") ? serverStatus.opcountersRepl.insert
- : serverStatus.opcounters.insert;
+ return serverStatus.opcounters.insert;
}
/**
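The simplified counter above relies on non-atomic applyOps operations now incrementing the global op counters as they are applied. A minimal shell sketch of that behavior (collection name assumed; the command op forces non-atomic mode, as in the noPassthrough test above):

    db = db.getSiblingDB("test");
    let before = db.serverStatus().opcounters.insert;
    assert.commandWorked(db.runCommand({
        applyOps: [
            {op: "c", ns: "test.$cmd", o: {create: "opCounterColl"}},
            {op: "i", ns: "test.opCounterColl", o: {_id: 1}}
        ]
    }));
    let after = db.serverStatus().opcounters.insert;
    // With this change the insert applied via applyOps is visible in the global counter.
    assert.gte(after - before, 1);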
diff --git a/src/mongo/db/catalog/apply_ops.cpp b/src/mongo/db/catalog/apply_ops.cpp
index e380b5fb4c0..8b147f6855c 100644
--- a/src/mongo/db/catalog/apply_ops.cpp
+++ b/src/mongo/db/catalog/apply_ops.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
#include "mongo/db/matcher/matcher.h"
#include "mongo/db/op_observer.h"
@@ -50,6 +51,7 @@
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/service_context.h"
+#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
@@ -114,64 +116,86 @@ Status _applyOps(OperationContext* opCtx,
applyOpCmd.hasField("alwaysUpsert") ? applyOpCmd["alwaysUpsert"].trueValue() : true;
const bool haveWrappingWUOW = opCtx->lockState()->inAWriteUnitOfWork();
- {
- repl::UnreplicatedWritesBlock uwb(opCtx);
+ while (i.more()) {
+ BSONElement e = i.next();
+ const BSONObj& opObj = e.Obj();
- while (i.more()) {
- BSONElement e = i.next();
- const BSONObj& opObj = e.Obj();
+ // Ignore 'n' operations.
+ const char* opType = opObj["op"].valuestrsafe();
+ if (*opType == 'n')
+ continue;
- // Ignore 'n' operations.
- const char* opType = opObj["op"].valuestrsafe();
- if (*opType == 'n')
- continue;
+ const NamespaceString nss(opObj["ns"].String());
- const NamespaceString nss(opObj["ns"].String());
-
- // Need to check this here, or OldClientContext may fail an invariant.
- if (*opType != 'c' && !nss.isValid())
- return {ErrorCodes::InvalidNamespace, "invalid ns: " + nss.ns()};
+ // Need to check this here, or OldClientContext may fail an invariant.
+ if (*opType != 'c' && !nss.isValid())
+ return {ErrorCodes::InvalidNamespace, "invalid ns: " + nss.ns()};
- Status status(ErrorCodes::InternalError, "");
+ Status status(ErrorCodes::InternalError, "");
- if (haveWrappingWUOW) {
- invariant(opCtx->lockState()->isW());
- invariant(*opType != 'c');
+ if (haveWrappingWUOW) {
+ invariant(opCtx->lockState()->isW());
+ invariant(*opType != 'c');
+ auto db = dbHolder().get(opCtx, nss.ns());
+ if (!db) {
+ throw DBException(
+ "cannot create a database in atomic applyOps mode; will retry without "
+ "atomicity",
+ ErrorCodes::NamespaceNotFound);
+ }
- auto db = dbHolder().get(opCtx, nss.ns());
- if (!db) {
- throw DBException(
- "cannot create a database in atomic applyOps mode; will retry without "
- "atomicity",
- ErrorCodes::NamespaceNotFound);
- }
+ // When processing an update on a non-existent collection, applyOperation_inlock()
+ // returns UpdateOperationFailed on updates and allows the collection to be
+ // implicitly created on upserts. We detect both cases here and fail early with
+ // NamespaceNotFound.
+ auto collection = db->getCollection(nss);
+ if (!collection && !nss.isSystemDotIndexes() && (*opType == 'i' || *opType == 'u')) {
+ throw DBException(str::stream() << "cannot apply insert or update operation on "
+ "a non-existent namespace "
+ << nss.ns()
+ << ": "
+ << redact(opObj),
+ ErrorCodes::NamespaceNotFound);
+ }
- // When processing an update on a non-existent collection, applyOperation_inlock()
- // returns UpdateOperationFailed on updates and allows the collection to be
- // implicitly created on upserts. We detect both cases here and fail early with
- // NamespaceNotFound.
- auto collection = db->getCollection(nss);
- if (!collection && !nss.isSystemDotIndexes() &&
- (*opType == 'i' || *opType == 'u')) {
- throw DBException(str::stream() << "cannot apply insert or update operation on "
- "a non-existent namespace "
- << nss.ns()
- << ": "
- << redact(opObj),
- ErrorCodes::NamespaceNotFound);
- }
+ OldClientContext ctx(opCtx, nss.ns());
+ status = repl::applyOperation_inlock(opCtx, ctx.db(), opObj, alwaysUpsert);
+ if (!status.isOK())
+ return status;
+ logOpForDbHash(opCtx, nss.ns().c_str());
+ } else {
+ try {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ if (*opType == 'c') {
+ invariant(opCtx->lockState()->isW());
+ status = repl::applyCommand_inlock(opCtx, opObj, true);
+ } else {
+ const char* names[] = {"o", "ns"};
+ BSONElement fields[2];
+ opObj.getFields(2, names, fields);
+ BSONElement& fieldO = fields[0];
+ BSONElement& fieldNs = fields[1];
+ const StringData ns = fieldNs.valueStringData();
+ NamespaceString requestNss{ns};
+
+ if (nss.isSystemDotIndexes()) {
+ BSONObj indexSpec;
+ NamespaceString indexNss;
+ std::tie(indexSpec, indexNss) =
+ repl::prepForApplyOpsIndexInsert(fieldO, opObj, requestNss);
+ BSONObjBuilder command;
+ command.append("createIndexes", indexNss.coll());
+ {
+ BSONArrayBuilder indexes(command.subarrayStart("indexes"));
+ indexes.append(indexSpec);
+ indexes.doneFast();
+ }
+ const BSONObj commandObj = command.done();
- OldClientContext ctx(opCtx, nss.ns());
- status = repl::applyOperation_inlock(opCtx, ctx.db(), opObj, alwaysUpsert);
- if (!status.isOK())
- return status;
- logOpForDbHash(opCtx, nss.ns().c_str());
- } else {
- try {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- if (*opType == 'c') {
- invariant(opCtx->lockState()->isW());
- status = repl::applyCommand_inlock(opCtx, opObj, true);
+ DBDirectClient client(opCtx);
+ BSONObj infoObj;
+ client.runCommand(nsToDatabase(ns), commandObj, infoObj);
+ status = getStatusFromCommandResult(infoObj);
} else {
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
if (!autoColl.getCollection() && !nss.isSystemDotIndexes()) {
@@ -190,100 +214,53 @@ Status _applyOps(OperationContext* opCtx,
}
} else {
OldClientContext ctx(opCtx, nss.ns());
-
status = repl::applyOperation_inlock(
opCtx, ctx.db(), opObj, alwaysUpsert);
}
}
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "applyOps", nss.ns());
- } catch (const DBException& ex) {
- ab.append(false);
- result->append("applied", ++(*numApplied));
- result->append("code", ex.getCode());
- result->append("codeName",
- ErrorCodes::errorString(ErrorCodes::fromInt(ex.getCode())));
- result->append("errmsg", ex.what());
- result->append("results", ab.arr());
- return Status(ErrorCodes::UnknownError, ex.what());
- }
- WriteUnitOfWork wuow(opCtx);
- logOpForDbHash(opCtx, nss.ns().c_str());
- wuow.commit();
- }
-
- ab.append(status.isOK());
- if (!status.isOK()) {
- log() << "applyOps error applying: " << status;
- errors++;
- }
-
- (*numApplied)++;
-
- if (MONGO_FAIL_POINT(applyOpsPauseBetweenOperations)) {
- // While holding a database lock under MMAPv1, we would be implicitly holding the
- // flush lock here. This would prevent other threads from acquiring the global
- // lock or any database locks. We release all locks temporarily while the fail
- // point is enabled to allow other threads to make progress.
- boost::optional<Lock::TempRelease> release;
- auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
- if (storageEngine->isMmapV1() && !opCtx->lockState()->isW()) {
- release.emplace(opCtx->lockState());
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(applyOpsPauseBetweenOperations);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "applyOps", nss.ns());
+ } catch (const DBException& ex) {
+ ab.append(false);
+ result->append("applied", ++(*numApplied));
+ result->append("code", ex.getCode());
+ result->append("codeName",
+ ErrorCodes::errorString(ErrorCodes::fromInt(ex.getCode())));
+ result->append("errmsg", ex.what());
+ result->append("results", ab.arr());
+ return Status(ErrorCodes::UnknownError, ex.what());
}
+ WriteUnitOfWork wuow(opCtx);
+ logOpForDbHash(opCtx, nss.ns().c_str());
+ wuow.commit();
}
- result->append("applied", *numApplied);
- result->append("results", ab.arr());
- } // set replicatedWrites back to original value
-
- if (opCtx->writesAreReplicated()) {
- // We want this applied atomically on slaves
- // so we re-wrap without the pre-condition for speed
-
- std::string tempNS = str::stream() << dbName << ".$cmd";
-
- // TODO: possibly use mutable BSON to remove preCondition field
- // once it is available
- BSONObjBuilder cmdBuilder;
-
- for (auto elem : applyOpCmd) {
- auto name = elem.fieldNameStringData();
- if (name == kPreconditionFieldName)
- continue;
- if (name == "bypassDocumentValidation")
- continue;
- cmdBuilder.append(elem);
+ ab.append(status.isOK());
+ if (!status.isOK()) {
+ log() << "applyOps error applying: " << status;
+ errors++;
}
- const BSONObj cmdRewritten = cmdBuilder.done();
-
- auto opObserver = getGlobalServiceContext()->getOpObserver();
- invariant(opObserver);
- if (haveWrappingWUOW) {
- opObserver->onApplyOps(opCtx, tempNS, cmdRewritten);
- } else {
- // When executing applyOps outside of a wrapping WriteUnitOfWOrk, always logOp the
- // command regardless of whether the individial ops succeeded and rely on any
- // failures to also on secondaries. This isn't perfect, but it's what the command
- // has always done and is part of its "correct" behavior.
- while (true) {
- try {
- WriteUnitOfWork wunit(opCtx);
- opObserver->onApplyOps(opCtx, tempNS, cmdRewritten);
-
- wunit.commit();
- break;
- } catch (const WriteConflictException& wce) {
- LOG(2) << "WriteConflictException while logging applyOps command, retrying.";
- opCtx->recoveryUnit()->abandonSnapshot();
- continue;
- }
+ (*numApplied)++;
+
+ if (MONGO_FAIL_POINT(applyOpsPauseBetweenOperations)) {
+ // While holding a database lock under MMAPv1, we would be implicitly holding the
+ // flush lock here. This would prevent other threads from acquiring the global
+ // lock or any database locks. We release all locks temporarily while the fail
+ // point is enabled to allow other threads to make progress.
+ boost::optional<Lock::TempRelease> release;
+ auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+ if (storageEngine->isMmapV1() && !opCtx->lockState()->isW()) {
+ release.emplace(opCtx->lockState());
}
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET(applyOpsPauseBetweenOperations);
}
}
+ result->append("applied", *numApplied);
+ result->append("results", ab.arr());
+
if (errors != 0) {
return Status(ErrorCodes::UnknownError, "applyOps had one or more errors applying ops");
}
@@ -390,7 +367,36 @@ Status applyOps(OperationContext* opCtx,
BSONObjBuilder intermediateResult;
WriteUnitOfWork wunit(opCtx);
numApplied = 0;
- uassertStatusOK(_applyOps(opCtx, dbName, applyOpCmd, &intermediateResult, &numApplied));
+ {
+ // Suppress replication for atomic operations until end of applyOps.
+ repl::UnreplicatedWritesBlock uwb(opCtx);
+ uassertStatusOK(
+ _applyOps(opCtx, dbName, applyOpCmd, &intermediateResult, &numApplied));
+ }
+ // Generate oplog entry for all atomic ops collectively.
+ if (opCtx->writesAreReplicated()) {
+ // We want this applied atomically on slaves so we rewrite the oplog entry without
+ // the pre-condition for speed.
+
+ std::string tempNS = str::stream() << dbName << ".$cmd";
+
+ BSONObjBuilder cmdBuilder;
+
+ for (auto elem : applyOpCmd) {
+ auto name = elem.fieldNameStringData();
+ if (name == kPreconditionFieldName)
+ continue;
+ if (name == "bypassDocumentValidation")
+ continue;
+ cmdBuilder.append(elem);
+ }
+
+ const BSONObj cmdRewritten = cmdBuilder.done();
+
+ auto opObserver = getGlobalServiceContext()->getOpObserver();
+ invariant(opObserver);
+ opObserver->onApplyOps(opCtx, tempNS, cmdRewritten);
+ }
wunit.commit();
result->appendElements(intermediateResult.obj());
}
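With the restructuring above, replication is suppressed only for the atomic path, and the single applyOps oplog entry is written from the rewritten command object. A shell sketch (collection name assumed; connection to a replica-set primary) of the field stripping performed by the loop over applyOpCmd:

    let testDB = db.getSiblingDB("test");
    let oplogColl = db.getSiblingDB("local").oplog.rs;
    assert.writeOK(testDB.pcColl.insert({_id: 1, a: "foo"}));
    assert.commandWorked(testDB.runCommand({
        applyOps: [{op: "u", ns: "test.pcColl", o2: {_id: 1}, o: {$set: {a: "bar"}}}],
        preCondition: [{ns: "test.pcColl", q: {_id: 1}, res: {a: "foo"}}]
    }));
    let entry = oplogColl.find({"o.applyOps": {$exists: true}}).sort({$natural: -1}).limit(1).next();
    // The logged command keeps the applyOps array, but the preCondition (and any
    // bypassDocumentValidation) field has been stripped before logging.
    printjson(entry.o);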
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index fb5270bf370..3ed5e01b7c1 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -668,6 +668,45 @@ std::map<std::string, ApplyOpMetadata> opsMap = {
} // namespace
+std::pair<BSONObj, NamespaceString> prepForApplyOpsIndexInsert(const BSONElement& fieldO,
+ const BSONObj& op,
+ const NamespaceString& requestNss) {
+ uassert(ErrorCodes::NoSuchKey,
+ str::stream() << "Missing expected index spec in field 'o': " << op,
+ !fieldO.eoo());
+ uassert(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected object for index spec in field 'o': " << op,
+ fieldO.isABSONObj());
+ BSONObj indexSpec = fieldO.embeddedObject();
+
+ std::string indexNs;
+ uassertStatusOK(bsonExtractStringField(indexSpec, "ns", &indexNs));
+ const NamespaceString indexNss(indexNs);
+ uassert(ErrorCodes::InvalidNamespace,
+ str::stream() << "Invalid namespace in index spec: " << op,
+ indexNss.isValid());
+ uassert(ErrorCodes::InvalidNamespace,
+ str::stream() << "Database name mismatch for database (" << requestNss.db()
+ << ") while creating index: "
+ << op,
+ requestNss.db() == indexNss.db());
+
+ if (!indexSpec["v"]) {
+ // If the "v" field isn't present in the index specification, then we assume it is a
+ // v=1 index from an older version of MongoDB. This is because
+ // (1) we haven't built v=0 indexes as the default for a long time, and
+ // (2) the index version has been included in the corresponding oplog entry since
+ // v=2 indexes were introduced.
+ BSONObjBuilder bob;
+
+ bob.append("v", static_cast<int>(IndexVersion::kV1));
+ bob.appendElements(indexSpec);
+
+ indexSpec = bob.obj();
+ }
+
+ return std::make_pair(indexSpec, indexNss);
+}
// @return failure status if an update should have happened and the document DNE.
// See replset initial sync code.
Status applyOperation_inlock(OperationContext* txn,
@@ -693,6 +732,7 @@ Status applyOperation_inlock(OperationContext* txn,
o = fieldO.embeddedObject();
const StringData ns = fieldNs.valueStringData();
+ NamespaceString requestNss{ns};
BSONObj o2;
if (fieldO2.isABSONObj())
@@ -723,27 +763,11 @@ Status applyOperation_inlock(OperationContext* txn,
invariant(*opType != 'c'); // commands are processed in applyCommand_inlock()
if (*opType == 'i') {
- if (nsToCollectionSubstring(ns) == "system.indexes") {
- uassert(ErrorCodes::NoSuchKey,
- str::stream() << "Missing expected index spec in field 'o': " << op,
- !fieldO.eoo());
- uassert(ErrorCodes::TypeMismatch,
- str::stream() << "Expected object for index spec in field 'o': " << op,
- fieldO.isABSONObj());
- BSONObj indexSpec = fieldO.embeddedObject();
-
- std::string indexNs;
- uassertStatusOK(bsonExtractStringField(indexSpec, "ns", &indexNs));
- const NamespaceString indexNss(indexNs);
- uassert(ErrorCodes::InvalidNamespace,
- str::stream() << "Invalid namespace in index spec: " << op,
- indexNss.isValid());
- uassert(ErrorCodes::InvalidNamespace,
- str::stream() << "Database name mismatch for database ("
- << nsToDatabaseSubstring(ns)
- << ") while creating index: "
- << op,
- nsToDatabaseSubstring(ns) == indexNss.db());
+ if (requestNss.isSystemDotIndexes()) {
+ BSONObj indexSpec;
+ NamespaceString indexNss;
+ std::tie(indexSpec, indexNss) =
+ repl::prepForApplyOpsIndexInsert(fieldO, op, requestNss);
// Check if collection exists.
auto indexCollection = db->getCollection(indexNss);
@@ -754,20 +778,6 @@ Status applyOperation_inlock(OperationContext* txn,
opCounters->gotInsert();
- if (!indexSpec["v"]) {
- // If the "v" field isn't present in the index specification, then we assume it is a
- // v=1 index from an older version of MongoDB. This is because
- // (1) we haven't built v=0 indexes as the default for a long time, and
- // (2) the index version has been included in the corresponding oplog entry since
- // v=2 indexes were introduced.
- BSONObjBuilder bob;
-
- bob.append("v", static_cast<int>(IndexVersion::kV1));
- bob.appendElements(indexSpec);
-
- indexSpec = bob.obj();
- }
-
bool relaxIndexConstraints =
ReplicationCoordinator::get(txn)->shouldRelaxIndexConstraints(indexNss);
if (indexSpec["background"].trueValue()) {
@@ -868,13 +878,12 @@ Status applyOperation_inlock(OperationContext* txn,
BSONObjBuilder b;
b.append(o.getField("_id"));
- const NamespaceString requestNs(ns);
- UpdateRequest request(requestNs);
+ UpdateRequest request(requestNss);
request.setQuery(b.done());
request.setUpdates(o);
request.setUpsert();
- UpdateLifecycleImpl updateLifecycle(requestNs);
+ UpdateLifecycleImpl updateLifecycle(requestNss);
request.setLifecycle(&updateLifecycle);
UpdateResult res = update(txn, db, request);
@@ -899,13 +908,12 @@ Status applyOperation_inlock(OperationContext* txn,
str::stream() << "Failed to apply update due to missing _id: " << op.toString(),
updateCriteria.hasField("_id"));
- const NamespaceString requestNs(ns);
- UpdateRequest request(requestNs);
+ UpdateRequest request(requestNss);
request.setQuery(updateCriteria);
request.setUpdates(o);
request.setUpsert(upsert);
- UpdateLifecycleImpl updateLifecycle(requestNs);
+ UpdateLifecycleImpl updateLifecycle(requestNss);
request.setLifecycle(&updateLifecycle);
UpdateResult ur = update(txn, db, request);
@@ -959,7 +967,12 @@ Status applyOperation_inlock(OperationContext* txn,
o.hasField("_id"));
if (opType[1] == 0) {
- deleteObjects(txn, collection, ns, o, PlanExecutor::YIELD_MANUAL, /*justOne*/ valueB);
+ deleteObjects(txn,
+ collection,
+ requestNss.ns().c_str(),
+ o,
+ PlanExecutor::YIELD_MANUAL,
+ /*justOne*/ valueB);
} else
verify(opType[1] == 'b'); // "db" advertisement
if (incrementOpsAppliedStats) {
@@ -975,16 +988,6 @@ Status applyOperation_inlock(OperationContext* txn,
14825, str::stream() << "error in applyOperation : unknown opType " << *opType);
}
- // AuthorizationManager's logOp method registers a RecoveryUnit::Change and to do so we need
- // to a new WriteUnitOfWork, if we dont have a wrapping unit of work already. If we already
- // have a wrapping WUOW, the extra nexting is harmless. The logOp really should have been
- // done in the WUOW that did the write, but this won't happen because applyOps turns off
- // observers.
- WriteUnitOfWork wuow(txn);
- getGlobalAuthorizationManager()->logOp(
- txn, opType, ns.toString().c_str(), o, fieldO2.isABSONObj() ? &o2 : NULL);
- wuow.commit();
-
return Status::OK();
}
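The "v" defaulting that moved into prepForApplyOpsIndexInsert means an index spec with no version field is still built as a v=1 index. A shell sketch of that case (collection and index names assumed; the same defaulting applies whether the batch is applied atomically or not):

    db = db.getSiblingDB("test");
    assert.commandWorked(db.createCollection("legacyIndexColl"));
    // The spec deliberately omits "v", mimicking an entry written by an older version.
    assert.commandWorked(db.runCommand({
        applyOps: [{
            op: "i",
            ns: "test.system.indexes",
            o: {key: {a: 1}, name: "a_1", ns: "test.legacyIndexColl"}
        }]
    }));
    // The resulting index is expected to report v: 1.
    printjson(db.legacyIndexColl.getIndexes());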
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index b2078e93d28..f51aadb8dcf 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -104,6 +104,15 @@ void oplogCheckCloseDatabase(OperationContext* txn, Database* db);
using IncrementOpsAppliedStatsFn = stdx::function<void()>;
/**
+ * Take the object field of a BSONObj, the BSONObj, and the namespace of
+ * the operation and perform necessary validation to ensure the BSONObj is a
+ * properly-formed command to insert into system.indexes. This is only to
+ * be used for insert operations into system.indexes. It is called via applyOps.
+ */
+std::pair<BSONObj, NamespaceString> prepForApplyOpsIndexInsert(const BSONElement& fieldO,
+ const BSONObj& op,
+ const NamespaceString& requestNss);
+/**
* Take a non-command op and apply it locally
* Used for applying from an oplog
* @param inSteadyStateReplication convert some updates to upserts for idempotency reasons
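To make the validation performed by prepForApplyOpsIndexInsert concrete, here are illustrative 'i' operations on system.indexes (shell object syntax; names assumed) together with the outcome the uasserts in oplog.cpp above imply:

    let examples = [
        // Accepted: well-formed spec whose "ns" database matches the operation's database.
        {op: "i", ns: "test.system.indexes", o: {v: 2, key: {a: 1}, name: "a_1", ns: "test.coll"}},
        // Rejected with TypeMismatch: field 'o' is not an object.
        {op: "i", ns: "test.system.indexes", o: "not an index spec"},
        // Rejected with InvalidNamespace: the spec's database ("other") does not match the
        // database of the operation's namespace ("test").
        {op: "i", ns: "test.system.indexes", o: {v: 2, key: {a: 1}, name: "a_1", ns: "other.coll"}}
    ];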