From 3c2b2dbf124ff5c891f41ccc5a81eb340d32e4b0 Mon Sep 17 00:00:00 2001 From: Benety Goh Date: Sat, 5 Aug 2017 19:11:27 -0400 Subject: SERVER-29802 add applyOpsPauseBetweenOperations fail point (cherry picked from commit 948776e76550e4ddf4a3d0f50b729f15bef0a97b) --- src/mongo/db/catalog/apply_ops.cpp | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/mongo/db/catalog/apply_ops.cpp b/src/mongo/db/catalog/apply_ops.cpp index 9118c2b6d18..92c2be22772 100644 --- a/src/mongo/db/catalog/apply_ops.cpp +++ b/src/mongo/db/catalog/apply_ops.cpp @@ -50,10 +50,15 @@ #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/replication_coordinator_global.h" #include "mongo/db/service_context.h" +#include "mongo/util/fail_point_service.h" #include "mongo/util/log.h" namespace mongo { namespace { + +// If enabled, causes loop in _applyOps() to hang after applying current operation. +MONGO_FP_DECLARE(applyOpsPauseBetweenOperations); + /** * Return true iff the applyOpsCmd can be executed in a single WriteUnitOfWork. */ @@ -198,6 +203,19 @@ Status _applyOps(OperationContext* opCtx, } (*numApplied)++; + + if (MONGO_FAIL_POINT(applyOpsPauseBetweenOperations)) { + // While holding a database lock under MMAPv1, we would be implicitly holding the + // flush lock here. This would prevent other threads from acquiring the global + // lock or any database locks. We release all locks temporarily while the fail + // point is enabled to allow other threads to make progress. + boost::optional<Lock::TempRelease> release; + auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine(); + if (storageEngine->isMmapV1() && !opCtx->lockState()->isW()) { + release.emplace(opCtx->lockState()); + } + MONGO_FAIL_POINT_PAUSE_WHILE_SET(applyOpsPauseBetweenOperations); + } } result->append("applied", *numApplied); -- cgit v1.2.1