summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    Medha Potluri <medha.potluri@mongodb.com> 2019-07-25 15:42:15 -0400
committer Medha Potluri <medha.potluri@mongodb.com> 2019-07-26 16:58:12 -0400
commit   9dd3058b8a07745cba090d27122619cb50acc7f3 (patch)
tree     8e2791ff1990e1ff3409d673c27ec1b6b3880e95
parent   b5a6ad7f0a9ca638c0c3c06ccf9a1620739efdb3 (diff)
download mongo-9dd3058b8a07745cba090d27122619cb50acc7f3.tar.gz
SERVER-41284 Add failpoint to surface idempotency issues of transaction operations in initial sync
(cherry picked from commit ce1e63a5a7487531f0f53ec1b5570fd49d222dec)
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp | 11
1 file changed, 11 insertions, 0 deletions
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 886805244ec..09127b6c6ba 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -107,6 +107,9 @@ MONGO_FAIL_POINT_DEFINE(failAndHangInitialSync);
// Failpoint which fails initial sync before it applies the next batch of oplog entries.
MONGO_FAIL_POINT_DEFINE(failInitialSyncBeforeApplyingBatch);
+// Failpoint which fasserts if applying a batch fails.
+MONGO_FAIL_POINT_DEFINE(initialSyncFassertIfApplyingBatchFails);
+
namespace {
using namespace executor;
using CallbackArgs = executor::TaskExecutor::CallbackArgs;
@@ -1266,6 +1269,14 @@ void InitialSyncer::_multiApplierCallback(const Status& multiApplierStatus,
stdx::lock_guard<stdx::mutex> lock(_mutex);
auto status =
_checkForShutdownAndConvertStatus_inlock(multiApplierStatus, "error applying batch");
+
+ // Set to cause initial sync to fassert instead of restart if applying a batch fails, so that
+ // tests can be robust to network errors but not oplog idempotency errors.
+ if (MONGO_FAIL_POINT(initialSyncFassertIfApplyingBatchFails)) {
+ log() << "initialSyncFassertIfApplyingBatchFails fail point enabled.";
+ fassert(31210, status);
+ }
+
if (!status.isOK()) {
error() << "Failed to apply batch due to '" << redact(status) << "'";
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, status);