author      Billy Donahue <billy.donahue@mongodb.com>  2019-09-10 19:03:36 +0000
committer   evergreen <evergreen@mongodb.com>          2019-09-10 19:03:36 +0000
commit      5a0f718e1309a4484580d8038016d043ef3b887f (patch)
tree        7a331d57ba6e33e61f96ed69b2aa387003a039ab /src
parent      b9e29cd56ebc9aca06f68eeeda7c523d3dfd6d41 (diff)
download    mongo-5a0f718e1309a4484580d8038016d043ef3b887f.tar.gz
SERVER-43119 FailPoint cleanup
- Don't use MONGO_INITIALIZER to declare each fail point. We only need one init task in total: freeze and iterate the registry.
- remove MONGO_FAIL_POINT_DECLARE macro (extern)
- remove MONGO_FAIL_POINT_SHOULD_FAIL macro (FailPoint::shouldFail)
- remove MONGO_FAIL_POINT_BLOCK_IF (FailPoint::executeIf)
- remove MONGO_FAIL_POINT_BLOCK (FailPoint::execute)
- clean up FailPointRegistry and fail_point_service implementation.
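
In short, the macro-based fail point API is replaced by member functions on FailPoint itself. A minimal before/after sketch of the migration pattern, assembled from the hunks below; the fail point name "myFailPoint" and the predicate field are illustrative placeholders, not names from this commit:

    // Definition macro is unchanged:
    MONGO_FAIL_POINT_DEFINE(myFailPoint);

    // Header declaration: MONGO_FAIL_POINT_DECLARE(myFailPoint) is now simply
    extern FailPoint myFailPoint;

    // Check: MONGO_FAIL_POINT(myFailPoint) is now
    if (MONGO_unlikely(myFailPoint.shouldFail())) { /* ... */ }

    // Pause: MONGO_FAIL_POINT_PAUSE_WHILE_SET(myFailPoint) is now
    myFailPoint.pauseWhileSet();  // pauseWhileSet(opCtx) for the interruptible form

    // Scoped block with payload: MONGO_FAIL_POINT_BLOCK(myFailPoint, data) is now
    myFailPoint.execute([&](const BSONObj& data) {
        // runs only while the fail point is enabled; 'data' is its configured payload
    });

    // Conditional block: MONGO_FAIL_POINT_BLOCK_IF is now
    myFailPoint.executeIf(
        [&](const BSONObj& data) { /* action when enabled and the predicate holds */ },
        [&](const BSONObj& data) { return data.hasField("someField"); });
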
Diffstat (limited to 'src')
-rw-r--r--src/mongo/client/dbclient_connection.cpp4
-rw-r--r--src/mongo/client/replica_set_monitor.cpp13
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp4
-rw-r--r--src/mongo/db/catalog/collection_catalog_helper.cpp2
-rw-r--r--src/mongo/db/catalog/collection_impl.cpp78
-rw-r--r--src/mongo/db/catalog/collection_validation.cpp4
-rw-r--r--src/mongo/db/catalog/database_impl.cpp2
-rw-r--r--src/mongo/db/catalog/drop_collection.cpp12
-rw-r--r--src/mongo/db/catalog/drop_database.cpp8
-rw-r--r--src/mongo/db/catalog/index_key_validate.cpp2
-rw-r--r--src/mongo/db/catalog/multi_index_block.cpp31
-rw-r--r--src/mongo/db/catalog/multi_index_block.h2
-rw-r--r--src/mongo/db/catalog/rename_collection.cpp6
-rw-r--r--src/mongo/db/catalog/throttle_cursor.cpp2
-rw-r--r--src/mongo/db/catalog_raii.cpp6
-rw-r--r--src/mongo/db/cloner.cpp2
-rw-r--r--src/mongo/db/commands.cpp68
-rw-r--r--src/mongo/db/commands.h4
-rw-r--r--src/mongo/db/commands/create_indexes.cpp14
-rw-r--r--src/mongo/db/commands/drop_indexes.cpp2
-rw-r--r--src/mongo/db/commands/feature_compatibility_version.cpp5
-rw-r--r--src/mongo/db/commands/generic_servers.cpp14
-rw-r--r--src/mongo/db/commands/getmore_cmd.cpp21
-rw-r--r--src/mongo/db/commands/txn_cmds.cpp11
-rw-r--r--src/mongo/db/commands/validate.cpp2
-rw-r--r--src/mongo/db/concurrency/lock_state.cpp5
-rw-r--r--src/mongo/db/concurrency/write_conflict_exception.h5
-rw-r--r--src/mongo/db/curop.cpp4
-rw-r--r--src/mongo/db/curop_failpoint_helpers.cpp62
-rw-r--r--src/mongo/db/curop_failpoint_helpers.h2
-rw-r--r--src/mongo/db/db.cpp2
-rw-r--r--src/mongo/db/exec/update_stage.cpp7
-rw-r--r--src/mongo/db/index/index_build_interceptor.cpp13
-rw-r--r--src/mongo/db/index_builds_coordinator.cpp12
-rw-r--r--src/mongo/db/index_builds_coordinator.h6
-rw-r--r--src/mongo/db/index_builds_coordinator_mongod.cpp2
-rw-r--r--src/mongo/db/key_generator.cpp2
-rw-r--r--src/mongo/db/keys_collection_manager.cpp10
-rw-r--r--src/mongo/db/matcher/expression.h4
-rw-r--r--src/mongo/db/matcher/expression_expr.cpp2
-rw-r--r--src/mongo/db/op_observer_impl.cpp14
-rw-r--r--src/mongo/db/operation_context.cpp14
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp12
-rw-r--r--src/mongo/db/pipeline/document_source_cursor.cpp2
-rw-r--r--src/mongo/db/pipeline/document_source_exchange.cpp2
-rw-r--r--src/mongo/db/pipeline/pipeline.cpp2
-rw-r--r--src/mongo/db/pipeline/sharded_agg_helpers.cpp4
-rw-r--r--src/mongo/db/query/find.cpp11
-rw-r--r--src/mongo/db/query/find_common.h10
-rw-r--r--src/mongo/db/query/plan_executor_impl.cpp9
-rw-r--r--src/mongo/db/query/plan_yield_policy.cpp27
-rw-r--r--src/mongo/db/repair_database_and_check_version.cpp4
-rw-r--r--src/mongo/db/repl/apply_ops.cpp4
-rw-r--r--src/mongo/db/repl/bgsync.cpp8
-rw-r--r--src/mongo/db/repl/collection_cloner.cpp64
-rw-r--r--src/mongo/db/repl/database_cloner.cpp29
-rw-r--r--src/mongo/db/repl/initial_syncer.cpp42
-rw-r--r--src/mongo/db/repl/initial_syncer.h6
-rw-r--r--src/mongo/db/repl/initial_syncer_test.cpp4
-rw-r--r--src/mongo/db/repl/oplog.cpp10
-rw-r--r--src/mongo/db/repl/oplog_fetcher.cpp33
-rw-r--r--src/mongo/db/repl/oplog_fetcher.h2
-rw-r--r--src/mongo/db/repl/repl_set_commands.cpp6
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.cpp4
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp14
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp6
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp8
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp6
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp8
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_test.cpp6
-rw-r--r--src/mongo/db/repl/replication_coordinator_test_fixture.cpp4
-rw-r--r--src/mongo/db/repl/replication_info.cpp2
-rw-r--r--src/mongo/db/repl/roll_back_local_operations.h6
-rw-r--r--src/mongo/db/repl/rollback_impl.cpp5
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp17
-rw-r--r--src/mongo/db/repl/sync_tail.cpp21
-rw-r--r--src/mongo/db/repl/topology_coordinator.cpp6
-rw-r--r--src/mongo/db/repl/transaction_oplog_application.cpp7
-rw-r--r--src/mongo/db/s/balancer/balancer.cpp20
-rw-r--r--src/mongo/db/s/balancer/balancer_policy.cpp2
-rw-r--r--src/mongo/db/s/collection_range_deleter.cpp4
-rw-r--r--src/mongo/db/s/collection_sharding_state.cpp2
-rw-r--r--src/mongo/db/s/config/configsvr_drop_collection_command.cpp6
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp2
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp4
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp42
-rw-r--r--src/mongo/db/s/metadata_manager.cpp2
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp20
-rw-r--r--src/mongo/db/s/migration_source_manager.cpp8
-rw-r--r--src/mongo/db/s/move_chunk_command.cpp12
-rw-r--r--src/mongo/db/s/shard_filtering_metadata_refresh.cpp4
-rw-r--r--src/mongo/db/s/shard_server_catalog_cache_loader.cpp5
-rw-r--r--src/mongo/db/s/sharding_initialization_mongod.cpp2
-rw-r--r--src/mongo/db/s/shardsvr_rename_collection.cpp5
-rw-r--r--src/mongo/db/s/transaction_coordinator.cpp9
-rw-r--r--src/mongo/db/s/transaction_coordinator_futures_util.cpp8
-rw-r--r--src/mongo/db/s/transaction_coordinator_util.cpp33
-rw-r--r--src/mongo/db/s/txn_two_phase_commit_cmds.cpp13
-rw-r--r--src/mongo/db/server_options_helpers.cpp2
-rw-r--r--src/mongo/db/service_entry_point_common.cpp41
-rw-r--r--src/mongo/db/service_entry_point_common.h6
-rw-r--r--src/mongo/db/snapshot_window_util.cpp4
-rw-r--r--src/mongo/db/storage/flow_control.cpp4
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp4
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h18
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h11
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp4
-rw-r--r--src/mongo/db/storage/write_unit_of_work.cpp2
-rw-r--r--src/mongo/db/transaction_participant.cpp24
-rw-r--r--src/mongo/db/transaction_participant_test.cpp4
-rw-r--r--src/mongo/db/ttl.cpp4
-rw-r--r--src/mongo/db/write_concern.cpp2
-rw-r--r--src/mongo/executor/network_interface.h4
-rw-r--r--src/mongo/executor/network_interface_tl.cpp4
-rw-r--r--src/mongo/executor/scoped_task_executor.cpp6
-rw-r--r--src/mongo/executor/scoped_task_executor.h6
-rw-r--r--src/mongo/executor/scoped_task_executor_test.cpp2
-rw-r--r--src/mongo/executor/thread_pool_task_executor.cpp10
-rw-r--r--src/mongo/executor/thread_pool_task_executor.h4
-rw-r--r--src/mongo/executor/thread_pool_task_executor_test.cpp12
-rw-r--r--src/mongo/rpc/op_msg_integration_test.cpp4
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager.cpp5
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.cpp2
-rw-r--r--src/mongo/s/commands/cluster_refine_collection_shard_key_cmd.cpp5
-rw-r--r--src/mongo/s/commands/cluster_write_cmd.cpp5
-rw-r--r--src/mongo/s/commands/commands_public.cpp2
-rw-r--r--src/mongo/s/commands/document_shard_key_update_util.cpp4
-rw-r--r--src/mongo/s/commands/strategy.cpp22
-rw-r--r--src/mongo/s/query/cluster_aggregate.cpp4
-rw-r--r--src/mongo/s/query/cluster_find.cpp6
-rw-r--r--src/mongo/s/server.cpp2
-rw-r--r--src/mongo/s/transaction_router.cpp4
-rw-r--r--src/mongo/scripting/engine.cpp2
-rw-r--r--src/mongo/transport/session_asio.h4
-rw-r--r--src/mongo/transport/transport_layer_asio.cpp2
-rw-r--r--src/mongo/transport/transport_layer_asio.h4
-rw-r--r--src/mongo/util/diagnostic_info.cpp2
-rw-r--r--src/mongo/util/diagnostic_info.h2
-rw-r--r--src/mongo/util/fail_point.cpp28
-rw-r--r--src/mongo/util/fail_point.h309
-rw-r--r--src/mongo/util/fail_point_registry.cpp43
-rw-r--r--src/mongo/util/fail_point_registry.h6
-rw-r--r--src/mongo/util/fail_point_service.cpp35
-rw-r--r--src/mongo/util/fail_point_service.h28
-rw-r--r--src/mongo/util/fail_point_test.cpp74
-rw-r--r--src/mongo/util/net/sock.cpp6
-rw-r--r--src/mongo/util/net/sock_test.cpp2
-rw-r--r--src/mongo/util/net/ssl/detail/io.hpp4
150 files changed, 919 insertions, 978 deletions
diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp
index bc65eb35938..0d058396c0f 100644
--- a/src/mongo/client/dbclient_connection.cpp
+++ b/src/mongo/client/dbclient_connection.cpp
@@ -577,7 +577,7 @@ void DBClientConnection::say(Message& toSend, bool isRetry, string* actualServer
toSend.header().setId(nextMessageId());
toSend.header().setResponseToMsgId(0);
- if (!MONGO_FAIL_POINT(dbClientConnectionDisableChecksum)) {
+ if (!MONGO_unlikely(dbClientConnectionDisableChecksum.shouldFail())) {
#ifdef MONGO_CONFIG_SSL
if (!SSLPeerInfo::forSession(_session).isTLS) {
OpMsg::appendChecksum(&toSend);
@@ -627,7 +627,7 @@ bool DBClientConnection::call(Message& toSend,
toSend.header().setId(nextMessageId());
toSend.header().setResponseToMsgId(0);
- if (!MONGO_FAIL_POINT(dbClientConnectionDisableChecksum)) {
+ if (!MONGO_unlikely(dbClientConnectionDisableChecksum.shouldFail())) {
#ifdef MONGO_CONFIG_SSL
if (!SSLPeerInfo::forSession(_session).isTLS) {
OpMsg::appendChecksum(&toSend);
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index 77d0788466a..fd79717719c 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -180,13 +180,12 @@ const Seconds ReplicaSetMonitor::kDefaultFindHostTimeout(15);
bool ReplicaSetMonitor::useDeterministicHostSelection = false;
Seconds ReplicaSetMonitor::getDefaultRefreshPeriod() {
- MONGO_FAIL_POINT_BLOCK_IF(modifyReplicaSetMonitorDefaultRefreshPeriod,
- data,
- [&](const BSONObj& data) { return data.hasField("period"); }) {
- return Seconds{data.getData().getIntField("period")};
- }
-
- return kDefaultRefreshPeriod;
+ Seconds r = kDefaultRefreshPeriod;
+ static constexpr auto kPeriodField = "period"_sd;
+ modifyReplicaSetMonitorDefaultRefreshPeriod.executeIf(
+ [&r](const BSONObj& data) { r = Seconds{data.getIntField(kPeriodField)}; },
+ [](const BSONObj& data) { return data.hasField(kPeriodField); });
+ return r;
}
ReplicaSetMonitor::ReplicaSetMonitor(const SetStatePtr& initialState) : _state(initialState) {}
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index a1f87acd21e..699acb8d407 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -267,7 +267,7 @@ Status _collModInternal(OperationContext* opCtx,
Database* const db = autoDb.getDb();
Collection* coll = db ? db->getCollection(opCtx, nss) : nullptr;
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterDatabaseLock);
+ hangAfterDatabaseLock.pauseWhileSet();
// May also modify a view instead of a collection.
boost::optional<ViewDefinition> view;
@@ -370,7 +370,7 @@ Status _collModInternal(OperationContext* opCtx,
opCtx->recoveryUnit()->registerChange(
std::make_unique<CollModResultChange>(oldExpireSecs, newExpireSecs, result));
- if (MONGO_FAIL_POINT(assertAfterIndexUpdate)) {
+ if (MONGO_unlikely(assertAfterIndexUpdate.shouldFail())) {
log() << "collMod - assertAfterIndexUpdate fail point enabled.";
uasserted(50970, "trigger rollback after the index update");
}
diff --git a/src/mongo/db/catalog/collection_catalog_helper.cpp b/src/mongo/db/catalog/collection_catalog_helper.cpp
index 620a9a2a08a..1085d7edb51 100644
--- a/src/mongo/db/catalog/collection_catalog_helper.cpp
+++ b/src/mongo/db/catalog/collection_catalog_helper.cpp
@@ -76,7 +76,7 @@ void forEachCollectionFromDb(OperationContext* opCtx,
if (!callback(collection))
break;
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeGettingNextCollection);
+ hangBeforeGettingNextCollection.pauseWhileSet();
}
}
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index 44cc050ce07..dae155bdb28 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -104,19 +104,21 @@ MONGO_FAIL_POINT_DEFINE(failAfterBulkLoadDocInsert);
* Otherwise, the function should fail and return early with the error Status.
*/
Status checkFailCollectionInsertsFailPoint(const NamespaceString& ns, const BSONObj& firstDoc) {
- MONGO_FAIL_POINT_BLOCK(failCollectionInserts, extraData) {
- const BSONObj& data = extraData.getData();
- const auto collElem = data["collectionNS"];
- // If the failpoint specifies no collection or matches the existing one, fail.
- if (!collElem || ns.ns() == collElem.str()) {
+ Status s = Status::OK();
+ failCollectionInserts.executeIf(
+ [&](const BSONObj& data) {
const std::string msg = str::stream()
<< "Failpoint (failCollectionInserts) has been enabled (" << data
<< "), so rejecting insert (first doc): " << firstDoc;
log() << msg;
- return {ErrorCodes::FailPointEnabled, msg};
- }
- }
- return Status::OK();
+ s = {ErrorCodes::FailPointEnabled, msg};
+ },
+ [&](const BSONObj& data) {
+ // If the failpoint specifies no collection or matches the existing one, fail.
+ const auto collElem = data["collectionNS"];
+ return !collElem || ns.ns() == collElem.str();
+ });
+ return s;
}
// Uses the collator factory to convert the BSON representation of a collator to a
@@ -186,10 +188,6 @@ StatusWith<CollectionImpl::ValidationAction> _parseValidationAction(StringData n
} // namespace
-using std::string;
-using std::unique_ptr;
-using std::vector;
-
CollectionImpl::CollectionImpl(OperationContext* opCtx,
const NamespaceString& nss,
UUID uuid,
@@ -361,8 +359,8 @@ Status CollectionImpl::insertDocumentsForOplog(OperationContext* opCtx,
Status CollectionImpl::insertDocuments(OperationContext* opCtx,
- const vector<InsertStatement>::const_iterator begin,
- const vector<InsertStatement>::const_iterator end,
+ const std::vector<InsertStatement>::const_iterator begin,
+ const std::vector<InsertStatement>::const_iterator end,
OpDebug* opDebug,
bool fromMigrate) {
@@ -401,25 +399,27 @@ Status CollectionImpl::insertDocuments(OperationContext* opCtx,
opCtx->recoveryUnit()->onCommit(
[this](boost::optional<Timestamp>) { notifyCappedWaitersIfNeeded(); });
- MONGO_FAIL_POINT_BLOCK(hangAfterCollectionInserts, extraData) {
- const BSONObj& data = extraData.getData();
- const auto collElem = data["collectionNS"];
- const auto firstIdElem = data["first_id"];
- // If the failpoint specifies no collection or matches the existing one, hang.
- if ((!collElem || _ns.ns() == collElem.str()) &&
- (!firstIdElem ||
- (begin != end && firstIdElem.type() == mongo::String &&
- begin->doc["_id"].str() == firstIdElem.str()))) {
- string whenFirst =
- firstIdElem ? (string(" when first _id is ") + firstIdElem.str()) : "";
- while (MONGO_FAIL_POINT(hangAfterCollectionInserts)) {
- log() << "hangAfterCollectionInserts fail point enabled for " << _ns << whenFirst
- << ". Blocking until fail point is disabled.";
- mongo::sleepsecs(1);
- opCtx->checkForInterrupt();
+ hangAfterCollectionInserts.executeIf(
+ [&](const BSONObj& data) {
+ const auto& firstIdElem = data["first_id"];
+ std::string whenFirst;
+ if (firstIdElem) {
+ whenFirst += " when first _id is ";
+ whenFirst += firstIdElem.str();
}
- }
- }
+ log() << "hangAfterCollectionInserts fail point enabled for " << _ns << whenFirst
+ << ". Blocking until fail point is disabled.";
+ hangAfterCollectionInserts.pauseWhileSet(opCtx);
+ },
+ [&](const BSONObj& data) {
+ const auto& collElem = data["collectionNS"];
+ const auto& firstIdElem = data["first_id"];
+ // If the failpoint specifies no collection or matches the existing one, hang.
+ return (!collElem || _ns.ns() == collElem.str()) &&
+ (!firstIdElem ||
+ (begin != end && firstIdElem.type() == mongo::String &&
+ begin->doc["_id"].str() == firstIdElem.str()));
+ });
return Status::OK();
}
@@ -428,7 +428,7 @@ Status CollectionImpl::insertDocument(OperationContext* opCtx,
const InsertStatement& docToInsert,
OpDebug* opDebug,
bool fromMigrate) {
- vector<InsertStatement> docs;
+ std::vector<InsertStatement> docs;
docs.push_back(docToInsert);
return insertDocuments(opCtx, docs.begin(), docs.end(), opDebug, fromMigrate);
}
@@ -459,13 +459,13 @@ Status CollectionImpl::insertDocumentForBulkLoader(OperationContext* opCtx,
status = onRecordInserted(loc.getValue());
- if (MONGO_FAIL_POINT(failAfterBulkLoadDocInsert)) {
+ if (MONGO_unlikely(failAfterBulkLoadDocInsert.shouldFail())) {
log() << "Failpoint failAfterBulkLoadDocInsert enabled for " << _ns.ns()
<< ". Throwing WriteConflictException.";
throw WriteConflictException();
}
- vector<InsertStatement> inserts;
+ std::vector<InsertStatement> inserts;
OplogSlot slot;
// Fetch a new optime now, if necessary.
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
@@ -485,8 +485,8 @@ Status CollectionImpl::insertDocumentForBulkLoader(OperationContext* opCtx,
}
Status CollectionImpl::_insertDocuments(OperationContext* opCtx,
- const vector<InsertStatement>::const_iterator begin,
- const vector<InsertStatement>::const_iterator end,
+ const std::vector<InsertStatement>::const_iterator begin,
+ const std::vector<InsertStatement>::const_iterator end,
OpDebug* opDebug) {
dassert(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_IX));
@@ -785,7 +785,7 @@ Status CollectionImpl::truncate(OperationContext* opCtx) {
invariant(_indexCatalog->numIndexesInProgress(opCtx) == 0);
// 1) store index specs
- vector<BSONObj> indexSpecs;
+ std::vector<BSONObj> indexSpecs;
{
std::unique_ptr<IndexCatalog::IndexIterator> ii =
_indexCatalog->getIndexIterator(opCtx, false);
diff --git a/src/mongo/db/catalog/collection_validation.cpp b/src/mongo/db/catalog/collection_validation.cpp
index bfd16f5e1a2..e7c0b469b71 100644
--- a/src/mongo/db/catalog/collection_validation.cpp
+++ b/src/mongo/db/catalog/collection_validation.cpp
@@ -565,11 +565,11 @@ Status validate(OperationContext* opCtx,
//
// Only useful for background validation because we hold an intent lock instead of an
// exclusive lock, and thus allow concurrent operations.
- if (MONGO_FAIL_POINT(pauseCollectionValidationWithLock)) {
+ if (MONGO_unlikely(pauseCollectionValidationWithLock.shouldFail())) {
invariant(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_IX));
_validationIsPausedForTest.store(true);
log() << "Failpoint 'pauseCollectionValidationWithLock' activated.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(pauseCollectionValidationWithLock);
+ pauseCollectionValidationWithLock.pauseWhileSet();
_validationIsPausedForTest.store(false);
}
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index c916effda65..df1f9de3931 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -685,7 +685,7 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
}
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeLoggingCreateCollection);
+ hangBeforeLoggingCreateCollection.pauseWhileSet();
opCtx->getServiceContext()->getOpObserver()->onCreateCollection(
opCtx, collection, nss, optionsWithUUID, fullIdIndexSpec, createOplogSlot);
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index b5f30ae2a3e..e3f9f9795ab 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -67,10 +67,10 @@ Status _dropView(OperationContext* opCtx,
// Operations all lock system.views in the end to prevent deadlock.
Lock::CollectionLock systemViewsLock(opCtx, db->getSystemViewsName(), MODE_X);
- if (MONGO_FAIL_POINT(hangDuringDropCollection)) {
+ if (MONGO_unlikely(hangDuringDropCollection.shouldFail())) {
log() << "hangDuringDropCollection fail point enabled. Blocking until fail point is "
"disabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangDuringDropCollection);
+ hangDuringDropCollection.pauseWhileSet();
}
AutoStatsTracker statsTracker(opCtx,
@@ -108,10 +108,10 @@ Status _dropCollection(OperationContext* opCtx,
return Status(ErrorCodes::NamespaceNotFound, "ns not found");
}
- if (MONGO_FAIL_POINT(hangDuringDropCollection)) {
+ if (MONGO_unlikely(hangDuringDropCollection.shouldFail())) {
log() << "hangDuringDropCollection fail point enabled. Blocking until fail point is "
"disabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangDuringDropCollection);
+ hangDuringDropCollection.pauseWhileSet();
}
AutoStatsTracker statsTracker(opCtx,
@@ -156,9 +156,9 @@ Status dropCollection(OperationContext* opCtx,
log() << "CMD: drop " << collectionName;
}
- if (MONGO_FAIL_POINT(hangDropCollectionBeforeLockAcquisition)) {
+ if (MONGO_unlikely(hangDropCollectionBeforeLockAcquisition.shouldFail())) {
log() << "Hanging drop collection before lock acquisition while fail point is set";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangDropCollectionBeforeLockAcquisition);
+ hangDropCollectionBeforeLockAcquisition.pauseWhileSet();
}
return writeConflictRetry(opCtx, "drop", collectionName.ns(), [&] {
AutoGetDb autoDb(opCtx, collectionName.db(), MODE_IX);
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 60b507f6d9d..d81908b9d48 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -85,10 +85,10 @@ void _finishDropDatabase(OperationContext* opCtx,
log() << "dropDatabase " << dbName << " - dropped " << numCollections << " collection(s)";
log() << "dropDatabase " << dbName << " - finished";
- if (MONGO_FAIL_POINT(dropDatabaseHangBeforeLog)) {
+ if (MONGO_unlikely(dropDatabaseHangBeforeLog.shouldFail())) {
log() << "dropDatabase - fail point dropDatabaseHangBeforeLog enabled. "
"Blocking until fail point is disabled. ";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(dropDatabaseHangBeforeLog);
+ dropDatabaseHangBeforeLog.pauseWhileSet();
}
writeConflictRetry(opCtx, "dropDatabase_database", dbName, [&] {
@@ -292,10 +292,10 @@ Status dropDatabase(OperationContext* opCtx, const std::string& dbName) {
<< result.duration << ". dropping database";
}
- if (MONGO_FAIL_POINT(dropDatabaseHangAfterAllCollectionsDrop)) {
+ if (MONGO_unlikely(dropDatabaseHangAfterAllCollectionsDrop.shouldFail())) {
log() << "dropDatabase - fail point dropDatabaseHangAfterAllCollectionsDrop enabled. "
"Blocking until fail point is disabled. ";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(dropDatabaseHangAfterAllCollectionsDrop);
+ dropDatabaseHangAfterAllCollectionsDrop.pauseWhileSet();
}
AutoGetDb autoDB(opCtx, dbName, MODE_X);
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index 4b1f35ce1a6..164876fe330 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -503,7 +503,7 @@ Status validateIdIndexSpec(const BSONObj& indexSpec) {
* expected fields are present at creation time
*/
Status validateIndexSpecFieldNames(const BSONObj& indexSpec) {
- if (MONGO_FAIL_POINT(skipIndexCreateFieldNameValidation)) {
+ if (MONGO_unlikely(skipIndexCreateFieldNameValidation.shouldFail())) {
return Status::OK();
}
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index eb65fd51f11..1eb834bd5bb 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -415,13 +415,16 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(OperationContext* opCtx,
}
void failPointHangDuringBuild(FailPoint* fp, StringData where, const BSONObj& doc) {
- MONGO_FAIL_POINT_BLOCK(*fp, data) {
- int i = doc.getIntField("i");
- if (data.getData()["i"].numberInt() == i) {
+ fp->executeIf(
+ [&](const BSONObj& data) {
+ int i = doc.getIntField("i");
log() << "Hanging " << where << " index build of i=" << i;
- MONGO_FAIL_POINT_PAUSE_WHILE_SET((*fp));
- }
- }
+ fp->pauseWhileSet();
+ },
+ [&](const BSONObj& data) {
+ int i = doc.getIntField("i");
+ return data["i"].numberInt() == i;
+ });
}
Status MultiIndexBlock::insertAllDocumentsInCollection(OperationContext* opCtx,
@@ -451,16 +454,16 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(OperationContext* opCtx,
progress.set(CurOp::get(opCtx)->setProgress_inlock(curopMessage, numRecords));
}
- if (MONGO_FAIL_POINT(hangAfterSettingUpIndexBuild)) {
+ if (MONGO_unlikely(hangAfterSettingUpIndexBuild.shouldFail())) {
// Hang the build after the BackgroundOperation and curOP info is set up.
log() << "Hanging index build due to failpoint 'hangAfterSettingUpIndexBuild'";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterSettingUpIndexBuild);
+ hangAfterSettingUpIndexBuild.pauseWhileSet();
}
- if (MONGO_FAIL_POINT(hangAndThenFailIndexBuild)) {
+ if (MONGO_unlikely(hangAndThenFailIndexBuild.shouldFail())) {
// Hang the build after the BackgroundOperation and curOP info is set up.
log() << "Hanging index build due to failpoint 'hangAndThenFailIndexBuild'";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAndThenFailIndexBuild);
+ hangAndThenFailIndexBuild.pauseWhileSet();
return {ErrorCodes::InternalError,
"Failed index build because of failpoint 'hangAndThenFailIndexBuild'"};
}
@@ -494,7 +497,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(OperationContext* opCtx,
int retries = 0; // non-zero when retrying our last document.
while (retries ||
(PlanExecutor::ADVANCED == (state = exec->getNextSnapshotted(&objToIndex, &loc))) ||
- MONGO_FAIL_POINT(hangAfterStartingIndexBuild)) {
+ MONGO_unlikely(hangAfterStartingIndexBuild.shouldFail())) {
try {
auto interruptStatus = opCtx->checkForInterruptNoAssert();
if (!interruptStatus.isOK())
@@ -565,7 +568,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(OperationContext* opCtx,
return exec->getMemberObjectStatus(objToIndex.value());
}
- if (MONGO_FAIL_POINT(leaveIndexBuildUnfinishedForShutdown)) {
+ if (MONGO_unlikely(leaveIndexBuildUnfinishedForShutdown.shouldFail())) {
log() << "Index build interrupted due to 'leaveIndexBuildUnfinishedForShutdown' failpoint. "
"Mimicing shutdown error code.";
return Status(
@@ -573,14 +576,14 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(OperationContext* opCtx,
"background index build interrupted due to failpoint. returning a shutdown error.");
}
- if (MONGO_FAIL_POINT(hangAfterStartingIndexBuildUnlocked)) {
+ if (MONGO_unlikely(hangAfterStartingIndexBuildUnlocked.shouldFail())) {
// Unlock before hanging so replication recognizes we've completed.
Locker::LockSnapshot lockInfo;
invariant(opCtx->lockState()->saveLockStateAndUnlock(&lockInfo));
log() << "Hanging index build with no locks due to "
"'hangAfterStartingIndexBuildUnlocked' failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterStartingIndexBuildUnlocked);
+ hangAfterStartingIndexBuildUnlocked.pauseWhileSet();
if (isBackgroundBuilding()) {
opCtx->lockState()->restoreLockState(opCtx, lockInfo);
diff --git a/src/mongo/db/catalog/multi_index_block.h b/src/mongo/db/catalog/multi_index_block.h
index 167b2fda9c3..69aa9cd4a34 100644
--- a/src/mongo/db/catalog/multi_index_block.h
+++ b/src/mongo/db/catalog/multi_index_block.h
@@ -51,7 +51,7 @@
namespace mongo {
-MONGO_FAIL_POINT_DECLARE(leaveIndexBuildUnfinishedForShutdown);
+extern FailPoint leaveIndexBuildUnfinishedForShutdown;
class Collection;
class MatchExpression;
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 428cef3df58..1fcd21bd593 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -102,7 +102,7 @@ Status checkSourceAndTargetNamespaces(OperationContext* opCtx,
<< target);
// TODO: SERVER-42638 Replace checks of cm() with cm()->distributionMode() == sharded
- if (!MONGO_FAIL_POINT(useRenameCollectionPathThroughConfigsvr)) {
+ if (!MONGO_unlikely(useRenameCollectionPathThroughConfigsvr.shouldFail())) {
if (isCollectionSharded(opCtx, source))
return {ErrorCodes::IllegalOperation, "source namespace cannot be sharded"};
}
@@ -470,7 +470,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
}
// TODO: SERVER-42638 Replace checks of cm() with cm()->distributionMode() == sharded
- if (!MONGO_FAIL_POINT(useRenameCollectionPathThroughConfigsvr)) {
+ if (!MONGO_unlikely(useRenameCollectionPathThroughConfigsvr.shouldFail())) {
if (isCollectionSharded(opCtx, source))
return {ErrorCodes::IllegalOperation, "source namespace cannot be sharded"};
}
@@ -675,7 +675,7 @@ Status renameBetweenDBs(OperationContext* opCtx,
opCtx, "retryRestoreCursor", ns, [&cursor] { cursor->restore(); });
});
// Used to make sure that a WCE can be handled by this logic without data loss.
- if (MONGO_FAIL_POINT(writeConflictInRenameCollCopyToTmp)) {
+ if (MONGO_unlikely(writeConflictInRenameCollCopyToTmp.shouldFail())) {
throw WriteConflictException();
}
wunit.commit();
diff --git a/src/mongo/db/catalog/throttle_cursor.cpp b/src/mongo/db/catalog/throttle_cursor.cpp
index 8e2fb2f9745..1cfd22c23f7 100644
--- a/src/mongo/db/catalog/throttle_cursor.cpp
+++ b/src/mongo/db/catalog/throttle_cursor.cpp
@@ -118,7 +118,7 @@ void DataThrottle::awaitIfNeeded(OperationContext* opCtx, const int64_t dataSize
_bytesProcessed = 0;
}
- _bytesProcessed += MONGO_FAIL_POINT(fixedCursorDataSizeOf512KBForDataThrottle)
+ _bytesProcessed += MONGO_unlikely(fixedCursorDataSizeOf512KBForDataThrottle.shouldFail())
? /*512KB*/ 1 * 1024 * 512
: dataSize;
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index a9b3b49ac28..ce29a2c495b 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -73,10 +73,8 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
_resolvedNss = CollectionCatalog::get(opCtx).resolveNamespaceStringOrUUID(nsOrUUID);
// Wait for a configured amount of time after acquiring locks if the failpoint is enabled
- MONGO_FAIL_POINT_BLOCK(setAutoGetCollectionWait, customWait) {
- const BSONObj& data = customWait.getData();
- sleepFor(Milliseconds(data["waitForMillis"].numberInt()));
- }
+ setAutoGetCollectionWait.execute(
+ [&](const BSONObj& data) { sleepFor(Milliseconds(data["waitForMillis"].numberInt())); });
Database* const db = _autoDb.getDb();
invariant(!nsOrUUID.uuid() || db,
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 2a9c6d70b24..30402ec2552 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -533,7 +533,7 @@ Status Cloner::createCollectionsForDb(
auto collCount = 0;
for (auto&& params : createCollectionParams) {
- if (MONGO_FAIL_POINT(movePrimaryFailPoint) && collCount > 0) {
+ if (MONGO_unlikely(movePrimaryFailPoint.shouldFail()) && collCount > 0) {
return Status(ErrorCodes::CommandFailed, "movePrimary failed due to failpoint");
}
collCount++;
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index 031836713a8..7ca2cec1443 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -506,34 +506,35 @@ bool CommandHelpers::shouldActivateFailCommandFailPoint(const BSONObj& data,
void CommandHelpers::evaluateFailCommandFailPoint(OperationContext* opCtx,
StringData commandName,
const NamespaceString& nss) {
- bool closeConnection, hasErrorCode;
+ bool closeConnection;
+ bool hasErrorCode;
long long errorCode;
+ failCommand.executeIf(
+ [&](const BSONObj&) {
+ if (closeConnection) {
+ opCtx->getClient()->session()->end();
+ log() << "Failing command '" << commandName
+ << "' via 'failCommand' failpoint. Action: closing connection.";
+ uasserted(50985, "Failing command due to 'failCommand' failpoint");
+ }
- MONGO_FAIL_POINT_BLOCK_IF(failCommand, data, [&](const BSONObj& data) {
- closeConnection = data.hasField("closeConnection") &&
- bsonExtractBooleanField(data, "closeConnection", &closeConnection).isOK() &&
- closeConnection;
- hasErrorCode = data.hasField("errorCode") &&
- bsonExtractIntegerField(data, "errorCode", &errorCode).isOK();
-
- return shouldActivateFailCommandFailPoint(data, commandName, opCtx->getClient(), nss) &&
- (closeConnection || hasErrorCode);
- }) {
- if (closeConnection) {
- opCtx->getClient()->session()->end();
- log() << "Failing command '" << commandName
- << "' via 'failCommand' failpoint. Action: closing connection.";
- uasserted(50985, "Failing command due to 'failCommand' failpoint");
- }
-
- if (hasErrorCode) {
- log() << "Failing command '" << commandName
- << "' via 'failCommand' failpoint. Action: returning error code " << errorCode
- << ".";
- uasserted(ErrorCodes::Error(errorCode),
- "Failing command due to 'failCommand' failpoint");
- }
- }
+ if (hasErrorCode) {
+ log() << "Failing command '" << commandName
+ << "' via 'failCommand' failpoint. Action: returning error code " << errorCode
+ << ".";
+ uasserted(ErrorCodes::Error(errorCode),
+ "Failing command due to 'failCommand' failpoint");
+ }
+ },
+ [&](const BSONObj& data) {
+ closeConnection = data.hasField("closeConnection") &&
+ bsonExtractBooleanField(data, "closeConnection", &closeConnection).isOK() &&
+ closeConnection;
+ hasErrorCode = data.hasField("errorCode") &&
+ bsonExtractIntegerField(data, "errorCode", &errorCode).isOK();
+ return shouldActivateFailCommandFailPoint(data, commandName, opCtx->getClient(), nss) &&
+ (closeConnection || hasErrorCode);
+ });
}
void CommandHelpers::handleMarkKillOnClientDisconnect(OperationContext* opCtx,
@@ -546,16 +547,13 @@ void CommandHelpers::handleMarkKillOnClientDisconnect(OperationContext* opCtx,
opCtx->markKillOnClientDisconnect();
}
- MONGO_FAIL_POINT_BLOCK_IF(
- waitInCommandMarkKillOnClientDisconnect, options, [&](const BSONObj& obj) {
- const auto& clientMetadata =
+ waitInCommandMarkKillOnClientDisconnect.executeIf(
+ [&](const BSONObj&) { waitInCommandMarkKillOnClientDisconnect.pauseWhileSet(opCtx); },
+ [&](const BSONObj& obj) {
+ const auto& md =
ClientMetadataIsMasterState::get(opCtx->getClient()).getClientMetadata();
-
- return clientMetadata && (clientMetadata->getApplicationName() == obj["appName"].str());
- }) {
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx,
- waitInCommandMarkKillOnClientDisconnect);
- }
+ return md && (md->getApplicationName() == obj["appName"].str());
+ });
}
//////////////////////////////////////////////////////////////
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
index f355df6289c..e944f7c238f 100644
--- a/src/mongo/db/commands.h
+++ b/src/mongo/db/commands.h
@@ -54,8 +54,8 @@
namespace mongo {
-MONGO_FAIL_POINT_DECLARE(failCommand);
-MONGO_FAIL_POINT_DECLARE(waitInCommandMarkKillOnClientDisconnect);
+extern FailPoint failCommand;
+extern FailPoint waitInCommandMarkKillOnClientDisconnect;
class Command;
class CommandInvocation;
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 36befb7255e..9ccb0c4af79 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -487,7 +487,7 @@ bool runCreateIndexes(OperationContext* opCtx,
// The 'indexer' can throw, so ensure the build cleanup occurs.
ON_BLOCK_EXIT([&] {
opCtx->recoveryUnit()->abandonSnapshot();
- if (MONGO_FAIL_POINT(leaveIndexBuildUnfinishedForShutdown)) {
+ if (MONGO_unlikely(leaveIndexBuildUnfinishedForShutdown.shouldFail())) {
// Set a flag to leave the persisted index build state intact when cleanUpAfterBuild()
// is called below. The index build will be found on server startup.
//
@@ -533,9 +533,9 @@ bool runCreateIndexes(OperationContext* opCtx,
uassertStatusOK(indexer.insertAllDocumentsInCollection(opCtx, collection));
}
- if (MONGO_FAIL_POINT(hangAfterIndexBuildDumpsInsertsFromBulk)) {
+ if (MONGO_unlikely(hangAfterIndexBuildDumpsInsertsFromBulk.shouldFail())) {
log() << "Hanging after dumping inserts from bulk builder";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterIndexBuildDumpsInsertsFromBulk);
+ hangAfterIndexBuildDumpsInsertsFromBulk.pauseWhileSet();
}
// Perform the first drain while holding an intent lock.
@@ -554,9 +554,9 @@ bool runCreateIndexes(OperationContext* opCtx,
uassertStatusOK(indexer.drainBackgroundWrites(opCtx));
}
- if (MONGO_FAIL_POINT(hangAfterIndexBuildFirstDrain)) {
+ if (MONGO_unlikely(hangAfterIndexBuildFirstDrain.shouldFail())) {
log() << "Hanging after index build first drain";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterIndexBuildFirstDrain);
+ hangAfterIndexBuildFirstDrain.pauseWhileSet();
}
// Perform the second drain while stopping writes on the collection.
@@ -575,9 +575,9 @@ bool runCreateIndexes(OperationContext* opCtx,
uassertStatusOK(indexer.drainBackgroundWrites(opCtx));
}
- if (MONGO_FAIL_POINT(hangAfterIndexBuildSecondDrain)) {
+ if (MONGO_unlikely(hangAfterIndexBuildSecondDrain.shouldFail())) {
log() << "Hanging after index build second drain";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterIndexBuildSecondDrain);
+ hangAfterIndexBuildSecondDrain.pauseWhileSet();
}
// Need to get exclusive collection lock back to complete the index build.
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 3aba920862c..963a7aa7f83 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -220,7 +220,7 @@ public:
});
}
- if (MONGO_FAIL_POINT(reIndexCrashAfterDrop)) {
+ if (MONGO_unlikely(reIndexCrashAfterDrop.shouldFail())) {
log() << "exiting because 'reIndexCrashAfterDrop' fail point was set";
quickExit(EXIT_ABRUPT);
}
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index 0a7482078b4..8e7cf3b8d98 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -177,12 +177,11 @@ void FeatureCompatibilityVersion::onInsertOrUpdate(OperationContext* opCtx, cons
}
if (newVersion != ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
- if (MONGO_FAIL_POINT(hangBeforeAbortingRunningTransactionsOnFCVDowngrade)) {
+ if (MONGO_unlikely(hangBeforeAbortingRunningTransactionsOnFCVDowngrade.shouldFail())) {
log() << "featureCompatibilityVersion - "
"hangBeforeAbortingRunningTransactionsOnFCVDowngrade fail point enabled. "
"Blocking until fail point is disabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(
- hangBeforeAbortingRunningTransactionsOnFCVDowngrade);
+ hangBeforeAbortingRunningTransactionsOnFCVDowngrade.pauseWhileSet();
}
// Abort all open transactions when downgrading the featureCompatibilityVersion.
SessionKiller::Matcher matcherAllSessions(
diff --git a/src/mongo/db/commands/generic_servers.cpp b/src/mongo/db/commands/generic_servers.cpp
index 9c0a0ad661f..5fe6e5b56e9 100644
--- a/src/mongo/db/commands/generic_servers.cpp
+++ b/src/mongo/db/commands/generic_servers.cpp
@@ -332,13 +332,12 @@ void CmdShutdown::shutdownHelper(const BSONObj& cmdObj) {
ShutdownTaskArgs shutdownArgs;
shutdownArgs.isUserInitiated = true;
- MONGO_FAIL_POINT_BLOCK(crashOnShutdown, crashBlock) {
- const std::string crashHow = crashBlock.getData()["how"].str();
- if (crashHow == "fault") {
+ crashOnShutdown.execute([&](const BSONObj& data) {
+ if (data["how"].str() == "fault") {
++*illegalAddress;
}
::abort();
- }
+ });
log() << "terminating, shutdown command received " << cmdObj;
@@ -352,11 +351,10 @@ void CmdShutdown::shutdownHelper(const BSONObj& cmdObj) {
// The ServiceMain thread will quit for us so just sleep until it does.
while (true)
sleepsecs(60); // Loop forever
- } else
-#endif
- {
- shutdown(EXIT_CLEAN, shutdownArgs); // this never returns
+ return;
}
+#endif
+ shutdown(EXIT_CLEAN, shutdownArgs); // this never returns
}
} // namespace mongo
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index c146024162f..cceb0f15fa8 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -385,11 +385,10 @@ public:
invariant(cursorPin->lockPolicy() ==
ClientCursorParams::LockPolicy::kLockExternally);
- if (MONGO_FAIL_POINT(GetMoreHangBeforeReadLock)) {
+ if (MONGO_unlikely(GetMoreHangBeforeReadLock.shouldFail())) {
log() << "GetMoreHangBeforeReadLock fail point enabled. Blocking until fail "
"point is disabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx,
- GetMoreHangBeforeReadLock);
+ GetMoreHangBeforeReadLock.pauseWhileSet(opCtx);
}
// Lock the backing collection by using the executor's namespace. Note that it may
@@ -445,7 +444,7 @@ public:
validateLSID(opCtx, _request, cursorPin.getCursor());
validateTxnNumber(opCtx, _request, cursorPin.getCursor());
- if (_request.nss.isOplog() && MONGO_FAIL_POINT(rsStopGetMoreCmd)) {
+ if (_request.nss.isOplog() && MONGO_unlikely(rsStopGetMoreCmd.shouldFail())) {
uasserted(ErrorCodes::CommandFailed,
str::stream() << "getMore on " << _request.nss.ns()
<< " rejected due to active fail point rsStopGetMoreCmd");
@@ -475,7 +474,7 @@ public:
readLock.reset();
readLock.emplace(opCtx, _request.nss);
};
- if (MONGO_FAIL_POINT(waitAfterPinningCursorBeforeGetMoreBatch)) {
+ if (MONGO_unlikely(waitAfterPinningCursorBeforeGetMoreBatch.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&waitAfterPinningCursorBeforeGetMoreBatch,
opCtx,
@@ -486,7 +485,7 @@ public:
}
const bool disableAwaitDataFailpointActive =
- MONGO_FAIL_POINT(disableAwaitDataForGetMoreCmd);
+ MONGO_unlikely(disableAwaitDataForGetMoreCmd.shouldFail());
// Inherit properties like readConcern and maxTimeMS from our originating cursor.
setUpOperationContextStateForGetMore(
@@ -553,8 +552,8 @@ public:
dropAndReacquireReadLock();
exec->restoreState();
};
- MONGO_FAIL_POINT_BLOCK(waitWithPinnedCursorDuringGetMoreBatch, options) {
- const BSONObj& data = options.getData();
+
+ waitWithPinnedCursorDuringGetMoreBatch.execute([&](const BSONObj& data) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&waitWithPinnedCursorDuringGetMoreBatch,
opCtx,
@@ -564,7 +563,7 @@ public:
: saveAndRestoreStateWithReadLockReacquisition,
false,
_request.nss);
- }
+ });
uassertStatusOK(generateBatch(
opCtx, cursorPin.getCursor(), _request, &nextBatch, &state, &numResults));
@@ -647,7 +646,7 @@ public:
// If the 'waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch' failpoint is active, we
// set the 'msg' field of this operation's CurOp to signal that we've hit this point and
// then spin until the failpoint is released.
- if (MONGO_FAIL_POINT(waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch)) {
+ if (MONGO_unlikely(waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch,
opCtx,
@@ -666,7 +665,7 @@ public:
return AllowedOnSecondary::kAlways;
}
- ReadWriteType getReadWriteType() const {
+ ReadWriteType getReadWriteType() const override {
return ReadWriteType::kRead;
}
diff --git a/src/mongo/db/commands/txn_cmds.cpp b/src/mongo/db/commands/txn_cmds.cpp
index 8a0fe30e9ec..4575eef8a62 100644
--- a/src/mongo/db/commands/txn_cmds.cpp
+++ b/src/mongo/db/commands/txn_cmds.cpp
@@ -104,7 +104,8 @@ public:
// commit oplog entry.
auto& replClient = repl::ReplClientInfo::forClient(opCtx->getClient());
replClient.setLastOpToSystemLastOpTime(opCtx);
- if (MONGO_FAIL_POINT(participantReturnNetworkErrorForCommitAfterExecutingCommitLogic)) {
+ if (MONGO_unlikely(
+ participantReturnNetworkErrorForCommitAfterExecutingCommitLogic.shouldFail())) {
uasserted(ErrorCodes::HostUnreachable,
"returning network error because failpoint is on");
}
@@ -134,7 +135,8 @@ public:
txnParticipant.commitUnpreparedTransaction(opCtx);
}
- if (MONGO_FAIL_POINT(participantReturnNetworkErrorForCommitAfterExecutingCommitLogic)) {
+ if (MONGO_unlikely(
+ participantReturnNetworkErrorForCommitAfterExecutingCommitLogic.shouldFail())) {
uasserted(ErrorCodes::HostUnreachable,
"returning network error because failpoint is on");
}
@@ -189,7 +191,7 @@ public:
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&hangBeforeAbortingTxn, opCtx, "hangBeforeAbortingTxn");
- if (!MONGO_FAIL_POINT(dontRemoveTxnCoordinatorOnAbort) &&
+ if (!MONGO_unlikely(dontRemoveTxnCoordinatorOnAbort.shouldFail()) &&
(ShardingState::get(opCtx)->canAcceptShardedCommands().isOK() ||
serverGlobalParams.clusterRole == ClusterRole::ConfigServer)) {
TransactionCoordinatorService::get(opCtx)->cancelIfCommitNotYetStarted(
@@ -198,7 +200,8 @@ public:
txnParticipant.abortTransaction(opCtx);
- if (MONGO_FAIL_POINT(participantReturnNetworkErrorForAbortAfterExecutingAbortLogic)) {
+ if (MONGO_unlikely(
+ participantReturnNetworkErrorForAbortAfterExecutingAbortLogic.shouldFail())) {
uasserted(ErrorCodes::HostUnreachable,
"returning network error because failpoint is on");
}
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 43c5fcae3b0..63ed7581a3c 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -109,7 +109,7 @@ public:
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- if (MONGO_FAIL_POINT(validateCmdCollectionNotValid)) {
+ if (MONGO_unlikely(validateCmdCollectionNotValid.shouldFail())) {
result.appendBool("valid", false);
return true;
}
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 3a162b425b0..fd840437c89 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -895,7 +895,8 @@ void LockerImpl::_lockComplete(OperationContext* opCtx,
// This failpoint is used to time out non-intent locks if they cannot be granted immediately.
// Testing-only.
- if (!_uninterruptibleLocksRequested && MONGO_FAIL_POINT(failNonIntentLocksIfWaitNeeded)) {
+ if (!_uninterruptibleLocksRequested &&
+ MONGO_unlikely(failNonIntentLocksIfWaitNeeded.shouldFail())) {
uassert(ErrorCodes::LockTimeout,
str::stream() << "Cannot immediately acquire lock '" << resId.toString()
<< "'. Timing out due to failpoint.",
@@ -991,7 +992,7 @@ void LockerImpl::getFlowControlTicket(OperationContext* opCtx, LockMode lockMode
LockResult LockerImpl::lockRSTLBegin(OperationContext* opCtx, LockMode mode) {
bool testOnly = false;
- if (MONGO_FAIL_POINT(enableTestOnlyFlagforRSTL)) {
+ if (MONGO_unlikely(enableTestOnlyFlagforRSTL.shouldFail())) {
testOnly = true;
}
diff --git a/src/mongo/db/concurrency/write_conflict_exception.h b/src/mongo/db/concurrency/write_conflict_exception.h
index 71f38b34670..84089ae86f6 100644
--- a/src/mongo/db/concurrency/write_conflict_exception.h
+++ b/src/mongo/db/concurrency/write_conflict_exception.h
@@ -38,7 +38,7 @@
namespace mongo {
-MONGO_FAIL_POINT_DECLARE(skipWriteConflictRetries);
+extern FailPoint skipWriteConflictRetries;
/**
* This is thrown if during a write, two or more operations conflict with each other.
@@ -83,7 +83,8 @@ auto writeConflictRetry(OperationContext* opCtx, StringData opStr, StringData ns
invariant(opCtx->lockState());
invariant(opCtx->recoveryUnit());
- if (opCtx->lockState()->inAWriteUnitOfWork() || MONGO_FAIL_POINT(skipWriteConflictRetries)) {
+ if (opCtx->lockState()->inAWriteUnitOfWork() ||
+ MONGO_unlikely(skipWriteConflictRetries.shouldFail())) {
return f();
}
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index da117a244b4..de804da0983 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -247,7 +247,7 @@ void CurOp::reportCurrentOpForClient(OperationContext* opCtx,
bool backtraceMode,
BSONObjBuilder* infoBuilder) {
invariant(client);
- if (MONGO_FAIL_POINT(keepDiagnosticCaptureOnFailedLock)) {
+ if (MONGO_unlikely(keepDiagnosticCaptureOnFailedLock.shouldFail())) {
gHangLock.lock.lock();
try {
stdx::lock_guard testLock(gHangLock.mutex);
@@ -336,7 +336,7 @@ void CurOp::reportCurrentOpForClient(OperationContext* opCtx,
}
}
- if (MONGO_FAIL_POINT(keepDiagnosticCaptureOnFailedLock)) {
+ if (MONGO_unlikely(keepDiagnosticCaptureOnFailedLock.shouldFail())) {
gHangLock.lock.unlock();
}
}
diff --git a/src/mongo/db/curop_failpoint_helpers.cpp b/src/mongo/db/curop_failpoint_helpers.cpp
index 6afbfb05be5..5f2a4190de7 100644
--- a/src/mongo/db/curop_failpoint_helpers.cpp
+++ b/src/mongo/db/curop_failpoint_helpers.cpp
@@ -47,42 +47,44 @@ std::string CurOpFailpointHelpers::updateCurOpMsg(OperationContext* opCtx,
void CurOpFailpointHelpers::waitWhileFailPointEnabled(FailPoint* failPoint,
OperationContext* opCtx,
const std::string& curOpMsg,
- const std::function<void(void)>& whileWaiting,
+ const std::function<void()>& whileWaiting,
bool checkForInterrupt,
boost::optional<NamespaceString> nss) {
-
invariant(failPoint);
- MONGO_FAIL_POINT_BLOCK((*failPoint), options) {
- const BSONObj& data = options.getData();
- StringData fpNss = data.getStringField("nss");
- if (nss && !fpNss.empty() && fpNss != nss.get().toString()) {
- return;
- }
+ failPoint->executeIf(
+ [&](const BSONObj& data) {
+ auto origCurOpMsg = updateCurOpMsg(opCtx, curOpMsg);
- auto origCurOpMsg = updateCurOpMsg(opCtx, curOpMsg);
+ const bool shouldCheckForInterrupt =
+ checkForInterrupt || data["shouldCheckForInterrupt"].booleanSafe();
+ const bool shouldContinueOnInterrupt = data["shouldContinueOnInterrupt"].booleanSafe();
+ while (MONGO_unlikely(failPoint->shouldFail())) {
+ sleepFor(Milliseconds(10));
+ if (whileWaiting) {
+ whileWaiting();
+ }
- const bool shouldCheckForInterrupt =
- checkForInterrupt || data["shouldCheckForInterrupt"].booleanSafe();
- const bool shouldContinueOnInterrupt = data["shouldContinueOnInterrupt"].booleanSafe();
- while (MONGO_FAIL_POINT((*failPoint))) {
- sleepFor(Milliseconds(10));
- if (whileWaiting) {
- whileWaiting();
+ // Check for interrupt so that an operation can be killed while waiting for the
+ // failpoint to be disabled, if the failpoint is configured to be interruptible.
+ //
+ // For shouldContinueOnInterrupt, an interrupt merely allows the code to continue
+ // past the failpoint; it is up to the code under test to actually check for
+ // interrupt.
+ if (shouldContinueOnInterrupt) {
+ if (!opCtx->checkForInterruptNoAssert().isOK())
+ break;
+ } else if (shouldCheckForInterrupt) {
+ opCtx->checkForInterrupt();
+ }
}
-
- // Check for interrupt so that an operation can be killed while waiting for the
- // failpoint to be disabled, if the failpoint is configured to be interruptible.
- //
- // For shouldContinueOnInterrupt, an interrupt merely allows the code to continue past
- // the failpoint; it is up to the code under test to actually check for interrupt.
- if (shouldContinueOnInterrupt) {
- if (!opCtx->checkForInterruptNoAssert().isOK())
- break;
- } else if (shouldCheckForInterrupt) {
- opCtx->checkForInterrupt();
+ updateCurOpMsg(opCtx, origCurOpMsg);
+ },
+ [&](const BSONObj& data) {
+ StringData fpNss = data.getStringField("nss");
+ if (nss && !fpNss.empty() && fpNss != nss.get().toString()) {
+ return false;
}
- }
- updateCurOpMsg(opCtx, origCurOpMsg);
- }
+ return true;
+ });
}
} // namespace mongo
diff --git a/src/mongo/db/curop_failpoint_helpers.h b/src/mongo/db/curop_failpoint_helpers.h
index a1143805951..8e2bd1bc08f 100644
--- a/src/mongo/db/curop_failpoint_helpers.h
+++ b/src/mongo/db/curop_failpoint_helpers.h
@@ -60,7 +60,7 @@ public:
static void waitWhileFailPointEnabled(FailPoint* failPoint,
OperationContext* opCtx,
const std::string& curOpMsg,
- const std::function<void(void)>& whileWaiting = nullptr,
+ const std::function<void()>& whileWaiting = nullptr,
bool checkForInterrupt = false,
boost::optional<NamespaceString> nss = boost::none);
};
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 1f76b6e6d03..f0778289bd8 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -669,7 +669,7 @@ ExitCode _initAndListen(int listenPort) {
}
#endif
- if (MONGO_FAIL_POINT(shutdownAtStartup)) {
+ if (MONGO_unlikely(shutdownAtStartup.shouldFail())) {
log() << "starting clean exit via failpoint";
exitCleanly(EXIT_CLEAN);
}
diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp
index 4da1d88b854..a9bddf56266 100644
--- a/src/mongo/db/exec/update_stage.cpp
+++ b/src/mongo/db/exec/update_stage.cpp
@@ -607,7 +607,7 @@ void UpdateStage::doInsert() {
}
}
- if (MONGO_FAIL_POINT(hangBeforeUpsertPerformsInsert)) {
+ if (MONGO_unlikely(hangBeforeUpsertPerformsInsert.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&hangBeforeUpsertPerformsInsert, getOpCtx(), "hangBeforeUpsertPerformsInsert");
}
@@ -972,10 +972,9 @@ bool UpdateStage::checkUpdateChangesShardKeyFields(ScopedCollectionMetadata meta
getOpCtx()->getTxnNumber() || !getOpCtx()->writesAreReplicated());
if (!metadata->keyBelongsToMe(newShardKey)) {
- if (MONGO_FAIL_POINT(hangBeforeThrowWouldChangeOwningShard)) {
+ if (MONGO_unlikely(hangBeforeThrowWouldChangeOwningShard.shouldFail())) {
log() << "Hit hangBeforeThrowWouldChangeOwningShard failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(getOpCtx(),
- hangBeforeThrowWouldChangeOwningShard);
+ hangBeforeThrowWouldChangeOwningShard.pauseWhileSet(getOpCtx());
}
uasserted(WouldChangeOwningShardInfo(oldObj.value(), newObj, false /* upsert */),
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index a6a53894d1b..c55a569442f 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -331,13 +331,14 @@ void IndexBuildInterceptor::_tryYield(OperationContext* opCtx) {
// Track the number of yields in CurOp.
CurOp::get(opCtx)->yielded();
- MONGO_FAIL_POINT_BLOCK(hangDuringIndexBuildDrainYield, config) {
- StringData ns{config.getData().getStringField("namespace")};
- if (ns == _indexCatalogEntry->ns().ns()) {
+ hangDuringIndexBuildDrainYield.executeIf(
+ [&](auto&&) {
log() << "Hanging index build during drain yield";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangDuringIndexBuildDrainYield);
- }
- }
+ hangDuringIndexBuildDrainYield.pauseWhileSet();
+ },
+ [&](auto&& config) {
+ return config.getStringField("namespace") == _indexCatalogEntry->ns().ns();
+ });
locker->restoreLockState(opCtx, snapshot);
}
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index f6e0edfdddb..2bbafd3cac5 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -894,9 +894,9 @@ void IndexBuildsCoordinator::_buildIndex(
_indexBuildsManager.startBuildingIndex(opCtx, collection, replState->buildUUID));
}
- if (MONGO_FAIL_POINT(hangAfterIndexBuildDumpsInsertsFromBulk)) {
+ if (MONGO_unlikely(hangAfterIndexBuildDumpsInsertsFromBulk.shouldFail())) {
log() << "Hanging after dumping inserts from bulk builder";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterIndexBuildDumpsInsertsFromBulk);
+ hangAfterIndexBuildDumpsInsertsFromBulk.pauseWhileSet();
}
// Perform the first drain while holding an intent lock.
@@ -908,9 +908,9 @@ void IndexBuildsCoordinator::_buildIndex(
opCtx, replState->buildUUID, RecoveryUnit::ReadSource::kUnset));
}
- if (MONGO_FAIL_POINT(hangAfterIndexBuildFirstDrain)) {
+ if (MONGO_unlikely(hangAfterIndexBuildFirstDrain.shouldFail())) {
log() << "Hanging after index build first drain";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterIndexBuildFirstDrain);
+ hangAfterIndexBuildFirstDrain.pauseWhileSet();
}
// Perform the second drain while stopping writes on the collection.
@@ -922,9 +922,9 @@ void IndexBuildsCoordinator::_buildIndex(
opCtx, replState->buildUUID, RecoveryUnit::ReadSource::kUnset));
}
- if (MONGO_FAIL_POINT(hangAfterIndexBuildSecondDrain)) {
+ if (MONGO_unlikely(hangAfterIndexBuildSecondDrain.shouldFail())) {
log() << "Hanging after index build second drain";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterIndexBuildSecondDrain);
+ hangAfterIndexBuildSecondDrain.pauseWhileSet();
}
// Need to return the collection lock back to exclusive mode, to complete the index build.
diff --git a/src/mongo/db/index_builds_coordinator.h b/src/mongo/db/index_builds_coordinator.h
index e75292f004f..ff2f75fd288 100644
--- a/src/mongo/db/index_builds_coordinator.h
+++ b/src/mongo/db/index_builds_coordinator.h
@@ -507,8 +507,8 @@ private:
// These fail points are used to control index build progress. Declared here to be shared
// temporarily between createIndexes command and IndexBuildsCoordinator.
-MONGO_FAIL_POINT_DECLARE(hangAfterIndexBuildFirstDrain);
-MONGO_FAIL_POINT_DECLARE(hangAfterIndexBuildSecondDrain);
-MONGO_FAIL_POINT_DECLARE(hangAfterIndexBuildDumpsInsertsFromBulk);
+extern FailPoint hangAfterIndexBuildFirstDrain;
+extern FailPoint hangAfterIndexBuildSecondDrain;
+extern FailPoint hangAfterIndexBuildDumpsInsertsFromBulk;
} // namespace mongo
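[Editor's note] With MONGO_FAIL_POINT_DECLARE gone, headers that share a fail point now use a plain extern declaration, as above. A sketch of the two halves, assuming the defining .cpp still uses the MONGO_FAIL_POINT_DEFINE macro (the definition side is not shown in this hunk):

    // foo.h — make the fail point visible to other translation units
    extern FailPoint hangAfterIndexBuildFirstDrain;

    // foo.cpp — define and register it (assumed unchanged by this patch)
    MONGO_FAIL_POINT_DEFINE(hangAfterIndexBuildFirstDrain);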
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 695edb86852..31a9859b38c 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -184,7 +184,7 @@ IndexBuildsCoordinatorMongod::startIndexBuild(OperationContext* opCtx,
return;
}
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterInitializingIndexBuild);
+ hangAfterInitializingIndexBuild.pauseWhileSet();
auto opCtx = Client::getCurrent()->makeOperationContext();
diff --git a/src/mongo/db/key_generator.cpp b/src/mongo/db/key_generator.cpp
index ab4eb8ef11e..def0b6c4ac5 100644
--- a/src/mongo/db/key_generator.cpp
+++ b/src/mongo/db/key_generator.cpp
@@ -76,7 +76,7 @@ KeyGenerator::KeyGenerator(std::string purpose,
Status KeyGenerator::generateNewKeysIfNeeded(OperationContext* opCtx) {
- if (MONGO_FAIL_POINT(disableKeyGeneration)) {
+ if (MONGO_unlikely(disableKeyGeneration.shouldFail())) {
return {ErrorCodes::FailPointEnabled, "key generation disabled"};
}
diff --git a/src/mongo/db/keys_collection_manager.cpp b/src/mongo/db/keys_collection_manager.cpp
index a771a9150ad..bbad4f450c6 100644
--- a/src/mongo/db/keys_collection_manager.cpp
+++ b/src/mongo/db/keys_collection_manager.cpp
@@ -265,13 +265,9 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
}
}
- MONGO_FAIL_POINT_BLOCK(maxKeyRefreshWaitTimeOverrideMS, data) {
- const BSONObj& dataObj = data.getData();
- auto overrideMS = Milliseconds(dataObj["overrideMS"].numberInt());
- if (nextWakeup > overrideMS) {
- nextWakeup = overrideMS;
- }
- }
+ maxKeyRefreshWaitTimeOverrideMS.execute([&](const BSONObj& data) {
+ nextWakeup = std::min(nextWakeup, Milliseconds(data["overrideMS"].numberInt()));
+ });
stdx::unique_lock<stdx::mutex> lock(_mutex);
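[Editor's note] For unconditional data-driven blocks like the one above, FailPoint::execute hands the configured data document straight to a single callback. A generic sketch with an illustrative fail point name (slowDownRefresh) and field (millis), not from this commit:

    slowDownRefresh.execute([&](const BSONObj& data) {
        // "data" is the document supplied when the fail point was configured.
        sleepmillis(data["millis"].numberInt());
    });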
diff --git a/src/mongo/db/matcher/expression.h b/src/mongo/db/matcher/expression.h
index f5bae2f7c55..b476db91330 100644
--- a/src/mongo/db/matcher/expression.h
+++ b/src/mongo/db/matcher/expression.h
@@ -46,7 +46,7 @@ namespace mongo {
* Enabling the disableMatchExpressionOptimization fail point will stop match expressions from
* being optimized.
*/
-MONGO_FAIL_POINT_DECLARE(disableMatchExpressionOptimization);
+extern FailPoint disableMatchExpressionOptimization;
class CollatorInterface;
class MatchExpression;
@@ -143,7 +143,7 @@ public:
static std::unique_ptr<MatchExpression> optimize(std::unique_ptr<MatchExpression> expression) {
// If the disableMatchExpressionOptimization failpoint is enabled, optimizations are skipped
// and the expression is left unmodified.
- if (MONGO_FAIL_POINT(disableMatchExpressionOptimization)) {
+ if (MONGO_unlikely(disableMatchExpressionOptimization.shouldFail())) {
return expression;
}
diff --git a/src/mongo/db/matcher/expression_expr.cpp b/src/mongo/db/matcher/expression_expr.cpp
index f356c4aa25b..62a345ab515 100644
--- a/src/mongo/db/matcher/expression_expr.cpp
+++ b/src/mongo/db/matcher/expression_expr.cpp
@@ -62,7 +62,7 @@ bool ExprMatchExpression::matches(const MatchableDocument* doc, MatchDetails* de
auto value = _expression->evaluate(document, &variables);
return value.coerceToBool();
} catch (const DBException&) {
- if (MONGO_FAIL_POINT(ExprMatchExpressionMatchesReturnsFalseOnException)) {
+ if (MONGO_unlikely(ExprMatchExpressionMatchesReturnsFalseOnException.shouldFail())) {
return false;
}
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index 4ba8d17eea1..2ccfa6fa5dc 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -413,16 +413,18 @@ void OpObserverImpl::onInserts(OperationContext* opCtx,
}
void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) {
- MONGO_FAIL_POINT_BLOCK(failCollectionUpdates, extraData) {
- auto collElem = extraData.getData()["collectionNS"];
- // If the failpoint specifies no collection or matches the existing one, fail.
- if (!collElem || args.nss.ns() == collElem.String()) {
+ failCollectionUpdates.executeIf(
+ [&](const BSONObj&) {
uasserted(40654,
str::stream() << "failCollectionUpdates failpoint enabled, namespace: "
<< args.nss.ns() << ", update: " << args.updateArgs.update
<< " on document with " << args.updateArgs.criteria);
- }
- }
+ },
+ [&](const BSONObj& data) {
+ // If the failpoint specifies no collection or matches the existing one, fail.
+ auto collElem = data["collectionNS"];
+ return !collElem || args.nss.ns() == collElem.String();
+ });
// Do not log a no-op operation; see SERVER-21738
if (args.updateArgs.update.isEmpty()) {
diff --git a/src/mongo/db/operation_context.cpp b/src/mongo/db/operation_context.cpp
index c3fef5535ae..6c21a7b58a1 100644
--- a/src/mongo/db/operation_context.cpp
+++ b/src/mongo/db/operation_context.cpp
@@ -139,10 +139,10 @@ bool OperationContext::hasDeadlineExpired() const {
if (!hasDeadline()) {
return false;
}
- if (MONGO_FAIL_POINT(maxTimeNeverTimeOut)) {
+ if (MONGO_unlikely(maxTimeNeverTimeOut.shouldFail())) {
return false;
}
- if (MONGO_FAIL_POINT(maxTimeAlwaysTimeOut)) {
+ if (MONGO_unlikely(maxTimeAlwaysTimeOut.shouldFail())) {
return true;
}
@@ -216,12 +216,12 @@ Status OperationContext::checkForInterruptNoAssert() noexcept {
return Status::OK();
}
- MONGO_FAIL_POINT_BLOCK(checkForInterruptFail, scopedFailPoint) {
- if (opShouldFail(getClient(), scopedFailPoint.getData())) {
+ checkForInterruptFail.executeIf(
+ [&](auto&&) {
log() << "set pending kill on op " << getOpID() << ", for checkForInterruptFail";
markKilled();
- }
- }
+ },
+ [&](auto&& data) { return opShouldFail(getClient(), data); });
const auto killStatus = getKillStatus();
if (killStatus != ErrorCodes::OK) {
@@ -281,7 +281,7 @@ StatusWith<stdx::cv_status> OperationContext::waitForConditionOrInterruptNoAsser
// maxTimeNeverTimeOut is set) then we assume that the incongruity is due to a clock mismatch
// and return _timeoutError regardless. To prevent this behaviour, only consider the op's
// deadline in the event that the maxTimeNeverTimeOut failpoint is not set.
- bool opHasDeadline = (hasDeadline() && !MONGO_FAIL_POINT(maxTimeNeverTimeOut));
+ bool opHasDeadline = (hasDeadline() && !MONGO_unlikely(maxTimeNeverTimeOut.shouldFail()));
if (opHasDeadline) {
deadline = std::min(deadline, getDeadline());
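[Editor's note] Simple on/off checks keep their branch form: MONGO_FAIL_POINT(fp) becomes MONGO_unlikely(fp.shouldFail()), preserving the branch-prediction hint while making the FailPoint method call explicit. A sketch with a hypothetical fail point name:

    if (MONGO_unlikely(myFailPoint.shouldFail())) {
        return Status(ErrorCodes::FailPointEnabled, "myFailPoint is enabled");
    }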
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 3b87ce8038d..8e46b008110 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -368,7 +368,7 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
true, // Check for interrupt periodically.
wholeOp.getNamespace());
- if (MONGO_FAIL_POINT(failAllInserts)) {
+ if (MONGO_unlikely(failAllInserts.shouldFail())) {
uasserted(ErrorCodes::InternalError, "failAllInserts failpoint active!");
}
@@ -601,7 +601,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
false /*checkForInterrupt*/,
ns);
- if (MONGO_FAIL_POINT(failAllUpdates)) {
+ if (MONGO_unlikely(failAllUpdates.shouldFail())) {
uasserted(ErrorCodes::InternalError, "failAllUpdates failpoint active!");
}
@@ -848,7 +848,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
},
true // Check for interrupt periodically.
);
- if (MONGO_FAIL_POINT(failAllRemoves)) {
+ if (MONGO_unlikely(failAllRemoves.shouldFail())) {
uasserted(ErrorCodes::InternalError, "failAllRemoves failpoint active!");
}
@@ -936,12 +936,12 @@ WriteResult performDeletes(OperationContext* opCtx, const write_ops::Delete& who
curOp.setCommand_inlock(cmd);
}
ON_BLOCK_EXIT([&] {
- if (MONGO_FAIL_POINT(hangBeforeChildRemoveOpFinishes)) {
+ if (MONGO_unlikely(hangBeforeChildRemoveOpFinishes.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&hangBeforeChildRemoveOpFinishes, opCtx, "hangBeforeChildRemoveOpFinishes");
}
finishCurOp(opCtx, &curOp);
- if (MONGO_FAIL_POINT(hangBeforeChildRemoveOpIsPopped)) {
+ if (MONGO_unlikely(hangBeforeChildRemoveOpIsPopped.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&hangBeforeChildRemoveOpIsPopped, opCtx, "hangBeforeChildRemoveOpIsPopped");
}
@@ -959,7 +959,7 @@ WriteResult performDeletes(OperationContext* opCtx, const write_ops::Delete& who
}
}
- if (MONGO_FAIL_POINT(hangAfterAllChildRemoveOpsArePopped)) {
+ if (MONGO_unlikely(hangAfterAllChildRemoveOpsArePopped.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&hangAfterAllChildRemoveOpsArePopped, opCtx, "hangAfterAllChildRemoveOpsArePopped");
}
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 277db77429c..669cf02f670 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -86,7 +86,7 @@ void DocumentSourceCursor::loadBatch() {
return;
}
- while (MONGO_FAIL_POINT(hangBeforeDocumentSourceCursorLoadBatch)) {
+ while (MONGO_unlikely(hangBeforeDocumentSourceCursorLoadBatch.shouldFail())) {
log() << "Hanging aggregation due to 'hangBeforeDocumentSourceCursorLoadBatch' failpoint";
sleepmillis(10);
}
diff --git a/src/mongo/db/pipeline/document_source_exchange.cpp b/src/mongo/db/pipeline/document_source_exchange.cpp
index 2220a73b496..76ca6839e96 100644
--- a/src/mongo/db/pipeline/document_source_exchange.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange.cpp
@@ -315,7 +315,7 @@ DocumentSource::GetNextResult Exchange::getNext(OperationContext* opCtx,
// The return value is an index of a full consumer buffer.
size_t fullConsumerId = loadNextBatch();
- if (MONGO_FAIL_POINT(exchangeFailLoadNextBatch)) {
+ if (MONGO_unlikely(exchangeFailLoadNextBatch.shouldFail())) {
log() << "exchangeFailLoadNextBatch fail point enabled.";
uasserted(ErrorCodes::FailPointEnabled,
"Asserting on loading the next batch due to failpoint.");
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index 355bc8a429a..c5fdbc032fb 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -266,7 +266,7 @@ void Pipeline::validateCommon() const {
void Pipeline::optimizePipeline() {
// If the disablePipelineOptimization failpoint is enabled, the pipeline won't be optimized.
- if (MONGO_FAIL_POINT(disablePipelineOptimization)) {
+ if (MONGO_unlikely(disablePipelineOptimization.shouldFail())) {
return;
}
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index c5cec04fb34..3ef81811d71 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -425,10 +425,10 @@ std::vector<RemoteCursor> establishShardCursors(
appendDbVersionIfPresent(cmdObjWithShardVersion, routingInfo->db()));
}
- if (MONGO_FAIL_POINT(clusterAggregateHangBeforeEstablishingShardCursors)) {
+ if (MONGO_unlikely(clusterAggregateHangBeforeEstablishingShardCursors.shouldFail())) {
log() << "clusterAggregateHangBeforeEstablishingShardCursors fail point enabled. Blocking "
"until fail point is disabled.";
- while (MONGO_FAIL_POINT(clusterAggregateHangBeforeEstablishingShardCursors)) {
+ while (MONGO_unlikely(clusterAggregateHangBeforeEstablishingShardCursors.shouldFail())) {
sleepsecs(1);
}
}
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 65816afeef4..2f351c930d3 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -248,7 +248,7 @@ Message getMore(OperationContext* opCtx,
curOp.ensureStarted();
// For testing, we may want to fail if we receive a getmore.
- if (MONGO_FAIL_POINT(failReceivedGetmore)) {
+ if (MONGO_unlikely(failReceivedGetmore.shouldFail())) {
MONGO_UNREACHABLE;
}
@@ -369,8 +369,7 @@ Message getMore(OperationContext* opCtx,
// repeatedly release and re-acquire the collection readLock at regular intervals until
// the failpoint is released. This is done in order to avoid deadlocks caused by the
// pinned-cursor failpoints in this file (see SERVER-21997).
- MONGO_FAIL_POINT_BLOCK(waitAfterPinningCursorBeforeGetMoreBatch, options) {
- const BSONObj& data = options.getData();
+ waitAfterPinningCursorBeforeGetMoreBatch.execute([&](const BSONObj& data) {
if (data["shouldNotdropLock"].booleanSafe()) {
dropAndReaquireReadLock = []() {};
}
@@ -381,7 +380,7 @@ Message getMore(OperationContext* opCtx,
dropAndReaquireReadLock,
false,
nss);
- }
+ });
const auto replicationMode = repl::ReplicationCoordinator::get(opCtx)->getReplicationMode();
@@ -459,7 +458,7 @@ Message getMore(OperationContext* opCtx,
// accumulate over the course of a cursor's lifetime.
PlanSummaryStats preExecutionStats;
Explain::getSummaryStats(*exec, &preExecutionStats);
- if (MONGO_FAIL_POINT(waitWithPinnedCursorDuringGetMoreBatch)) {
+ if (MONGO_unlikely(waitWithPinnedCursorDuringGetMoreBatch.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(&waitWithPinnedCursorDuringGetMoreBatch,
opCtx,
"waitWithPinnedCursorDuringGetMoreBatch",
@@ -548,7 +547,7 @@ Message getMore(OperationContext* opCtx,
// If the 'waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch' failpoint is active, we
// set the 'msg' field of this operation's CurOp to signal that we've hit this point and
// then spin until the failpoint is released.
- if (MONGO_FAIL_POINT(waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch)) {
+ if (MONGO_unlikely(waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch,
opCtx,
diff --git a/src/mongo/db/query/find_common.h b/src/mongo/db/query/find_common.h
index 14515224326..90216d9138c 100644
--- a/src/mongo/db/query/find_common.h
+++ b/src/mongo/db/query/find_common.h
@@ -57,23 +57,23 @@ class CanonicalQuery;
class QueryRequest;
// Failpoint for making find hang.
-MONGO_FAIL_POINT_DECLARE(waitInFindBeforeMakingBatch);
+extern FailPoint waitInFindBeforeMakingBatch;
// Failpoint for making getMore not wait for an awaitdata cursor. Allows us to avoid waiting during
// tests.
-MONGO_FAIL_POINT_DECLARE(disableAwaitDataForGetMoreCmd);
+extern FailPoint disableAwaitDataForGetMoreCmd;
// Enabling this fail point will cause getMores to busy wait after pinning the cursor
// but before we have started building the batch, until the fail point is disabled.
-MONGO_FAIL_POINT_DECLARE(waitAfterPinningCursorBeforeGetMoreBatch);
+extern FailPoint waitAfterPinningCursorBeforeGetMoreBatch;
// Enabling this fail point will cause getMores to busy wait after setting up the plan executor and
// before beginning the batch.
-MONGO_FAIL_POINT_DECLARE(waitWithPinnedCursorDuringGetMoreBatch);
+extern FailPoint waitWithPinnedCursorDuringGetMoreBatch;
// Enabling this failpoint will cause getMores to wait just before it unpins its cursor after it
// has completed building the current batch.
-MONGO_FAIL_POINT_DECLARE(waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch);
+extern FailPoint waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch;
/**
* Suite of find/getMore related functions used in both the mongod and mongos query paths.
diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp
index b3f73cf26bc..7a07b1f2a03 100644
--- a/src/mongo/db/query/plan_executor_impl.cpp
+++ b/src/mongo/db/query/plan_executor_impl.cpp
@@ -472,7 +472,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_waitForInserts(CappedInsertNotifierDa
PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* objOut,
RecordId* dlOut) {
- if (MONGO_FAIL_POINT(planExecutorAlwaysFails)) {
+ if (MONGO_unlikely(planExecutorAlwaysFails.shouldFail())) {
Status status(ErrorCodes::InternalError,
str::stream() << "PlanExecutor hit planExecutorAlwaysFails fail point");
*objOut =
@@ -573,7 +573,8 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* obj
// This result didn't have the data the caller wanted, try again.
} else if (PlanStage::NEED_YIELD == code) {
invariant(id == WorkingSet::INVALID_ID);
- if (!_yieldPolicy->canAutoYield() || MONGO_FAIL_POINT(skipWriteConflictRetries)) {
+ if (!_yieldPolicy->canAutoYield() ||
+ MONGO_unlikely(skipWriteConflictRetries.shouldFail())) {
throw WriteConflictException();
}
@@ -589,10 +590,10 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* obj
} else if (PlanStage::NEED_TIME == code) {
// Fall through to yield check at end of large conditional.
} else if (PlanStage::IS_EOF == code) {
- if (MONGO_FAIL_POINT(planExecutorHangBeforeShouldWaitForInserts)) {
+ if (MONGO_unlikely(planExecutorHangBeforeShouldWaitForInserts.shouldFail())) {
log() << "PlanExecutor - planExecutorHangBeforeShouldWaitForInserts fail point "
"enabled. Blocking until fail point is disabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(planExecutorHangBeforeShouldWaitForInserts);
+ planExecutorHangBeforeShouldWaitForInserts.pauseWhileSet();
}
if (!_shouldWaitForInserts()) {
return PlanExecutor::IS_EOF;
diff --git a/src/mongo/db/query/plan_yield_policy.cpp b/src/mongo/db/query/plan_yield_policy.cpp
index 43590f27d7d..fbdb0d117ca 100644
--- a/src/mongo/db/query/plan_yield_policy.cpp
+++ b/src/mongo/db/query/plan_yield_policy.cpp
@@ -90,7 +90,7 @@ Status PlanYieldPolicy::yieldOrInterrupt(std::function<void()> whileYieldingFn)
invariant(opCtx);
// If the 'setInterruptOnlyPlansCheckForInterruptHang' fail point is enabled, set the 'msg'
// field of this operation's CurOp to signal that we've hit this point.
- if (MONGO_FAIL_POINT(setInterruptOnlyPlansCheckForInterruptHang)) {
+ if (MONGO_unlikely(setInterruptOnlyPlansCheckForInterruptHang.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&setInterruptOnlyPlansCheckForInterruptHang,
opCtx,
@@ -183,20 +183,17 @@ void PlanYieldPolicy::_yieldAllLocks(OperationContext* opCtx,
// Track the number of yields in CurOp.
CurOp::get(opCtx)->yielded();
- MONGO_FAIL_POINT_BLOCK(setYieldAllLocksHang, config) {
- StringData ns{config.getData().getStringField("namespace")};
- if (ns.empty() || ns == planExecNS.ns()) {
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(setYieldAllLocksHang);
- }
- }
-
- MONGO_FAIL_POINT_BLOCK(setYieldAllLocksWait, customWait) {
- const BSONObj& data = customWait.getData();
- BSONElement customWaitNS = data["namespace"];
- if (!customWaitNS || planExecNS.ns() == customWaitNS.str()) {
- sleepFor(Milliseconds(data["waitForMillis"].numberInt()));
- }
- }
+ setYieldAllLocksHang.executeIf([](auto&&) { setYieldAllLocksHang.pauseWhileSet(); },
+ [&](const BSONObj& config) {
+ StringData ns = config.getStringField("namespace");
+ return ns.empty() || ns == planExecNS.ns();
+ });
+ setYieldAllLocksWait.executeIf(
+ [&](const BSONObj& data) { sleepFor(Milliseconds(data["waitForMillis"].numberInt())); },
+ [&](const BSONObj& config) {
+ BSONElement dataNs = config["namespace"];
+ return !dataNs || planExecNS.ns() == dataNs.str();
+ });
if (whileYieldingFn) {
whileYieldingFn();
diff --git a/src/mongo/db/repair_database_and_check_version.cpp b/src/mongo/db/repair_database_and_check_version.cpp
index 1a0711faddf..b08d67696c0 100644
--- a/src/mongo/db/repair_database_and_check_version.cpp
+++ b/src/mongo/db/repair_database_and_check_version.cpp
@@ -363,7 +363,7 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
if (storageGlobalParams.repair) {
invariant(!storageGlobalParams.readOnly);
- if (MONGO_FAIL_POINT(exitBeforeDataRepair)) {
+ if (MONGO_unlikely(exitBeforeDataRepair.shouldFail())) {
log() << "Exiting because 'exitBeforeDataRepair' fail point was set.";
quickExit(EXIT_ABRUPT);
}
@@ -420,7 +420,7 @@ bool repairDatabasesAndCheckVersion(OperationContext* opCtx) {
}
if (storageGlobalParams.repair) {
- if (MONGO_FAIL_POINT(exitBeforeRepairInvalidatesConfig)) {
+ if (MONGO_unlikely(exitBeforeRepairInvalidatesConfig.shouldFail())) {
log() << "Exiting because 'exitBeforeRepairInvalidatesConfig' fail point was set.";
quickExit(EXIT_ABRUPT);
}
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 1e947868a65..6426f8e1979 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -271,8 +271,8 @@ Status _applyOps(OperationContext* opCtx,
(*numApplied)++;
- if (MONGO_FAIL_POINT(applyOpsPauseBetweenOperations)) {
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(applyOpsPauseBetweenOperations);
+ if (MONGO_unlikely(applyOpsPauseBetweenOperations.shouldFail())) {
+ applyOpsPauseBetweenOperations.pauseWhileSet();
}
}
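[Editor's note] pauseWhileSet appears in two flavors in this patch: the bare form spins until the fail point is cleared (old MONGO_FAIL_POINT_PAUSE_WHILE_SET), while the overload taking an OperationContext replaces the old ..._OR_INTERRUPTED macro and so also respects interruption. A sketch, with hangSomewhere as an illustrative name:

    hangSomewhere.pauseWhileSet();       // spin until the fail point is turned off
    hangSomewhere.pauseWhileSet(opCtx);  // same, but interruptible via the operation context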
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 990e7167aeb..870f9d54e45 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -219,7 +219,7 @@ void BackgroundSync::_runProducer() {
}
void BackgroundSync::_produce() {
- if (MONGO_FAIL_POINT(stopReplProducer)) {
+ if (MONGO_unlikely(stopReplProducer.shouldFail())) {
// This log output is used in js tests so please leave it.
log() << "bgsync - stopReplProducer fail point "
"enabled. Blocking until fail point is disabled.";
@@ -227,7 +227,7 @@ void BackgroundSync::_produce() {
// TODO(SERVER-27120): Remove the return statement and uncomment the while loop.
// Currently we cannot block here or we prevent primaries from being fully elected since
// we'll never call _signalNoNewDataForApplier.
- // while (MONGO_FAIL_POINT(stopReplProducer) && !inShutdown()) {
+ // while (MONGO_unlikely(stopReplProducer.shouldFail()) && !inShutdown()) {
// mongo::sleepsecs(1);
// }
mongo::sleepsecs(1);
@@ -577,11 +577,11 @@ void BackgroundSync::_runRollback(OperationContext* opCtx,
}
}
- if (MONGO_FAIL_POINT(rollbackHangBeforeStart)) {
+ if (MONGO_unlikely(rollbackHangBeforeStart.shouldFail())) {
// This log output is used in js tests so please leave it.
log() << "rollback - rollbackHangBeforeStart fail point "
"enabled. Blocking until fail point is disabled.";
- while (MONGO_FAIL_POINT(rollbackHangBeforeStart) && !inShutdown()) {
+ while (MONGO_unlikely(rollbackHangBeforeStart.shouldFail()) && !inShutdown()) {
mongo::sleepsecs(1);
}
}
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index b081be1fce0..c5270c5370d 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -429,20 +429,22 @@ void CollectionCloner::_beginCollectionCallback(const executor::TaskExecutor::Ca
_finishCallback(cbd.status);
return;
}
- MONGO_FAIL_POINT_BLOCK(initialSyncHangCollectionClonerBeforeEstablishingCursor, nssData) {
- const BSONObj& data = nssData.getData();
- auto nss = data["nss"].str();
- // Only hang when cloning the specified collection, or if no collection was specified.
- if (nss.empty() || _destNss.toString() == nss) {
- while (MONGO_FAIL_POINT(initialSyncHangCollectionClonerBeforeEstablishingCursor) &&
+ initialSyncHangCollectionClonerBeforeEstablishingCursor.executeIf(
+ [&](const BSONObj& data) {
+ while (MONGO_unlikely(
+ initialSyncHangCollectionClonerBeforeEstablishingCursor.shouldFail()) &&
!_isShuttingDown()) {
log() << "initialSyncHangCollectionClonerBeforeEstablishingCursor fail point "
"enabled for "
<< _destNss.toString() << ". Blocking until fail point is disabled.";
mongo::sleepsecs(1);
}
- }
- }
+ },
+ [&](const BSONObj& data) {
+ auto nss = data["nss"].str();
+ // Only hang when cloning the specified collection, or if no collection was specified.
+ return nss.empty() || nss == _destNss.toString();
+ });
if (!_idIndexSpec.isEmpty() && _options.autoIndexId == CollectionOptions::NO) {
warning()
<< "Found the _id_ index spec but the collection specified autoIndexId of false on ns:"
@@ -510,16 +512,16 @@ void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& cal
}
}
- MONGO_FAIL_POINT_BLOCK(initialSyncHangBeforeCollectionClone, options) {
- const BSONObj& data = options.getData();
- if (data["namespace"].String() == _destNss.ns()) {
+ initialSyncHangBeforeCollectionClone.executeIf(
+ [&](const BSONObj&) {
log() << "initial sync - initialSyncHangBeforeCollectionClone fail point "
"enabled. Blocking until fail point is disabled.";
- while (MONGO_FAIL_POINT(initialSyncHangBeforeCollectionClone) && !_isShuttingDown()) {
+ while (MONGO_unlikely(initialSyncHangBeforeCollectionClone.shouldFail()) &&
+ !_isShuttingDown()) {
mongo::sleepsecs(1);
}
- }
- }
+ },
+ [&](const BSONObj& data) { return data["namespace"].String() == _destNss.ns(); });
Status clientConnectionStatus = _clientConnection->connect(_source, StringData());
if (!clientConnectionStatus.isOK()) {
@@ -600,20 +602,22 @@ void CollectionCloner::_handleNextBatch(std::shared_ptr<OnCompletionGuard> onCom
uassertStatusOK(newStatus);
}
- MONGO_FAIL_POINT_BLOCK(initialSyncHangCollectionClonerAfterHandlingBatchResponse, nssData) {
- const BSONObj& data = nssData.getData();
- auto nss = data["nss"].str();
- // Only hang when cloning the specified collection, or if no collection was specified.
- if (nss.empty() || _destNss.toString() == nss) {
- while (MONGO_FAIL_POINT(initialSyncHangCollectionClonerAfterHandlingBatchResponse) &&
+ initialSyncHangCollectionClonerAfterHandlingBatchResponse.executeIf(
+ [&](const BSONObj&) {
+ while (MONGO_unlikely(
+ initialSyncHangCollectionClonerAfterHandlingBatchResponse.shouldFail()) &&
!_isShuttingDown()) {
log() << "initialSyncHangCollectionClonerAfterHandlingBatchResponse fail point "
"enabled for "
<< _destNss.toString() << ". Blocking until fail point is disabled.";
mongo::sleepsecs(1);
}
- }
- }
+ },
+ [&](const BSONObj& data) {
+ // Only hang when cloning the specified collection, or if no collection was specified.
+ auto nss = data["nss"].str();
+ return nss.empty() || nss == _destNss.toString();
+ });
}
void CollectionCloner::_verifyCollectionWasDropped(
@@ -702,19 +706,21 @@ void CollectionCloner::_insertDocumentsCallback(
return;
}
- MONGO_FAIL_POINT_BLOCK(initialSyncHangDuringCollectionClone, options) {
- const BSONObj& data = options.getData();
- if (data["namespace"].String() == _destNss.ns() &&
- static_cast<int>(_stats.documentsCopied) >= data["numDocsToClone"].numberInt()) {
+ initialSyncHangDuringCollectionClone.executeIf(
+ [&](const BSONObj&) {
lk.unlock();
log() << "initial sync - initialSyncHangDuringCollectionClone fail point "
"enabled. Blocking until fail point is disabled.";
- while (MONGO_FAIL_POINT(initialSyncHangDuringCollectionClone) && !_isShuttingDown()) {
+ while (MONGO_unlikely(initialSyncHangDuringCollectionClone.shouldFail()) &&
+ !_isShuttingDown()) {
mongo::sleepsecs(1);
}
lk.lock();
- }
- }
+ },
+ [&](const BSONObj& data) {
+ return data["namespace"].String() == _destNss.ns() &&
+ static_cast<int>(_stats.documentsCopied) >= data["numDocsToClone"].numberInt();
+ });
}
void CollectionCloner::_finishCallback(const Status& status) {
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index 5a26e80195d..63a00583854 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -178,19 +178,21 @@ Status DatabaseCloner::startup() noexcept {
return Status(ErrorCodes::ShutdownInProgress, "database cloner completed");
}
- MONGO_FAIL_POINT_BLOCK(initialSyncHangBeforeListCollections, customArgs) {
- const auto& data = customArgs.getData();
- const auto databaseElem = data["database"];
- if (!databaseElem || databaseElem.checkAndGetStringData() == _dbname) {
+ initialSyncHangBeforeListCollections.executeIf(
+ [&](const BSONObj&) {
lk.unlock();
log() << "initial sync - initialSyncHangBeforeListCollections fail point "
"enabled. Blocking until fail point is disabled.";
- while (MONGO_FAIL_POINT(initialSyncHangBeforeListCollections) && !_isShuttingDown()) {
+ while (MONGO_unlikely(initialSyncHangBeforeListCollections.shouldFail()) &&
+ !_isShuttingDown()) {
mongo::sleepsecs(1);
}
lk.lock();
- }
- }
+ },
+ [&](const BSONObj& data) {
+ const auto databaseElem = data["database"];
+ return !databaseElem || databaseElem.checkAndGetStringData() == _dbname;
+ });
_stats.start = _executor->now();
LOG(1) << "Scheduling listCollections call for database: " << _dbname;
@@ -294,16 +296,13 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
return;
}
- MONGO_FAIL_POINT_BLOCK(initialSyncHangAfterListCollections, options) {
- const BSONObj& data = options.getData();
- if (data["database"].String() == _dbname) {
+ initialSyncHangAfterListCollections.executeIf(
+ [&](const BSONObj&) {
log() << "initial sync - initialSyncHangAfterListCollections fail point "
"enabled. Blocking until fail point is disabled.";
- while (MONGO_FAIL_POINT(initialSyncHangAfterListCollections)) {
- mongo::sleepsecs(1);
- }
- }
- }
+ initialSyncHangAfterListCollections.pauseWhileSet();
+ },
+ [&](const BSONObj& data) { return data["database"].String() == _dbname; });
_collectionNamespaces.reserve(_collectionInfos.size());
std::set<std::string> seen;
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 418899be19c..444435d4085 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -542,7 +542,7 @@ void InitialSyncer::_chooseSyncSourceCallback(
return;
}
- if (MONGO_FAIL_POINT(failInitialSyncWithBadHost)) {
+ if (MONGO_unlikely(failInitialSyncWithBadHost.shouldFail())) {
status = Status(ErrorCodes::InvalidSyncSource,
"initial sync failed - failInitialSyncWithBadHost failpoint is set.");
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, status);
@@ -580,12 +580,13 @@ void InitialSyncer::_chooseSyncSourceCallback(
return;
}
- if (MONGO_FAIL_POINT(initialSyncHangBeforeCreatingOplog)) {
+ if (MONGO_unlikely(initialSyncHangBeforeCreatingOplog.shouldFail())) {
// This log output is used in js tests so please leave it.
log() << "initial sync - initialSyncHangBeforeCreatingOplog fail point "
"enabled. Blocking until fail point is disabled.";
lock.unlock();
- while (MONGO_FAIL_POINT(initialSyncHangBeforeCreatingOplog) && !_isShuttingDown()) {
+ while (MONGO_unlikely(initialSyncHangBeforeCreatingOplog.shouldFail()) &&
+ !_isShuttingDown()) {
mongo::sleepsecs(1);
}
lock.lock();
@@ -953,14 +954,15 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
return;
}
- if (MONGO_FAIL_POINT(initialSyncHangBeforeCopyingDatabases)) {
+ if (MONGO_unlikely(initialSyncHangBeforeCopyingDatabases.shouldFail())) {
lock.unlock();
// This could have been done with a scheduleWorkAt but this is used only by JS tests where
// we run with multiple threads so it's fine to spin on this thread.
// This log output is used in js tests so please leave it.
log() << "initial sync - initialSyncHangBeforeCopyingDatabases fail point "
"enabled. Blocking until fail point is disabled.";
- while (MONGO_FAIL_POINT(initialSyncHangBeforeCopyingDatabases) && !_isShuttingDown()) {
+ while (MONGO_unlikely(initialSyncHangBeforeCopyingDatabases.shouldFail()) &&
+ !_isShuttingDown()) {
mongo::sleepsecs(1);
}
lock.lock();
@@ -1026,13 +1028,13 @@ void InitialSyncer::_databasesClonerCallback(const Status& databaseClonerFinishS
log() << "Finished cloning data: " << redact(databaseClonerFinishStatus)
<< ". Beginning oplog replay.";
- if (MONGO_FAIL_POINT(initialSyncHangAfterDataCloning)) {
+ if (MONGO_unlikely(initialSyncHangAfterDataCloning.shouldFail())) {
// This could have been done with a scheduleWorkAt but this is used only by JS tests where
// we run with multiple threads so it's fine to spin on this thread.
// This log output is used in js tests so please leave it.
log() << "initial sync - initialSyncHangAfterDataCloning fail point "
"enabled. Blocking until fail point is disabled.";
- while (MONGO_FAIL_POINT(initialSyncHangAfterDataCloning) && !_isShuttingDown()) {
+ while (MONGO_unlikely(initialSyncHangAfterDataCloning.shouldFail()) && !_isShuttingDown()) {
mongo::sleepsecs(1);
}
}
@@ -1147,22 +1149,22 @@ void InitialSyncer::_getNextApplierBatchCallback(
// Set and unset by the InitialSyncTest fixture to cause initial sync to pause so that the
// Initial Sync Fuzzer can run commands on the sync source.
- if (MONGO_FAIL_POINT(initialSyncFuzzerSynchronizationPoint1)) {
+ if (MONGO_unlikely(initialSyncFuzzerSynchronizationPoint1.shouldFail())) {
log() << "Initial Syncer is about to apply the next oplog batch of size: "
<< batchResult.getValue().size();
log() << "initialSyncFuzzerSynchronizationPoint1 fail point enabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(initialSyncFuzzerSynchronizationPoint1);
+ initialSyncFuzzerSynchronizationPoint1.pauseWhileSet();
}
- if (MONGO_FAIL_POINT(initialSyncFuzzerSynchronizationPoint2)) {
+ if (MONGO_unlikely(initialSyncFuzzerSynchronizationPoint2.shouldFail())) {
log() << "initialSyncFuzzerSynchronizationPoint2 fail point enabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(initialSyncFuzzerSynchronizationPoint2);
+ initialSyncFuzzerSynchronizationPoint2.pauseWhileSet();
}
- if (MONGO_FAIL_POINT(failInitialSyncBeforeApplyingBatch)) {
+ if (MONGO_unlikely(failInitialSyncBeforeApplyingBatch.shouldFail())) {
log() << "initial sync - failInitialSyncBeforeApplyingBatch fail point enabled. Pausing"
<< "until fail point is disabled, then will fail initial sync.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(failInitialSyncBeforeApplyingBatch);
+ failInitialSyncBeforeApplyingBatch.pauseWhileSet();
status = Status(ErrorCodes::CallbackCanceled,
"failInitialSyncBeforeApplyingBatch fail point enabled");
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, status);
@@ -1236,7 +1238,7 @@ void InitialSyncer::_multiApplierCallback(const Status& multiApplierStatus,
// Set to cause initial sync to fassert instead of restart if applying a batch fails, so that
// tests can be robust to network errors but not oplog idempotency errors.
- if (MONGO_FAIL_POINT(initialSyncFassertIfApplyingBatchFails)) {
+ if (MONGO_unlikely(initialSyncFassertIfApplyingBatchFails.shouldFail())) {
log() << "initialSyncFassertIfApplyingBatchFails fail point enabled.";
fassert(31210, status);
}
@@ -1325,9 +1327,9 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
_stats.initialSyncAttemptInfos.emplace_back(
InitialSyncer::InitialSyncAttemptInfo{runTime, result.getStatus(), _syncSource});
- if (MONGO_FAIL_POINT(failAndHangInitialSync)) {
+ if (MONGO_unlikely(failAndHangInitialSync.shouldFail())) {
log() << "failAndHangInitialSync fail point enabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(failAndHangInitialSync);
+ failAndHangInitialSync.pauseWhileSet();
result = Status(ErrorCodes::InternalError, "failAndHangInitialSync fail point enabled");
}
@@ -1399,11 +1401,11 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
std::swap(_onCompletion, onCompletion);
}
- if (MONGO_FAIL_POINT(initialSyncHangBeforeFinish)) {
+ if (MONGO_unlikely(initialSyncHangBeforeFinish.shouldFail())) {
// This log output is used in js tests so please leave it.
log() << "initial sync - initialSyncHangBeforeFinish fail point "
"enabled. Blocking until fail point is disabled.";
- while (MONGO_FAIL_POINT(initialSyncHangBeforeFinish) && !_isShuttingDown()) {
+ while (MONGO_unlikely(initialSyncHangBeforeFinish.shouldFail()) && !_isShuttingDown()) {
mongo::sleepsecs(1);
}
}
@@ -1428,7 +1430,7 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
// Clear the initial sync progress after an initial sync attempt has been successfully
// completed.
- if (lastApplied.isOK() && !MONGO_FAIL_POINT(skipClearInitialSyncState)) {
+ if (lastApplied.isOK() && !MONGO_unlikely(skipClearInitialSyncState.shouldFail())) {
_initialSyncState.reset();
}
}
@@ -1628,7 +1630,7 @@ StatusWith<Operations> InitialSyncer::_getNextApplierBatch_inlock() {
// If the fail-point is active, delay the apply batch by returning an empty batch so that
// _getNextApplierBatchCallback() will reschedule itself at a later time.
// See InitialSyncerOptions::getApplierBatchCallbackRetryWait.
- if (MONGO_FAIL_POINT(rsSyncApplyStop)) {
+ if (MONGO_unlikely(rsSyncApplyStop.shouldFail())) {
return Operations();
}
diff --git a/src/mongo/db/repl/initial_syncer.h b/src/mongo/db/repl/initial_syncer.h
index 414de99a576..6ad23526d85 100644
--- a/src/mongo/db/repl/initial_syncer.h
+++ b/src/mongo/db/repl/initial_syncer.h
@@ -63,13 +63,13 @@ namespace repl {
// TODO: Remove forward declares once we remove rs_initialsync.cpp and other dependents.
// Failpoint which fails initial sync and leaves an oplog entry in the buffer.
-MONGO_FAIL_POINT_DECLARE(failInitSyncWithBufferedEntriesLeft);
+extern FailPoint failInitSyncWithBufferedEntriesLeft;
// Failpoint which causes the initial sync function to hang before copying databases.
-MONGO_FAIL_POINT_DECLARE(initialSyncHangBeforeCopyingDatabases);
+extern FailPoint initialSyncHangBeforeCopyingDatabases;
// Failpoint which stops the applier.
-MONGO_FAIL_POINT_DECLARE(rsSyncApplyStop);
+extern FailPoint rsSyncApplyStop;
struct InitialSyncState;
struct MemberState;
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index 55f4b1074bf..9d95ee96821 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -3495,7 +3495,7 @@ TEST_F(
// Enable 'rsSyncApplyStop' so that _getNextApplierBatch_inlock() returns an empty batch of
// operations instead of a batch containing an oplog entry with a bad version.
- auto failPoint = getGlobalFailPointRegistry()->getFailPoint("rsSyncApplyStop");
+ auto failPoint = globalFailPointRegistry().find("rsSyncApplyStop");
failPoint->setMode(FailPoint::alwaysOn);
ON_BLOCK_EXIT([failPoint]() { failPoint->setMode(FailPoint::off); });
@@ -3954,7 +3954,7 @@ TEST_F(InitialSyncerTest,
auto opCtx = makeOpCtx();
// This fail point makes chooseSyncSourceCallback fail with an InvalidSyncSource error.
- auto failPoint = getGlobalFailPointRegistry()->getFailPoint("failInitialSyncWithBadHost");
+ auto failPoint = globalFailPointRegistry().find("failInitialSyncWithBadHost");
failPoint->setMode(FailPoint::alwaysOn);
ON_BLOCK_EXIT([failPoint]() { failPoint->setMode(FailPoint::off); });
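[Editor's note] Test code that flips fail points by name now goes through globalFailPointRegistry().find() instead of getGlobalFailPointRegistry()->getFailPoint(). Condensed, the test-side idiom from the hunks above is:

    auto fp = globalFailPointRegistry().find("rsSyncApplyStop");
    fp->setMode(FailPoint::alwaysOn);
    ON_BLOCK_EXIT([fp] { fp->setMode(FailPoint::off); });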
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 988076d25f0..fa388531db3 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -339,10 +339,9 @@ void _logOpsInner(OperationContext* opCtx,
}
// Optionally hang before advancing lastApplied.
- if (MONGO_FAIL_POINT(hangBeforeLogOpAdvancesLastApplied)) {
+ if (MONGO_unlikely(hangBeforeLogOpAdvancesLastApplied.shouldFail())) {
log() << "hangBeforeLogOpAdvancesLastApplied fail point enabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx,
- hangBeforeLogOpAdvancesLastApplied);
+ hangBeforeLogOpAdvancesLastApplied.pauseWhileSet(opCtx);
}
// Optimes on the primary should always represent consistent database states.
@@ -477,13 +476,12 @@ std::vector<OpTime> logInsertOps(OperationContext* opCtx,
RecordId(), RecordData(bsonOplogEntries[i].objdata(), bsonOplogEntries[i].objsize())};
}
- MONGO_FAIL_POINT_BLOCK(sleepBetweenInsertOpTimeGenerationAndLogOp, customWait) {
- const BSONObj& data = customWait.getData();
+ sleepBetweenInsertOpTimeGenerationAndLogOp.execute([&](const BSONObj& data) {
auto numMillis = data["waitForMillis"].numberInt();
log() << "Sleeping for " << numMillis << "ms after receiving " << count << " optimes from "
<< opTimes.front() << " to " << opTimes.back();
sleepmillis(numMillis);
- }
+ });
invariant(!opTimes.empty());
auto lastOpTime = opTimes.back();
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index 0aa50c4be16..89ada66c851 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -370,7 +370,7 @@ Milliseconds OplogFetcher::getAwaitDataTimeout_forTest() const {
}
Milliseconds OplogFetcher::_getGetMoreMaxTime() const {
- if (MONGO_FAIL_POINT(setSmallOplogGetMoreMaxTimeMS)) {
+ if (MONGO_unlikely(setSmallOplogGetMoreMaxTimeMS.shouldFail())) {
return Milliseconds(50);
}
@@ -382,24 +382,31 @@ StatusWith<BSONObj> OplogFetcher::_onSuccessfulBatch(const Fetcher::QueryRespons
// Stop fetching and return on fail point.
// This fail point makes the oplog fetcher ignore the downloaded batch of operations and not
// error out. The FailPointEnabled error will be caught by the AbstractOplogFetcher.
- if (MONGO_FAIL_POINT(stopReplProducer)) {
+ if (MONGO_unlikely(stopReplProducer.shouldFail())) {
return Status(ErrorCodes::FailPointEnabled, "stopReplProducer fail point is enabled");
}
// Stop fetching and return when we reach a particular document. This failpoint should be used
// with the setParameter bgSyncOplogFetcherBatchSize=1, so that documents are fetched one at a
// time.
- MONGO_FAIL_POINT_BLOCK(stopReplProducerOnDocument, fp) {
- auto opCtx = cc().makeOperationContext();
- boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx.get(), nullptr));
- auto query = fp.getData()["document"].Obj();
- Matcher m(query, expCtx);
- if (!queryResponse.documents.empty() &&
- m.matches(queryResponse.documents.front()["o"].Obj())) {
- log() << "stopReplProducerOnDocument fail point is enabled.";
- return Status(ErrorCodes::FailPointEnabled,
- "stopReplProducerOnDocument fail point is enabled");
- }
+ {
+ Status status = Status::OK();
+ stopReplProducerOnDocument.executeIf(
+ [&](auto&&) {
+ status = {ErrorCodes::FailPointEnabled,
+ "stopReplProducerOnDocument fail point is enabled."};
+ log() << status.reason();
+ },
+ [&](const BSONObj& data) {
+ auto opCtx = cc().makeOperationContext();
+ boost::intrusive_ptr<ExpressionContext> expCtx(
+ new ExpressionContext(opCtx.get(), nullptr));
+ Matcher m(data["document"].Obj(), expCtx);
+ return !queryResponse.documents.empty() &&
+ m.matches(queryResponse.documents.front()["o"].Obj());
+ });
+ if (!status.isOK())
+ return status;
}
const auto& documents = queryResponse.documents;
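[Editor's note] The oplog-fetcher hunk above also shows how an early return inside the old macro block is handled: a lambda cannot return from the enclosing function, so the new code records a Status inside executeIf and checks it afterwards. The pattern, with hypothetical someFailPoint and shouldTrigger names:

    Status status = Status::OK();
    someFailPoint.executeIf(
        [&](auto&&) {
            status = {ErrorCodes::FailPointEnabled, "someFailPoint is enabled"};
        },
        [&](const BSONObj& data) { return shouldTrigger(data); });
    if (!status.isOK())
        return status;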
diff --git a/src/mongo/db/repl/oplog_fetcher.h b/src/mongo/db/repl/oplog_fetcher.h
index f547304ac39..6bf7a0275eb 100644
--- a/src/mongo/db/repl/oplog_fetcher.h
+++ b/src/mongo/db/repl/oplog_fetcher.h
@@ -44,7 +44,7 @@
namespace mongo {
namespace repl {
-MONGO_FAIL_POINT_DECLARE(stopReplProducer);
+extern FailPoint stopReplProducer;
/**
* The oplog fetcher, once started, reads operations from a remote oplog using a tailable cursor.
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index 096e74076a3..cadbce56e30 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -679,10 +679,8 @@ public:
const string&,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- MONGO_FAIL_POINT_BLOCK(rsDelayHeartbeatResponse, delay) {
- const BSONObj& data = delay.getData();
- sleepsecs(data["delay"].numberInt());
- }
+ rsDelayHeartbeatResponse.execute(
+ [&](const BSONObj& data) { sleepsecs(data["delay"].numberInt()); });
LOG_FOR_HEARTBEATS(2) << "Received heartbeat request from " << cmdObj.getStringField("from")
<< ", " << cmdObj;
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 596ece684c2..05806b9b9db 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -881,12 +881,12 @@ void ReplicationCoordinatorExternalStateImpl::notifyOplogMetadataWaiters(
scheduleWork(
_taskExecutor.get(),
[committedOpTime, reaper](const executor::TaskExecutor::CallbackArgs& args) {
- if (MONGO_FAIL_POINT(dropPendingCollectionReaperHang)) {
+ if (MONGO_unlikely(dropPendingCollectionReaperHang.shouldFail())) {
log() << "fail point dropPendingCollectionReaperHang enabled. "
"Blocking until fail point is disabled. "
"committedOpTime: "
<< committedOpTime;
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(dropPendingCollectionReaperHang);
+ dropPendingCollectionReaperHang.pauseWhileSet();
}
auto opCtx = cc().makeOperationContext();
reaper->dropCollectionsOlderThan(opCtx.get(), committedOpTime);
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 5d34f1e691c..a98101fb33b 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -2030,11 +2030,12 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
auto action = _updateMemberStateFromTopologyCoordinator(lk, opCtx);
lk.unlock();
- if (MONGO_FAIL_POINT(stepdownHangBeforePerformingPostMemberStateUpdateActions)) {
+ if (MONGO_unlikely(stepdownHangBeforePerformingPostMemberStateUpdateActions.shouldFail())) {
log() << "stepping down from primary - "
"stepdownHangBeforePerformingPostMemberStateUpdateActions fail point enabled. "
"Blocking until fail point is disabled.";
- while (MONGO_FAIL_POINT(stepdownHangBeforePerformingPostMemberStateUpdateActions)) {
+ while (MONGO_unlikely(
+ stepdownHangBeforePerformingPostMemberStateUpdateActions.shouldFail())) {
mongo::sleepsecs(1);
{
stdx::lock_guard<stdx::mutex> lock(_mutex);
@@ -3567,11 +3568,10 @@ boost::optional<OpTimeAndWallTime> ReplicationCoordinatorImpl::_chooseStableOpTi
maximumStableOpTime.opTime.getTimestamp());
}
- MONGO_FAIL_POINT_BLOCK(holdStableTimestampAtSpecificTimestamp, data) {
- const BSONObj& dataObj = data.getData();
+ holdStableTimestampAtSpecificTimestamp.execute([&](const BSONObj& dataObj) {
const auto holdStableTimestamp = dataObj["timestamp"].timestamp();
maximumStableTimestamp = std::min(maximumStableTimestamp, holdStableTimestamp);
- }
+ });
maximumStableOpTime = {OpTime(maximumStableTimestamp, maximumStableOpTime.opTime.getTerm()),
maximumStableOpTime.wallTime};
@@ -3693,7 +3693,7 @@ void ReplicationCoordinatorImpl::_setStableTimestampForStorage(WithLock lk) {
}
// Set the stable timestamp regardless of whether the majority commit point moved
// forward.
- if (!MONGO_FAIL_POINT(disableSnapshotting)) {
+ if (!MONGO_unlikely(disableSnapshotting.shouldFail())) {
_storage->setStableTimestamp(getServiceContext(),
stableOpTime->opTime.getTimestamp());
}
@@ -3980,7 +3980,7 @@ bool ReplicationCoordinatorImpl::_updateCommittedSnapshot(
_currentCommittedSnapshot->opTime.getTimestamp());
invariant(newCommittedSnapshot.opTime >= _currentCommittedSnapshot->opTime);
}
- if (MONGO_FAIL_POINT(disableSnapshotting))
+ if (MONGO_unlikely(disableSnapshotting.shouldFail()))
return false;
_currentCommittedSnapshot = newCommittedSnapshot;
_currentCommittedSnapshotCond.notify_all();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index b1464c0de9b..f49ecec21be 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -355,13 +355,13 @@ void ReplicationCoordinatorImpl::_onVoteRequestComplete(
_voteRequester.reset();
auto electionFinishedEvent = _electionFinishedEvent;
- MONGO_FAIL_POINT_BLOCK(electionHangsBeforeUpdateMemberState, customWait) {
- auto waitForMillis = Milliseconds(customWait.getData()["waitForMillis"].numberInt());
+ electionHangsBeforeUpdateMemberState.execute([&](const BSONObj& customWait) {
+ auto waitForMillis = Milliseconds(customWait["waitForMillis"].numberInt());
log() << "election succeeded - electionHangsBeforeUpdateMemberState fail point "
"enabled, sleeping "
<< waitForMillis;
sleepFor(waitForMillis);
- }
+ });
_postWonElectionUpdateMemberState(lk);
_replExecutor->signalEvent(electionFinishedEvent);
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index 4c7afc1130f..c51f4ace311 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -478,9 +478,7 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100));
- getGlobalFailPointRegistry()
- ->getFailPoint("blockHeartbeatReconfigFinish")
- ->setMode(FailPoint::alwaysOn);
+ globalFailPointRegistry().find("blockHeartbeatReconfigFinish")->setMode(FailPoint::alwaysOn);
// hb reconfig
NetworkInterfaceMock* net = getNet();
@@ -573,9 +571,7 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
ASSERT_EQUALS(1,
countLogLinesContaining("Not standing for election; processing "
"a configuration change"));
- getGlobalFailPointRegistry()
- ->getFailPoint("blockHeartbeatReconfigFinish")
- ->setMode(FailPoint::off);
+ globalFailPointRegistry().find("blockHeartbeatReconfigFinish")->setMode(FailPoint::off);
}
TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequestVotes) {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 8900e2a0471..6097df4f6e0 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -370,7 +370,7 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
return;
}
- if (MONGO_FAIL_POINT(blockHeartbeatStepdown)) {
+ if (MONGO_unlikely(blockHeartbeatStepdown.shouldFail())) {
// This log output is used in js tests so please leave it.
log() << "stepDown - blockHeartbeatStepdown fail point enabled. "
"Blocking until fail point is disabled.";
@@ -380,7 +380,7 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
return _inShutdown;
};
- while (MONGO_FAIL_POINT(blockHeartbeatStepdown) && !inShutdown()) {
+ while (MONGO_unlikely(blockHeartbeatStepdown.shouldFail()) && !inShutdown()) {
mongo::sleepsecs(1);
}
}
@@ -577,7 +577,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
return;
}
- if (MONGO_FAIL_POINT(blockHeartbeatReconfigFinish)) {
+ if (MONGO_unlikely(blockHeartbeatReconfigFinish.shouldFail())) {
LOG_FOR_HEARTBEATS(0) << "blockHeartbeatReconfigFinish fail point enabled. Rescheduling "
"_heartbeatReconfigFinish until fail point is disabled.";
_replExecutor
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 738ff86ef87..e90146ecf34 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -469,9 +469,7 @@ TEST_F(
simulateSuccessfulV1Election();
ASSERT_TRUE(getReplCoord()->getMemberState().primary());
- getGlobalFailPointRegistry()
- ->getFailPoint("blockHeartbeatReconfigFinish")
- ->setMode(FailPoint::alwaysOn);
+ globalFailPointRegistry().find("blockHeartbeatReconfigFinish")->setMode(FailPoint::alwaysOn);
// hb reconfig
NetworkInterfaceMock* net = getNet();
@@ -511,9 +509,7 @@ TEST_F(
ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
getReplCoord()->processReplSetReconfig(opCtx.get(), args, &result));
- getGlobalFailPointRegistry()
- ->getFailPoint("blockHeartbeatReconfigFinish")
- ->setMode(FailPoint::off);
+ globalFailPointRegistry().find("blockHeartbeatReconfigFinish")->setMode(FailPoint::off);
}
TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfig) {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 631a47c6395..117f76eed7d 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -137,9 +137,7 @@ TEST_F(ReplCoordTest, IsMasterIsFalseDuringStepdown) {
ASSERT(replCoord->getMemberState().primary());
// Primary begins stepping down due to new term, but cannot finish.
- getGlobalFailPointRegistry()
- ->getFailPoint("blockHeartbeatStepdown")
- ->setMode(FailPoint::alwaysOn);
+ globalFailPointRegistry().find("blockHeartbeatStepdown")->setMode(FailPoint::alwaysOn);
TopologyCoordinator::UpdateTermResult updateTermResult;
replCoord->updateTerm_forTest(replCoord->getTerm() + 1, &updateTermResult);
@@ -154,7 +152,7 @@ TEST_F(ReplCoordTest, IsMasterIsFalseDuringStepdown) {
ASSERT_FALSE(responseObj["secondary"].Bool());
ASSERT_FALSE(responseObj.hasField("isreplicaset"));
- getGlobalFailPointRegistry()->getFailPoint("blockHeartbeatStepdown")->setMode(FailPoint::off);
+ globalFailPointRegistry().find("blockHeartbeatStepdown")->setMode(FailPoint::off);
}
TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig) {
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index a2e5e2d9e8a..27fac4a3715 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -86,9 +86,7 @@ ReplCoordTest::ReplCoordTest() {
}
ReplCoordTest::~ReplCoordTest() {
- getGlobalFailPointRegistry()
- ->getFailPoint("blockHeartbeatReconfigFinish")
- ->setMode(FailPoint::off);
+ globalFailPointRegistry().find("blockHeartbeatReconfigFinish")->setMode(FailPoint::off);
if (_callShutdown) {
auto opCtx = makeOperationContext();
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 6e93691e17b..9b854735676 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -244,7 +244,7 @@ public:
CommandHelpers::handleMarkKillOnClientDisconnect(opCtx);
// TODO Unwind after SERVER-41070
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, waitInIsMaster);
+ waitInIsMaster.pauseWhileSet(opCtx);
/* currently request to arbiter is (somewhat arbitrarily) an ismaster request that is not
authenticated.
diff --git a/src/mongo/db/repl/roll_back_local_operations.h b/src/mongo/db/repl/roll_back_local_operations.h
index 6c6e6c148ab..b6920cc10c4 100644
--- a/src/mongo/db/repl/roll_back_local_operations.h
+++ b/src/mongo/db/repl/roll_back_local_operations.h
@@ -48,11 +48,11 @@ namespace repl {
// two separate files, rs_rollback and rs_rollback_no_uuid. However, after
// MongoDB 3.8 is released, we no longer need to maintain rs_rollback_no_uuid
// code and these forward declares can be removed. See SERVER-29766.
-MONGO_FAIL_POINT_DECLARE(rollbackHangBeforeFinish);
-MONGO_FAIL_POINT_DECLARE(rollbackHangThenFailAfterWritingMinValid);
+extern FailPoint rollbackHangBeforeFinish;
+extern FailPoint rollbackHangThenFailAfterWritingMinValid;
// This is needed by rs_rollback and rollback_impl.
-MONGO_FAIL_POINT_DECLARE(rollbackHangAfterTransitionToRollback);
+extern FailPoint rollbackHangAfterTransitionToRollback;
class RollBackLocalOperations {
RollBackLocalOperations(const RollBackLocalOperations&) = delete;
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 1dc03162c29..ecb73b66573 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -182,11 +182,10 @@ Status RollbackImpl::runRollback(OperationContext* opCtx) {
}
_listener->onTransitionToRollback();
- if (MONGO_FAIL_POINT(rollbackHangAfterTransitionToRollback)) {
+ if (MONGO_unlikely(rollbackHangAfterTransitionToRollback.shouldFail())) {
log() << "rollbackHangAfterTransitionToRollback fail point enabled. Blocking until fail "
"point is disabled (rollback_impl).";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx,
- rollbackHangAfterTransitionToRollback);
+ rollbackHangAfterTransitionToRollback.pauseWhileSet(opCtx);
}
// We clear the SizeRecoveryState before we recover to a stable timestamp. This ensures that we
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 04ee064da24..3e50d1ede2d 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -670,12 +670,12 @@ void checkRbidAndUpdateMinValid(OperationContext* opCtx,
replicationProcess->getConsistencyMarkers()->clearAppliedThrough(opCtx, {});
replicationProcess->getConsistencyMarkers()->setMinValid(opCtx, minValid);
- if (MONGO_FAIL_POINT(rollbackHangThenFailAfterWritingMinValid)) {
+ if (MONGO_unlikely(rollbackHangThenFailAfterWritingMinValid.shouldFail())) {
// This log output is used in jstests so please leave it.
log() << "rollback - rollbackHangThenFailAfterWritingMinValid fail point "
"enabled. Blocking until fail point is disabled.";
- while (MONGO_FAIL_POINT(rollbackHangThenFailAfterWritingMinValid)) {
+ while (MONGO_unlikely(rollbackHangThenFailAfterWritingMinValid.shouldFail())) {
invariant(!globalInShutdownDeprecated()); // It is an error to shutdown while enabled.
mongo::sleepsecs(1);
}
@@ -1001,7 +1001,7 @@ Status _syncRollback(OperationContext* opCtx,
});
syncFixUp(opCtx, how, rollbackSource, replCoord, replicationProcess);
- if (MONGO_FAIL_POINT(rollbackExitEarlyAfterCollectionDrop)) {
+ if (MONGO_unlikely(rollbackExitEarlyAfterCollectionDrop.shouldFail())) {
log() << "rollbackExitEarlyAfterCollectionDrop fail point enabled. Returning early "
"until fail point is disabled.";
return Status(ErrorCodes::NamespaceNotFound,
@@ -1013,11 +1013,11 @@ Status _syncRollback(OperationContext* opCtx,
return Status(ErrorCodes::UnrecoverableRollbackError, e.what());
}
- if (MONGO_FAIL_POINT(rollbackHangBeforeFinish)) {
+ if (MONGO_unlikely(rollbackHangBeforeFinish.shouldFail())) {
// This log output is used in js tests so please leave it.
log() << "rollback - rollbackHangBeforeFinish fail point "
"enabled. Blocking until fail point is disabled.";
- while (MONGO_FAIL_POINT(rollbackHangBeforeFinish)) {
+ while (MONGO_unlikely(rollbackHangBeforeFinish.shouldFail())) {
invariant(!globalInShutdownDeprecated()); // It is an error to shutdown while enabled.
mongo::sleepsecs(1);
}
@@ -1181,7 +1181,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
}
}
- if (MONGO_FAIL_POINT(rollbackExitEarlyAfterCollectionDrop)) {
+ if (MONGO_unlikely(rollbackExitEarlyAfterCollectionDrop.shouldFail())) {
return;
}
@@ -1643,11 +1643,10 @@ void rollback(OperationContext* opCtx,
}
}
- if (MONGO_FAIL_POINT(rollbackHangAfterTransitionToRollback)) {
+ if (MONGO_unlikely(rollbackHangAfterTransitionToRollback.shouldFail())) {
log() << "rollbackHangAfterTransitionToRollback fail point enabled. Blocking until fail "
"point is disabled (rs_rollback).";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx,
- rollbackHangAfterTransitionToRollback);
+ rollbackHangAfterTransitionToRollback.pauseWhileSet(opCtx);
}
try {
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index ec30792335f..165ab31c7a3 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -530,7 +530,7 @@ private:
BatchLimits batchLimits;
while (true) {
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(rsSyncApplyStop);
+ rsSyncApplyStop.pauseWhileSet();
batchLimits.slaveDelayLatestTimestamp = _calculateSlaveDelayLatestTimestamp();
@@ -661,10 +661,10 @@ void SyncTail::_oplogApplication(ReplicationCoordinator* replCoord,
opCtx.setShouldParticipateInFlowControl(false);
// For pausing replication in tests.
- if (MONGO_FAIL_POINT(rsSyncApplyStop)) {
+ if (MONGO_unlikely(rsSyncApplyStop.shouldFail())) {
log() << "sync tail - rsSyncApplyStop fail point enabled. Blocking until fail point is "
"disabled.";
- while (MONGO_FAIL_POINT(rsSyncApplyStop)) {
+ while (MONGO_unlikely(rsSyncApplyStop.shouldFail())) {
// Tests should not trigger clean shutdown while that failpoint is active. If we
// think we need this, we need to think hard about what the behavior should be.
if (inShutdown()) {
@@ -689,7 +689,7 @@ void SyncTail::_oplogApplication(ReplicationCoordinator* replCoord,
// Shut down and exit oplog application loop.
return;
}
- if (MONGO_FAIL_POINT(rsSyncApplyStop)) {
+ if (MONGO_unlikely(rsSyncApplyStop.shouldFail())) {
continue;
}
if (ops.termWhenExhausted()) {
@@ -814,10 +814,10 @@ Status syncApply(OperationContext* opCtx,
auto clockSource = opCtx->getServiceContext()->getFastClockSource();
auto applyStartTime = clockSource->now();
- if (MONGO_FAIL_POINT(hangAfterRecordingOpApplicationStartTime)) {
+ if (MONGO_unlikely(hangAfterRecordingOpApplicationStartTime.shouldFail())) {
log() << "syncApply - fail point hangAfterRecordingOpApplicationStartTime enabled. "
<< "Blocking until fail point is disabled. ";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterRecordingOpApplicationStartTime);
+ hangAfterRecordingOpApplicationStartTime.pauseWhileSet();
}
auto opType = op.getOpType();
@@ -1198,11 +1198,10 @@ StatusWith<OpTime> SyncTail::multiApply(OperationContext* opCtx, MultiApplier::O
// Use this fail point to hold the PBWM lock after we have written the oplog entries but
// before we have applied them.
- if (MONGO_FAIL_POINT(pauseBatchApplicationAfterWritingOplogEntries)) {
+ if (MONGO_unlikely(pauseBatchApplicationAfterWritingOplogEntries.shouldFail())) {
log() << "pauseBatchApplicationAfterWritingOplogEntries fail point enabled. Blocking "
"until fail point is disabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, pauseBatchApplicationAfterWritingOplogEntries);
+ pauseBatchApplicationAfterWritingOplogEntries.pauseWhileSet(opCtx);
}
// Reset consistency markers in case the node fails while applying ops.
@@ -1239,10 +1238,10 @@ StatusWith<OpTime> SyncTail::multiApply(OperationContext* opCtx, MultiApplier::O
storageEngine->replicationBatchIsComplete();
// Use this fail point to hold the PBWM lock and prevent the batch from completing.
- if (MONGO_FAIL_POINT(pauseBatchApplicationBeforeCompletion)) {
+ if (MONGO_unlikely(pauseBatchApplicationBeforeCompletion.shouldFail())) {
log() << "pauseBatchApplicationBeforeCompletion fail point enabled. Blocking until fail "
"point is disabled.";
- while (MONGO_FAIL_POINT(pauseBatchApplicationBeforeCompletion)) {
+ while (MONGO_unlikely(pauseBatchApplicationBeforeCompletion.shouldFail())) {
if (inShutdown()) {
severe() << "Turn off pauseBatchApplicationBeforeCompletion before attempting "
"clean shutdown";
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index c79248d7af6..1441a4cdeb5 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -208,8 +208,8 @@ HostAndPort TopologyCoordinator::chooseNewSyncSource(Date_t now,
return HostAndPort();
}
- MONGO_FAIL_POINT_BLOCK(forceSyncSourceCandidate, customArgs) {
- const auto& data = customArgs.getData();
+ if (auto sfp = forceSyncSourceCandidate.scoped(); MONGO_unlikely(sfp.isActive())) {
+ const auto& data = sfp.getData();
const auto hostAndPortElem = data["hostAndPort"];
if (!hostAndPortElem) {
severe() << "'forceSyncSoureCandidate' parameter set with invalid host and port: "
@@ -2659,7 +2659,7 @@ bool TopologyCoordinator::shouldChangeSyncSource(
return true;
}
- if (MONGO_FAIL_POINT(disableMaxSyncSourceLagSecs)) {
+ if (MONGO_unlikely(disableMaxSyncSourceLagSecs.shouldFail())) {
log() << "disableMaxSyncSourceLagSecs fail point enabled - not checking the most recent "
"OpTime, "
<< currentSourceOpTime.toString() << ", of our current sync source, " << currentSource
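Where the body of an old MONGO_FAIL_POINT_BLOCK has to return from the enclosing function, the hunks use scoped() rather than a lambda: the scoped handle reports whether the fail point is active and exposes its configured BSON data. A reduced sketch with hypothetical names and field:

    // Early-return on fail-point-supplied data; an execute() lambda could not return
    // from the enclosing function, so the scoped handle is used instead.
    if (auto sfp = myOverrideFailPoint.scoped(); MONGO_unlikely(sfp.isActive())) {
        const int overrideValue = sfp.getData().getIntField("value");  // "value" is hypothetical
        if (overrideValue > 0) {
            return overrideValue;
        }
    }
    return computeDefaultValue();  // hypothetical fallback path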
diff --git a/src/mongo/db/repl/transaction_oplog_application.cpp b/src/mongo/db/repl/transaction_oplog_application.cpp
index 92cbee05379..2d65483c42c 100644
--- a/src/mongo/db/repl/transaction_oplog_application.cpp
+++ b/src/mongo/db/repl/transaction_oplog_application.cpp
@@ -355,10 +355,9 @@ Status _applyPrepareTransaction(OperationContext* opCtx,
auto status = _applyOperationsForTransaction(opCtx, ops, mode);
fassert(31137, status);
- if (MONGO_FAIL_POINT(applyOpsHangBeforePreparingTransaction)) {
+ if (MONGO_unlikely(applyOpsHangBeforePreparingTransaction.shouldFail())) {
LOG(0) << "Hit applyOpsHangBeforePreparingTransaction failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx,
- applyOpsHangBeforePreparingTransaction);
+ applyOpsHangBeforePreparingTransaction.pauseWhileSet(opCtx);
}
transaction.prepareTransaction(opCtx, entry.getOpTime());
@@ -435,7 +434,7 @@ Status applyPrepareTransaction(OperationContext* opCtx,
}
void reconstructPreparedTransactions(OperationContext* opCtx, repl::OplogApplication::Mode mode) {
- if (MONGO_FAIL_POINT(skipReconstructPreparedTransactions)) {
+ if (MONGO_unlikely(skipReconstructPreparedTransactions.shouldFail())) {
log() << "Hit skipReconstructPreparedTransactions failpoint";
return;
}
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index dd1a9011612..a920dd64ba9 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -394,18 +394,14 @@ void Balancer::_mainThread() {
LOG(1) << "*** End of balancing round";
}
- auto balancerInterval = [&]() -> Milliseconds {
- MONGO_FAIL_POINT_BLOCK(overrideBalanceRoundInterval, data) {
- int interval = data.getData()["intervalMs"].numberInt();
- log() << "overrideBalanceRoundInterval: using shorter balancing interval: "
- << interval << "ms";
-
- return Milliseconds(interval);
- }
-
- return _balancedLastTime ? kShortBalanceRoundInterval
- : kBalanceRoundDefaultInterval;
- }();
+ Milliseconds balancerInterval =
+ _balancedLastTime ? kShortBalanceRoundInterval : kBalanceRoundDefaultInterval;
+
+ overrideBalanceRoundInterval.execute([&](const BSONObj& data) {
+ balancerInterval = Milliseconds(data["intervalMs"].numberInt());
+ log() << "overrideBalanceRoundInterval: using shorter balancing interval: "
+ << balancerInterval;
+ });
_endRound(opCtx.get(), balancerInterval);
} catch (const DBException& e) {
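When the old block only produced side effects, the hunks swap it for execute(), which invokes the callback with the fail point's BSON data only while the fail point is active. A reduced sketch with hypothetical names:

    Milliseconds interval = kDefaultInterval;  // hypothetical default

    myIntervalOverrideFailPoint.execute([&](const BSONObj& data) {
        // Runs only while the fail point is active; "intervalMs" comes from the data
        // supplied when the fail point was enabled.
        interval = Milliseconds(data["intervalMs"].numberInt());
        log() << "overriding interval: " << interval;
    });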
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index efd71ca26ed..f512f77ca6a 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -355,7 +355,7 @@ vector<MigrateInfo> BalancerPolicy::balance(const ShardStatisticsVector& shardSt
std::set<ShardId>* usedShards) {
vector<MigrateInfo> migrations;
- if (MONGO_FAIL_POINT(balancerShouldReturnRandomMigrations) &&
+ if (MONGO_unlikely(balancerShouldReturnRandomMigrations.shouldFail()) &&
!distribution.nss().isConfigDB()) {
LOG(1) << "balancerShouldReturnRandomMigrations failpoint is set";
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index d5affc26cc0..27b0a47a7ef 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -395,9 +395,9 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
PlanExecutor::YIELD_MANUAL,
InternalPlanner::FORWARD);
- if (MONGO_FAIL_POINT(hangBeforeDoingDeletion)) {
+ if (MONGO_unlikely(hangBeforeDoingDeletion.shouldFail())) {
LOG(0) << "Hit hangBeforeDoingDeletion failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeDoingDeletion);
+ hangBeforeDoingDeletion.pauseWhileSet(opCtx);
}
PlanYieldPolicy planYieldPolicy(exec.get(), PlanExecutor::YIELD_MANUAL);
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index dbe03cadd4d..7e38463b55e 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -226,7 +226,7 @@ boost::optional<ScopedCollectionMetadata> CollectionShardingState::_getMetadataW
auto metadata = _getMetadata(atClusterTime);
auto wantedShardVersion = ChunkVersion::UNSHARDED();
- if (MONGO_FAIL_POINT(useFCV44CheckShardVersionProtocol)) {
+ if (MONGO_unlikely(useFCV44CheckShardVersionProtocol.shouldFail())) {
LOG(0) << "Received shardVersion: " << receivedShardVersion << " for " << _nss.ns();
if (isCollection) {
LOG(0) << "Namespace " << _nss.ns() << " is collection, "
diff --git a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
index 66c2cb94653..06970e7ecf3 100644
--- a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
@@ -111,10 +111,8 @@ public:
opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
Seconds waitFor(DistLockManager::kDefaultLockTimeout);
- MONGO_FAIL_POINT_BLOCK(setDropCollDistLockWait, customWait) {
- const BSONObj& data = customWait.getData();
- waitFor = Seconds(data["waitForSecs"].numberInt());
- }
+ setDropCollDistLockWait.execute(
+ [&](const BSONObj& data) { waitFor = Seconds(data["waitForSecs"].numberInt()); });
auto const catalogClient = Grid::get(opCtx)->catalogClient();
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index 337f854354e..00287e86410 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -997,7 +997,7 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
// Enable fail point to cause all updates to fail. Since we add the databases detected from
// the shard being added with upserts, but we add the shard document itself via insert, this
// will allow the shard to be added but prevent the databases from being brought into the cluster.
- auto failPoint = getGlobalFailPointRegistry()->getFailPoint("failAllUpdates");
+ auto failPoint = globalFailPointRegistry().find("failAllUpdates");
ASSERT(failPoint);
failPoint->setMode(FailPoint::alwaysOn);
ON_BLOCK_EXIT([&] { failPoint->setMode(FailPoint::off); });
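Tests now obtain fail points by name through globalFailPointRegistry().find(), which returns a pointer (null for an unregistered name). A sketch of the enable/disable pattern; the data-carrying call at the end assumes setMode's long-standing (mode, nTimes, data) overload is unchanged:

    auto failPoint = globalFailPointRegistry().find("failAllUpdates");
    ASSERT(failPoint);  // find() returns nullptr for an unknown fail point name
    failPoint->setMode(FailPoint::alwaysOn);
    ON_BLOCK_EXIT([&] { failPoint->setMode(FailPoint::off); });

    // Assumed overload: fail points that read configuration (via execute()/getData())
    // can be enabled with a data payload.
    // failPoint->setMode(FailPoint::alwaysOn, 0, BSON("intervalMs" << 100));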
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index e3683f37787..76df6923276 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -663,7 +663,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
return findResponse.getStatus();
}
- if (MONGO_FAIL_POINT(migrationCommitVersionError)) {
+ if (MONGO_unlikely(migrationCommitVersionError.shouldFail())) {
uassert(ErrorCodes::StaleEpoch,
"failpoint 'migrationCommitVersionError' generated error",
false);
@@ -730,7 +730,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
// Update the history of the migrated chunk.
// Drop the history that is too old (10 seconds of history for now).
// TODO SERVER-33831 to update the old history removal policy.
- if (!MONGO_FAIL_POINT(skipExpiringOldChunkHistory)) {
+ if (!MONGO_unlikely(skipExpiringOldChunkHistory.shouldFail())) {
while (!newHistory.empty() &&
newHistory.back().getValidAfter().getSecs() + kHistorySecs <
validAfter.get().getSecs()) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index cba7c79772e..28122a3a3da 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -527,10 +527,9 @@ void ShardingCatalogManager::renameCollection(OperationContext* opCtx,
CommandHelpers::filterCommandRequestForPassthrough(passthroughFields)),
Shard::RetryPolicy::kIdempotent));
- if (MONGO_FAIL_POINT(hangRenameCollectionAfterSendingRenameToPrimaryShard)) {
+ if (MONGO_unlikely(hangRenameCollectionAfterSendingRenameToPrimaryShard.shouldFail())) {
log() << "Hit hangRenameCollectionAfterSendingRenameToPrimaryShard";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangRenameCollectionAfterSendingRenameToPrimaryShard);
+ hangRenameCollectionAfterSendingRenameToPrimaryShard.pauseWhileSet(opCtx);
}
uassertStatusOK(cmdResponse.commandStatus);
@@ -638,10 +637,9 @@ void ShardingCatalogManager::generateUUIDsForExistingShardedCollections(Operatio
void ShardingCatalogManager::createCollection(OperationContext* opCtx,
const NamespaceString& ns,
const CollectionOptions& collOptions) {
- if (MONGO_FAIL_POINT(hangCreateCollectionAfterAcquiringDistlocks)) {
+ if (MONGO_unlikely(hangCreateCollectionAfterAcquiringDistlocks.shouldFail())) {
log() << "Hit hangCreateCollectionAfterAcquiringDistlocks";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangCreateCollectionAfterAcquiringDistlocks);
+ hangCreateCollectionAfterAcquiringDistlocks.pauseWhileSet(opCtx);
}
const auto catalogClient = Grid::get(opCtx)->catalogClient();
@@ -668,10 +666,9 @@ void ShardingCatalogManager::createCollection(OperationContext* opCtx,
createCmdBuilder.obj(),
Shard::RetryPolicy::kIdempotent);
- if (MONGO_FAIL_POINT(hangCreateCollectionAfterSendingCreateToPrimaryShard)) {
+ if (MONGO_unlikely(hangCreateCollectionAfterSendingCreateToPrimaryShard.shouldFail())) {
log() << "Hit hangCreateCollectionAfterSendingCreateToPrimaryShard";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangCreateCollectionAfterSendingCreateToPrimaryShard);
+ hangCreateCollectionAfterSendingCreateToPrimaryShard.pauseWhileSet(opCtx);
}
auto createStatus = Shard::CommandResponse::getEffectiveStatus(swResponse);
@@ -681,10 +678,9 @@ void ShardingCatalogManager::createCollection(OperationContext* opCtx,
const auto uuid = checkCollectionOptions(opCtx, primaryShard.get(), ns, collOptions);
- if (MONGO_FAIL_POINT(hangCreateCollectionAfterGettingUUIDFromPrimaryShard)) {
+ if (MONGO_unlikely(hangCreateCollectionAfterGettingUUIDFromPrimaryShard.shouldFail())) {
log() << "Hit hangCreateCollectionAfterGettingUUIDFromPrimaryShard";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangCreateCollectionAfterGettingUUIDFromPrimaryShard);
+ hangCreateCollectionAfterGettingUUIDFromPrimaryShard.pauseWhileSet(opCtx);
}
if (collOptions.isView()) {
@@ -751,15 +747,14 @@ void ShardingCatalogManager::createCollection(OperationContext* opCtx,
ShardingCatalogClient::kLocalWriteConcern));
}
- if (MONGO_FAIL_POINT(writeUnshardedCollectionsToShardingCatalog)) {
+ if (MONGO_unlikely(writeUnshardedCollectionsToShardingCatalog.shouldFail())) {
LOG(0) << "Going to write initial chunk for new unsharded collection " << ns.ns() << ": "
<< chunk.toString();
writeFirstChunksForCollection(opCtx, initialChunks);
- if (MONGO_FAIL_POINT(hangCreateCollectionAfterWritingEntryToConfigChunks)) {
+ if (MONGO_unlikely(hangCreateCollectionAfterWritingEntryToConfigChunks.shouldFail())) {
log() << "Hit hangCreateCollectionAfterWritingEntryToConfigChunks";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangCreateCollectionAfterWritingEntryToConfigChunks);
+ hangCreateCollectionAfterWritingEntryToConfigChunks.pauseWhileSet(opCtx);
}
LOG(0) << "Going to write collection entry for new unsharded collection " << ns.ns() << ": "
@@ -767,10 +762,9 @@ void ShardingCatalogManager::createCollection(OperationContext* opCtx,
uassertStatusOK(ShardingCatalogClientImpl::updateShardingCatalogEntryForCollection(
opCtx, ns, targetCollType, true /*upsert*/));
- if (MONGO_FAIL_POINT(hangCreateCollectionAfterWritingEntryToConfigCollections)) {
+ if (MONGO_unlikely(hangCreateCollectionAfterWritingEntryToConfigCollections.shouldFail())) {
log() << "Hit hangCreateCollectionAfterWritingEntryToConfigCollections";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangCreateCollectionAfterWritingEntryToConfigCollections);
+ hangCreateCollectionAfterWritingEntryToConfigCollections.pauseWhileSet(opCtx);
}
}
}
@@ -859,10 +853,9 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx,
// the newly-generated objectid, (ii) their bounds for each new field in the refined key to
// MinKey (except for the global max chunk where the max bounds are set to MaxKey), and
// unsetting (iii) their jumbo field.
- if (MONGO_FAIL_POINT(hangRefineCollectionShardKeyBeforeUpdatingChunks)) {
+ if (MONGO_unlikely(hangRefineCollectionShardKeyBeforeUpdatingChunks.shouldFail())) {
log() << "Hit hangRefineCollectionShardKeyBeforeUpdatingChunks failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangRefineCollectionShardKeyBeforeUpdatingChunks);
+ hangRefineCollectionShardKeyBeforeUpdatingChunks.pauseWhileSet(opCtx);
}
uassertStatusOK(updateConfigDocumentInTxn(asr.opCtx(),
@@ -918,10 +911,9 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx,
<< executionTimer.millis() << " ms. Total time taken: " << totalTimer.millis()
<< " ms.";
- if (MONGO_FAIL_POINT(hangRefineCollectionShardKeyBeforeCommit)) {
+ if (MONGO_unlikely(hangRefineCollectionShardKeyBeforeCommit.shouldFail())) {
log() << "Hit hangRefineCollectionShardKeyBeforeCommit failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangRefineCollectionShardKeyBeforeCommit);
+ hangRefineCollectionShardKeyBeforeCommit.pauseWhileSet(opCtx);
}
uassertStatusOK(commitTxnForConfigDocument(asr.opCtx(), txnNumber));
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index e843eeff898..5d832418367 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -145,7 +145,7 @@ void scheduleCleanup(executor::TaskExecutor* executor,
auto uniqueOpCtx = Client::getCurrent()->makeOperationContext();
auto opCtx = uniqueOpCtx.get();
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(suspendRangeDeletion);
+ suspendRangeDeletion.pauseWhileSet();
auto next = CollectionRangeDeleter::cleanUpNextRange(opCtx, nss, epoch);
if (next) {
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index e4b987f537c..51d6e4ba712 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -724,7 +724,7 @@ void MigrationDestinationManager::_migrateThread() {
_setStateFail(str::stream() << "migrate failed: " << redact(exceptionToStatus()));
}
- if (getState() != DONE && !MONGO_FAIL_POINT(failMigrationLeaveOrphans)) {
+ if (getState() != DONE && !MONGO_unlikely(failMigrationLeaveOrphans.shouldFail())) {
_forgetPending(opCtx.get(), ChunkRange(_min, _max));
}
@@ -761,7 +761,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
cloneCollectionIndexesAndOptions(opCtx, _nss, _fromShard);
timing.done(1);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(migrateThreadHangAtStep1);
+ migrateThreadHangAtStep1.pauseWhileSet();
}
auto fromShard =
@@ -787,7 +787,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
}
timing.done(2);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(migrateThreadHangAtStep2);
+ migrateThreadHangAtStep2.pauseWhileSet();
}
repl::OpTime lastOpApplied;
@@ -884,9 +884,9 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
lastOpApplied = cloneDocumentsFromDonor(opCtx, insertBatchFn, fetchBatchFn);
timing.done(3);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(migrateThreadHangAtStep3);
+ migrateThreadHangAtStep3.pauseWhileSet();
- if (MONGO_FAIL_POINT(failMigrationLeaveOrphans)) {
+ if (MONGO_unlikely(failMigrationLeaveOrphans.shouldFail())) {
_setStateFail(str::stream() << "failing migration after cloning " << _numCloned
<< " docs due to failMigrationLeaveOrphans failpoint");
return;
@@ -949,7 +949,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
}
timing.done(4);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(migrateThreadHangAtStep4);
+ migrateThreadHangAtStep4.pauseWhileSet();
}
{
@@ -1024,7 +1024,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
}
timing.done(5);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(migrateThreadHangAtStep5);
+ migrateThreadHangAtStep5.pauseWhileSet();
}
_sessionMigration->join();
@@ -1036,7 +1036,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
setState(DONE);
timing.done(6);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(migrateThreadHangAtStep6);
+ migrateThreadHangAtStep6.pauseWhileSet();
}
bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx,
@@ -1067,7 +1067,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx,
BSONObj fullObj;
if (Helpers::findById(opCtx, autoColl.getDb(), _nss.ns(), id, fullObj)) {
if (!isInRange(fullObj, _min, _max, _shardKeyPattern)) {
- if (MONGO_FAIL_POINT(failMigrationReceivedOutOfRangeOperation)) {
+ if (MONGO_unlikely(failMigrationReceivedOutOfRangeOperation.shouldFail())) {
MONGO_UNREACHABLE;
}
continue;
@@ -1105,7 +1105,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx,
// do not apply insert/update if doc does not belong to the chunk being migrated
if (!isInRange(updatedDoc, _min, _max, _shardKeyPattern)) {
- if (MONGO_FAIL_POINT(failMigrationReceivedOutOfRangeOperation)) {
+ if (MONGO_unlikely(failMigrationReceivedOutOfRangeOperation.shouldFail())) {
MONGO_UNREACHABLE;
}
continue;
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 725af2a1ea7..56d84023c24 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -364,7 +364,7 @@ Status MigrationSourceManager::commitChunkOnRecipient(OperationContext* opCtx) {
// Tell the recipient shard to fetch the latest changes.
auto commitCloneStatus = _cloneDriver->commitClone(opCtx);
- if (MONGO_FAIL_POINT(failMigrationCommit) && commitCloneStatus.isOK()) {
+ if (MONGO_unlikely(failMigrationCommit.shouldFail()) && commitCloneStatus.isOK()) {
commitCloneStatus = {ErrorCodes::InternalError,
"Failing _recvChunkCommit due to failpoint."};
}
@@ -423,7 +423,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
builder.obj(),
Shard::RetryPolicy::kIdempotent);
- if (MONGO_FAIL_POINT(migrationCommitNetworkError)) {
+ if (MONGO_unlikely(migrationCommitNetworkError.shouldFail())) {
commitChunkMigrationResponse = Status(
ErrorCodes::InternalError, "Failpoint 'migrationCommitNetworkError' generated error");
}
@@ -547,7 +547,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
LOG(0) << "Migration succeeded and updated collection version to "
<< refreshedMetadata->getCollVersion();
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeLeavingCriticalSection);
+ hangBeforeLeavingCriticalSection.pauseWhileSet();
scopedGuard.dismiss();
@@ -576,7 +576,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
return CollectionShardingRuntime::get(opCtx, getNss())->cleanUpRange(range, whenToClean);
}();
- if (!MONGO_FAIL_POINT(doNotRefreshRecipientAfterCommit)) {
+ if (!MONGO_unlikely(doNotRefreshRecipientAfterCommit.shouldFail())) {
// Best-effort make the recipient refresh its routing table to the new collection version.
refreshRecipientRoutingTable(opCtx,
getNss(),
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 8fafb8c0253..24c0023a6c4 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -217,30 +217,30 @@ private:
moveChunkRequest.getFromShardId());
moveTimingHelper.done(1);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(moveChunkHangAtStep1);
+ moveChunkHangAtStep1.pauseWhileSet();
MigrationSourceManager migrationSourceManager(
opCtx, moveChunkRequest, donorConnStr, recipientHost);
moveTimingHelper.done(2);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(moveChunkHangAtStep2);
+ moveChunkHangAtStep2.pauseWhileSet();
uassertStatusOKWithWarning(migrationSourceManager.startClone(opCtx));
moveTimingHelper.done(3);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(moveChunkHangAtStep3);
+ moveChunkHangAtStep3.pauseWhileSet();
uassertStatusOKWithWarning(migrationSourceManager.awaitToCatchUp(opCtx));
moveTimingHelper.done(4);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(moveChunkHangAtStep4);
+ moveChunkHangAtStep4.pauseWhileSet();
uassertStatusOKWithWarning(migrationSourceManager.enterCriticalSection(opCtx));
uassertStatusOKWithWarning(migrationSourceManager.commitChunkOnRecipient(opCtx));
moveTimingHelper.done(5);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(moveChunkHangAtStep5);
+ moveChunkHangAtStep5.pauseWhileSet();
uassertStatusOKWithWarning(migrationSourceManager.commitChunkMetadataOnConfig(opCtx));
moveTimingHelper.done(6);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(moveChunkHangAtStep6);
+ moveChunkHangAtStep6.pauseWhileSet();
}
} moveChunkCmd;
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index cab3a7391fd..9042fba9b5d 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -90,7 +90,7 @@ void onShardVersionMismatch(OperationContext* opCtx,
}
}
- if (MONGO_FAIL_POINT(skipShardFilteringMetadataRefresh)) {
+ if (MONGO_unlikely(skipShardFilteringMetadataRefresh.shouldFail())) {
return;
}
@@ -117,7 +117,7 @@ void onDbVersionMismatch(OperationContext* opCtx,
// StaleDatabaseVersion retry attempts while the movePrimary is being committed.
OperationShardingState::get(opCtx).waitForMovePrimaryCriticalSectionSignal(opCtx);
- if (MONGO_FAIL_POINT(skipDatabaseVersionMetadataRefresh)) {
+ if (MONGO_unlikely(skipDatabaseVersionMetadataRefresh.shouldFail())) {
return;
}
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index cf158829ef5..78600e5d488 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -88,10 +88,9 @@ void dropChunksIfEpochChanged(OperationContext* opCtx,
// table cache.
dropChunks(opCtx, nss);
- if (MONGO_FAIL_POINT(hangPersistCollectionAndChangedChunksAfterDropChunks)) {
+ if (MONGO_unlikely(hangPersistCollectionAndChangedChunksAfterDropChunks.shouldFail())) {
log() << "Hit hangPersistCollectionAndChangedChunksAfterDropChunks failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangPersistCollectionAndChangedChunksAfterDropChunks);
+ hangPersistCollectionAndChangedChunksAfterDropChunks.pauseWhileSet(opCtx);
}
}
}
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index 239f3c6bed2..29f9f699848 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -111,7 +111,7 @@ public:
LOG(0) << "Updating config server with confirmed set " << connStr;
Grid::get(serviceContext)->shardRegistry()->updateReplSetHosts(connStr);
- if (MONGO_FAIL_POINT(failUpdateShardIdentityConfigString)) {
+ if (MONGO_unlikely(failUpdateShardIdentityConfigString.shouldFail())) {
return;
}
diff --git a/src/mongo/db/s/shardsvr_rename_collection.cpp b/src/mongo/db/s/shardsvr_rename_collection.cpp
index 84d4bda3c5f..41971a44c6e 100644
--- a/src/mongo/db/s/shardsvr_rename_collection.cpp
+++ b/src/mongo/db/s/shardsvr_rename_collection.cpp
@@ -72,10 +72,9 @@ public:
uassertStatusOK(ActiveRenameCollectionRegistry::get(opCtx).registerRenameCollection(
incomingRequest));
- if (MONGO_FAIL_POINT(hangRenameCollectionAfterGettingRename)) {
+ if (MONGO_unlikely(hangRenameCollectionAfterGettingRename.shouldFail())) {
log() << "Hit hangRenameCollectionAfterGettingRename";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangRenameCollectionAfterGettingRename);
+ hangRenameCollectionAfterGettingRename.pauseWhileSet(opCtx);
}
// Check if there is an existing renameCollection running and if so, join it
diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp
index c46920126ee..7836c63ef62 100644
--- a/src/mongo/db/s/transaction_coordinator.cpp
+++ b/src/mongo/db/s/transaction_coordinator.cpp
@@ -61,20 +61,19 @@ ExecutorFuture<void> waitForMajorityWithHangFailpoint(ServiceContext* service,
return WaitForMajorityService::get(service).waitUntilMajority(opTime).thenRunOn(executor);
};
- MONGO_FAIL_POINT_BLOCK(failpoint, fp) {
+ if (auto sfp = failpoint.scoped(); MONGO_unlikely(sfp.isActive())) {
+ const BSONObj& data = sfp.getData();
LOG(0) << "Hit " << failPointName << " failpoint";
- const BSONObj& data = fp.getData();
// Run the hang failpoint asynchronously on a different thread to avoid self deadlocks.
return ExecutorFuture<void>(executor).then(
[service, &failpoint, failPointName, data, waitForWC, opTime] {
if (!data["useUninterruptibleSleep"].eoo()) {
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(failpoint);
+ failpoint.pauseWhileSet();
} else {
ThreadClient tc(failPointName, service);
auto opCtx = tc->makeOperationContext();
-
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx.get(), failpoint);
+ failpoint.pauseWhileSet(opCtx.get());
}
return waitForWC(std::move(opTime));
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.cpp b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
index 59ae621bc59..5d3cf3bfdd5 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
@@ -95,9 +95,9 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
AuthorizationSession::get(opCtx->getClient())
->grantInternalAuthorization(opCtx->getClient());
- if (MONGO_FAIL_POINT(hangWhileTargetingLocalHost)) {
+ if (MONGO_unlikely(hangWhileTargetingLocalHost.shouldFail())) {
LOG(0) << "Hit hangWhileTargetingLocalHost failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangWhileTargetingLocalHost);
+ hangWhileTargetingLocalHost.pauseWhileSet(opCtx);
}
const auto service = opCtx->getServiceContext();
@@ -231,9 +231,9 @@ Future<AsyncWorkScheduler::HostAndShard> AsyncWorkScheduler::_targetHostAsync(
const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
const auto shard = uassertStatusOK(shardRegistry->getShard(opCtx, shardId));
- if (MONGO_FAIL_POINT(hangWhileTargetingRemoteHost)) {
+ if (MONGO_unlikely(hangWhileTargetingRemoteHost.shouldFail())) {
LOG(0) << "Hit hangWhileTargetingRemoteHost failpoint for shard " << shardId;
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangWhileTargetingRemoteHost);
+ hangWhileTargetingRemoteHost.pauseWhileSet(opCtx);
}
// TODO (SERVER-35678): Return a SemiFuture<HostAndShard> rather than using a blocking call
diff --git a/src/mongo/db/s/transaction_coordinator_util.cpp b/src/mongo/db/s/transaction_coordinator_util.cpp
index 9e6b78c445a..c98a1c25775 100644
--- a/src/mongo/db/s/transaction_coordinator_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_util.cpp
@@ -107,9 +107,9 @@ repl::OpTime persistParticipantListBlocking(OperationContext* opCtx,
const std::vector<ShardId>& participantList) {
LOG(3) << txnIdToString(lsid, txnNumber) << " Going to write participant list";
- if (MONGO_FAIL_POINT(hangBeforeWritingParticipantList)) {
+ if (MONGO_unlikely(hangBeforeWritingParticipantList.shouldFail())) {
LOG(0) << "Hit hangBeforeWritingParticipantList failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeWritingParticipantList);
+ hangBeforeWritingParticipantList.pauseWhileSet(opCtx);
}
OperationSessionInfo sessionInfo;
@@ -239,9 +239,9 @@ Future<PrepareVoteConsensus> sendPrepare(ServiceContext* service,
getTransactionCoordinatorWorkerCurOpRepository()->set(
opCtx, lsid, txnNumber, CoordinatorAction::kSendingPrepare);
- if (MONGO_FAIL_POINT(hangBeforeSendingPrepare)) {
+ if (MONGO_unlikely(hangBeforeSendingPrepare.shouldFail())) {
LOG(0) << "Hit hangBeforeSendingPrepare failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeSendingPrepare);
+ hangBeforeSendingPrepare.pauseWhileSet(opCtx);
}
};
@@ -292,9 +292,9 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx,
LOG(3) << txnIdToString(lsid, txnNumber) << " Going to write decision "
<< (isCommit ? "commit" : "abort");
- if (MONGO_FAIL_POINT(hangBeforeWritingDecision)) {
+ if (MONGO_unlikely(hangBeforeWritingDecision.shouldFail())) {
LOG(0) << "Hit hangBeforeWritingDecision failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeWritingDecision);
+ hangBeforeWritingDecision.pauseWhileSet(opCtx);
}
OperationSessionInfo sessionInfo;
@@ -401,9 +401,9 @@ Future<void> sendCommit(ServiceContext* service,
getTransactionCoordinatorWorkerCurOpRepository()->set(
opCtx, lsid, txnNumber, CoordinatorAction::kSendingCommit);
- if (MONGO_FAIL_POINT(hangBeforeSendingCommit)) {
+ if (MONGO_unlikely(hangBeforeSendingCommit.shouldFail())) {
LOG(0) << "Hit hangBeforeSendingCommit failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeSendingCommit);
+ hangBeforeSendingCommit.pauseWhileSet(opCtx);
}
};
@@ -431,9 +431,9 @@ Future<void> sendAbort(ServiceContext* service,
getTransactionCoordinatorWorkerCurOpRepository()->set(
opCtx, lsid, txnNumber, CoordinatorAction::kSendingAbort);
- if (MONGO_FAIL_POINT(hangBeforeSendingAbort)) {
+ if (MONGO_unlikely(hangBeforeSendingAbort.shouldFail())) {
LOG(0) << "Hit hangBeforeSendingAbort failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeSendingAbort);
+ hangBeforeSendingAbort.pauseWhileSet(opCtx);
}
};
@@ -451,9 +451,9 @@ void deleteCoordinatorDocBlocking(OperationContext* opCtx,
TxnNumber txnNumber) {
LOG(3) << txnIdToString(lsid, txnNumber) << " Going to delete coordinator doc";
- if (MONGO_FAIL_POINT(hangBeforeDeletingCoordinatorDoc)) {
+ if (MONGO_unlikely(hangBeforeDeletingCoordinatorDoc.shouldFail())) {
LOG(0) << "Hit hangBeforeDeletingCoordinatorDoc failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeDeletingCoordinatorDoc);
+ hangBeforeDeletingCoordinatorDoc.pauseWhileSet(opCtx);
}
OperationSessionInfo sessionInfo;
@@ -506,15 +506,14 @@ void deleteCoordinatorDocBlocking(OperationContext* opCtx,
LOG(3) << txnIdToString(lsid, txnNumber) << " Deleted coordinator doc";
- MONGO_FAIL_POINT_BLOCK(hangAfterDeletingCoordinatorDoc, fp) {
+ hangAfterDeletingCoordinatorDoc.execute([&](const BSONObj& data) {
LOG(0) << "Hit hangAfterDeletingCoordinatorDoc failpoint";
- const BSONObj& data = fp.getData();
if (!data["useUninterruptibleSleep"].eoo()) {
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterDeletingCoordinatorDoc);
+ hangAfterDeletingCoordinatorDoc.pauseWhileSet();
} else {
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangAfterDeletingCoordinatorDoc);
+ hangAfterDeletingCoordinatorDoc.pauseWhileSet(opCtx);
}
- }
+ });
}
} // namespace
diff --git a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
index 639b44e6e13..21293128b58 100644
--- a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
+++ b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
@@ -134,8 +134,8 @@ public:
<< opCtx->recoveryUnit()->getPrepareTimestamp().toString()
<< " participant prepareOpTime: " << prepareOpTime.toString());
- if (MONGO_FAIL_POINT(
- participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic)) {
+ if (MONGO_unlikely(participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic
+ .shouldFail())) {
uasserted(ErrorCodes::HostUnreachable,
"returning network error because failpoint is on");
}
@@ -143,8 +143,8 @@ public:
}
const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx, {});
- if (MONGO_FAIL_POINT(
- participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic)) {
+ if (MONGO_unlikely(participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic
+ .shouldFail())) {
uasserted(ErrorCodes::HostUnreachable,
"returning network error because failpoint is on");
}
@@ -234,10 +234,9 @@ public:
*opCtx->getTxnNumber(),
validateParticipants(opCtx, cmd.getParticipants()));
- if (MONGO_FAIL_POINT(hangAfterStartingCoordinateCommit)) {
+ if (MONGO_unlikely(hangAfterStartingCoordinateCommit.shouldFail())) {
LOG(0) << "Hit hangAfterStartingCoordinateCommit failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx,
- hangAfterStartingCoordinateCommit);
+ hangAfterStartingCoordinateCommit.pauseWhileSet(opCtx);
}
ON_BLOCK_EXIT([opCtx] {
diff --git a/src/mongo/db/server_options_helpers.cpp b/src/mongo/db/server_options_helpers.cpp
index 9e06c0bc77c..a9d4badc2c6 100644
--- a/src/mongo/db/server_options_helpers.cpp
+++ b/src/mongo/db/server_options_helpers.cpp
@@ -146,7 +146,7 @@ Status validateBaseOptions(const moe::Environment& params) {
if (enableTestCommandsValue) {
// Only register failpoint server parameters if enableTestCommands=1.
- getGlobalFailPointRegistry()->registerAllFailPointsAsServerParameters();
+ globalFailPointRegistry().registerAllFailPointsAsServerParameters();
} else {
// Deregister test-only parameters.
ServerParameterSet::getGlobal()->disableTestParameters();
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index f3760c3c8f4..b4b7d90ca59 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -574,16 +574,23 @@ bool runCommandImpl(OperationContext* opCtx,
}
auto waitForWriteConcern = [&](auto&& bb) {
- MONGO_FAIL_POINT_BLOCK_IF(failCommand, data, [&](const BSONObj& data) {
- return CommandHelpers::shouldActivateFailCommandFailPoint(
- data, request.getCommandName(), opCtx->getClient(), invocation->ns()) &&
- data.hasField("writeConcernError");
- }) {
- bb.append(data.getData()["writeConcernError"]);
- return; // Don't do normal waiting.
+ bool reallyWait = true;
+ failCommand.executeIf(
+ [&](const BSONObj& data) {
+ bb.append(data["writeConcernError"]);
+ reallyWait = false;
+ },
+ [&](const BSONObj& data) {
+ return CommandHelpers::shouldActivateFailCommandFailPoint(
+ data,
+ request.getCommandName(),
+ opCtx->getClient(),
+ invocation->ns()) &&
+ data.hasField("writeConcernError");
+ });
+ if (reallyWait) {
+ behaviors.waitForWriteConcern(opCtx, invocation, lastOpBeforeRun, bb);
}
-
- behaviors.waitForWriteConcern(opCtx, invocation, lastOpBeforeRun, bb);
};
try {
@@ -623,8 +630,7 @@ bool runCommandImpl(OperationContext* opCtx,
// This failpoint should affect both getMores and commands which are read-only and thus don't
// support writeConcern.
if (!shouldWaitForWriteConcern || command->getLogicalOp() == LogicalOp::opGetMore) {
- MONGO_FAIL_POINT_BLOCK(waitAfterReadCommandFinishesExecution, options) {
- const BSONObj& data = options.getData();
+ waitAfterReadCommandFinishesExecution.execute([&](const BSONObj& data) {
auto db = data["db"].str();
if (db.empty() || request.getDatabase() == db) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
@@ -632,7 +638,7 @@ bool runCommandImpl(OperationContext* opCtx,
opCtx,
"waitAfterReadCommandFinishesExecution");
}
- }
+ });
}
behaviors.waitForLinearizableReadConcern(opCtx);
@@ -692,15 +698,14 @@ void execCommandDatabase(OperationContext* opCtx,
CurOp::get(opCtx)->setCommand_inlock(command);
}
- MONGO_FAIL_POINT_BLOCK(sleepMillisAfterCommandExecutionBegins, arg) {
- const BSONObj& data = arg.getData();
+ sleepMillisAfterCommandExecutionBegins.execute([&](const BSONObj& data) {
auto numMillis = data["millis"].numberInt();
auto commands = data["commands"].Obj().getFieldNames<std::set<std::string>>();
// Only sleep for one of the specified commands.
if (commands.find(command->getName()) != commands.end()) {
mongo::sleepmillis(numMillis);
}
- }
+ });
// TODO: move this back to runCommands when mongos supports OperationContext
// see SERVER-18515 for details.
@@ -776,7 +781,7 @@ void execCommandDatabase(OperationContext* opCtx,
const bool iAmPrimary = replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname);
if (!opCtx->getClient()->isInDirectClient() &&
- !MONGO_FAIL_POINT(skipCheckingForNotMasterInCommandDispatch)) {
+ !MONGO_unlikely(skipCheckingForNotMasterInCommandDispatch.shouldFail())) {
const bool inMultiDocumentTransaction = (sessionOptions.getAutocommit() == false);
auto allowed = command->secondaryAllowed(opCtx->getServiceContext());
bool alwaysAllowed = allowed == Command::AllowedOnSecondary::kAlways;
@@ -789,7 +794,7 @@ void execCommandDatabase(OperationContext* opCtx,
uasserted(ErrorCodes::NotMasterNoSlaveOk, "not master and slaveOk=false");
}
- if (MONGO_FAIL_POINT(respondWithNotPrimaryInCommandDispatch)) {
+ if (MONGO_unlikely(respondWithNotPrimaryInCommandDispatch.shouldFail())) {
uassert(ErrorCodes::NotMaster, "not primary", canRunHere);
} else {
uassert(ErrorCodes::NotMaster, "not master", canRunHere);
@@ -1246,7 +1251,7 @@ DbResponse receivedGetMore(OperationContext* opCtx,
audit::logGetMoreAuthzCheck(opCtx->getClient(), nsString, cursorid, status.code());
uassertStatusOK(status);
- while (MONGO_FAIL_POINT(rsStopGetMore)) {
+ while (MONGO_unlikely(rsStopGetMore.shouldFail())) {
sleepmillis(0);
}
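MONGO_FAIL_POINT_BLOCK_IF becomes executeIf(body, predicate): both callbacks receive the fail point's BSON data, and the body runs only when the fail point is active and the predicate returns true. A reduced sketch of the pattern in the hunk above, with hypothetical names throughout:

    bool runNormalPath = true;

    myConditionalFailPoint.executeIf(
        [&](const BSONObj& data) {
            // Body: only reached when the fail point is active and the predicate matched.
            injectError(data["errorSpec"]);  // hypothetical error-injection helper
            runNormalPath = false;
        },
        [&](const BSONObj& data) {
            // Predicate: consult the configured data to decide whether to fire for this request.
            return data.hasField("errorSpec");
        });

    if (runNormalPath) {
        runNormalOperation();  // hypothetical normal path
    }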
diff --git a/src/mongo/db/service_entry_point_common.h b/src/mongo/db/service_entry_point_common.h
index 87a6c178588..b41c0ef2c55 100644
--- a/src/mongo/db/service_entry_point_common.h
+++ b/src/mongo/db/service_entry_point_common.h
@@ -39,12 +39,12 @@
namespace mongo {
-MONGO_FAIL_POINT_DECLARE(rsStopGetMore);
-MONGO_FAIL_POINT_DECLARE(respondWithNotPrimaryInCommandDispatch);
+extern FailPoint rsStopGetMore;
+extern FailPoint respondWithNotPrimaryInCommandDispatch;
// When active, we won't check if we are master in command dispatch. Activate this if you want to
// test failing during command execution.
-MONGO_FAIL_POINT_DECLARE(skipCheckingForNotMasterInCommandDispatch);
+extern FailPoint skipCheckingForNotMasterInCommandDispatch;
/**
* Helpers for writing ServiceEntryPointImpl implementations from a reusable core.
diff --git a/src/mongo/db/snapshot_window_util.cpp b/src/mongo/db/snapshot_window_util.cpp
index 75d59c4abbd..05a46b42e13 100644
--- a/src/mongo/db/snapshot_window_util.cpp
+++ b/src/mongo/db/snapshot_window_util.cpp
@@ -88,7 +88,7 @@ void _decreaseTargetSnapshotWindowSize(WithLock lock, OperationContext* opCtx) {
} // namespace
void increaseTargetSnapshotWindowSize(OperationContext* opCtx) {
- if (MONGO_FAIL_POINT(preventDynamicSnapshotHistoryWindowTargetAdjustments)) {
+ if (MONGO_unlikely(preventDynamicSnapshotHistoryWindowTargetAdjustments.shouldFail())) {
return;
}
@@ -146,7 +146,7 @@ void increaseTargetSnapshotWindowSize(OperationContext* opCtx) {
}
void decreaseTargetSnapshotWindowSize(OperationContext* opCtx) {
- if (MONGO_FAIL_POINT(preventDynamicSnapshotHistoryWindowTargetAdjustments)) {
+ if (MONGO_unlikely(preventDynamicSnapshotHistoryWindowTargetAdjustments.shouldFail())) {
return;
}
diff --git a/src/mongo/db/storage/flow_control.cpp b/src/mongo/db/storage/flow_control.cpp
index e811f87d4f5..ac010f891ae 100644
--- a/src/mongo/db/storage/flow_control.cpp
+++ b/src/mongo/db/storage/flow_control.cpp
@@ -294,8 +294,8 @@ int FlowControl::getNumTickets() {
// Flow Control is only enabled on nodes that can accept writes.
const bool canAcceptWrites = _replCoord->canAcceptNonLocalWrites();
- MONGO_FAIL_POINT_BLOCK(flowControlTicketOverride, failpointObj) {
- int numTickets = failpointObj.getData().getIntField("numTickets");
+ if (auto sfp = flowControlTicketOverride.scoped(); MONGO_unlikely(sfp.isActive())) {
+ int numTickets = sfp.getData().getIntField("numTickets");
if (numTickets > 0 && canAcceptWrites) {
return numTickets;
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index b911c67f590..f3a2c93e709 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -1125,7 +1125,7 @@ protected:
std::memcmp(_key.getBuffer(), item.data, std::min(_key.getSize(), item.size));
bool nextNotIncreasing = cmp > 0 || (cmp == 0 && _key.getSize() > item.size);
- if (MONGO_FAIL_POINT(WTEmulateOutOfOrderNextIndexKey)) {
+ if (MONGO_unlikely(WTEmulateOutOfOrderNextIndexKey.shouldFail())) {
log() << "WTIndex::updatePosition simulating next key not increasing.";
nextNotIncreasing = true;
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 1bad2e82872..556a01a2efb 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -1663,7 +1663,7 @@ void WiredTigerKVEngine::setOldestTimestampFromStable() {
// Set the oldest timestamp to the stable timestamp to ensure that there is no lag window
// between the two.
- if (MONGO_FAIL_POINT(WTSetOldestTSToStableTS)) {
+ if (MONGO_unlikely(WTSetOldestTSToStableTS.shouldFail())) {
setOldestTimestamp(stableTimestamp, false);
return;
}
@@ -1681,7 +1681,7 @@ void WiredTigerKVEngine::setOldestTimestampFromStable() {
}
void WiredTigerKVEngine::setOldestTimestamp(Timestamp newOldestTimestamp, bool force) {
- if (MONGO_FAIL_POINT(WTPreserveSnapshotHistoryIndefinitely)) {
+ if (MONGO_unlikely(WTPreserveSnapshotHistoryIndefinitely.shouldFail())) {
return;
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
index 2edf06fb8a2..fbf0b9450a3 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
@@ -205,7 +205,7 @@ void WiredTigerOplogManager::_oplogJournalThreadLoop(WiredTigerSessionCache* ses
}
}
- while (!_shuttingDown && MONGO_FAIL_POINT(WTPausePrimaryOplogDurabilityLoop)) {
+ while (!_shuttingDown && MONGO_unlikely(WTPausePrimaryOplogDurabilityLoop.shouldFail())) {
lk.unlock();
sleepmillis(10);
lk.lock();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h
index d2de945877d..8ce0a079bd4 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h
@@ -38,12 +38,12 @@
namespace mongo {
// When set, simulates returning WT_PREPARE_CONFLICT on WT cursor read operations.
-MONGO_FAIL_POINT_DECLARE(WTPrepareConflictForReads);
+extern FailPoint WTPrepareConflictForReads;
// When set, WT_ROLLBACK is returned in place of retrying on WT_PREPARE_CONFLICT errors.
-MONGO_FAIL_POINT_DECLARE(WTSkipPrepareConflictRetries);
+extern FailPoint WTSkipPrepareConflictRetries;
-MONGO_FAIL_POINT_DECLARE(WTPrintPrepareConflictLog);
+extern FailPoint WTPrintPrepareConflictLog;
/**
* Logs a message with the number of prepare conflict retry attempts.
@@ -72,8 +72,8 @@ int wiredTigerPrepareConflictRetry(OperationContext* opCtx, F&& f) {
// error other than WT_PREPARE_CONFLICT. Reset PrepareConflictTracker accordingly.
ON_BLOCK_EXIT([opCtx] { PrepareConflictTracker::get(opCtx).endPrepareConflict(); });
// If the failpoint is enabled, don't call the function, just simulate a conflict.
- int ret =
- MONGO_FAIL_POINT(WTPrepareConflictForReads) ? WT_PREPARE_CONFLICT : WT_READ_CHECK(f());
+ int ret = MONGO_unlikely(WTPrepareConflictForReads.shouldFail()) ? WT_PREPARE_CONFLICT
+ : WT_READ_CHECK(f());
if (ret != WT_PREPARE_CONFLICT)
return ret;
@@ -85,7 +85,7 @@ int wiredTigerPrepareConflictRetry(OperationContext* opCtx, F&& f) {
// this way are expected to be set to ignore prepare conflicts.
invariant(!opCtx->isIgnoringInterrupts());
- if (MONGO_FAIL_POINT(WTPrintPrepareConflictLog)) {
+ if (MONGO_unlikely(WTPrintPrepareConflictLog.shouldFail())) {
wiredTigerPrepareConflictFailPointLog();
}
@@ -111,7 +111,7 @@ int wiredTigerPrepareConflictRetry(OperationContext* opCtx, F&& f) {
str::stream() << lock.resourceId.toString() << " in " << modeName(lock.mode));
}
- if (MONGO_FAIL_POINT(WTSkipPrepareConflictRetries)) {
+ if (MONGO_unlikely(WTSkipPrepareConflictRetries.shouldFail())) {
// Callers of wiredTigerPrepareConflictRetry() should eventually call wtRCToStatus() via
// invariantWTOK() and have the WT_ROLLBACK error bubble up as a WriteConflictException.
// Enabling the "skipWriteConflictRetries" failpoint in conjunction with the
@@ -124,8 +124,8 @@ int wiredTigerPrepareConflictRetry(OperationContext* opCtx, F&& f) {
attempts++;
auto lastCount = recoveryUnit->getSessionCache()->getPrepareCommitOrAbortCount();
// If the failpoint is enabled, don't call the function, just simulate a conflict.
- ret =
- MONGO_FAIL_POINT(WTPrepareConflictForReads) ? WT_PREPARE_CONFLICT : WT_READ_CHECK(f());
+ ret = MONGO_unlikely(WTPrepareConflictForReads.shouldFail()) ? WT_PREPARE_CONFLICT
+ : WT_READ_CHECK(f());
if (ret != WT_PREPARE_CONFLICT)
return ret;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index e33e9a09f00..c0bf2784dc9 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -53,13 +53,14 @@
* conflict exception if the WTWriteConflictException failpoint is enabled. This is only checked
* on cursor methods that make modifications.
*/
-#define WT_OP_CHECK(x) (((MONGO_FAIL_POINT(WTWriteConflictException))) ? (WT_ROLLBACK) : (x))
+#define WT_OP_CHECK(x) \
+ (((MONGO_unlikely(WTWriteConflictException.shouldFail()))) ? (WT_ROLLBACK) : (x))
/**
* Identical to WT_OP_CHECK except this is checked on cursor seeks/advancement.
*/
#define WT_READ_CHECK(x) \
- (((MONGO_FAIL_POINT(WTWriteConflictExceptionForReads))) ? (WT_ROLLBACK) : (x))
+ (((MONGO_unlikely(WTWriteConflictExceptionForReads.shouldFail()))) ? (WT_ROLLBACK) : (x))
namespace mongo {
@@ -520,11 +521,11 @@ private:
// WT failpoint to throw write conflict exceptions randomly
-MONGO_FAIL_POINT_DECLARE(WTWriteConflictException);
-MONGO_FAIL_POINT_DECLARE(WTWriteConflictExceptionForReads);
+extern FailPoint WTWriteConflictException;
+extern FailPoint WTWriteConflictExceptionForReads;
// Prevents oplog writes from being considered durable on the primary. Once activated, new writes
// will not be considered durable until deactivated. It is unspecified whether writes that commit
// before activation will become visible while active.
-MONGO_FAIL_POINT_DECLARE(WTPausePrimaryOplogDurabilityLoop);
+extern FailPoint WTPausePrimaryOplogDurabilityLoop;
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index b0275a2c7ab..e6abd29ed08 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -185,7 +185,7 @@ void WiredTigerRecoveryUnit::_commit() {
}
_setState(State::kCommitting);
- if (MONGO_FAIL_POINT(WTAlwaysNotifyPrepareConflictWaiters)) {
+ if (MONGO_unlikely(WTAlwaysNotifyPrepareConflictWaiters.shouldFail())) {
notifyDone = true;
}
@@ -204,7 +204,7 @@ void WiredTigerRecoveryUnit::_abort() {
}
_setState(State::kAborting);
- if (notifyDone || MONGO_FAIL_POINT(WTAlwaysNotifyPrepareConflictWaiters)) {
+ if (notifyDone || MONGO_unlikely(WTAlwaysNotifyPrepareConflictWaiters.shouldFail())) {
_sessionCache->notifyPreparedUnitOfWorkHasCommittedOrAborted();
}
diff --git a/src/mongo/db/storage/write_unit_of_work.cpp b/src/mongo/db/storage/write_unit_of_work.cpp
index c1dfa10d09a..8bddf4e9c59 100644
--- a/src/mongo/db/storage/write_unit_of_work.cpp
+++ b/src/mongo/db/storage/write_unit_of_work.cpp
@@ -102,7 +102,7 @@ void WriteUnitOfWork::commit() {
invariant(!_released);
invariant(_opCtx->_ruState == RecoveryUnitState::kActiveUnitOfWork);
if (_toplevel) {
- if (MONGO_FAIL_POINT(sleepBeforeCommit)) {
+ if (MONGO_unlikely(sleepBeforeCommit.shouldFail())) {
sleepFor(Milliseconds(100));
}
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index e01ffa8a10e..26ab885031a 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -637,11 +637,11 @@ TransactionParticipant::OplogSlotReserver::OplogSlotReserver(OperationContext* o
}
TransactionParticipant::OplogSlotReserver::~OplogSlotReserver() {
- if (MONGO_FAIL_POINT(hangBeforeReleasingTransactionOplogHole)) {
+ if (MONGO_unlikely(hangBeforeReleasingTransactionOplogHole.shouldFail())) {
log()
<< "transaction - hangBeforeReleasingTransactionOplogHole fail point enabled. Blocking "
"until fail point is disabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeReleasingTransactionOplogHole);
+ hangBeforeReleasingTransactionOplogHole.pauseWhileSet();
}
// If the constructor did not complete, we do not attempt to abort the units of work.
@@ -744,7 +744,7 @@ void TransactionParticipant::TxnResources::release(OperationContext* opCtx) {
}
_locker->reacquireTicket(opCtx);
- if (MONGO_FAIL_POINT(restoreLocksFail)) {
+ if (MONGO_unlikely(restoreLocksFail.shouldFail())) {
uasserted(ErrorCodes::LockTimeout, str::stream() << "Lock restore failed due to failpoint");
}
@@ -1002,7 +1002,7 @@ void TransactionParticipant::Participant::unstashTransactionResources(OperationC
// The Client lock must not be held when executing this failpoint as it will block currentOp
// execution.
- if (MONGO_FAIL_POINT(hangAfterPreallocateSnapshot)) {
+ if (MONGO_unlikely(hangAfterPreallocateSnapshot.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&hangAfterPreallocateSnapshot, opCtx, "hangAfterPreallocateSnapshot");
}
@@ -1116,12 +1116,12 @@ Timestamp TransactionParticipant::Participant::prepareTransaction(
o(lk).prepareOpTime = prepareOplogSlot;
}
- if (MONGO_FAIL_POINT(hangAfterReservingPrepareTimestamp)) {
+ if (MONGO_unlikely(hangAfterReservingPrepareTimestamp.shouldFail())) {
// This log output is used in js tests so please leave it.
log() << "transaction - hangAfterReservingPrepareTimestamp fail point "
"enabled. Blocking until fail point is disabled. Prepare OpTime: "
<< prepareOplogSlot;
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterReservingPrepareTimestamp);
+ hangAfterReservingPrepareTimestamp.pauseWhileSet();
}
}
opCtx->recoveryUnit()->setPrepareTimestamp(prepareOplogSlot.getTimestamp());
@@ -1145,10 +1145,10 @@ Timestamp TransactionParticipant::Participant::prepareTransaction(
o(lk).lastWriteOpTime = prepareOplogSlot;
}
- if (MONGO_FAIL_POINT(hangAfterSettingPrepareStartTime)) {
+ if (MONGO_unlikely(hangAfterSettingPrepareStartTime.shouldFail())) {
log() << "transaction - hangAfterSettingPrepareStartTime fail point enabled. Blocking "
"until fail point is disabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangAfterSettingPrepareStartTime);
+ hangAfterSettingPrepareStartTime.pauseWhileSet();
}
// We unlock the RSTL to allow prepared transactions to survive state transitions. This should
@@ -1314,7 +1314,7 @@ void TransactionParticipant::Participant::commitPreparedTransaction(
"commitTransaction for a prepared transaction cannot be run before its prepare "
"oplog entry has been majority committed",
replCoord->getLastCommittedOpTime().getTimestamp() >= prepareTimestamp ||
- MONGO_FAIL_POINT(skipCommitTxnCheckPrepareMajorityCommitted));
+ MONGO_unlikely(skipCommitTxnCheckPrepareMajorityCommitted.shouldFail()));
}
try {
@@ -2122,9 +2122,7 @@ void TransactionParticipant::Participant::_registerUpdateCacheOnCommit(
}
});
- MONGO_FAIL_POINT_BLOCK(onPrimaryTransactionalWrite, customArgs) {
- const auto& data = customArgs.getData();
-
+ onPrimaryTransactionalWrite.execute([&](const BSONObj& data) {
const auto closeConnectionElem = data["closeConnection"];
if (closeConnectionElem.eoo() || closeConnectionElem.Bool()) {
opCtx->getClient()->session()->end();
@@ -2138,7 +2136,7 @@ void TransactionParticipant::Participant::_registerUpdateCacheOnCommit(
<< "Failing write for " << _sessionId() << ":" << o().activeTxnNumber
<< " due to failpoint. The write must not be reflected.");
}
- }
+ });
}
} // namespace mongo
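The onPrimaryTransactionalWrite hunk above shows the replacement for MONGO_FAIL_POINT_BLOCK: FailPoint::execute hands the configured payload to the lambda instead of exposing it through getData(). A hedged sketch of the pattern; myFailPoint and the "closeConnection" field are illustrative only:

    myFailPoint.execute([&](const BSONObj& data) {
        // 'data' is the BSON document supplied when the fail point was enabled,
        // e.g. setMode(FailPoint::alwaysOn, 0, BSON("closeConnection" << true)).
        if (data["closeConnection"].trueValue()) {
            // act on the configured payload
        }
    });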
diff --git a/src/mongo/db/transaction_participant_test.cpp b/src/mongo/db/transaction_participant_test.cpp
index a638c8f6e2d..c5e58a96fbf 100644
--- a/src/mongo/db/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction_participant_test.cpp
@@ -848,7 +848,7 @@ TEST_F(TxnParticipantTest, UnstashFailsShouldLeaveTxnResourceStashUnchanged) {
ASSERT_FALSE(txnParticipant.getTxnResourceStashLockerForTest()->isLocked());
// Enable fail point.
- getGlobalFailPointRegistry()->getFailPoint("restoreLocksFail")->setMode(FailPoint::alwaysOn);
+ globalFailPointRegistry().find("restoreLocksFail")->setMode(FailPoint::alwaysOn);
ASSERT_THROWS_CODE(txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"),
AssertionException,
@@ -858,7 +858,7 @@ TEST_F(TxnParticipantTest, UnstashFailsShouldLeaveTxnResourceStashUnchanged) {
ASSERT_FALSE(txnParticipant.getTxnResourceStashLockerForTest()->isLocked());
// Disable fail point.
- getGlobalFailPointRegistry()->getFailPoint("restoreLocksFail")->setMode(FailPoint::off);
+ globalFailPointRegistry().find("restoreLocksFail")->setMode(FailPoint::off);
// Should be successfully able to perform lock restore.
txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction");
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index c7a5202b7b0..398ad6b1c8c 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -214,9 +214,9 @@ private:
LOG(1) << "ns: " << collectionNSS << " key: " << key << " name: " << name;
AutoGetCollection autoGetCollection(opCtx, collectionNSS, MODE_IX);
- if (MONGO_FAIL_POINT(hangTTLMonitorWithLock)) {
+ if (MONGO_unlikely(hangTTLMonitorWithLock.shouldFail())) {
log() << "Hanging due to hangTTLMonitorWithLock fail point";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangTTLMonitorWithLock);
+ hangTTLMonitorWithLock.pauseWhileSet(opCtx);
}
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index b4e2b756f9f..a1ea9c5bf92 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -171,7 +171,7 @@ Status waitForWriteConcern(OperationContext* opCtx,
if (!opCtx->getClient()->isInDirectClient()) {
// Respecting this failpoint for internal clients prevents stepup from working properly.
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeWaitingForWriteConcern);
+ hangBeforeWaitingForWriteConcern.pauseWhileSet();
}
// Next handle blocking on disk
diff --git a/src/mongo/executor/network_interface.h b/src/mongo/executor/network_interface.h
index 6f938efb3a1..09a7fdaeb75 100644
--- a/src/mongo/executor/network_interface.h
+++ b/src/mongo/executor/network_interface.h
@@ -42,8 +42,8 @@
namespace mongo {
namespace executor {
-MONGO_FAIL_POINT_DECLARE(networkInterfaceDiscardCommandsBeforeAcquireConn);
-MONGO_FAIL_POINT_DECLARE(networkInterfaceDiscardCommandsAfterAcquireConn);
+extern FailPoint networkInterfaceDiscardCommandsBeforeAcquireConn;
+extern FailPoint networkInterfaceDiscardCommandsAfterAcquireConn;
/**
* Interface to networking for use by TaskExecutor implementations.
diff --git a/src/mongo/executor/network_interface_tl.cpp b/src/mongo/executor/network_interface_tl.cpp
index 2cc0fc9244f..bcd1672e50e 100644
--- a/src/mongo/executor/network_interface_tl.cpp
+++ b/src/mongo/executor/network_interface_tl.cpp
@@ -286,7 +286,7 @@ Status NetworkInterfaceTL::startCommand(const TaskExecutor::CallbackHandle& cbHa
}
});
- if (MONGO_FAIL_POINT(networkInterfaceDiscardCommandsBeforeAcquireConn)) {
+ if (MONGO_unlikely(networkInterfaceDiscardCommandsBeforeAcquireConn.shouldFail())) {
log() << "Discarding command due to failpoint before acquireConn";
return Status::OK();
}
@@ -311,7 +311,7 @@ Status NetworkInterfaceTL::startCommand(const TaskExecutor::CallbackHandle& cbHa
// We have a connection and the command hasn't already been attempted
cmdState->request.emplace(cmdState->requestOnAny, idx);
- if (MONGO_FAIL_POINT(networkInterfaceDiscardCommandsAfterAcquireConn)) {
+ if (MONGO_unlikely(networkInterfaceDiscardCommandsAfterAcquireConn.shouldFail())) {
log() << "Discarding command due to failpoint after acquireConn";
return Status::OK();
}
diff --git a/src/mongo/executor/scoped_task_executor.cpp b/src/mongo/executor/scoped_task_executor.cpp
index a9b4e32dde8..e7e2f1bd5ae 100644
--- a/src/mongo/executor/scoped_task_executor.cpp
+++ b/src/mongo/executor/scoped_task_executor.cpp
@@ -215,10 +215,10 @@ private:
std::piecewise_construct, std::forward_as_tuple(id), std::forward_as_tuple());
};
- if (MONGO_FAIL_POINT(ScopedTaskExecutorHangBeforeSchedule)) {
+ if (MONGO_unlikely(ScopedTaskExecutorHangBeforeSchedule.shouldFail())) {
ScopedTaskExecutorHangBeforeSchedule.setMode(FailPoint::off);
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(ScopedTaskExecutorHangExitBeforeSchedule);
+ ScopedTaskExecutorHangExitBeforeSchedule.pauseWhileSet();
}
// State 2 - Indeterminate state. We don't know yet if the task will get scheduled.
@@ -259,7 +259,7 @@ private:
doWorkAndNotify(args);
});
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(ScopedTaskExecutorHangAfterSchedule);
+ ScopedTaskExecutorHangAfterSchedule.pauseWhileSet();
stdx::unique_lock lk(_mutex);
diff --git a/src/mongo/executor/scoped_task_executor.h b/src/mongo/executor/scoped_task_executor.h
index 89d6187808f..dc166606115 100644
--- a/src/mongo/executor/scoped_task_executor.h
+++ b/src/mongo/executor/scoped_task_executor.h
@@ -107,9 +107,9 @@ private:
std::shared_ptr<TaskExecutor> _executor;
};
-MONGO_FAIL_POINT_DECLARE(ScopedTaskExecutorHangBeforeSchedule);
-MONGO_FAIL_POINT_DECLARE(ScopedTaskExecutorHangExitBeforeSchedule);
-MONGO_FAIL_POINT_DECLARE(ScopedTaskExecutorHangAfterSchedule);
+extern FailPoint ScopedTaskExecutorHangBeforeSchedule;
+extern FailPoint ScopedTaskExecutorHangExitBeforeSchedule;
+extern FailPoint ScopedTaskExecutorHangAfterSchedule;
} // namespace executor
} // namespace mongo
diff --git a/src/mongo/executor/scoped_task_executor_test.cpp b/src/mongo/executor/scoped_task_executor_test.cpp
index ec078cb4dde..49c3842f392 100644
--- a/src/mongo/executor/scoped_task_executor_test.cpp
+++ b/src/mongo/executor/scoped_task_executor_test.cpp
@@ -288,7 +288,7 @@ TEST_F(ScopedTaskExecutorTest, scheduleLoseRaceWithShutdownOfUnderlying) {
.isOK());
});
- MONGO_FAIL_POINT_PAUSE_WHILE_SET((bfp));
+ (bfp).pauseWhileSet();
shutdownUnderlying();
diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp
index 808b2a7350c..55adfc29984 100644
--- a/src/mongo/executor/thread_pool_task_executor.cpp
+++ b/src/mongo/executor/thread_pool_task_executor.cpp
@@ -420,7 +420,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC
const RemoteCommandOnAnyCallbackFn& cb,
const BatonHandle& baton) {
- if (MONGO_FAIL_POINT(initialSyncFuzzerSynchronizationPoint1)) {
+ if (MONGO_unlikely(initialSyncFuzzerSynchronizationPoint1.shouldFail())) {
// We are only going to pause on these failpoints if the command issued is for the
// collection cloning part of initial sync.
const auto cmdName = request.cmdObj.firstElementFieldName();
@@ -431,11 +431,11 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC
log() << "Collection Cloner scheduled a remote command on the " << request.dbname
<< " db: " << request.cmdObj;
log() << "initialSyncFuzzerSynchronizationPoint1 fail point enabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(initialSyncFuzzerSynchronizationPoint1);
+ initialSyncFuzzerSynchronizationPoint1.pauseWhileSet();
- if (MONGO_FAIL_POINT(initialSyncFuzzerSynchronizationPoint2)) {
+ if (MONGO_unlikely(initialSyncFuzzerSynchronizationPoint2.shouldFail())) {
log() << "initialSyncFuzzerSynchronizationPoint2 fail point enabled.";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET(initialSyncFuzzerSynchronizationPoint2);
+ initialSyncFuzzerSynchronizationPoint2.pauseWhileSet();
}
}
}
@@ -600,7 +600,7 @@ void ThreadPoolTaskExecutor::scheduleIntoPool_inlock(WorkQueue* fromQueue,
lk.unlock();
- if (MONGO_FAIL_POINT(scheduleIntoPoolSpinsUntilThreadPoolTaskExecutorShutsDown)) {
+ if (MONGO_unlikely(scheduleIntoPoolSpinsUntilThreadPoolTaskExecutorShutsDown.shouldFail())) {
scheduleIntoPoolSpinsUntilThreadPoolTaskExecutorShutsDown.setMode(FailPoint::off);
lk.lock();
diff --git a/src/mongo/executor/thread_pool_task_executor.h b/src/mongo/executor/thread_pool_task_executor.h
index 218987383cb..9106f596069 100644
--- a/src/mongo/executor/thread_pool_task_executor.h
+++ b/src/mongo/executor/thread_pool_task_executor.h
@@ -44,8 +44,8 @@ namespace mongo {
class ThreadPoolInterface;
namespace executor {
-MONGO_FAIL_POINT_DECLARE(initialSyncFuzzerSynchronizationPoint1);
-MONGO_FAIL_POINT_DECLARE(initialSyncFuzzerSynchronizationPoint2);
+extern FailPoint initialSyncFuzzerSynchronizationPoint1;
+extern FailPoint initialSyncFuzzerSynchronizationPoint2;
struct ConnectionPoolStats;
class NetworkInterface;
diff --git a/src/mongo/executor/thread_pool_task_executor_test.cpp b/src/mongo/executor/thread_pool_task_executor_test.cpp
index ed458e50a7c..fb7330ca3ee 100644
--- a/src/mongo/executor/thread_pool_task_executor_test.cpp
+++ b/src/mongo/executor/thread_pool_task_executor_test.cpp
@@ -215,11 +215,11 @@ TEST_F(ThreadPoolExecutorTest, ShutdownAndScheduleWorkRaceDoesNotCrash) {
})
.getStatus());
- auto fpTPTE1 = getGlobalFailPointRegistry()->getFailPoint(
- "scheduleIntoPoolSpinsUntilThreadPoolTaskExecutorShutsDown");
+ auto fpTPTE1 =
+ globalFailPointRegistry().find("scheduleIntoPoolSpinsUntilThreadPoolTaskExecutorShutsDown");
fpTPTE1->setMode(FailPoint::alwaysOn);
barrier.countDownAndWait();
- MONGO_FAIL_POINT_PAUSE_WHILE_SET((*fpTPTE1));
+ (*fpTPTE1).pauseWhileSet();
executor.shutdown();
executor.join();
ASSERT_OK(status1);
@@ -247,11 +247,11 @@ TEST_F(ThreadPoolExecutorTest, ShutdownAndScheduleRaceDoesNotCrash) {
amRunningRecursively = false;
});
- auto fpTPTE1 = getGlobalFailPointRegistry()->getFailPoint(
- "scheduleIntoPoolSpinsUntilThreadPoolTaskExecutorShutsDown");
+ auto fpTPTE1 =
+ globalFailPointRegistry().find("scheduleIntoPoolSpinsUntilThreadPoolTaskExecutorShutsDown");
fpTPTE1->setMode(FailPoint::alwaysOn);
barrier.countDownAndWait();
- MONGO_FAIL_POINT_PAUSE_WHILE_SET((*fpTPTE1));
+ (*fpTPTE1).pauseWhileSet();
executor.shutdown();
executor.join();
ASSERT_OK(status1);
diff --git a/src/mongo/rpc/op_msg_integration_test.cpp b/src/mongo/rpc/op_msg_integration_test.cpp
index 8ee613a3616..5cd98239b5f 100644
--- a/src/mongo/rpc/op_msg_integration_test.cpp
+++ b/src/mongo/rpc/op_msg_integration_test.cpp
@@ -279,12 +279,12 @@ TEST(OpMsg, DocumentSequenceReturnsWork) {
constexpr auto kDisableChecksum = "dbClientConnectionDisableChecksum";
void disableClientChecksum() {
- auto failPoint = getGlobalFailPointRegistry()->getFailPoint(kDisableChecksum);
+ auto failPoint = globalFailPointRegistry().find(kDisableChecksum);
failPoint->setMode(FailPoint::alwaysOn);
}
void enableClientChecksum() {
- auto failPoint = getGlobalFailPointRegistry()->getFailPoint(kDisableChecksum);
+ auto failPoint = globalFailPointRegistry().find(kDisableChecksum);
failPoint->setMode(FailPoint::off);
}
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
index 9a2eb86e060..860b464939d 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
@@ -304,10 +304,9 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
const string who = str::stream() << _processID << ":" << getThreadName();
auto lockExpiration = _lockExpiration;
- MONGO_FAIL_POINT_BLOCK(setDistLockTimeout, customTimeout) {
- const BSONObj& data = customTimeout.getData();
+ setDistLockTimeout.execute([&](const BSONObj& data) {
lockExpiration = Milliseconds(data["timeoutMs"].numberInt());
- }
+ });
LOG(1) << "trying to acquire new distributed lock for " << name
<< " ( lock timeout : " << durationCount<Milliseconds>(lockExpiration)
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 501f24f39a0..a73251cd98c 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -709,7 +709,7 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
// TODO (Dianna) This fail point needs to be reexamined when CommitChunkMigration is in:
// migrations will no longer be able to exercise it, so split or merge will need to do so.
// SERVER-22659.
- if (MONGO_FAIL_POINT(failApplyChunkOps)) {
+ if (MONGO_unlikely(failApplyChunkOps.shouldFail())) {
status = Status(ErrorCodes::InternalError, "Failpoint 'failApplyChunkOps' generated error");
}
diff --git a/src/mongo/s/commands/cluster_refine_collection_shard_key_cmd.cpp b/src/mongo/s/commands/cluster_refine_collection_shard_key_cmd.cpp
index 23d8d1a3314..ede2821a364 100644
--- a/src/mongo/s/commands/cluster_refine_collection_shard_key_cmd.cpp
+++ b/src/mongo/s/commands/cluster_refine_collection_shard_key_cmd.cpp
@@ -58,10 +58,9 @@ public:
Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
nss));
- if (MONGO_FAIL_POINT(hangRefineCollectionShardKeyAfterRefresh)) {
+ if (MONGO_unlikely(hangRefineCollectionShardKeyAfterRefresh.shouldFail())) {
log() << "Hit hangRefineCollectionShardKeyAfterRefresh failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangRefineCollectionShardKeyAfterRefresh);
+ hangRefineCollectionShardKeyAfterRefresh.pauseWhileSet(opCtx);
}
ConfigsvrRefineCollectionShardKey configsvrRefineCollShardKey(
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index d7b10d8f88d..bc48977219d 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -204,10 +204,9 @@ bool handleWouldChangeOwningShardError(OperationContext* opCtx,
bool updatedShardKey = false;
boost::optional<BSONObj> upsertedId;
if (isRetryableWrite) {
- if (MONGO_FAIL_POINT(hangAfterThrowWouldChangeOwningShardRetryableWrite)) {
+ if (MONGO_unlikely(hangAfterThrowWouldChangeOwningShardRetryableWrite.shouldFail())) {
log() << "Hit hangAfterThrowWouldChangeOwningShardRetryableWrite failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(
- opCtx, hangAfterThrowWouldChangeOwningShardRetryableWrite);
+ hangAfterThrowWouldChangeOwningShardRetryableWrite.pauseWhileSet(opCtx);
}
RouterOperationContextSession routerSession(opCtx);
try {
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index e038044ac49..20782145cfa 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -214,7 +214,7 @@ public:
str::stream() << "Invalid target namespace: " << toNss.ns(),
toNss.isValid());
- if (MONGO_FAIL_POINT(useRenameCollectionPathThroughConfigsvr)) {
+ if (MONGO_unlikely(useRenameCollectionPathThroughConfigsvr.shouldFail())) {
bool dropTarget = cmdObj["dropTarget"].trueValue();
bool stayTemp = cmdObj["stayTemp"].trueValue();
diff --git a/src/mongo/s/commands/document_shard_key_update_util.cpp b/src/mongo/s/commands/document_shard_key_update_util.cpp
index 119193d9ee5..90f12f4c8f5 100644
--- a/src/mongo/s/commands/document_shard_key_update_util.cpp
+++ b/src/mongo/s/commands/document_shard_key_update_util.cpp
@@ -77,9 +77,9 @@ bool executeOperationsAsPartOfShardKeyUpdate(OperationContext* opCtx,
return false;
}
- if (MONGO_FAIL_POINT(hangBeforeInsertOnUpdateShardKey)) {
+ if (MONGO_unlikely(hangBeforeInsertOnUpdateShardKey.shouldFail())) {
log() << "Hit hangBeforeInsertOnUpdateShardKey failpoint";
- MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(opCtx, hangBeforeInsertOnUpdateShardKey);
+ hangBeforeInsertOnUpdateShardKey.pauseWhileSet(opCtx);
}
auto insertOpMsg = OpMsgRequest::fromDBAndBody(db, insertCmdObj);
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index cc93d78a591..5949128fe88 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -298,15 +298,15 @@ void execCommandClient(OperationContext* opCtx,
invocation->run(opCtx, result);
}
- auto body = result->getBodyBuilder();
-
- MONGO_FAIL_POINT_BLOCK_IF(failCommand, data, [&](const BSONObj& data) {
- return CommandHelpers::shouldActivateFailCommandFailPoint(
- data, request.getCommandName(), opCtx->getClient(), invocation->ns()) &&
- data.hasField("writeConcernError");
- }) {
- body.append(data.getData()["writeConcernError"]);
- }
+ failCommand.executeIf(
+ [&](const BSONObj& data) {
+ result->getBodyBuilder().append(data["writeConcernError"]);
+ },
+ [&](const BSONObj& data) {
+ return CommandHelpers::shouldActivateFailCommandFailPoint(
+ data, request.getCommandName(), opCtx->getClient(), invocation->ns()) &&
+ data.hasField("writeConcernError");
+ });
}
auto body = result->getBodyBuilder();
@@ -484,7 +484,7 @@ void runCommand(OperationContext* opCtx,
// under a transaction (see the invariant inside ShardConnection). Because of this,
// the retargeting error could not have come from a ShardConnection, so we don't
// need to reset the connection's in-memory state.
- if (!MONGO_FAIL_POINT(doNotRefreshShardsOnRetargettingError) &&
+ if (!MONGO_unlikely(doNotRefreshShardsOnRetargettingError.shouldFail()) &&
!TransactionRouter::get(opCtx)) {
ShardConnection::checkMyConnectionVersions(opCtx, staleNs.ns());
}
@@ -990,7 +990,7 @@ void Strategy::explainFind(OperationContext* opCtx,
// under a transaction (see the invariant inside ShardConnection). Because of this, the
// retargeting error could not have come from a ShardConnection, so we don't need to
// reset the connection's in-memory state.
- if (!MONGO_FAIL_POINT(doNotRefreshShardsOnRetargettingError) &&
+ if (!MONGO_unlikely(doNotRefreshShardsOnRetargettingError.shouldFail()) &&
!TransactionRouter::get(opCtx)) {
ShardConnection::checkMyConnectionVersions(opCtx, staleNs.ns());
}
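The failCommand hunk above is the executeIf form: the second lambda is a predicate that decides whether the body runs at all, and a false return leaves the fail point's state untouched (an nTimes countdown is not consumed), matching the old MONGO_FAIL_POINT_BLOCK_IF semantics. A sketch with a hypothetical fail point:

    myFailPoint.executeIf(
        [&](const BSONObj& data) {
            // body: runs only when the fail point is active and the predicate passed
        },
        [&](const BSONObj& data) {
            // predicate: gate on the configured payload
            return data.hasField("writeConcernError");
        });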
diff --git a/src/mongo/s/query/cluster_aggregate.cpp b/src/mongo/s/query/cluster_aggregate.cpp
index cfc60541fb3..953f4c48d05 100644
--- a/src/mongo/s/query/cluster_aggregate.cpp
+++ b/src/mongo/s/query/cluster_aggregate.cpp
@@ -147,7 +147,7 @@ sharded_agg_helpers::DispatchShardPipelineResults dispatchExchangeConsumerPipeli
invariant(!litePipe.hasChangeStream());
auto opCtx = expCtx->opCtx;
- if (MONGO_FAIL_POINT(clusterAggregateFailToDispatchExchangeConsumerPipeline)) {
+ if (MONGO_unlikely(clusterAggregateFailToDispatchExchangeConsumerPipeline.shouldFail())) {
log() << "clusterAggregateFailToDispatchExchangeConsumerPipeline fail point enabled.";
uasserted(ErrorCodes::FailPointEnabled,
"Asserting on exhange consumer pipeline dispatch due to failpoint.");
@@ -300,7 +300,7 @@ AsyncRequestsSender::Response establishMergingShardCursor(OperationContext* opCt
const AggregationRequest& request,
const BSONObj mergeCmdObj,
const ShardId& mergingShardId) {
- if (MONGO_FAIL_POINT(clusterAggregateFailToEstablishMergingShardCursor)) {
+ if (MONGO_unlikely(clusterAggregateFailToEstablishMergingShardCursor.shouldFail())) {
log() << "clusterAggregateFailToEstablishMergingShardCursor fail point enabled.";
uasserted(ErrorCodes::FailPointEnabled,
"Asserting on establishing merging shard cursor due to failpoint.");
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index 274931769f9..a51435e0db4 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -601,7 +601,7 @@ StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* opCtx,
// If the 'waitAfterPinningCursorBeforeGetMoreBatch' fail point is enabled, set the 'msg'
// field of this operation's CurOp to signal that we've hit this point.
- if (MONGO_FAIL_POINT(waitAfterPinningCursorBeforeGetMoreBatch)) {
+ if (MONGO_unlikely(waitAfterPinningCursorBeforeGetMoreBatch.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&waitAfterPinningCursorBeforeGetMoreBatch,
opCtx,
@@ -624,7 +624,7 @@ StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* opCtx,
// If the 'waitWithPinnedCursorDuringGetMoreBatch' fail point is enabled, set the 'msg'
// field of this operation's CurOp to signal that we've hit this point.
- if (MONGO_FAIL_POINT(waitWithPinnedCursorDuringGetMoreBatch)) {
+ if (MONGO_unlikely(waitWithPinnedCursorDuringGetMoreBatch.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(&waitWithPinnedCursorDuringGetMoreBatch,
opCtx,
"waitWithPinnedCursorDuringGetMoreBatch");
@@ -701,7 +701,7 @@ StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* opCtx,
CurOp::get(opCtx)->debug().cursorExhausted = (idToReturn == 0);
CurOp::get(opCtx)->debug().nreturned = batch.size();
- if (MONGO_FAIL_POINT(waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch)) {
+ if (MONGO_unlikely(waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch.shouldFail())) {
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch,
opCtx,
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index e39a8b00142..dc66e21307b 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -459,7 +459,7 @@ public:
Grid::get(serviceContext)->shardRegistry()->updateReplSetHosts(connStr);
- if (MONGO_FAIL_POINT(failReplicaSetChangeConfigServerUpdateHook)) {
+ if (MONGO_unlikely(failReplicaSetChangeConfigServerUpdateHook.shouldFail())) {
return;
}
ShardRegistry::updateReplicaSetOnConfigServer(serviceContext, connStr);
diff --git a/src/mongo/s/transaction_router.cpp b/src/mongo/s/transaction_router.cpp
index 81cd77fcab0..b9a77045d82 100644
--- a/src/mongo/s/transaction_router.cpp
+++ b/src/mongo/s/transaction_router.cpp
@@ -719,7 +719,7 @@ void TransactionRouter::Router::_clearPendingParticipants(OperationContext* opCt
}
bool TransactionRouter::Router::canContinueOnStaleShardOrDbError(StringData cmdName) const {
- if (MONGO_FAIL_POINT(enableStaleVersionAndSnapshotRetriesWithinTransactions)) {
+ if (MONGO_unlikely(enableStaleVersionAndSnapshotRetriesWithinTransactions.shouldFail())) {
// We can always retry on the first overall statement because all targeted participants must
// be pending, so the retry will restart the local transaction on each one, overwriting any
// effects from the first attempt.
@@ -770,7 +770,7 @@ void TransactionRouter::Router::onViewResolutionError(OperationContext* opCtx,
}
bool TransactionRouter::Router::canContinueOnSnapshotError() const {
- if (MONGO_FAIL_POINT(enableStaleVersionAndSnapshotRetriesWithinTransactions)) {
+ if (MONGO_unlikely(enableStaleVersionAndSnapshotRetriesWithinTransactions.shouldFail())) {
return o().atClusterTime && o().atClusterTime->canChange(p().latestStmtId);
}
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
index 0c477a24c11..b5093581889 100644
--- a/src/mongo/scripting/engine.cpp
+++ b/src/mongo/scripting/engine.cpp
@@ -237,7 +237,7 @@ void Scope::loadStored(OperationContext* opCtx, bool ignoreNotConnected) {
uassert(10209, str::stream() << "name has to be a string: " << n, n.type() == String);
uassert(10210, "value has to be set", v.type() != EOO);
- if (MONGO_FAIL_POINT(mr_killop_test_fp)) {
+ if (MONGO_unlikely(mr_killop_test_fp.shouldFail())) {
/* This thread sleep makes the interrupts in the test come in at a time
* where the js misses the interrupt and throw an exception instead of
diff --git a/src/mongo/transport/session_asio.h b/src/mongo/transport/session_asio.h
index 484b11f3331..434371863e5 100644
--- a/src/mongo/transport/session_asio.h
+++ b/src/mongo/transport/session_asio.h
@@ -446,7 +446,7 @@ private:
std::error_code ec;
size_t size;
- if (MONGO_FAIL_POINT(transportLayerASIOshortOpportunisticReadWrite) &&
+ if (MONGO_unlikely(transportLayerASIOshortOpportunisticReadWrite.shouldFail()) &&
_blockingMode == Async) {
asio::mutable_buffer localBuffer = buffers;
@@ -529,7 +529,7 @@ private:
std::error_code ec;
std::size_t size;
- if (MONGO_FAIL_POINT(transportLayerASIOshortOpportunisticReadWrite) &&
+ if (MONGO_unlikely(transportLayerASIOshortOpportunisticReadWrite.shouldFail()) &&
_blockingMode == Async) {
asio::const_buffer localBuffer = buffers;
diff --git a/src/mongo/transport/transport_layer_asio.cpp b/src/mongo/transport/transport_layer_asio.cpp
index cad586f38d6..58153a870e4 100644
--- a/src/mongo/transport/transport_layer_asio.cpp
+++ b/src/mongo/transport/transport_layer_asio.cpp
@@ -621,7 +621,7 @@ Future<SessionHandle> TransportLayerASIO::asyncConnect(HostAndPort peer,
return makeConnectError(status, connector->peer, connector->resolvedEndpoint);
})
.getAsync([connector](Status connectResult) {
- if (MONGO_FAIL_POINT(transportLayerASIOasyncConnectTimesOut)) {
+ if (MONGO_unlikely(transportLayerASIOasyncConnectTimesOut.shouldFail())) {
log() << "asyncConnectTimesOut fail point is active. simulating timeout.";
return;
}
diff --git a/src/mongo/transport/transport_layer_asio.h b/src/mongo/transport/transport_layer_asio.h
index c0e14452adf..90008fe3c89 100644
--- a/src/mongo/transport/transport_layer_asio.h
+++ b/src/mongo/transport/transport_layer_asio.h
@@ -69,11 +69,11 @@ class ServiceEntryPoint;
namespace transport {
// This fail point simulates reads and writes that always return 1 byte and fail with EAGAIN
-MONGO_FAIL_POINT_DECLARE(transportLayerASIOshortOpportunisticReadWrite);
+extern FailPoint transportLayerASIOshortOpportunisticReadWrite;
// This fail point will cause an asyncConnect to timeout after it's successfully connected
// to the remote peer
-MONGO_FAIL_POINT_DECLARE(transportLayerASIOasyncConnectTimesOut);
+extern FailPoint transportLayerASIOasyncConnectTimesOut;
/**
* A TransportLayer implementation based on ASIO networking primitives.
diff --git a/src/mongo/util/diagnostic_info.cpp b/src/mongo/util/diagnostic_info.cpp
index a42cc5275a9..a05dcb8a3f6 100644
--- a/src/mongo/util/diagnostic_info.cpp
+++ b/src/mongo/util/diagnostic_info.cpp
@@ -73,7 +73,7 @@ MONGO_INITIALIZER(LockActions)(InitializerContext* context) {
DiagnosticInfo::Diagnostic::clearDiagnostic();
}
void onFailedLock() override {
- if (!MONGO_FAIL_POINT(keepDiagnosticCaptureOnFailedLock)) {
+ if (!MONGO_unlikely(keepDiagnosticCaptureOnFailedLock.shouldFail())) {
DiagnosticInfo::Diagnostic::clearDiagnostic();
}
}
diff --git a/src/mongo/util/diagnostic_info.h b/src/mongo/util/diagnostic_info.h
index 7f7325c9b23..0a4958f35de 100644
--- a/src/mongo/util/diagnostic_info.h
+++ b/src/mongo/util/diagnostic_info.h
@@ -36,7 +36,7 @@
#include "mongo/util/time_support.h"
namespace mongo {
-MONGO_FAIL_POINT_DECLARE(keepDiagnosticCaptureOnFailedLock);
+extern FailPoint keepDiagnosticCaptureOnFailedLock;
/**
* DiagnosticInfo keeps track of diagnostic information such as a developer provided
* name, the time when a lock was first acquired, and a partial caller call stack.
diff --git a/src/mongo/util/fail_point.cpp b/src/mongo/util/fail_point.cpp
index d5af18f318f..c40a4b829d6 100644
--- a/src/mongo/util/fail_point.cpp
+++ b/src/mongo/util/fail_point.cpp
@@ -84,7 +84,7 @@ void FailPoint::shouldFailCloseBlock() {
_fpInfo.subtractAndFetch(1);
}
-void FailPoint::setMode(Mode mode, ValType val, const BSONObj& extra) {
+void FailPoint::setMode(Mode mode, ValType val, BSONObj extra) {
/**
* Outline:
*
@@ -96,20 +96,20 @@ void FailPoint::setMode(Mode mode, ValType val, const BSONObj& extra) {
stdx::lock_guard<stdx::mutex> scoped(_modMutex);
// Step 1
- disableFailPoint();
+ disable();
// Step 2
while (_fpInfo.load() != 0) {
sleepmillis(50);
}
+ // Step 3
_mode = mode;
_timesOrPeriod.store(val);
-
- _data = extra.copy();
+ _data = std::move(extra);
if (_mode != off) {
- enableFailPoint();
+ enable();
}
}
@@ -117,19 +117,19 @@ const BSONObj& FailPoint::getData() const {
return _data;
}
-void FailPoint::enableFailPoint() {
- _fpInfo.fetchAndBitOr(ACTIVE_BIT);
+void FailPoint::enable() {
+ _fpInfo.fetchAndBitOr(kActiveBit);
}
-void FailPoint::disableFailPoint() {
- _fpInfo.fetchAndBitAnd(~ACTIVE_BIT);
+void FailPoint::disable() {
+ _fpInfo.fetchAndBitAnd(~kActiveBit);
}
FailPoint::RetCode FailPoint::slowShouldFailOpenBlock(
std::function<bool(const BSONObj&)> cb) noexcept {
ValType localFpInfo = _fpInfo.addAndFetch(1);
- if ((localFpInfo & ACTIVE_BIT) == 0) {
+ if ((localFpInfo & kActiveBit) == 0) {
return slowOff;
}
@@ -149,7 +149,7 @@ FailPoint::RetCode FailPoint::slowShouldFailOpenBlock(
}
case nTimes: {
if (_timesOrPeriod.subtractAndFetch(1) <= 0)
- disableFailPoint();
+ disable();
return slowOn;
}
@@ -168,8 +168,7 @@ FailPoint::RetCode FailPoint::slowShouldFailOpenBlock(
}
}
-StatusWith<std::tuple<FailPoint::Mode, FailPoint::ValType, BSONObj>> FailPoint::parseBSON(
- const BSONObj& obj) {
+StatusWith<FailPoint::ModeOptions> FailPoint::parseBSON(const BSONObj& obj) {
Mode mode = FailPoint::alwaysOn;
ValType val = 0;
const BSONElement modeElem(obj["mode"]);
@@ -177,7 +176,6 @@ StatusWith<std::tuple<FailPoint::Mode, FailPoint::ValType, BSONObj>> FailPoint::
return {ErrorCodes::IllegalOperation, "When setting a failpoint, you must supply a 'mode'"};
} else if (modeElem.type() == String) {
const std::string modeStr(modeElem.valuestr());
-
if (modeStr == "off") {
mode = FailPoint::off;
} else if (modeStr == "alwaysOn") {
@@ -255,7 +253,7 @@ StatusWith<std::tuple<FailPoint::Mode, FailPoint::ValType, BSONObj>> FailPoint::
data = obj["data"].Obj().getOwned();
}
- return std::make_tuple(mode, val, data);
+ return ModeOptions{mode, val, data};
}
BSONObj FailPoint::toBSON() const {
diff --git a/src/mongo/util/fail_point.h b/src/mongo/util/fail_point.h
index f82d85785a5..57ee76bca9d 100644
--- a/src/mongo/util/fail_point.h
+++ b/src/mongo/util/fail_point.h
@@ -44,22 +44,27 @@ namespace mongo {
* deactivated, as well as embed temporary data into it.
*
* The fail point has a static instance, which is represented by a FailPoint
- * object, and dynamic instance, which are all the threads in between
- * shouldFailOpenBlock and shouldFailCloseBlock.
+ * object, and dynamic instance, represented by FailPoint::Scoped handles.
*
* Sample use:
* // Declared somewhere:
* FailPoint makeBadThingsHappen;
*
* // Somewhere in the code
- * return false || MONGO_FAIL_POINT(makeBadThingsHappen);
+ * return false || MONGO_unlikely(makeBadThingsHappen.shouldFail());
*
* or
*
* // Somewhere in the code
- * MONGO_FAIL_POINT_BLOCK(makeBadThingsHappen, blockMakeBadThingsHappen) {
- * const BSONObj& data = blockMakeBadThingsHappen.getData();
+ * makeBadThingsHappen.execute([&](const BSONObj& data) {
* // Do something
+ * });
+ *
+ * // Another way to do it, where a lambda isn't suitable, e.g. to cause an early return
+ * // of the enclosing function.
+ * if (auto sfp = makeBadThingsHappen.scoped(); MONGO_unlikely(sfp.isActive())) {
+ * const BSONObj& data = sfp.getData();
+ * // Do something, including break, continue, return, etc...
* }
*
* Invariants:
@@ -69,13 +74,65 @@ namespace mongo {
* 2. Client visible fail point states are read-only when active.
*/
class FailPoint {
- FailPoint(const FailPoint&) = delete;
- FailPoint& operator=(const FailPoint&) = delete;
+private:
+ enum RetCode { fastOff = 0, slowOff, slowOn, userIgnored };
public:
- typedef unsigned ValType;
+ using ValType = unsigned;
enum Mode { off, alwaysOn, random, nTimes, skip };
- enum RetCode { fastOff = 0, slowOff, slowOn, userIgnored };
+
+ struct ModeOptions {
+ Mode mode;
+ ValType val;
+ BSONObj extra;
+ };
+
+ /**
+ * Helper class for making sure that FailPoint#shouldFailCloseBlock is called whenever
+ * FailPoint#shouldFailOpenBlock has been called.
+ *
+ * Users don't create these. They are only used within the execute and executeIf
+ * functions and returned by the scoped() and scopedIf() functions.
+ */
+ class Scoped {
+ public:
+ Scoped(FailPoint* failPoint, RetCode ret)
+ : _failPoint(failPoint),
+ _active(ret == FailPoint::slowOn),
+ _holdsRef(ret != FailPoint::fastOff) {}
+
+ ~Scoped() {
+ if (_holdsRef) {
+ _failPoint->shouldFailCloseBlock();
+ }
+ }
+
+ Scoped(const Scoped&) = delete;
+ Scoped& operator=(const Scoped&) = delete;
+
+ /**
+ * @return true if fail point is on.
+ * Calls to isActive should be placed inside MONGO_unlikely for performance.
+ */
+ bool isActive() {
+ return _active;
+ }
+
+ /**
+ * @return the data stored in the fail point. #isActive must be true
+ * before you can call this.
+ */
+ const BSONObj& getData() const {
+ // Assert when attempting to get data without holding a ref.
+ fassert(16445, _holdsRef);
+ return _failPoint->getData();
+ }
+
+ private:
+ FailPoint* _failPoint;
+ bool _active;
+ bool _holdsRef;
+ };
/**
* Explicitly resets the seed used for the PRNG in this thread. If not called on a thread,
@@ -84,21 +141,26 @@ public:
static void setThreadPRNGSeed(int32_t seed);
/**
- * Parses the FailPoint::Mode, FailPoint::ValType, and data BSONObj from the BSON.
+ * Parses the {Mode, ValType, BSONObj} from the BSON.
*/
- static StatusWith<std::tuple<Mode, ValType, BSONObj>> parseBSON(const BSONObj& obj);
+ static StatusWith<ModeOptions> parseBSON(const BSONObj& obj);
FailPoint();
+ FailPoint(const FailPoint&) = delete;
+ FailPoint& operator=(const FailPoint&) = delete;
+
/**
* Note: This is not side-effect free - it can change the state to OFF after calling.
- * Note: see MONGO_FAIL_POINT_BLOCK_IF for information on the passed callable
+ * Note: see `executeIf` for information on `pred`.
+ *
+ * Calls to shouldFail should be placed inside MONGO_unlikely for performance.
*
* @return true if fail point is active.
*/
- template <typename Callable = std::nullptr_t>
- inline bool shouldFail(Callable&& cb = nullptr) {
- RetCode ret = shouldFailOpenBlock(std::forward<Callable>(cb));
+ template <typename Pred>
+ bool shouldFail(Pred&& pred) {
+ RetCode ret = shouldFailOpenBlock(std::forward<Pred>(pred));
if (MONGO_likely(ret == fastOff)) {
return false;
@@ -108,34 +170,11 @@ public:
return ret == slowOn;
}
- /**
- * Checks whether fail point is active and increments the reference counter without
- * decrementing it. Must call shouldFailCloseBlock afterwards when the return value
- * is not fastOff. Otherwise, this will remain read-only forever.
- *
- * Note: see MONGO_FAIL_POINT_BLOCK_IF for information on the passed callable
- *
- * @return slowOn if its active and needs to be closed
- * userIgnored if its active and needs to be closed, but shouldn't be acted on
- * slowOff if its disabled and needs to be closed
- * fastOff if its disabled and doesn't need to be closed
- */
- template <typename Callable = std::nullptr_t>
- inline RetCode shouldFailOpenBlock(Callable&& cb = nullptr) {
- if (MONGO_likely((_fpInfo.loadRelaxed() & ACTIVE_BIT) == 0)) {
- return fastOff;
- }
-
- return slowShouldFailOpenBlock(std::forward<Callable>(cb));
+ bool shouldFail() {
+ return shouldFail(nullptr);
}
/**
- * Decrements the reference counter.
- * @see #shouldFailOpenBlock
- */
- void shouldFailCloseBlock();
-
- /**
* Changes the settings of this fail point. This will turn off the fail point
* and waits for all dynamic instances referencing this fail point to go away before
* actually modifying the settings.
@@ -157,143 +196,125 @@ public:
* that can be referenced afterwards with #getData. Defaults to an empty
* document.
*/
- void setMode(Mode mode, ValType val = 0, const BSONObj& extra = BSONObj());
+ void setMode(Mode mode, ValType val = 0, BSONObj extra = {});
+ void setMode(ModeOptions opt) {
+ setMode(std::move(opt.mode), std::move(opt.val), std::move(opt.extra));
+ }
/**
* @returns a BSON object showing the current mode and data stored.
*/
BSONObj toBSON() const;
-private:
- static const ValType ACTIVE_BIT = 1 << 31;
- static const ValType REF_COUNTER_MASK = ~ACTIVE_BIT;
-
- // Bit layout:
- // 31: tells whether this fail point is active.
- // 0~30: unsigned ref counter for active dynamic instances.
- AtomicWord<unsigned> _fpInfo{0};
-
- // Invariant: These should be read only if ACTIVE_BIT of _fpInfo is set.
- Mode _mode{off};
- AtomicWord<int> _timesOrPeriod{0};
- BSONObj _data;
-
- // protects _mode, _timesOrPeriod, _data
- mutable stdx::mutex _modMutex;
-
/**
- * Enables this fail point.
+ * Create a Scoped from this FailPoint.
+ * Use the Scoped object to access failpoint data.
*/
- void enableFailPoint();
-
+ Scoped scoped() {
+ return scopedIf(nullptr);
+ }
/**
- * Disables this fail point.
+ * Create a Scoped from this FailPoint, only active when `pred(payload)` is true.
+ * See `executeIf`. Use the Scoped object to access failpoint data.
*/
- void disableFailPoint();
+ template <typename Pred>
+ Scoped scopedIf(Pred&& pred) {
+ return Scoped(this, shouldFailOpenBlock(std::forward<Pred>(pred)));
+ }
- /**
- * slow path for #shouldFailOpenBlock
- *
- * If a callable is passed, and returns false, this will return userIgnored and avoid altering
- * the mode in any way. The argument is the fail point payload.
- */
- RetCode slowShouldFailOpenBlock(std::function<bool(const BSONObj&)> cb) noexcept;
+ template <typename F>
+ void execute(F&& f) {
+ return executeIf(f, nullptr);
+ }
/**
- * @return the stored BSONObj in this fail point. Note that this cannot be safely
- * read if this fail point is off.
+ * The predicate `pred` should behave like a `bool pred(const BSONObj& payload)`.
+ * If `pred(payload)`, then `f(payload)` is executed. Otherwise, `f` is not
+ * executed and this FailPoint's mode is not altered (e.g. `nTimes` isn't consumed).
*/
- const BSONObj& getData() const;
-
- friend class ScopedFailPoint;
-};
-
-/**
- * Helper class for making sure that FailPoint#shouldFailCloseBlock is called when
- * FailPoint#shouldFailOpenBlock was called. This should only be used within the
- * MONGO_FAIL_POINT_BLOCK macro.
- */
-class ScopedFailPoint {
- ScopedFailPoint(const ScopedFailPoint&) = delete;
- ScopedFailPoint& operator=(const ScopedFailPoint&) = delete;
+ template <typename F, typename Pred>
+ void executeIf(F&& f, Pred&& pred) {
+ auto sfp = scopedIf(std::forward<Pred>(pred));
+ if (MONGO_unlikely(sfp.isActive())) {
+ std::forward<F>(f)(sfp.getData());
+ }
+ }
-public:
- template <typename Callable = std::nullptr_t>
- ScopedFailPoint(FailPoint* failPoint, Callable&& cb = nullptr) : _failPoint(failPoint) {
- FailPoint::RetCode ret = _failPoint->shouldFailOpenBlock(std::forward<Callable>(cb));
- _shouldClose = ret != FailPoint::fastOff;
- _shouldRun = ret == FailPoint::slowOn;
+ void pauseWhileSet() {
+ while (MONGO_unlikely(shouldFail())) {
+ sleepmillis(100);
+ }
}
- ~ScopedFailPoint() {
- if (_shouldClose) {
- _failPoint->shouldFailCloseBlock();
+ void pauseWhileSet(OperationContext* opCtx) {
+ while (MONGO_unlikely(shouldFail())) {
+ opCtx->sleepFor(Milliseconds(100));
}
}
+private:
+ void enable();
+ void disable();
+
/**
- * @return true if fail point is on. This will be true at most once.
+ * Checks whether fail point is active and increments the reference counter without
+ * decrementing it. Must call shouldFailCloseBlock afterwards when the return value
+ * is not fastOff. Otherwise, this will remain read-only forever.
+ *
+ * Note: see `executeIf` for information on `pred`.
+ *
+ * @return slowOn if it's active and needs to be closed
+ * userIgnored if it's active and needs to be closed, but shouldn't be acted on
+ * slowOff if it's disabled and needs to be closed
+ * fastOff if it's disabled and doesn't need to be closed
*/
- inline bool isActive() {
- if (!_shouldRun) {
- return false;
+ template <typename Pred>
+ RetCode shouldFailOpenBlock(Pred&& pred) {
+ if (MONGO_likely((_fpInfo.loadRelaxed() & kActiveBit) == 0)) {
+ return fastOff;
}
- // We use this in a for loop to prevent iteration, thus flipping to inactive after the first
- // time.
- _shouldRun = false;
- return true;
+ return slowShouldFailOpenBlock(std::forward<Pred>(pred));
+ }
+
+ RetCode shouldFailOpenBlock() {
+ return shouldFailOpenBlock(nullptr);
}
/**
- * @return the data stored in the fail point. #isActive must be true
- * before you can call this.
+ * Decrements the reference counter.
+ * @see #shouldFailOpenBlock
*/
- const BSONObj& getData() const {
- // Assert when attempting to get data without incrementing ref counter.
- fassert(16445, _shouldClose);
- return _failPoint->getData();
- }
+ void shouldFailCloseBlock();
-private:
- FailPoint* _failPoint;
- bool _shouldRun;
- bool _shouldClose;
-};
+ /**
+ * Slow path for #shouldFailOpenBlock.
+ *
+ * If a callable is passed and returns false, this will return userIgnored and avoid altering
+ * the mode in any way. The argument is the fail point payload.
+ */
+ RetCode slowShouldFailOpenBlock(std::function<bool(const BSONObj&)> cb) noexcept;
-#define MONGO_FAIL_POINT(symbol) MONGO_unlikely(symbol.shouldFail())
+ /**
+ * @return the stored BSONObj in this fail point. Note that this cannot be safely
+ * read if this fail point is off.
+ */
+ const BSONObj& getData() const;
-inline void MONGO_FAIL_POINT_PAUSE_WHILE_SET(FailPoint& failPoint) {
- while (MONGO_FAIL_POINT(failPoint)) {
- sleepmillis(100);
- }
-}
+ static const ValType kActiveBit = 1 << 31;
-inline void MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED(OperationContext* opCtx,
- FailPoint& failPoint) {
- while (MONGO_FAIL_POINT(failPoint)) {
- opCtx->sleepFor(Milliseconds(100));
- }
-}
+ // Bit layout:
+ // 31: tells whether this fail point is active.
+ // 0~30: unsigned ref counter for active dynamic instances.
+ AtomicWord<std::uint32_t> _fpInfo{0};
-/**
- * Macro for creating a fail point with block context. Also use this when
- * you want to access the data stored in the fail point.
- */
-#define MONGO_FAIL_POINT_BLOCK(symbol, blockSymbol) \
- for (mongo::ScopedFailPoint blockSymbol(&symbol); MONGO_unlikely(blockSymbol.isActive());)
+ // Invariant: These should be read only if kActiveBit of _fpInfo is set.
+ Mode _mode{off};
+ AtomicWord<int> _timesOrPeriod{0};
+ BSONObj _data;
-/**
- * Macro for creating a fail point with block context and a pre-flight condition. Also use this when
- * you want to access the data stored in the fail point.
- *
- * Your passed in callable should take a const BSONObj& (the fail point payload) and return bool.
- * If it returns true, you'll process the block as normal. If you return false, you'll exit the
- * block without evaluating it and avoid altering the mode in any way (you won't consume nTimes for
- * instance).
- */
-#define MONGO_FAIL_POINT_BLOCK_IF(symbol, blockSymbol, ...) \
- for (mongo::ScopedFailPoint blockSymbol(&symbol, __VA_ARGS__); \
- MONGO_unlikely(blockSymbol.isActive());)
+ // protects _mode, _timesOrPeriod, _data
+ mutable stdx::mutex _modMutex;
+};
} // namespace mongo
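The rewritten header above folds the old pause macros and the ScopedFailPoint helper into member functions. A sketch of the blocking helpers and of the scoped() escape hatch for control flow a lambda cannot express; hangPoint, the "bail" field, and the in-scope opCtx are assumptions for illustration:

    hangPoint.pauseWhileSet();       // replaces MONGO_FAIL_POINT_PAUSE_WHILE_SET
    hangPoint.pauseWhileSet(opCtx);  // replaces MONGO_FAIL_POINT_PAUSE_WHILE_SET_OR_INTERRUPTED

    // Early return (or break/continue) from the enclosing function:
    if (auto sfp = hangPoint.scoped(); MONGO_unlikely(sfp.isActive())) {
        const BSONObj& data = sfp.getData();
        if (data["bail"].trueValue())
            return;
    }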
diff --git a/src/mongo/util/fail_point_registry.cpp b/src/mongo/util/fail_point_registry.cpp
index 158d6ab8023..f4dfb627a36 100644
--- a/src/mongo/util/fail_point_registry.cpp
+++ b/src/mongo/util/fail_point_registry.cpp
@@ -29,34 +29,32 @@
#include "mongo/util/fail_point_registry.h"
+#include <fmt/format.h>
+
#include "mongo/bson/json.h"
#include "mongo/util/fail_point_server_parameter_gen.h"
#include "mongo/util/fail_point_service.h"
-#include "mongo/util/map_util.h"
-#include "mongo/util/str.h"
namespace mongo {
-constexpr auto kFailPointServerParameterPrefix = "failpoint."_sd;
+using namespace fmt::literals;
FailPointRegistry::FailPointRegistry() : _frozen(false) {}
-Status FailPointRegistry::addFailPoint(const std::string& name, FailPoint* failPoint) {
+Status FailPointRegistry::add(const std::string& name, FailPoint* failPoint) {
if (_frozen) {
return {ErrorCodes::CannotMutateObject, "Registry is already frozen"};
}
-
- if (_fpMap.count(name) > 0) {
- return {ErrorCodes::Error(51006),
- str::stream() << "Fail point already registered: " << name};
+ auto [pos, ok] = _fpMap.insert({name, failPoint});
+ if (!ok) {
+ return {ErrorCodes::Error(51006), "Fail point already registered: {}"_format(name)};
}
-
- _fpMap.insert(make_pair(name, failPoint));
return Status::OK();
}
-FailPoint* FailPointRegistry::getFailPoint(const std::string& name) const {
- return mapFindWithDefault(_fpMap, name, static_cast<FailPoint*>(nullptr));
+FailPoint* FailPointRegistry::find(const std::string& name) const {
+ auto iter = _fpMap.find(name);
+ return (iter == _fpMap.end()) ? nullptr : iter->second;
}
void FailPointRegistry::freeze() {
@@ -64,17 +62,19 @@ void FailPointRegistry::freeze() {
}
void FailPointRegistry::registerAllFailPointsAsServerParameters() {
- for (const auto& it : _fpMap) {
+ for (const auto& [name, ptr] : _fpMap) {
// Intentionally leaked.
- new FailPointServerParameter(it.first, ServerParameterType::kStartupOnly);
+ new FailPointServerParameter(name, ServerParameterType::kStartupOnly);
}
}
+static constexpr auto kFailPointServerParameterPrefix = "failpoint."_sd;
+
FailPointServerParameter::FailPointServerParameter(StringData name, ServerParameterType spt)
- : ServerParameter(kFailPointServerParameterPrefix.toString() + name.toString(), spt),
- _data(getGlobalFailPointRegistry()->getFailPoint(name.toString())) {
+ : ServerParameter("{}{}"_format(kFailPointServerParameterPrefix, name), spt),
+ _data(globalFailPointRegistry().find(name.toString())) {
invariant(name != "failpoint.*", "Failpoint prototype was auto-registered from IDL");
- invariant(_data != nullptr, str::stream() << "Unknown failpoint: " << name);
+ invariant(_data != nullptr, "Unknown failpoint: {}"_format(name));
}
void FailPointServerParameter::append(OperationContext* opCtx,
@@ -95,14 +95,7 @@ Status FailPointServerParameter::setFromString(const std::string& str) {
if (!swParsedOptions.isOK()) {
return swParsedOptions.getStatus();
}
-
- FailPoint::Mode mode;
- FailPoint::ValType val;
- BSONObj data;
- std::tie(mode, val, data) = std::move(swParsedOptions.getValue());
-
- _data->setMode(mode, val, data);
-
+ _data->setMode(std::move(swParsedOptions.getValue()));
return Status::OK();
}
} // namespace mongo
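With parseBSON() now returning a ModeOptions aggregate, callers can forward the parsed result straight into setMode() instead of unpacking a tuple with std::tie. A minimal sketch, assuming cmdObj (the {mode, data} document) and a FailPoint* failPoint are already in scope:

    auto swOptions = mongo::FailPoint::parseBSON(cmdObj);
    if (swOptions.isOK()) {
        failPoint->setMode(std::move(swOptions.getValue()));  // ModeOptions overload
    }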
diff --git a/src/mongo/util/fail_point_registry.h b/src/mongo/util/fail_point_registry.h
index 7446db95eb1..7a77b47faba 100644
--- a/src/mongo/util/fail_point_registry.h
+++ b/src/mongo/util/fail_point_registry.h
@@ -53,12 +53,12 @@ public:
* 51006 - if the given name already exists in this registry.
* CannotMutateObject - if this registry is already frozen.
*/
- Status addFailPoint(const std::string& name, FailPoint* failPoint);
+ Status add(const std::string& name, FailPoint* failPoint);
/**
- * @return the fail point object registered. Returns NULL if it was not registered.
+ * @return the fail point object registered, or nullptr if it was not registered.
*/
- FailPoint* getFailPoint(const std::string& name) const;
+ FailPoint* find(const std::string& name) const;
/**
* Freezes this registry from being modified.
diff --git a/src/mongo/util/fail_point_service.cpp b/src/mongo/util/fail_point_service.cpp
index 77068f82100..44d8c69ae65 100644
--- a/src/mongo/util/fail_point_service.cpp
+++ b/src/mongo/util/fail_point_service.cpp
@@ -38,49 +38,34 @@
namespace mongo {
-using std::unique_ptr;
-
MONGO_FAIL_POINT_DEFINE(dummy); // used by tests in jstests/fail_point
-unique_ptr<FailPointRegistry> _fpRegistry(nullptr);
-
-MONGO_INITIALIZER_GENERAL(FailPointRegistry,
- MONGO_NO_PREREQUISITES,
- ("BeginGeneralStartupOptionRegistration"))
-(InitializerContext* context) {
- _fpRegistry.reset(new FailPointRegistry());
- return Status::OK();
-}
+MONGO_INITIALIZER_GROUP(FailPointRegistry, (), ("BeginStartupOptionHandling"));
-MONGO_INITIALIZER_GENERAL(AllFailPointsRegistered, MONGO_NO_PREREQUISITES, MONGO_NO_DEPENDENTS)
+MONGO_INITIALIZER_GENERAL(AllFailPointsRegistered, (), ())
(InitializerContext* context) {
- _fpRegistry->freeze();
+ globalFailPointRegistry().freeze();
return Status::OK();
}
-FailPointRegistry* getGlobalFailPointRegistry() {
- return _fpRegistry.get();
+FailPointRegistry& globalFailPointRegistry() {
+ static auto& p = *new FailPointRegistry();
+ return p;
}
void setGlobalFailPoint(const std::string& failPointName, const BSONObj& cmdObj) {
- FailPointRegistry* registry = getGlobalFailPointRegistry();
- FailPoint* failPoint = registry->getFailPoint(failPointName);
+ FailPoint* failPoint = globalFailPointRegistry().find(failPointName);
if (failPoint == nullptr)
uasserted(ErrorCodes::FailPointSetFailed, failPointName + " not found");
- FailPoint::Mode mode;
- FailPoint::ValType val;
- BSONObj data;
- std::tie(mode, val, data) = uassertStatusOK(FailPoint::parseBSON(cmdObj));
-
- failPoint->setMode(mode, val, data);
+ failPoint->setMode(uassertStatusOK(FailPoint::parseBSON(cmdObj)));
warning() << "failpoint: " << failPointName << " set to: " << failPoint->toBSON();
}
FailPointEnableBlock::FailPointEnableBlock(const std::string& failPointName)
: _failPointName(failPointName) {
- _failPoint = getGlobalFailPointRegistry()->getFailPoint(failPointName);
+ _failPoint = globalFailPointRegistry().find(failPointName);
invariant(_failPoint != nullptr);
_failPoint->setMode(FailPoint::alwaysOn);
warning() << "failpoint: " << failPointName << " set to: " << _failPoint->toBSON();
@@ -88,7 +73,7 @@ FailPointEnableBlock::FailPointEnableBlock(const std::string& failPointName)
FailPointEnableBlock::FailPointEnableBlock(const std::string& failPointName, const BSONObj& data)
: _failPointName(failPointName) {
- _failPoint = getGlobalFailPointRegistry()->getFailPoint(failPointName);
+ _failPoint = globalFailPointRegistry().find(failPointName);
invariant(_failPoint != nullptr);
_failPoint->setMode(FailPoint::alwaysOn, 0, data);
warning() << "failpoint: " << failPointName << " set to: " << _failPoint->toBSON();
diff --git a/src/mongo/util/fail_point_service.h b/src/mongo/util/fail_point_service.h
index 64c511942fc..8f7f3261b13 100644
--- a/src/mongo/util/fail_point_service.h
+++ b/src/mongo/util/fail_point_service.h
@@ -30,15 +30,13 @@
#pragma once
#include "mongo/base/init.h"
+#include "mongo/util/assert_util.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/fail_point_registry.h"
namespace mongo {
-/**
- * @return the global fail point registry.
- */
-FailPointRegistry* getGlobalFailPointRegistry();
+FailPointRegistry& globalFailPointRegistry();
/**
* Set a fail point in the global registry to a given value via BSON
@@ -46,29 +44,27 @@ FailPointRegistry* getGlobalFailPointRegistry();
*/
void setGlobalFailPoint(const std::string& failPointName, const BSONObj& cmdObj);
+struct FailPointRegisterer {
+ FailPointRegisterer(const std::string& name, FailPoint* fp) {
+ uassertStatusOK(globalFailPointRegistry().add(name, fp));
+ }
+};
+
/**
* Convenience macro for defining a fail point. Must be used at namespace scope.
* Note: that means never at local scope (inside functions) or class scope.
* NOTE: Never use in header files, only sources.
*/
-#define MONGO_FAIL_POINT_DEFINE(fp) \
- ::mongo::FailPoint fp; \
- MONGO_INITIALIZER_GENERAL(fp, ("FailPointRegistry"), ("AllFailPointsRegistered")) \
- (::mongo::InitializerContext * context) { \
- return ::mongo::getGlobalFailPointRegistry()->addFailPoint(#fp, &fp); \
- }
-
-/**
- * Convenience macro for declaring a fail point in a header.
- */
-#define MONGO_FAIL_POINT_DECLARE(fp) extern ::mongo::FailPoint fp;
+#define MONGO_FAIL_POINT_DEFINE(fp) \
+ ::mongo::FailPoint fp; \
+ ::mongo::FailPointRegisterer fp##failPointRegisterer(#fp, &fp);
/**
* Convenience class for enabling a failpoint and disabling it as this goes out of scope.
*/
class FailPointEnableBlock {
public:
- FailPointEnableBlock(const std::string& failPointName);
+ explicit FailPointEnableBlock(const std::string& failPointName);
FailPointEnableBlock(const std::string& failPointName, const BSONObj& cmdObj);
~FailPointEnableBlock();
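FailPointEnableBlock is unchanged in spirit: per the class comment above, it enables the named fail point on construction and disables it as the block goes out of scope. A sketch of the RAII usage in a test; "dummy" is the fail point defined in fail_point_service.cpp:

    {
        mongo::FailPointEnableBlock fpBlock("dummy");
        // or: mongo::FailPointEnableBlock fpBlock("dummy", BSON("x" << 1));
        // code under test runs with the fail point set to alwaysOn here
    }  // ~FailPointEnableBlock() turns the fail point back off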
diff --git a/src/mongo/util/fail_point_test.cpp b/src/mongo/util/fail_point_test.cpp
index 0a32ec45777..1880b0d18a1 100644
--- a/src/mongo/util/fail_point_test.cpp
+++ b/src/mongo/util/fail_point_test.cpp
@@ -46,7 +46,6 @@
using mongo::BSONObj;
using mongo::FailPoint;
using mongo::FailPointEnableBlock;
-using mongo::getGlobalFailPointRegistry;
namespace stdx = mongo::stdx;
namespace mongo_test {
@@ -60,7 +59,7 @@ TEST(FailPoint, AlwaysOn) {
failPoint.setMode(FailPoint::alwaysOn);
ASSERT(failPoint.shouldFail());
- MONGO_FAIL_POINT_BLOCK(failPoint, scopedFp) {
+ if (auto scopedFp = failPoint.scoped(); MONGO_unlikely(scopedFp.isActive())) {
ASSERT(scopedFp.getData().isEmpty());
}
@@ -85,11 +84,7 @@ TEST(FailPoint, NTimes) {
TEST(FailPoint, BlockOff) {
FailPoint failPoint;
bool called = false;
-
- MONGO_FAIL_POINT_BLOCK(failPoint, scopedFp) {
- called = true;
- }
-
+ failPoint.execute([&](const BSONObj&) { called = true; });
ASSERT_FALSE(called);
}
@@ -98,9 +93,7 @@ TEST(FailPoint, BlockAlwaysOn) {
failPoint.setMode(FailPoint::alwaysOn);
bool called = false;
- MONGO_FAIL_POINT_BLOCK(failPoint, scopedFp) {
- called = true;
- }
+ failPoint.execute([&](const BSONObj&) { called = true; });
ASSERT(called);
}
@@ -111,9 +104,7 @@ TEST(FailPoint, BlockNTimes) {
size_t counter = 0;
for (size_t x = 0; x < 10; x++) {
- MONGO_FAIL_POINT_BLOCK(failPoint, scopedFp) {
- counter++;
- }
+ failPoint.execute([&](auto&&...) { counter++; });
}
ASSERT_EQUALS(1U, counter);
@@ -125,9 +116,8 @@ TEST(FailPoint, BlockWithException) {
bool threw = false;
try {
- MONGO_FAIL_POINT_BLOCK(failPoint, scopedFp) {
- throw std::logic_error("BlockWithException threw");
- }
+ failPoint.execute(
+ [&](const BSONObj&) { throw std::logic_error("BlockWithException threw"); });
} catch (const std::logic_error&) {
threw = true;
}
@@ -142,9 +132,7 @@ TEST(FailPoint, SetGetParam) {
FailPoint failPoint;
failPoint.setMode(FailPoint::alwaysOn, 0, BSON("x" << 20));
- MONGO_FAIL_POINT_BLOCK(failPoint, scopedFp) {
- ASSERT_EQUALS(20, scopedFp.getData()["x"].numberInt());
- }
+ failPoint.execute([&](const BSONObj& data) { ASSERT_EQUALS(20, data["x"].numberInt()); });
}
class FailPointStress : public mongo::unittest::Test {
@@ -181,9 +169,7 @@ public:
private:
void blockTask() {
while (true) {
- MONGO_FAIL_POINT_BLOCK(_fp, scopedFp) {
- const mongo::BSONObj& data = scopedFp.getData();
-
+ _fp.execute([](const BSONObj& data) {
// Expanded ASSERT_EQUALS since the error is not being
// printed out properly
if (data["a"].numberInt() != 44) {
@@ -191,7 +177,7 @@ private:
<< " - data: " << data << std::endl;
ASSERT(false);
}
- }
+ });
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown)
@@ -202,9 +188,7 @@ private:
void blockWithExceptionTask() {
while (true) {
try {
- MONGO_FAIL_POINT_BLOCK(_fp, scopedFp) {
- const mongo::BSONObj& data = scopedFp.getData();
-
+ _fp.execute([](const BSONObj& data) {
if (data["a"].numberInt() != 44) {
mongo::error() << "blockWithExceptionTask thread detected anomaly"
<< " - data: " << data << std::endl;
@@ -212,7 +196,7 @@ private:
}
throw std::logic_error("blockWithExceptionTask threw");
- }
+ });
} catch (const std::logic_error&) {
}
@@ -224,7 +208,7 @@ private:
void simpleTask() {
while (true) {
- static_cast<void>(MONGO_FAIL_POINT(_fp));
+ static_cast<void>(MONGO_unlikely(_fp.shouldFail()));
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_inShutdown)
break;
@@ -403,7 +387,7 @@ TEST(FailPoint, parseBSONValidDataSucceeds) {
}
TEST(FailPoint, FailPointBlockBasicTest) {
- auto failPoint = getGlobalFailPointRegistry()->getFailPoint("dummy");
+ auto failPoint = mongo::globalFailPointRegistry().find("dummy");
ASSERT_FALSE(failPoint->shouldFail());
@@ -418,33 +402,25 @@ TEST(FailPoint, FailPointBlockBasicTest) {
TEST(FailPoint, FailPointBlockIfBasicTest) {
FailPoint failPoint;
failPoint.setMode(FailPoint::nTimes, 1, BSON("skip" << true));
-
{
bool hit = false;
-
- MONGO_FAIL_POINT_BLOCK_IF(failPoint, scopedFp, [&](const BSONObj& obj) {
- hit = obj["skip"].trueValue();
- return false;
- }) {
- ASSERT(!"shouldn't get here");
- }
-
+ failPoint.executeIf([](const BSONObj&) { ASSERT(!"shouldn't get here"); },
+ [&hit](const BSONObj& obj) {
+ hit = obj["skip"].trueValue();
+ return false;
+ });
ASSERT(hit);
}
-
{
bool hit = false;
-
- MONGO_FAIL_POINT_BLOCK_IF(failPoint, scopedFp, [](auto) { return true; }) {
- hit = true;
- ASSERT(!scopedFp.getData().isEmpty());
- }
-
+ failPoint.executeIf(
+ [&hit](const BSONObj& data) {
+ hit = true;
+ ASSERT(!data.isEmpty());
+ },
+ [](const BSONObj&) { return true; });
ASSERT(hit);
}
-
- MONGO_FAIL_POINT_BLOCK_IF(failPoint, scopedFp, [](auto) { return true; }) {
- ASSERT(!"shouldn't get here");
- }
+ failPoint.executeIf([](auto&&) { ASSERT(!"shouldn't get here"); }, [](auto&&) { return true; });
}
} // namespace mongo_test
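
For reference, a minimal sketch of the replacement FailPoint API exercised by the tests above; the fail point name, function name, and include path here are illustrative assumptions, not part of this commit:

#include "mongo/util/fail_point.h"  // assumed header path for this sketch

namespace {

mongo::FailPoint exampleFailPoint;  // illustrative; not a fail point defined by this commit

void exerciseExampleFailPoint() {
    using mongo::BSONObj;

    // MONGO_FAIL_POINT(fp) becomes MONGO_unlikely(fp.shouldFail()).
    if (MONGO_unlikely(exampleFailPoint.shouldFail())) {
        // failure path
    }

    // MONGO_FAIL_POINT_BLOCK(fp, scopedFp) becomes fp.execute(cb); the callback
    // receives the configured BSON payload, replacing scopedFp.getData().
    exampleFailPoint.execute([](const BSONObj& data) {
        // runs only while the fail point is active
    });

    // MONGO_FAIL_POINT_BLOCK_IF(fp, scopedFp, pred) becomes fp.executeIf(cb, pred).
    exampleFailPoint.executeIf(
        [](const BSONObj& data) { /* runs when active and pred returns true */ },
        [](const BSONObj& data) { return data["skip"].trueValue(); });
}

}  // namespace

Because the callback is handed the BSON payload directly, the scoped object and its getData() call drop out of the call sites, as in the test rewrites above.
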
diff --git a/src/mongo/util/net/sock.cpp b/src/mongo/util/net/sock.cpp
index b7cecfb3324..bd183130d5a 100644
--- a/src/mongo/util/net/sock.cpp
+++ b/src/mongo/util/net/sock.cpp
@@ -395,7 +395,7 @@ int Socket::_send(const char* data, int len, const char* context) {
void Socket::send(const char* data, int len, const char* context) {
while (len > 0) {
int ret = -1;
- if (MONGO_FAIL_POINT(throwSockExcep)) {
+ if (MONGO_unlikely(throwSockExcep.shouldFail())) {
#if defined(_WIN32)
WSASetLastError(WSAENETUNREACH);
#else
@@ -458,7 +458,7 @@ void Socket::send(const vector<pair<char*, int>>& data, const char* context) {
while (meta.msg_iovlen > 0) {
int ret = -1;
- if (MONGO_FAIL_POINT(throwSockExcep)) {
+ if (MONGO_unlikely(throwSockExcep.shouldFail())) {
#if defined(_WIN32)
WSASetLastError(WSAENETUNREACH);
#else
@@ -491,7 +491,7 @@ void Socket::send(const vector<pair<char*, int>>& data, const char* context) {
void Socket::recv(char* buf, int len) {
while (len > 0) {
int ret = -1;
- if (MONGO_FAIL_POINT(throwSockExcep)) {
+ if (MONGO_unlikely(throwSockExcep.shouldFail())) {
#if defined(_WIN32)
WSASetLastError(WSAENETUNREACH);
#else
diff --git a/src/mongo/util/net/sock_test.cpp b/src/mongo/util/net/sock_test.cpp
index d9b3ec8ee84..0d6d4c69017 100644
--- a/src/mongo/util/net/sock_test.cpp
+++ b/src/mongo/util/net/sock_test.cpp
@@ -208,7 +208,7 @@ const char kSocketFailPointName[] = "throwSockExcep";
class SocketFailPointTest : public unittest::Test {
public:
SocketFailPointTest()
- : _failPoint(getGlobalFailPointRegistry()->getFailPoint(kSocketFailPointName)),
+ : _failPoint(globalFailPointRegistry().find(kSocketFailPointName)),
_sockets(socketPair(SOCK_STREAM)) {
ASSERT_TRUE(_failPoint != nullptr);
ASSERT_TRUE(_sockets.first);
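
Similarly, a minimal sketch of the registry lookup change used by SocketFailPointTest above: getGlobalFailPointRegistry()->getFailPoint(name) becomes globalFailPointRegistry().find(name), which returns a pointer that is null for unknown names. The helper below is illustrative, not part of this commit:

#include "mongo/util/fail_point.h"  // assumed header path for this sketch

// Illustrative helper: toggle the "throwSockExcep" fail point exercised in sock.cpp above.
void setThrowSockExcep(bool enabled) {
    if (auto* fp = mongo::globalFailPointRegistry().find("throwSockExcep")) {
        fp->setMode(enabled ? mongo::FailPoint::alwaysOn : mongo::FailPoint::off);
    }
}
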
diff --git a/src/mongo/util/net/ssl/detail/io.hpp b/src/mongo/util/net/ssl/detail/io.hpp
index d6e376b00f0..f6ff6edd1d8 100644
--- a/src/mongo/util/net/ssl/detail/io.hpp
+++ b/src/mongo/util/net/ssl/detail/io.hpp
@@ -29,7 +29,7 @@ namespace ssl {
namespace detail {
// Failpoint to force small reads of data to exercise the SChannel buffering code
-MONGO_FAIL_POINT_DECLARE(smallTLSReads);
+extern ::mongo::FailPoint smallTLSReads;
template <typename Stream, typename Operation>
std::size_t io(Stream& next_layer, stream_core& core, const Operation& op, asio::error_code& ec) {
@@ -42,7 +42,7 @@ std::size_t io(Stream& next_layer, stream_core& core, const Operation& op, asio:
// the underlying transport.
if (core.input_.size() == 0) {
// Read tiny amounts of TLS data to test the SChannel buffering code
- if (MONGO_FAIL_POINT(smallTLSReads)) {
+ if (MONGO_unlikely(smallTLSReads.shouldFail())) {
core.input_ =
asio::buffer(core.input_buffer_,
next_layer.read_some(