summaryrefslogtreecommitdiff
path: root/src/mongo/db
diff options
context:
space:
mode:
Diffstat (limited to 'src/mongo/db')
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp2
-rw-r--r--src/mongo/db/catalog/collection_impl.cpp2
-rw-r--r--src/mongo/db/catalog/database_impl.cpp2
-rw-r--r--src/mongo/db/catalog/drop_database.cpp2
-rw-r--r--src/mongo/db/catalog/index_create_impl.cpp8
-rw-r--r--src/mongo/db/catalog/index_key_validate.cpp2
-rw-r--r--src/mongo/db/catalog_raii.cpp2
-rw-r--r--src/mongo/db/cloner.cpp2
-rw-r--r--src/mongo/db/commands/generic_servers.cpp2
-rw-r--r--src/mongo/db/commands/getmore_cmd.cpp4
-rw-r--r--src/mongo/db/commands/set_feature_compatibility_version_command.cpp4
-rw-r--r--src/mongo/db/commands/txn_cmds.cpp2
-rw-r--r--src/mongo/db/commands/validate.cpp2
-rw-r--r--src/mongo/db/db.cpp2
-rw-r--r--src/mongo/db/exec/group.cpp321
-rw-r--r--src/mongo/db/key_generator.cpp2
-rw-r--r--src/mongo/db/keys_collection_manager.cpp2
-rw-r--r--src/mongo/db/op_observer_impl.cpp2
-rw-r--r--src/mongo/db/operation_context.cpp6
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp8
-rw-r--r--src/mongo/db/query/find.cpp2
-rw-r--r--src/mongo/db/query/find_common.cpp8
-rw-r--r--src/mongo/db/query/find_common.h8
-rw-r--r--src/mongo/db/query/plan_executor.cpp2
-rw-r--r--src/mongo/db/query/plan_yield_policy.cpp2
-rw-r--r--src/mongo/db/query/query_yield.cpp4
-rw-r--r--src/mongo/db/repl/apply_ops.cpp2
-rw-r--r--src/mongo/db/repl/bgsync.cpp2
-rw-r--r--src/mongo/db/repl/collection_cloner.cpp8
-rw-r--r--src/mongo/db/repl/database_cloner.cpp4
-rw-r--r--src/mongo/db/repl/do_txn.cpp2
-rw-r--r--src/mongo/db/repl/initial_syncer.cpp12
-rw-r--r--src/mongo/db/repl/initial_syncer.h8
-rw-r--r--src/mongo/db/repl/oplog.cpp2
-rw-r--r--src/mongo/db/repl/oplog_fetcher.cpp2
-rw-r--r--src/mongo/db/repl/oplog_fetcher.h2
-rw-r--r--src/mongo/db/repl/repl_set_commands.cpp2
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.cpp2
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp6
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp4
-rw-r--r--src/mongo/db/repl/replication_info.cpp2
-rw-r--r--src/mongo/db/repl/roll_back_local_operations.cpp4
-rw-r--r--src/mongo/db/repl/roll_back_local_operations.h4
-rw-r--r--src/mongo/db/repl/sync_tail.cpp2
-rw-r--r--src/mongo/db/repl/topology_coordinator.cpp4
-rw-r--r--src/mongo/db/s/config/configsvr_drop_collection_command.cpp2
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp2
-rw-r--r--src/mongo/db/s/metadata_manager.cpp2
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp18
-rw-r--r--src/mongo/db/s/migration_source_manager.cpp8
-rw-r--r--src/mongo/db/s/move_chunk_command.cpp14
-rw-r--r--src/mongo/db/service_entry_point_common.cpp8
-rw-r--r--src/mongo/db/service_entry_point_common.h6
-rw-r--r--src/mongo/db/session.cpp4
-rw-r--r--src/mongo/db/storage/mmap_v1/file_allocator.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp4
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h6
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp2
-rw-r--r--src/mongo/db/write_concern.cpp2
65 files changed, 446 insertions, 125 deletions
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index ac8ad7e3d94..ba2f8aed1a7 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -64,7 +64,7 @@ namespace {
// Causes the server to hang when it attempts to assign UUIDs to the provided database (or all
// databases if none are provided).
-MONGO_FP_DECLARE(hangBeforeDatabaseUpgrade);
+MONGO_FAIL_POINT_DEFINE(hangBeforeDatabaseUpgrade);
struct CollModRequest {
const IndexDescriptor* idx = nullptr;
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index 6ba70c24a06..09d0b69ac90 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -101,7 +101,7 @@ MONGO_REGISTER_SHIM(Collection::parseValidationAction)
namespace {
// Used below to fail during inserts.
-MONGO_FP_DECLARE(failCollectionInserts);
+MONGO_FAIL_POINT_DEFINE(failCollectionInserts);
// Uses the collator factory to convert the BSON representation of a collator to a
// CollatorInterface. Returns null if the BSONObj is empty. We expect the stored collation to be
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 0055732ad54..9fc0d082f50 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -89,7 +89,7 @@ MONGO_REGISTER_SHIM(Database::makeImpl)
}
namespace {
-MONGO_FP_DECLARE(hangBeforeLoggingCreateCollection);
+MONGO_FAIL_POINT_DEFINE(hangBeforeLoggingCreateCollection);
} // namespace
using std::endl;
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index 5108bef582d..58ae3624a62 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -51,7 +51,7 @@
namespace mongo {
-MONGO_FP_DECLARE(dropDatabaseHangAfterLastCollectionDrop);
+MONGO_FAIL_POINT_DEFINE(dropDatabaseHangAfterLastCollectionDrop);
namespace {
diff --git a/src/mongo/db/catalog/index_create_impl.cpp b/src/mongo/db/catalog/index_create_impl.cpp
index 76cef288fd5..2c332c9f1c7 100644
--- a/src/mongo/db/catalog/index_create_impl.cpp
+++ b/src/mongo/db/catalog/index_create_impl.cpp
@@ -66,10 +66,10 @@ using std::unique_ptr;
using std::string;
using std::endl;
-MONGO_FP_DECLARE(crashAfterStartingIndexBuild);
-MONGO_FP_DECLARE(hangAfterStartingIndexBuild);
-MONGO_FP_DECLARE(hangAfterStartingIndexBuildUnlocked);
-MONGO_FP_DECLARE(slowBackgroundIndexBuild);
+MONGO_FAIL_POINT_DEFINE(crashAfterStartingIndexBuild);
+MONGO_FAIL_POINT_DEFINE(hangAfterStartingIndexBuild);
+MONGO_FAIL_POINT_DEFINE(hangAfterStartingIndexBuildUnlocked);
+MONGO_FAIL_POINT_DEFINE(slowBackgroundIndexBuild);
AtomicInt32 maxIndexBuildMemoryUsageMegabytes(500);
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index 0a027b3c2a5..5e05513387b 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -60,7 +60,7 @@ namespace {
// When the skipIndexCreateFieldNameValidation failpoint is enabled, validation for index field
// names will be disabled. This will allow for creation of indexes with invalid field names in their
// specification.
-MONGO_FP_DECLARE(skipIndexCreateFieldNameValidation);
+MONGO_FAIL_POINT_DEFINE(skipIndexCreateFieldNameValidation);
static const std::set<StringData> allowedFieldNames = {
IndexDescriptor::k2dIndexMaxFieldName,
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 78301d8c6d1..162e96020a2 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -38,7 +38,7 @@
namespace mongo {
namespace {
-MONGO_FP_DECLARE(setAutoGetCollectionWait);
+MONGO_FAIL_POINT_DEFINE(setAutoGetCollectionWait);
void uassertLockTimeout(std::string resourceName,
LockMode lockMode,
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 8af7e4f452d..b1344f7cdcc 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -81,7 +81,7 @@ using std::vector;
using IndexVersion = IndexDescriptor::IndexVersion;
MONGO_EXPORT_SERVER_PARAMETER(skipCorruptDocumentsWhenCloning, bool, false);
-MONGO_FP_DECLARE(movePrimaryFailPoint);
+MONGO_FAIL_POINT_DEFINE(movePrimaryFailPoint);
BSONElement getErrField(const BSONObj& o);
diff --git a/src/mongo/db/commands/generic_servers.cpp b/src/mongo/db/commands/generic_servers.cpp
index 3fbaea7c2a1..8d53be28ef5 100644
--- a/src/mongo/db/commands/generic_servers.cpp
+++ b/src/mongo/db/commands/generic_servers.cpp
@@ -138,7 +138,7 @@ public:
} hostInfoCmd;
-MONGO_FP_DECLARE(crashOnShutdown);
+MONGO_FAIL_POINT_DEFINE(crashOnShutdown);
int* volatile illegalAddress; // NOLINT - used for fail point only
class CmdGetCmdLineOpts : public BasicCommand {
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 2877c72bca0..00ddd4ac7da 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -66,9 +66,9 @@ namespace mongo {
namespace {
-MONGO_FP_DECLARE(rsStopGetMoreCmd);
+MONGO_FAIL_POINT_DEFINE(rsStopGetMoreCmd);
-MONGO_FP_DECLARE(waitWithPinnedCursorDuringGetMoreBatch);
+MONGO_FAIL_POINT_DEFINE(waitWithPinnedCursorDuringGetMoreBatch);
/**
* Validates that the lsid of 'opCtx' matches that of 'cursor'. This must be called after
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 892f8f6957a..1d6ca50d718 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -56,8 +56,8 @@ namespace mongo {
namespace {
-MONGO_FP_DECLARE(featureCompatibilityDowngrade);
-MONGO_FP_DECLARE(featureCompatibilityUpgrade);
+MONGO_FAIL_POINT_DEFINE(featureCompatibilityDowngrade);
+MONGO_FAIL_POINT_DEFINE(featureCompatibilityUpgrade);
/**
* Sets the minimum allowed version for the cluster. If it is 3.4, then the node should not use 3.6
* features.
diff --git a/src/mongo/db/commands/txn_cmds.cpp b/src/mongo/db/commands/txn_cmds.cpp
index 9e7a05bba3f..23e65971b2a 100644
--- a/src/mongo/db/commands/txn_cmds.cpp
+++ b/src/mongo/db/commands/txn_cmds.cpp
@@ -93,7 +93,7 @@ public:
} commitTxn;
-MONGO_FP_DECLARE(pauseAfterTransactionPrepare);
+MONGO_FAIL_POINT_DEFINE(pauseAfterTransactionPrepare);
// TODO: This is a stub for testing storage prepare functionality.
class CmdPrepareTxn : public BasicCommand {
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 4ca9fe21135..273865e784d 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -49,7 +49,7 @@ using std::endl;
using std::string;
using std::stringstream;
-MONGO_FP_DECLARE(validateCmdCollectionNotValid);
+MONGO_FAIL_POINT_DEFINE(validateCmdCollectionNotValid);
namespace {
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 6cb7c322667..a2b42097e7c 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -271,7 +271,7 @@ void initWireSpec() {
spec.isInternalClient = true;
}
-MONGO_FP_DECLARE(shutdownAtStartup);
+MONGO_FAIL_POINT_DEFINE(shutdownAtStartup);
ExitCode _initAndListen(int listenPort) {
Client::initThread("initandlisten");
diff --git a/src/mongo/db/exec/group.cpp b/src/mongo/db/exec/group.cpp
new file mode 100644
index 00000000000..e5fff4cfdee
--- /dev/null
+++ b/src/mongo/db/exec/group.cpp
@@ -0,0 +1,321 @@
+/**
+ * Copyright (C) 2014 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/exec/group.h"
+
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/bson/dotted_path_support.h"
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/client.h"
+#include "mongo/db/curop_failpoint_helpers.h"
+#include "mongo/db/exec/scoped_timer.h"
+#include "mongo/db/exec/working_set_common.h"
+#include "mongo/stdx/memory.h"
+
+namespace mongo {
+
+// Forces a hang in the javascript execution while initializing the group stage.
+MONGO_FAIL_POINT_DEFINE(hangInGroupReduceJs);
+
+using std::unique_ptr;
+using std::vector;
+using stdx::make_unique;
+
+namespace dps = ::mongo::dotted_path_support;
+
+namespace {
+
+// Helper function that extracts the group key from a BSONObj.
+Status getKey(
+ const BSONObj& obj, const BSONObj& keyPattern, ScriptingFunction func, Scope* s, BSONObj* key) {
+ if (func) {
+ BSONObjBuilder b(obj.objsize() + 32);
+ b.append("0", obj);
+ const BSONObj& k = b.obj();
+ try {
+ s->invoke(func, &k, 0);
+ } catch (const AssertionException& e) {
+ return e.toStatus("Failed to invoke group keyf function: ");
+ }
+ int type = s->type("__returnValue");
+ if (type != Object) {
+ return Status(ErrorCodes::BadValue, "return of $key has to be an object");
+ }
+ *key = s->getObject("__returnValue");
+ return Status::OK();
+ }
+ *key = dps::extractElementsBasedOnTemplate(obj, keyPattern, true).getOwned();
+ return Status::OK();
+}
+
+} // namespace
+
+// static
+const char* GroupStage::kStageType = "GROUP";
+
+GroupStage::GroupStage(OperationContext* opCtx,
+ const GroupRequest& request,
+ WorkingSet* workingSet,
+ PlanStage* child)
+ : PlanStage(kStageType, opCtx),
+ _request(request),
+ _ws(workingSet),
+ _specificStats(),
+ _groupState(GroupState_Initializing),
+ _reduceFunction(0),
+ _keyFunction(0),
+ _groupMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<int>()) {
+ _children.emplace_back(child);
+}
+
+Status GroupStage::initGroupScripting() {
+ // Initialize _scope.
+ const std::string userToken =
+ AuthorizationSession::get(Client::getCurrent())->getAuthenticatedUserNamesToken();
+
+ _scope = getGlobalScriptEngine()->getPooledScope(
+ getOpCtx(), _request.ns.db().toString(), "group" + userToken);
+ if (!_request.reduceScope.isEmpty()) {
+ _scope->init(&_request.reduceScope);
+ }
+ _scope->setObject("$initial", _request.initial, true);
+
+ try {
+ _scope->exec(
+ "$reduce = " + _request.reduceCode, "group reduce init", false, true, true, 2 * 1000);
+ } catch (const AssertionException& e) {
+ return e.toStatus("Failed to initialize group reduce function: ");
+ }
+
+ try {
+ _scope->exec("$arr = [];",
+ "group reduce init 2",
+ false, // printResult
+ true, // reportError
+ true, // assertOnError
+ 2 * 1000);
+ } catch (const AssertionException& e) {
+ return e.toStatus("Failed to initialize group reduce function: ");
+ }
+
+ // Initialize _reduceFunction.
+ _reduceFunction = _scope->createFunction(
+ "function(){ "
+ " if ( $arr[n] == null ){ "
+ " next = {}; "
+ " Object.extend( next , $key ); "
+ " Object.extend( next , $initial , true ); "
+ " $arr[n] = next; "
+ " next = null; "
+ " } "
+ " $reduce( obj , $arr[n] ); "
+ "}");
+
+ // Initialize _keyFunction, if a key function was provided.
+ if (_request.keyFunctionCode.size()) {
+ _keyFunction = _scope->createFunction(_request.keyFunctionCode.c_str());
+ }
+
+ return Status::OK();
+}
+
+Status GroupStage::processObject(const BSONObj& obj) {
+ BSONObj key;
+ Status getKeyStatus = getKey(obj, _request.keyPattern, _keyFunction, _scope.get(), &key);
+ if (!getKeyStatus.isOK()) {
+ return getKeyStatus;
+ }
+
+ _scope->advanceGeneration();
+
+ int& n = _groupMap[key];
+ if (n == 0) {
+ n = _groupMap.size();
+ _scope->setObject("$key", key, true);
+ if (n > 20000) {
+ return Status(ErrorCodes::BadValue, "group() can't handle more than 20000 unique keys");
+ }
+ }
+
+ BSONObj objCopy = obj.getOwned();
+ _scope->setObject("obj", objCopy, true);
+ _scope->setNumber("n", n - 1);
+
+ boost::optional<std::string> oldMsg;
+ if (MONGO_FAIL_POINT(hangInGroupReduceJs)) {
+ oldMsg = CurOpFailpointHelpers::updateCurOpMsg(getOpCtx(), "hangInGroupReduceJs");
+ }
+ auto resetMsgGuard = MakeGuard([&] {
+ if (oldMsg) {
+ CurOpFailpointHelpers::updateCurOpMsg(getOpCtx(), *oldMsg);
+ }
+ });
+ try {
+ _scope->invoke(_reduceFunction, 0, 0, 0, true /*assertOnError*/);
+ } catch (const AssertionException& e) {
+ return e.toStatus("Failed to invoke group reduce function: ");
+ }
+
+ return Status::OK();
+}
+
+StatusWith<BSONObj> GroupStage::finalizeResults() {
+ if (!_request.finalize.empty()) {
+ try {
+ _scope->exec("$finalize = " + _request.finalize,
+ "group finalize init",
+ false, // printResult
+ true, // reportError
+ true, // assertOnError
+ 2 * 1000);
+ } catch (const AssertionException& e) {
+ return e.toStatus("Failed to initialize group finalize function: ");
+ }
+ ScriptingFunction finalizeFunction = _scope->createFunction(
+ "function(){ "
+ " for(var i=0; i < $arr.length; i++){ "
+ " var ret = $finalize($arr[i]); "
+ " if (ret !== undefined) "
+ " $arr[i] = ret; "
+ " } "
+ "}");
+ try {
+ _scope->invoke(finalizeFunction, 0, 0, 0, true /*assertOnError*/);
+ } catch (const AssertionException& e) {
+ return e.toStatus("Failed to invoke group finalize function: ");
+ }
+ }
+
+ _specificStats.nGroups = _groupMap.size();
+
+ BSONObj results = _scope->getObject("$arr").getOwned();
+
+ try {
+ _scope->exec("$arr = [];",
+ "group clean up",
+ false, // printResult
+ true, // reportError
+ true, // assertOnError
+ 2 * 1000);
+ } catch (const AssertionException& e) {
+ return e.toStatus("Failed to clean up group: ");
+ }
+
+ _scope->gc();
+
+ return results;
+}
+
+PlanStage::StageState GroupStage::doWork(WorkingSetID* out) {
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
+ }
+
+ // On the first call to work(), call initGroupScripting().
+ if (_groupState == GroupState_Initializing) {
+ Status status = initGroupScripting();
+ if (!status.isOK()) {
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ return PlanStage::FAILURE;
+ }
+ _groupState = GroupState_ReadingFromChild;
+ return PlanStage::NEED_TIME;
+ }
+
+ // Otherwise, read from our child.
+ invariant(_groupState == GroupState_ReadingFromChild);
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ StageState state = child()->work(&id);
+
+ if (PlanStage::NEED_TIME == state) {
+ return state;
+ } else if (PlanStage::NEED_YIELD == state) {
+ *out = id;
+ return state;
+ } else if (PlanStage::FAILURE == state || PlanStage::DEAD == state) {
+ // The stage which produces a failure is responsible for allocating a working set member
+ // with error details.
+ invariant(WorkingSet::INVALID_ID != id);
+ *out = id;
+ return state;
+ } else if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = _ws->get(id);
+ // Group queries can't have projections. This means that covering analysis will always
+ // add a fetch. We should always get fetched data, and never just key data.
+ invariant(member->hasObj());
+
+ Status status = processObject(member->obj.value());
+ if (!status.isOK()) {
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ return PlanStage::FAILURE;
+ }
+
+ _ws->free(id);
+
+ return PlanStage::NEED_TIME;
+ } else {
+ // We're done reading from our child.
+ invariant(PlanStage::IS_EOF == state);
+
+ auto results = finalizeResults();
+ if (!results.isOK()) {
+ *out = WorkingSetCommon::allocateStatusMember(_ws, results.getStatus());
+ return PlanStage::FAILURE;
+ }
+
+ // Transition to state "done." Future calls to work() will return IS_EOF.
+ _groupState = GroupState_Done;
+
+ *out = _ws->allocate();
+ WorkingSetMember* member = _ws->get(*out);
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), results.getValue());
+ member->transitionToOwnedObj();
+
+ return PlanStage::ADVANCED;
+ }
+}
+
+bool GroupStage::isEOF() {
+ return _groupState == GroupState_Done;
+}
+
+unique_ptr<PlanStageStats> GroupStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_GROUP);
+ ret->specific = make_unique<GroupStats>(_specificStats);
+ ret->children.emplace_back(child()->getStats());
+ return ret;
+}
+
+const SpecificStats* GroupStage::getSpecificStats() const {
+ return &_specificStats;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/key_generator.cpp b/src/mongo/db/key_generator.cpp
index 588380117dd..627869b335a 100644
--- a/src/mongo/db/key_generator.cpp
+++ b/src/mongo/db/key_generator.cpp
@@ -41,7 +41,7 @@ namespace mongo {
namespace {
-MONGO_FP_DECLARE(disableKeyGeneration);
+MONGO_FAIL_POINT_DEFINE(disableKeyGeneration);
/**
* Inserts a new key to the keys collection.
diff --git a/src/mongo/db/keys_collection_manager.cpp b/src/mongo/db/keys_collection_manager.cpp
index 6b8703ce2a4..6c142de2314 100644
--- a/src/mongo/db/keys_collection_manager.cpp
+++ b/src/mongo/db/keys_collection_manager.cpp
@@ -63,7 +63,7 @@ Milliseconds kMaxRefreshWaitTime(10 * 60 * 1000);
// Prevents the refresher thread from waiting longer than the given number of milliseconds, even on
// a successful refresh.
-MONGO_FP_DECLARE(maxKeyRefreshWaitTimeOverrideMS);
+MONGO_FAIL_POINT_DEFINE(maxKeyRefreshWaitTimeOverrideMS);
/**
* Returns the amount of time to wait until the monitoring thread should attempt to refresh again.
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index 655cc73bbef..40173774735 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -59,7 +59,7 @@ namespace mongo {
using repl::OplogEntry;
namespace {
-MONGO_FP_DECLARE(failCollectionUpdates);
+MONGO_FAIL_POINT_DEFINE(failCollectionUpdates);
const auto getDeleteState = OperationContext::declareDecoration<ShardObserverDeleteState>();
diff --git a/src/mongo/db/operation_context.cpp b/src/mongo/db/operation_context.cpp
index 50209b8d0ff..f878eead361 100644
--- a/src/mongo/db/operation_context.cpp
+++ b/src/mongo/db/operation_context.cpp
@@ -53,13 +53,13 @@ namespace {
// created with a valid non-zero max time will also fail immediately.
//
// This fail point cannot be used with the maxTimeNeverTimeOut fail point.
-MONGO_FP_DECLARE(maxTimeAlwaysTimeOut);
+MONGO_FAIL_POINT_DEFINE(maxTimeAlwaysTimeOut);
// Enabling the maxTimeNeverTimeOut fail point will cause the server to never time out any
// query, command, or getmore operation, regardless of whether a max time is set.
//
// This fail point cannot be used with the maxTimeAlwaysTimeOut fail point.
-MONGO_FP_DECLARE(maxTimeNeverTimeOut);
+MONGO_FAIL_POINT_DEFINE(maxTimeNeverTimeOut);
// Enabling the checkForInterruptFail fail point will start a game of random chance on the
// connection specified in the fail point data, generating an interrupt with a given fixed
@@ -73,7 +73,7 @@ MONGO_FP_DECLARE(maxTimeNeverTimeOut);
// name 'threadName' will generate a kill on the current operation with probability p(.01),
// including interrupt points of nested operations. "chance" must be a double between 0 and 1,
// inclusive.
-MONGO_FP_DECLARE(checkForInterruptFail);
+MONGO_FAIL_POINT_DEFINE(checkForInterruptFail);
} // namespace
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 2ca80f617a2..4450ba31870 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -82,10 +82,10 @@ namespace mongo {
// single type of operation are static functions defined above their caller.
namespace {
-MONGO_FP_DECLARE(failAllInserts);
-MONGO_FP_DECLARE(failAllUpdates);
-MONGO_FP_DECLARE(failAllRemoves);
-MONGO_FP_DECLARE(hangDuringBatchInsert);
+MONGO_FAIL_POINT_DEFINE(failAllInserts);
+MONGO_FAIL_POINT_DEFINE(failAllUpdates);
+MONGO_FAIL_POINT_DEFINE(failAllRemoves);
+MONGO_FAIL_POINT_DEFINE(hangDuringBatchInsert);
void updateRetryStats(OperationContext* opCtx, bool containsRetry) {
if (containsRetry) {
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 823225b8417..249a61349fb 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -72,7 +72,7 @@ using std::unique_ptr;
using stdx::make_unique;
// Failpoint for checking whether we've received a getmore.
-MONGO_FP_DECLARE(failReceivedGetmore);
+MONGO_FAIL_POINT_DEFINE(failReceivedGetmore);
bool shouldSaveCursor(OperationContext* opCtx,
const Collection* collection,
diff --git a/src/mongo/db/query/find_common.cpp b/src/mongo/db/query/find_common.cpp
index f30d395308a..4c9fcdd5f26 100644
--- a/src/mongo/db/query/find_common.cpp
+++ b/src/mongo/db/query/find_common.cpp
@@ -37,13 +37,13 @@
namespace mongo {
-MONGO_FP_DECLARE(waitInFindBeforeMakingBatch);
+MONGO_FAIL_POINT_DEFINE(waitInFindBeforeMakingBatch);
-MONGO_FP_DECLARE(disableAwaitDataForGetMoreCmd);
+MONGO_FAIL_POINT_DEFINE(disableAwaitDataForGetMoreCmd);
-MONGO_FP_DECLARE(waitAfterPinningCursorBeforeGetMoreBatch);
+MONGO_FAIL_POINT_DEFINE(waitAfterPinningCursorBeforeGetMoreBatch);
-MONGO_FP_DECLARE(waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch);
+MONGO_FAIL_POINT_DEFINE(waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch);
const OperationContext::Decoration<AwaitDataState> awaitDataState =
OperationContext::declareDecoration<AwaitDataState>();
diff --git a/src/mongo/db/query/find_common.h b/src/mongo/db/query/find_common.h
index d0a6001d18a..a720d24c71c 100644
--- a/src/mongo/db/query/find_common.h
+++ b/src/mongo/db/query/find_common.h
@@ -55,19 +55,19 @@ class BSONObj;
class QueryRequest;
// Failpoint for making find hang.
-MONGO_FP_FORWARD_DECLARE(waitInFindBeforeMakingBatch);
+MONGO_FAIL_POINT_DECLARE(waitInFindBeforeMakingBatch);
// Failpoint for making getMore not wait for an awaitdata cursor. Allows us to avoid waiting during
// tests.
-MONGO_FP_FORWARD_DECLARE(disableAwaitDataForGetMoreCmd);
+MONGO_FAIL_POINT_DECLARE(disableAwaitDataForGetMoreCmd);
// Enabling this fail point will cause the getMore command to busy wait after pinning the cursor
// but before we have started building the batch, until the fail point is disabled.
-MONGO_FP_FORWARD_DECLARE(waitAfterPinningCursorBeforeGetMoreBatch);
+MONGO_FAIL_POINT_DECLARE(waitAfterPinningCursorBeforeGetMoreBatch);
// Enabling this failpoint will cause the getMore to wait just before it unpins its cursor after it
// has completed building the current batch.
-MONGO_FP_FORWARD_DECLARE(waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch);
+MONGO_FAIL_POINT_DECLARE(waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch);
/**
* Suite of find/getMore related functions used in both the mongod and mongos query paths.
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 3bc8ab9202d..31a1439b6d2 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -73,7 +73,7 @@ struct CappedInsertNotifierData {
namespace {
-MONGO_FP_DECLARE(planExecutorAlwaysFails);
+MONGO_FAIL_POINT_DEFINE(planExecutorAlwaysFails);
/**
* Constructs a PlanYieldPolicy based on 'policy'.
diff --git a/src/mongo/db/query/plan_yield_policy.cpp b/src/mongo/db/query/plan_yield_policy.cpp
index 295d46124e1..b24f1787de1 100644
--- a/src/mongo/db/query/plan_yield_policy.cpp
+++ b/src/mongo/db/query/plan_yield_policy.cpp
@@ -44,7 +44,7 @@
namespace mongo {
namespace {
-MONGO_FP_DECLARE(setInterruptOnlyPlansCheckForInterruptHang);
+MONGO_FAIL_POINT_DEFINE(setInterruptOnlyPlansCheckForInterruptHang);
} // namespace
PlanYieldPolicy::PlanYieldPolicy(PlanExecutor* exec, PlanExecutor::YieldPolicy policy)
diff --git a/src/mongo/db/query/query_yield.cpp b/src/mongo/db/query/query_yield.cpp
index d7ee0271f16..66397c409ca 100644
--- a/src/mongo/db/query/query_yield.cpp
+++ b/src/mongo/db/query/query_yield.cpp
@@ -39,8 +39,8 @@
namespace mongo {
namespace {
-MONGO_FP_DECLARE(setYieldAllLocksHang);
-MONGO_FP_DECLARE(setYieldAllLocksWait);
+MONGO_FAIL_POINT_DEFINE(setYieldAllLocksHang);
+MONGO_FAIL_POINT_DEFINE(setYieldAllLocksWait);
} // namespace
// static
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index e73efba3d41..698b67f4c17 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -63,7 +63,7 @@ constexpr StringData ApplyOps::kOplogApplicationModeFieldName;
namespace {
// If enabled, causes loop in _applyOps() to hang after applying current operation.
-MONGO_FP_DECLARE(applyOpsPauseBetweenOperations);
+MONGO_FAIL_POINT_DEFINE(applyOpsPauseBetweenOperations);
/**
* Return true iff the applyOpsCmd can be executed in a single WriteUnitOfWork.
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 7e5ff744acc..f979975ec55 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -141,7 +141,7 @@ size_t getSize(const BSONObj& o) {
} // namespace
// Failpoint which causes rollback to hang before starting.
-MONGO_FP_DECLARE(rollbackHangBeforeStart);
+MONGO_FAIL_POINT_DEFINE(rollbackHangBeforeStart);
// The count of items in the buffer
static Counter64 bufferCountGauge;
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 553cc1d20bf..57c3b876fff 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -72,19 +72,19 @@ MONGO_EXPORT_SERVER_PARAMETER(numInitialSyncCollectionFindAttempts, int, 3);
// Failpoint which causes initial sync to hang before establishing its cursor to clone the
// 'namespace' collection.
-MONGO_FP_DECLARE(initialSyncHangBeforeCollectionClone);
+MONGO_FAIL_POINT_DEFINE(initialSyncHangBeforeCollectionClone);
// Failpoint which causes initial sync to hang when it has cloned 'numDocsToClone' documents to
// collection 'namespace'.
-MONGO_FP_DECLARE(initialSyncHangDuringCollectionClone);
+MONGO_FAIL_POINT_DEFINE(initialSyncHangDuringCollectionClone);
// Failpoint which causes initial sync to hang after handling the next batch of results from the
// 'AsyncResultsMerger', optionally limited to a specific collection.
-MONGO_FP_DECLARE(initialSyncHangCollectionClonerAfterHandlingBatchResponse);
+MONGO_FAIL_POINT_DEFINE(initialSyncHangCollectionClonerAfterHandlingBatchResponse);
// Failpoint which causes initial sync to hang before establishing the cursors (but after
// listIndexes), optionally limited to a specific collection.
-MONGO_FP_DECLARE(initialSyncHangCollectionClonerBeforeEstablishingCursor);
+MONGO_FAIL_POINT_DEFINE(initialSyncHangCollectionClonerBeforeEstablishingCursor);
BSONObj makeCommandWithUUIDorCollectionName(StringData command,
OptionalCollectionUUID uuid,
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index 74da2fe22be..e2f7d4aa432 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -52,7 +52,7 @@ namespace mongo {
namespace repl {
// Failpoint which causes the initial sync function to hang before running listCollections.
-MONGO_FP_DECLARE(initialSyncHangBeforeListCollections);
+MONGO_FAIL_POINT_DEFINE(initialSyncHangBeforeListCollections);
namespace {
@@ -77,7 +77,7 @@ MONGO_EXPORT_SERVER_PARAMETER(maxNumInitialSyncCollectionClonerCursors, int, 1);
// Failpoint which causes initial sync to hang right after listCollections, but before cloning
// any collections in the 'database' database.
-MONGO_FP_DECLARE(initialSyncHangAfterListCollections);
+MONGO_FAIL_POINT_DEFINE(initialSyncHangAfterListCollections);
/**
* Default listCollections predicate.
diff --git a/src/mongo/db/repl/do_txn.cpp b/src/mongo/db/repl/do_txn.cpp
index 5579ceb1c33..33d306d5fce 100644
--- a/src/mongo/db/repl/do_txn.cpp
+++ b/src/mongo/db/repl/do_txn.cpp
@@ -62,7 +62,7 @@ constexpr StringData DoTxn::kPreconditionFieldName;
namespace {
// If enabled, causes loop in _doTxn() to hang after applying current operation.
-MONGO_FP_DECLARE(doTxnPauseBetweenOperations);
+MONGO_FAIL_POINT_DEFINE(doTxnPauseBetweenOperations);
/**
* Return true iff the doTxnCmd can be executed in a single WriteUnitOfWork.
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 33e5e729a13..a578b4206b5 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -72,23 +72,23 @@ namespace mongo {
namespace repl {
// Failpoint for initial sync
-MONGO_FP_DECLARE(failInitialSyncWithBadHost);
+MONGO_FAIL_POINT_DEFINE(failInitialSyncWithBadHost);
// Failpoint which fails initial sync and leaves an oplog entry in the buffer.
-MONGO_FP_DECLARE(failInitSyncWithBufferedEntriesLeft);
+MONGO_FAIL_POINT_DEFINE(failInitSyncWithBufferedEntriesLeft);
// Failpoint which causes the initial sync function to hang before copying databases.
-MONGO_FP_DECLARE(initialSyncHangBeforeCopyingDatabases);
+MONGO_FAIL_POINT_DEFINE(initialSyncHangBeforeCopyingDatabases);
// Failpoint which causes the initial sync function to hang before finishing.
-MONGO_FP_DECLARE(initialSyncHangBeforeFinish);
+MONGO_FAIL_POINT_DEFINE(initialSyncHangBeforeFinish);
// Failpoint which causes the initial sync function to hang before calling shouldRetry on a failed
// operation.
-MONGO_FP_DECLARE(initialSyncHangBeforeGettingMissingDocument);
+MONGO_FAIL_POINT_DEFINE(initialSyncHangBeforeGettingMissingDocument);
// Failpoint which stops the applier.
-MONGO_FP_DECLARE(rsSyncApplyStop);
+MONGO_FAIL_POINT_DEFINE(rsSyncApplyStop);
namespace {
using namespace executor;
diff --git a/src/mongo/db/repl/initial_syncer.h b/src/mongo/db/repl/initial_syncer.h
index 063900142c9..7f0e5112857 100644
--- a/src/mongo/db/repl/initial_syncer.h
+++ b/src/mongo/db/repl/initial_syncer.h
@@ -59,17 +59,17 @@ namespace repl {
// TODO: Remove forward declares once we remove rs_initialsync.cpp and other dependents.
// Failpoint which fails initial sync and leaves an oplog entry in the buffer.
-MONGO_FP_FORWARD_DECLARE(failInitSyncWithBufferedEntriesLeft);
+MONGO_FAIL_POINT_DECLARE(failInitSyncWithBufferedEntriesLeft);
// Failpoint which causes the initial sync function to hang before copying databases.
-MONGO_FP_FORWARD_DECLARE(initialSyncHangBeforeCopyingDatabases);
+MONGO_FAIL_POINT_DECLARE(initialSyncHangBeforeCopyingDatabases);
// Failpoint which causes the initial sync function to hang before calling shouldRetry on a failed
// operation.
-MONGO_FP_FORWARD_DECLARE(initialSyncHangBeforeGettingMissingDocument);
+MONGO_FAIL_POINT_DECLARE(initialSyncHangBeforeGettingMissingDocument);
// Failpoint which stops the applier.
-MONGO_FP_FORWARD_DECLARE(rsSyncApplyStop);
+MONGO_FAIL_POINT_DECLARE(rsSyncApplyStop);
struct InitialSyncState;
struct MemberState;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 930a17df044..ff2f07f9d99 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -112,7 +112,7 @@ using IndexVersion = IndexDescriptor::IndexVersion;
namespace repl {
namespace {
-MONGO_FP_DECLARE(sleepBetweenInsertOpTimeGenerationAndLogOp);
+MONGO_FAIL_POINT_DEFINE(sleepBetweenInsertOpTimeGenerationAndLogOp);
/**
* The `_localOplogCollection` pointer is always valid (or null) because an
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index 960b8c6df75..31c0eef2ff1 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -48,7 +48,7 @@ namespace repl {
Seconds OplogFetcher::kDefaultProtocolZeroAwaitDataTimeout(2);
-MONGO_FP_DECLARE(stopReplProducer);
+MONGO_FAIL_POINT_DEFINE(stopReplProducer);
namespace {
diff --git a/src/mongo/db/repl/oplog_fetcher.h b/src/mongo/db/repl/oplog_fetcher.h
index b23d8023cfc..8aab9ce6a53 100644
--- a/src/mongo/db/repl/oplog_fetcher.h
+++ b/src/mongo/db/repl/oplog_fetcher.h
@@ -44,7 +44,7 @@
namespace mongo {
namespace repl {
-MONGO_FP_FORWARD_DECLARE(stopReplProducer);
+MONGO_FAIL_POINT_DECLARE(stopReplProducer);
/**
* The oplog fetcher, once started, reads operations from a remote oplog using a tailable cursor.
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index 5a7a8ddaab6..6b2877cacb7 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -649,7 +649,7 @@ bool isHeartbeatRequestV1(const BSONObj& cmdObj) {
} // namespace
-MONGO_FP_DECLARE(rsDelayHeartbeatResponse);
+MONGO_FAIL_POINT_DEFINE(rsDelayHeartbeatResponse);
/* { replSetHeartbeat : <setname> } */
class CmdReplSetHeartbeat : public ReplSetCommand {
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index fb178001730..e7f96cb50d3 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -119,7 +119,7 @@ const char meCollectionName[] = "local.me";
const auto meDatabaseName = localDbName;
const char tsFieldName[] = "ts";
-MONGO_FP_DECLARE(dropPendingCollectionReaperHang);
+MONGO_FAIL_POINT_DEFINE(dropPendingCollectionReaperHang);
// Set this to specify maximum number of times the oplog fetcher will consecutively restart the
// oplog tailing query on non-cancellation errors.
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 5b51a59cbc4..8c6bcf727da 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -89,8 +89,8 @@
namespace mongo {
namespace repl {
-MONGO_FP_DECLARE(stepdownHangBeforePerformingPostMemberStateUpdateActions);
-MONGO_FP_DECLARE(transitionToPrimaryHangBeforeTakingGlobalExclusiveLock);
+MONGO_FAIL_POINT_DEFINE(stepdownHangBeforePerformingPostMemberStateUpdateActions);
+MONGO_FAIL_POINT_DEFINE(transitionToPrimaryHangBeforeTakingGlobalExclusiveLock);
using CallbackArgs = executor::TaskExecutor::CallbackArgs;
using CallbackFn = executor::TaskExecutor::CallbackFn;
@@ -3419,7 +3419,7 @@ size_t ReplicationCoordinatorImpl::getNumUncommittedSnapshots() {
return _uncommittedSnapshotsSize.load();
}
-MONGO_FP_DECLARE(disableSnapshotting);
+MONGO_FAIL_POINT_DEFINE(disableSnapshotting);
bool ReplicationCoordinatorImpl::_updateCommittedSnapshot_inlock(
const OpTime& newCommittedSnapshot) {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 6f19a1f944e..a8e9187618a 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -65,8 +65,8 @@ namespace repl {
namespace {
-MONGO_FP_DECLARE(blockHeartbeatStepdown);
-MONGO_FP_DECLARE(blockHeartbeatReconfigFinish);
+MONGO_FAIL_POINT_DEFINE(blockHeartbeatStepdown);
+MONGO_FAIL_POINT_DEFINE(blockHeartbeatReconfigFinish);
} // namespace
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 0b4ea7df501..ca56b594432 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -71,7 +71,7 @@ namespace repl {
namespace {
-MONGO_FP_DECLARE(impersonateFullyUpgradedFutureVersion);
+MONGO_FAIL_POINT_DEFINE(impersonateFullyUpgradedFutureVersion);
} // namespace
diff --git a/src/mongo/db/repl/roll_back_local_operations.cpp b/src/mongo/db/repl/roll_back_local_operations.cpp
index 35a98d2d623..9760a15a101 100644
--- a/src/mongo/db/repl/roll_back_local_operations.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations.cpp
@@ -44,10 +44,10 @@ namespace repl {
// functionality for rs_rollback_no_uuid.cpp. See SERVER-29766.
// Failpoint which causes rollback to hang before finishing.
-MONGO_FP_DECLARE(rollbackHangBeforeFinish);
+MONGO_FAIL_POINT_DEFINE(rollbackHangBeforeFinish);
// Failpoint which causes rollback to hang and then fail after minValid is written.
-MONGO_FP_DECLARE(rollbackHangThenFailAfterWritingMinValid);
+MONGO_FAIL_POINT_DEFINE(rollbackHangThenFailAfterWritingMinValid);
namespace {
diff --git a/src/mongo/db/repl/roll_back_local_operations.h b/src/mongo/db/repl/roll_back_local_operations.h
index 69e88e5091e..324296a3013 100644
--- a/src/mongo/db/repl/roll_back_local_operations.h
+++ b/src/mongo/db/repl/roll_back_local_operations.h
@@ -47,8 +47,8 @@ namespace repl {
// two separate files, rs_rollback and rs_rollback_no_uuid. However, after
// MongoDB 3.8 is released, we no longer need to maintain rs_rollback_no_uuid
// code and these forward declares can be removed. See SERVER-29766.
-MONGO_FP_FORWARD_DECLARE(rollbackHangBeforeFinish);
-MONGO_FP_FORWARD_DECLARE(rollbackHangThenFailAfterWritingMinValid);
+MONGO_FAIL_POINT_DECLARE(rollbackHangBeforeFinish);
+MONGO_FAIL_POINT_DECLARE(rollbackHangThenFailAfterWritingMinValid);
class RollBackLocalOperations {
MONGO_DISALLOW_COPYING(RollBackLocalOperations);
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 9922aa9b475..c9a54f1e9eb 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -91,7 +91,7 @@ AtomicInt32 SyncTail::replBatchLimitOperations{50 * 1000};
namespace {
-MONGO_FP_DECLARE(pauseBatchApplicationBeforeCompletion);
+MONGO_FAIL_POINT_DEFINE(pauseBatchApplicationBeforeCompletion);
/**
* This variable determines the number of writer threads SyncTail will have. It can be overridden
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index f5898afeea8..eb9d23437f2 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -63,7 +63,7 @@
namespace mongo {
namespace repl {
-MONGO_FP_DECLARE(forceSyncSourceCandidate);
+MONGO_FAIL_POINT_DEFINE(forceSyncSourceCandidate);
const Seconds TopologyCoordinator::VoteLease::leaseTime = Seconds(30);
@@ -74,7 +74,7 @@ MONGO_EXPORT_STARTUP_SERVER_PARAMETER(priorityTakeoverFreshnessWindowSeconds, in
// If this fail point is enabled, TopologyCoordinator::shouldChangeSyncSource() will ignore
// the option TopologyCoordinator::Options::maxSyncSourceLagSecs. The sync source will not be
// re-evaluated if it lags behind another node by more than 'maxSyncSourceLagSecs' seconds.
-MONGO_FP_DECLARE(disableMaxSyncSourceLagSecs);
+MONGO_FAIL_POINT_DEFINE(disableMaxSyncSourceLagSecs);
constexpr Milliseconds TopologyCoordinator::PingStats::UninitializedPingTime;
diff --git a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
index 2e91223c164..c16b4fec09a 100644
--- a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
@@ -49,7 +49,7 @@
namespace mongo {
namespace {
-MONGO_FP_DECLARE(setDropCollDistLockWait);
+MONGO_FAIL_POINT_DEFINE(setDropCollDistLockWait);
/**
* Internal sharding command run on config servers to drop a collection from a database.
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index e1e8374c5c3..3350dae2a5c 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -55,7 +55,7 @@
namespace mongo {
namespace {
-MONGO_FP_DECLARE(migrationCommitVersionError);
+MONGO_FAIL_POINT_DEFINE(migrationCommitVersionError);
/**
* Append min, max and version information from chunk to the buffer for logChange purposes.
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 070b8b0710a..941772e0dac 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -116,7 +116,7 @@ namespace {
using TaskExecutor = executor::TaskExecutor;
using CallbackArgs = TaskExecutor::CallbackArgs;
-MONGO_FP_DECLARE(suspendRangeDeletion);
+MONGO_FAIL_POINT_DEFINE(suspendRangeDeletion);
/**
* Deletes ranges, in background, until done, normally using a task executor attached to the
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index b1545848bd5..6c18f329b17 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -197,15 +197,15 @@ BSONObj createTransferModsRequest(const NamespaceString& nss, const MigrationSes
// Enabling / disabling these fail points pauses / resumes MigrateStatus::_go(), the thread which
// receives a chunk migration from the donor.
-MONGO_FP_DECLARE(migrateThreadHangAtStep1);
-MONGO_FP_DECLARE(migrateThreadHangAtStep2);
-MONGO_FP_DECLARE(migrateThreadHangAtStep3);
-MONGO_FP_DECLARE(migrateThreadHangAtStep4);
-MONGO_FP_DECLARE(migrateThreadHangAtStep5);
-MONGO_FP_DECLARE(migrateThreadHangAtStep6);
-
-MONGO_FP_DECLARE(failMigrationLeaveOrphans);
-MONGO_FP_DECLARE(failMigrationReceivedOutOfRangeOperation);
+MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep1);
+MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep2);
+MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep3);
+MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep4);
+MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep5);
+MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep6);
+
+MONGO_FAIL_POINT_DEFINE(failMigrationLeaveOrphans);
+MONGO_FAIL_POINT_DEFINE(failMigrationReceivedOutOfRangeOperation);
} // namespace
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index e68dc4c443f..97aa70f72d9 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -126,10 +126,10 @@ Status checkCollectionEpochMatches(const ScopedCollectionMetadata& metadata, OID
} // namespace
-MONGO_FP_DECLARE(doNotRefreshRecipientAfterCommit);
-MONGO_FP_DECLARE(failMigrationCommit);
-MONGO_FP_DECLARE(hangBeforeLeavingCriticalSection);
-MONGO_FP_DECLARE(migrationCommitNetworkError);
+MONGO_FAIL_POINT_DEFINE(doNotRefreshRecipientAfterCommit);
+MONGO_FAIL_POINT_DEFINE(failMigrationCommit);
+MONGO_FAIL_POINT_DEFINE(hangBeforeLeavingCriticalSection);
+MONGO_FAIL_POINT_DEFINE(migrationCommitNetworkError);
MigrationSourceManager* MigrationSourceManager::get(CollectionShardingState& css) {
return msmForCss(css);
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 80b31ba76c7..d90b4b2dddf 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -65,13 +65,13 @@ void uassertStatusOKWithWarning(const Status& status) {
}
// Tests can pause and resume moveChunk's progress at each step by enabling/disabling each failpoint
-MONGO_FP_DECLARE(moveChunkHangAtStep1);
-MONGO_FP_DECLARE(moveChunkHangAtStep2);
-MONGO_FP_DECLARE(moveChunkHangAtStep3);
-MONGO_FP_DECLARE(moveChunkHangAtStep4);
-MONGO_FP_DECLARE(moveChunkHangAtStep5);
-MONGO_FP_DECLARE(moveChunkHangAtStep6);
-MONGO_FP_DECLARE(moveChunkHangAtStep7);
+MONGO_FAIL_POINT_DEFINE(moveChunkHangAtStep1);
+MONGO_FAIL_POINT_DEFINE(moveChunkHangAtStep2);
+MONGO_FAIL_POINT_DEFINE(moveChunkHangAtStep3);
+MONGO_FAIL_POINT_DEFINE(moveChunkHangAtStep4);
+MONGO_FAIL_POINT_DEFINE(moveChunkHangAtStep5);
+MONGO_FAIL_POINT_DEFINE(moveChunkHangAtStep6);
+MONGO_FAIL_POINT_DEFINE(moveChunkHangAtStep7);
class MoveChunkCommand : public BasicCommand {
public:
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 57d0a4e1e3e..6290e12e62b 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -94,10 +94,10 @@
namespace mongo {
-MONGO_FP_DECLARE(failCommand);
-MONGO_FP_DECLARE(rsStopGetMore);
-MONGO_FP_DECLARE(respondWithNotPrimaryInCommandDispatch);
-MONGO_FP_DECLARE(skipCheckingForNotMasterInCommandDispatch);
+MONGO_FAIL_POINT_DEFINE(failCommand);
+MONGO_FAIL_POINT_DEFINE(rsStopGetMore);
+MONGO_FAIL_POINT_DEFINE(respondWithNotPrimaryInCommandDispatch);
+MONGO_FAIL_POINT_DEFINE(skipCheckingForNotMasterInCommandDispatch);
namespace {
using logger::LogComponent;
diff --git a/src/mongo/db/service_entry_point_common.h b/src/mongo/db/service_entry_point_common.h
index ac524dbcdc6..1ec084d18b4 100644
--- a/src/mongo/db/service_entry_point_common.h
+++ b/src/mongo/db/service_entry_point_common.h
@@ -39,12 +39,12 @@
namespace mongo {
-MONGO_FP_FORWARD_DECLARE(rsStopGetMore);
-MONGO_FP_FORWARD_DECLARE(respondWithNotPrimaryInCommandDispatch);
+MONGO_FAIL_POINT_DECLARE(rsStopGetMore);
+MONGO_FAIL_POINT_DECLARE(respondWithNotPrimaryInCommandDispatch);
// When active, we won't check if we are master in command dispatch. Activate this if you want to
// test failing during command execution.
-MONGO_FP_FORWARD_DECLARE(skipCheckingForNotMasterInCommandDispatch);
+MONGO_FAIL_POINT_DECLARE(skipCheckingForNotMasterInCommandDispatch);
/**
* Helpers for writing ServiceEntryPointImpl implementations from a reusable core.
diff --git a/src/mongo/db/session.cpp b/src/mongo/db/session.cpp
index 3bdba2ada1b..6e724db8e80 100644
--- a/src/mongo/db/session.cpp
+++ b/src/mongo/db/session.cpp
@@ -286,11 +286,11 @@ void updateSessionEntry(OperationContext* opCtx, const UpdateRequest& updateRequ
// failBeforeCommitExceptionCode (int, default = not specified): If set, the specified exception
// code will be thrown, which will cause the write to not commit; if not specified, the write
// will be allowed to commit.
-MONGO_FP_DECLARE(onPrimaryTransactionalWrite);
+MONGO_FAIL_POINT_DEFINE(onPrimaryTransactionalWrite);
// Failpoint which will pause an operation just after allocating a point-in-time storage engine
// transaction.
-MONGO_FP_DECLARE(hangAfterPreallocateSnapshot);
+MONGO_FAIL_POINT_DEFINE(hangAfterPreallocateSnapshot);
} // namespace
const BSONObj Session::kDeadEndSentinel(BSON("$incompleteOplogHistory" << 1));
diff --git a/src/mongo/db/storage/mmap_v1/file_allocator.cpp b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
index 3f95c013442..daf9a13c659 100644
--- a/src/mongo/db/storage/mmap_v1/file_allocator.cpp
+++ b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
@@ -82,7 +82,7 @@ using std::stringstream;
unsigned long long FileAllocator::_uniqueNumber = 0;
static SimpleMutex _uniqueNumberMutex;
-MONGO_FP_DECLARE(allocateDiskFull);
+MONGO_FAIL_POINT_DEFINE(allocateDiskFull);
/**
* Aliases for Win32 CRT functions
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 211db208156..f3127dc56b0 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -67,7 +67,7 @@ using std::stringstream;
// recordNeedsFetch().
static const int kNeedsFetchFailFreq = 2;
static Counter64 needsFetchFailCounter;
-MONGO_FP_DECLARE(recordNeedsFetchFail);
+MONGO_FAIL_POINT_DEFINE(recordNeedsFetchFail);
// Used to make sure the compiler doesn't get too smart on us when we're
// trying to touch records.
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index b90916bc823..33d085ca529 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -75,7 +75,7 @@
namespace mongo {
namespace {
-MONGO_FP_DECLARE(WTEmulateOutOfOrderNextIndexKey);
+MONGO_FAIL_POINT_DEFINE(WTEmulateOutOfOrderNextIndexKey);
using std::string;
using std::vector;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 8e91828bf49..e12ece4460e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -1156,7 +1156,7 @@ bool WiredTigerKVEngine::initRsOplogBackgroundThread(StringData ns) {
namespace {
-MONGO_FP_DECLARE(WTPreserveSnapshotHistoryIndefinitely);
+MONGO_FAIL_POINT_DEFINE(WTPreserveSnapshotHistoryIndefinitely);
} // namespace
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
index 6aee81d30e6..42158b8dabc 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
@@ -47,7 +47,7 @@ namespace {
const uint64_t kMinimumTimestamp = 1;
} // namespace
-MONGO_FP_DECLARE(WTPausePrimaryOplogDurabilityLoop);
+MONGO_FAIL_POINT_DEFINE(WTPausePrimaryOplogDurabilityLoop);
void WiredTigerOplogManager::start(OperationContext* opCtx,
const std::string& uri,
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.cpp
index cc8d6020c02..853d78e59f1 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.cpp
@@ -39,7 +39,7 @@
namespace mongo {
// When set, simulates WT_PREPARE_CONFLICT returned from WiredTiger API calls.
-MONGO_FP_DECLARE(WTPrepareConflictForReads);
+MONGO_FAIL_POINT_DEFINE(WTPrepareConflictForReads);
void wiredTigerPrepareConflictLog(int attempts) {
LOG(1) << "Caught WT_PREPARE_CONFLICT, attempt " << attempts
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h
index e77069cf18b..9bb45c928d8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h
@@ -38,7 +38,7 @@
namespace mongo {
// When set, simulates returning WT_PREPARE_CONFLICT on WT cursor read operations.
-MONGO_FP_FORWARD_DECLARE(WTPrepareConflictForReads);
+MONGO_FAIL_POINT_DECLARE(WTPrepareConflictForReads);
/**
* Logs a message with the number of prepare conflict retry attempts.
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index efaf41c8e58..5754885de2e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -89,8 +89,8 @@ void checkOplogFormatVersion(OperationContext* opCtx, const std::string& uri) {
}
} // namespace
-MONGO_FP_DECLARE(WTWriteConflictException);
-MONGO_FP_DECLARE(WTWriteConflictExceptionForReads);
+MONGO_FAIL_POINT_DEFINE(WTWriteConflictException);
+MONGO_FAIL_POINT_DEFINE(WTWriteConflictExceptionForReads);
const std::string kWiredTigerEngineName = "wiredTiger";
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 2ef3e0b9b2a..4d89d2a04fb 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -515,11 +515,11 @@ private:
// WT failpoint to throw write conflict exceptions randomly
-MONGO_FP_FORWARD_DECLARE(WTWriteConflictException);
-MONGO_FP_FORWARD_DECLARE(WTWriteConflictExceptionForReads);
+MONGO_FAIL_POINT_DECLARE(WTWriteConflictException);
+MONGO_FAIL_POINT_DECLARE(WTWriteConflictExceptionForReads);
// Prevents oplog writes from being considered durable on the primary. Once activated, new writes
// will not be considered durable until deactivated. It is unspecified whether writes that commit
// before activation will become visible while active.
-MONGO_FP_FORWARD_DECLARE(WTPausePrimaryOplogDurabilityLoop);
+MONGO_FAIL_POINT_DECLARE(WTPausePrimaryOplogDurabilityLoop);
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index 1fc3ec265dc..f65eaeb88ed 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -51,7 +51,7 @@ namespace {
// transaction is not prepared. This should always be enabled if WTPrepareConflictForReads is
// used, which fails randomly. If this is not enabled, no prepare conflicts will be resolved,
// because the recovery unit may not ever actually be in a prepared state.
-MONGO_FP_DECLARE(WTAlwaysNotifyPrepareConflictWaiters);
+MONGO_FAIL_POINT_DEFINE(WTAlwaysNotifyPrepareConflictWaiters);
// SnapshotIds need to be globally unique, as they are used in a WorkingSetMember to
// determine if documents changed, but a different recovery unit may be used across a getMore,
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index 91e98404a53..b0d0dd25106 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -60,7 +60,7 @@ static Counter64 gleWtimeouts;
static ServerStatusMetricField<Counter64> gleWtimeoutsDisplay("getLastError.wtimeouts",
&gleWtimeouts);
-MONGO_FP_DECLARE(hangBeforeWaitingForWriteConcern);
+MONGO_FAIL_POINT_DEFINE(hangBeforeWaitingForWriteConcern);
bool commandSpecifiesWriteConcern(const BSONObj& cmdObj) {
return cmdObj.hasField(WriteConcernOptions::kWriteConcernField);