author    Ben Caimano <ben.caimano@mongodb.com>  2019-09-17 23:22:19 +0000
committer evergreen <evergreen@mongodb.com>      2019-09-17 23:22:19 +0000
commit    bc11369435ca51e2ff6897433d00f6b909f6a25f (patch)
tree      251653ec8285d798b41846e343e7e414e80ff277 /src/mongo/db
parent    45aea2495306dd61fab46bd398735bb6aaf7b53a (diff)
download  mongo-bc11369435ca51e2ff6897433d00f6b909f6a25f.tar.gz
SERVER-42165 Replace uses of stdx::mutex with mongo::Mutex
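
The whole change follows one mechanical pattern, visible in every hunk below: the mongo/stdx/mutex.h and mongo/stdx/condition_variable.h includes move to mongo/platform/mutex.h and mongo/platform/condition_variable.h, a stdx::mutex member becomes a Mutex constructed with MONGO_MAKE_LATCH and a diagnostic name, and lock guards are retyped from stdx::lock_guard<stdx::mutex> / stdx::unique_lock<stdx::mutex> to stdx::lock_guard<Latch> / stdx::unique_lock<Latch>. A minimal sketch of the post-change shape, assuming a hypothetical Widget class that is not part of this commit:

#include "mongo/platform/condition_variable.h"  // was: mongo/stdx/condition_variable.h
#include "mongo/platform/mutex.h"               // was: mongo/stdx/mutex.h

namespace mongo {

class Widget {
public:
    void set(int v) {
        // was: stdx::lock_guard<stdx::mutex> lk(_mutex);
        stdx::lock_guard<Latch> lk(_mutex);
        _value = v;
        _changed.notify_all();
    }

    void waitUntilChanged(int prev) {
        // was: stdx::unique_lock<stdx::mutex> lk(_mutex);
        stdx::unique_lock<Latch> lk(_mutex);
        _changed.wait(lk, [&] { return _value != prev; });
    }

private:
    // was: mutable stdx::mutex _mutex;
    // MONGO_MAKE_LATCH tags the mutex with a name for lock diagnostics.
    mutable Mutex _mutex = MONGO_MAKE_LATCH("Widget::_mutex");
    stdx::condition_variable _changed;  // the condition_variable type itself is unchanged
    int _value = 0;
};

}  // namespace mongo
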
Diffstat (limited to 'src/mongo/db')
-rw-r--r-- src/mongo/db/auth/authorization_manager.cpp | 2
-rw-r--r-- src/mongo/db/auth/authorization_manager.h | 4
-rw-r--r-- src/mongo/db/auth/authorization_manager_impl.cpp | 20
-rw-r--r-- src/mongo/db/auth/authorization_manager_impl.h | 8
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_local.cpp | 12
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_local.h | 4
-rw-r--r-- src/mongo/db/auth/user_cache_invalidator_job.cpp | 8
-rw-r--r-- src/mongo/db/background.cpp | 34
-rw-r--r-- src/mongo/db/baton.cpp | 10
-rw-r--r-- src/mongo/db/catalog/collection.cpp | 8
-rw-r--r-- src/mongo/db/catalog/collection.h | 6
-rw-r--r-- src/mongo/db/catalog/collection_catalog.cpp | 46
-rw-r--r-- src/mongo/db/catalog/collection_catalog.h | 6
-rw-r--r-- src/mongo/db/catalog/index_builds_manager.cpp | 10
-rw-r--r-- src/mongo/db/catalog/index_builds_manager.h | 4
-rw-r--r-- src/mongo/db/catalog/index_catalog_entry.h | 2
-rw-r--r-- src/mongo/db/catalog/index_catalog_entry_impl.cpp | 8
-rw-r--r-- src/mongo/db/catalog/index_catalog_entry_impl.h | 5
-rw-r--r-- src/mongo/db/catalog/multi_index_block.cpp | 6
-rw-r--r-- src/mongo/db/catalog/multi_index_block.h | 4
-rw-r--r-- src/mongo/db/catalog/util/partitioned.h | 2
-rw-r--r-- src/mongo/db/collection_index_builds_tracker.cpp | 3
-rw-r--r-- src/mongo/db/collection_index_builds_tracker.h | 4
-rw-r--r-- src/mongo/db/commands/dbhash.cpp | 2
-rw-r--r-- src/mongo/db/commands/fsync.cpp | 30
-rw-r--r-- src/mongo/db/commands/mr.cpp | 2
-rw-r--r-- src/mongo/db/commands/parameters.cpp | 6
-rw-r--r-- src/mongo/db/commands/user_management_commands.cpp | 6
-rw-r--r-- src/mongo/db/commands/validate.cpp | 6
-rw-r--r-- src/mongo/db/concurrency/d_concurrency.cpp | 8
-rw-r--r-- src/mongo/db/concurrency/d_concurrency_bm.cpp | 6
-rw-r--r-- src/mongo/db/concurrency/deferred_writer.cpp | 4
-rw-r--r-- src/mongo/db/concurrency/deferred_writer.h | 4
-rw-r--r-- src/mongo/db/concurrency/flow_control_ticketholder.cpp | 6
-rw-r--r-- src/mongo/db/concurrency/flow_control_ticketholder.h | 6
-rw-r--r-- src/mongo/db/concurrency/lock_manager.h | 4
-rw-r--r-- src/mongo/db/concurrency/lock_state.cpp | 6
-rw-r--r-- src/mongo/db/concurrency/lock_state.h | 2
-rw-r--r-- src/mongo/db/curop.cpp | 5
-rw-r--r-- src/mongo/db/database_index_builds_tracker.cpp | 2
-rw-r--r-- src/mongo/db/database_index_builds_tracker.h | 4
-rw-r--r-- src/mongo/db/default_baton.cpp | 8
-rw-r--r-- src/mongo/db/default_baton.h | 6
-rw-r--r-- src/mongo/db/free_mon/free_mon_controller.cpp | 18
-rw-r--r-- src/mongo/db/free_mon/free_mon_controller.h | 2
-rw-r--r-- src/mongo/db/free_mon/free_mon_controller_test.cpp | 22
-rw-r--r-- src/mongo/db/free_mon/free_mon_message.h | 10
-rw-r--r-- src/mongo/db/free_mon/free_mon_processor.h | 8
-rw-r--r-- src/mongo/db/free_mon/free_mon_queue.cpp | 8
-rw-r--r-- src/mongo/db/free_mon/free_mon_queue.h | 2
-rw-r--r-- src/mongo/db/ftdc/controller.cpp | 34
-rw-r--r-- src/mongo/db/ftdc/controller.h | 6
-rw-r--r-- src/mongo/db/ftdc/controller_test.cpp | 4
-rw-r--r-- src/mongo/db/index/index_build_interceptor.cpp | 4
-rw-r--r-- src/mongo/db/index/index_build_interceptor.h | 3
-rw-r--r-- src/mongo/db/index_builds_coordinator.cpp | 44
-rw-r--r-- src/mongo/db/index_builds_coordinator.h | 6
-rw-r--r-- src/mongo/db/index_builds_coordinator_mongod.cpp | 6
-rw-r--r-- src/mongo/db/keys_collection_cache.cpp | 10
-rw-r--r-- src/mongo/db/keys_collection_cache.h | 4
-rw-r--r-- src/mongo/db/keys_collection_manager.cpp | 18
-rw-r--r-- src/mongo/db/keys_collection_manager.h | 5
-rw-r--r-- src/mongo/db/logical_clock.cpp | 12
-rw-r--r-- src/mongo/db/logical_clock.h | 4
-rw-r--r-- src/mongo/db/logical_session_cache_impl.cpp | 34
-rw-r--r-- src/mongo/db/logical_session_cache_impl.h | 2
-rw-r--r-- src/mongo/db/logical_time_validator.cpp | 22
-rw-r--r-- src/mongo/db/logical_time_validator.h | 7
-rw-r--r-- src/mongo/db/operation_context.cpp | 2
-rw-r--r-- src/mongo/db/operation_context.h | 4
-rw-r--r-- src/mongo/db/operation_context_group.cpp | 10
-rw-r--r-- src/mongo/db/operation_context_group.h | 4
-rw-r--r-- src/mongo/db/operation_context_test.cpp | 54
-rw-r--r-- src/mongo/db/operation_time_tracker.cpp | 6
-rw-r--r-- src/mongo/db/operation_time_tracker.h | 4
-rw-r--r-- src/mongo/db/periodic_runner_job_abort_expired_transactions.h | 4
-rw-r--r-- src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h | 5
-rw-r--r-- src/mongo/db/pipeline/document_source_exchange.cpp | 10
-rw-r--r-- src/mongo/db/pipeline/document_source_exchange.h | 8
-rw-r--r-- src/mongo/db/pipeline/document_source_exchange_test.cpp | 9
-rw-r--r-- src/mongo/db/query/plan_cache.cpp | 20
-rw-r--r-- src/mongo/db/query/plan_cache.h | 4
-rw-r--r-- src/mongo/db/query/query_planner_wildcard_index_test.cpp | 2
-rw-r--r-- src/mongo/db/query/query_settings.cpp | 10
-rw-r--r-- src/mongo/db/query/query_settings.h | 4
-rw-r--r-- src/mongo/db/read_concern_mongod.cpp | 6
-rw-r--r-- src/mongo/db/repl/abstract_async_component.cpp | 18
-rw-r--r-- src/mongo/db/repl/abstract_async_component.h | 10
-rw-r--r-- src/mongo/db/repl/abstract_async_component_test.cpp | 14
-rw-r--r-- src/mongo/db/repl/abstract_oplog_fetcher.cpp | 20
-rw-r--r-- src/mongo/db/repl/abstract_oplog_fetcher.h | 6
-rw-r--r-- src/mongo/db/repl/base_cloner_test_fixture.cpp | 4
-rw-r--r-- src/mongo/db/repl/base_cloner_test_fixture.h | 6
-rw-r--r-- src/mongo/db/repl/bgsync.cpp | 34
-rw-r--r-- src/mongo/db/repl/bgsync.h | 6
-rw-r--r-- src/mongo/db/repl/callback_completion_guard.h | 10
-rw-r--r-- src/mongo/db/repl/check_quorum_for_config_change_test.cpp | 6
-rw-r--r-- src/mongo/db/repl/collection_cloner.cpp | 34
-rw-r--r-- src/mongo/db/repl/collection_cloner.h | 8
-rw-r--r-- src/mongo/db/repl/collection_cloner_test.cpp | 18
-rw-r--r-- src/mongo/db/repl/database_cloner.cpp | 8
-rw-r--r-- src/mongo/db/repl/database_cloner.h | 8
-rw-r--r-- src/mongo/db/repl/databases_cloner.cpp | 4
-rw-r--r-- src/mongo/db/repl/databases_cloner.h | 10
-rw-r--r-- src/mongo/db/repl/databases_cloner_test.cpp | 10
-rw-r--r-- src/mongo/db/repl/drop_pending_collection_reaper.cpp | 12
-rw-r--r-- src/mongo/db/repl/drop_pending_collection_reaper.h | 6
-rw-r--r-- src/mongo/db/repl/initial_syncer.cpp | 58
-rw-r--r-- src/mongo/db/repl/initial_syncer.h | 12
-rw-r--r-- src/mongo/db/repl/initial_syncer_test.cpp | 10
-rw-r--r-- src/mongo/db/repl/local_oplog_info.cpp | 4
-rw-r--r-- src/mongo/db/repl/local_oplog_info.h | 2
-rw-r--r-- src/mongo/db/repl/multiapplier.cpp | 14
-rw-r--r-- src/mongo/db/repl/multiapplier.h | 6
-rw-r--r-- src/mongo/db/repl/noop_writer.cpp | 10
-rw-r--r-- src/mongo/db/repl/noop_writer.h | 4
-rw-r--r-- src/mongo/db/repl/oplog_applier.cpp | 4
-rw-r--r-- src/mongo/db/repl/oplog_applier.h | 4
-rw-r--r-- src/mongo/db/repl/oplog_buffer_collection.cpp | 30
-rw-r--r-- src/mongo/db/repl/oplog_buffer_collection.h | 4
-rw-r--r-- src/mongo/db/repl/oplog_buffer_proxy.cpp | 22
-rw-r--r-- src/mongo/db/repl/oplog_buffer_proxy.h | 6
-rw-r--r-- src/mongo/db/repl/oplog_test.cpp | 22
-rw-r--r-- src/mongo/db/repl/replication_consistency_markers_mock.cpp | 26
-rw-r--r-- src/mongo/db/repl/replication_consistency_markers_mock.h | 8
-rw-r--r-- src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 18
-rw-r--r-- src/mongo/db/repl/replication_coordinator_external_state_impl.h | 9
-rw-r--r-- src/mongo/db/repl/replication_coordinator_external_state_mock.cpp | 4
-rw-r--r-- src/mongo/db/repl/replication_coordinator_external_state_mock.h | 7
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl.cpp | 237
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl.h | 12
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp | 10
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp | 26
-rw-r--r-- src/mongo/db/repl/replication_metrics.cpp | 66
-rw-r--r-- src/mongo/db/repl/replication_metrics.h | 4
-rw-r--r-- src/mongo/db/repl/replication_process.cpp | 8
-rw-r--r-- src/mongo/db/repl/replication_process.h | 4
-rw-r--r-- src/mongo/db/repl/replication_recovery_test.cpp | 18
-rw-r--r-- src/mongo/db/repl/reporter.cpp | 26
-rw-r--r-- src/mongo/db/repl/reporter.h | 6
-rw-r--r-- src/mongo/db/repl/rollback_checker.cpp | 7
-rw-r--r-- src/mongo/db/repl/rollback_checker.h | 5
-rw-r--r-- src/mongo/db/repl/rollback_checker_test.cpp | 6
-rw-r--r-- src/mongo/db/repl/rollback_impl.cpp | 4
-rw-r--r-- src/mongo/db/repl/rollback_impl.h | 2
-rw-r--r-- src/mongo/db/repl/rollback_test_fixture.h | 18
-rw-r--r-- src/mongo/db/repl/scatter_gather_runner.cpp | 2
-rw-r--r-- src/mongo/db/repl/scatter_gather_runner.h | 4
-rw-r--r-- src/mongo/db/repl/storage_interface_impl.cpp | 2
-rw-r--r-- src/mongo/db/repl/storage_interface_mock.cpp | 14
-rw-r--r-- src/mongo/db/repl/storage_interface_mock.h | 4
-rw-r--r-- src/mongo/db/repl/sync_source_feedback.cpp | 12
-rw-r--r-- src/mongo/db/repl/sync_source_feedback.h | 6
-rw-r--r-- src/mongo/db/repl/sync_source_resolver.cpp | 16
-rw-r--r-- src/mongo/db/repl/sync_source_resolver.h | 6
-rw-r--r-- src/mongo/db/repl/sync_tail.cpp | 18
-rw-r--r-- src/mongo/db/repl/sync_tail.h | 4
-rw-r--r-- src/mongo/db/repl/sync_tail_test.cpp | 8
-rw-r--r-- src/mongo/db/repl/task_runner.cpp | 14
-rw-r--r-- src/mongo/db/repl/task_runner.h | 6
-rw-r--r-- src/mongo/db/repl/task_runner_test.cpp | 56
-rw-r--r-- src/mongo/db/repl/topology_coordinator_v1_test.cpp | 1
-rw-r--r-- src/mongo/db/repl_index_build_state.h | 4
-rw-r--r-- src/mongo/db/s/active_migrations_registry.cpp | 12
-rw-r--r-- src/mongo/db/s/active_migrations_registry.h | 4
-rw-r--r-- src/mongo/db/s/active_move_primaries_registry.cpp | 6
-rw-r--r-- src/mongo/db/s/active_move_primaries_registry.h | 2
-rw-r--r-- src/mongo/db/s/active_rename_collection_registry.cpp | 4
-rw-r--r-- src/mongo/db/s/active_rename_collection_registry.h | 2
-rw-r--r-- src/mongo/db/s/active_shard_collection_registry.cpp | 6
-rw-r--r-- src/mongo/db/s/active_shard_collection_registry.h | 4
-rw-r--r-- src/mongo/db/s/balancer/balancer.cpp | 30
-rw-r--r-- src/mongo/db/s/balancer/balancer.h | 6
-rw-r--r-- src/mongo/db/s/balancer/migration_manager.cpp | 22
-rw-r--r-- src/mongo/db/s/balancer/migration_manager.h | 6
-rw-r--r-- src/mongo/db/s/chunk_splitter.cpp | 6
-rw-r--r-- src/mongo/db/s/chunk_splitter.h | 2
-rw-r--r-- src/mongo/db/s/collection_range_deleter.cpp | 10
-rw-r--r-- src/mongo/db/s/collection_sharding_state.cpp | 6
-rw-r--r-- src/mongo/db/s/collection_sharding_state_factory_shard.cpp | 4
-rw-r--r-- src/mongo/db/s/config/namespace_serializer.cpp | 4
-rw-r--r-- src/mongo/db/s/config/namespace_serializer.h | 6
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager.cpp | 10
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager.h | 4
-rw-r--r-- src/mongo/db/s/database_sharding_state.cpp | 4
-rw-r--r-- src/mongo/db/s/implicit_create_collection.cpp | 16
-rw-r--r-- src/mongo/db/s/metadata_manager.cpp | 30
-rw-r--r-- src/mongo/db/s/metadata_manager.h | 2
-rw-r--r-- src/mongo/db/s/metadata_manager_test.cpp | 2
-rw-r--r-- src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 28
-rw-r--r-- src/mongo/db/s/migration_chunk_cloner_source_legacy.h | 8
-rw-r--r-- src/mongo/db/s/migration_destination_manager.cpp | 28
-rw-r--r-- src/mongo/db/s/migration_destination_manager.h | 6
-rw-r--r-- src/mongo/db/s/namespace_metadata_change_notifications.cpp | 8
-rw-r--r-- src/mongo/db/s/namespace_metadata_change_notifications.h | 4
-rw-r--r-- src/mongo/db/s/session_catalog_migration_destination.cpp | 18
-rw-r--r-- src/mongo/db/s/session_catalog_migration_destination.h | 6
-rw-r--r-- src/mongo/db/s/session_catalog_migration_source.cpp | 22
-rw-r--r-- src/mongo/db/s/session_catalog_migration_source.h | 7
-rw-r--r-- src/mongo/db/s/shard_server_catalog_cache_loader.cpp | 48
-rw-r--r-- src/mongo/db/s/shard_server_catalog_cache_loader.h | 8
-rw-r--r-- src/mongo/db/s/sharding_initialization_mongod.cpp | 2
-rw-r--r-- src/mongo/db/s/sharding_initialization_mongod.h | 3
-rw-r--r-- src/mongo/db/s/sharding_state.cpp | 10
-rw-r--r-- src/mongo/db/s/sharding_state.h | 4
-rw-r--r-- src/mongo/db/s/transaction_coordinator.cpp | 26
-rw-r--r-- src/mongo/db/s/transaction_coordinator.h | 2
-rw-r--r-- src/mongo/db/s/transaction_coordinator_catalog.cpp | 20
-rw-r--r-- src/mongo/db/s/transaction_coordinator_catalog.h | 6
-rw-r--r-- src/mongo/db/s/transaction_coordinator_futures_util.cpp | 16
-rw-r--r-- src/mongo/db/s/transaction_coordinator_futures_util.h | 14
-rw-r--r-- src/mongo/db/s/transaction_coordinator_service.cpp | 8
-rw-r--r-- src/mongo/db/s/transaction_coordinator_service.h | 2
-rw-r--r-- src/mongo/db/s/wait_for_majority_service.cpp | 2
-rw-r--r-- src/mongo/db/s/wait_for_majority_service.h | 4
-rw-r--r-- src/mongo/db/s/wait_for_majority_service_test.cpp | 10
-rw-r--r-- src/mongo/db/server_recovery.cpp | 6
-rw-r--r-- src/mongo/db/server_recovery.h | 4
-rw-r--r-- src/mongo/db/service_context.cpp | 15
-rw-r--r-- src/mongo/db/service_context.h | 10
-rw-r--r-- src/mongo/db/service_context_test_fixture.cpp | 1
-rw-r--r-- src/mongo/db/service_liaison_mock.cpp | 16
-rw-r--r-- src/mongo/db/service_liaison_mock.h | 6
-rw-r--r-- src/mongo/db/service_liaison_mongod.cpp | 2
-rw-r--r-- src/mongo/db/service_liaison_mongod.h | 2
-rw-r--r-- src/mongo/db/service_liaison_mongos.cpp | 2
-rw-r--r-- src/mongo/db/service_liaison_mongos.h | 2
-rw-r--r-- src/mongo/db/session_catalog.cpp | 18
-rw-r--r-- src/mongo/db/session_catalog.h | 6
-rw-r--r-- src/mongo/db/session_catalog_test.cpp | 4
-rw-r--r-- src/mongo/db/session_killer.cpp | 8
-rw-r--r-- src/mongo/db/session_killer.h | 8
-rw-r--r-- src/mongo/db/sessions_collection_config_server.cpp | 2
-rw-r--r-- src/mongo/db/sessions_collection_config_server.h | 4
-rw-r--r-- src/mongo/db/sessions_collection_mock.cpp | 12
-rw-r--r-- src/mongo/db/sessions_collection_mock.h | 4
-rw-r--r-- src/mongo/db/sessions_collection_rs.h | 4
-rw-r--r-- src/mongo/db/snapshot_window_util.cpp | 8
-rw-r--r-- src/mongo/db/stats/server_write_concern_metrics.cpp | 8
-rw-r--r-- src/mongo/db/stats/server_write_concern_metrics.h | 2
-rw-r--r-- src/mongo/db/storage/biggie/biggie_kv_engine.cpp | 2
-rw-r--r-- src/mongo/db/storage/biggie/biggie_kv_engine.h | 4
-rw-r--r-- src/mongo/db/storage/biggie/biggie_record_store.cpp | 8
-rw-r--r-- src/mongo/db/storage/biggie/biggie_record_store.h | 7
-rw-r--r-- src/mongo/db/storage/biggie/biggie_visibility_manager.cpp | 12
-rw-r--r-- src/mongo/db/storage/biggie/biggie_visibility_manager.h | 5
-rw-r--r-- src/mongo/db/storage/durable_catalog_impl.cpp | 20
-rw-r--r-- src/mongo/db/storage/durable_catalog_impl.h | 4
-rw-r--r-- src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp | 16
-rw-r--r-- src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h | 7
-rw-r--r-- src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h | 2
-rw-r--r-- src/mongo/db/storage/flow_control.cpp | 8
-rw-r--r-- src/mongo/db/storage/flow_control.h | 4
-rw-r--r-- src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp | 12
-rw-r--r-- src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h | 4
-rw-r--r-- src/mongo/db/storage/kv/kv_prefix.cpp | 6
-rw-r--r-- src/mongo/db/storage/kv/kv_prefix.h | 4
-rw-r--r-- src/mongo/db/storage/kv/storage_engine_test.cpp | 12
-rw-r--r-- src/mongo/db/storage/mobile/mobile_kv_engine.h | 6
-rw-r--r-- src/mongo/db/storage/mobile/mobile_record_store.cpp | 16
-rw-r--r-- src/mongo/db/storage/mobile/mobile_record_store.h | 4
-rw-r--r-- src/mongo/db/storage/mobile/mobile_session_pool.cpp | 8
-rw-r--r-- src/mongo/db/storage/mobile/mobile_session_pool.h | 6
-rw-r--r-- src/mongo/db/storage/storage_engine_impl.cpp | 10
-rw-r--r-- src/mongo/db/storage/storage_engine_impl.h | 4
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp | 48
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h | 14
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp | 14
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h | 9
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp | 36
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h | 11
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h | 11
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp | 26
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h | 11
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp | 14
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h | 7
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp | 14
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h | 8
-rw-r--r-- src/mongo/db/time_proof_service.cpp | 4
-rw-r--r-- src/mongo/db/time_proof_service.h | 4
-rw-r--r-- src/mongo/db/traffic_recorder.cpp | 22
-rw-r--r-- src/mongo/db/traffic_recorder.h | 4
-rw-r--r-- src/mongo/db/ttl_collection_cache.cpp | 6
-rw-r--r-- src/mongo/db/ttl_collection_cache.h | 4
-rw-r--r-- src/mongo/db/views/view_catalog.cpp | 18
-rw-r--r-- src/mongo/db/views/view_catalog.h | 4
286 files changed, 1575 insertions, 1561 deletions
diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp
index b40e48b4ece..cfdcf367452 100644
--- a/src/mongo/db/auth/authorization_manager.cpp
+++ b/src/mongo/db/auth/authorization_manager.cpp
@@ -57,7 +57,7 @@
#include "mongo/db/global_settings.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/compiler.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/auth/authorization_manager.h b/src/mongo/db/auth/authorization_manager.h
index 19034dee81b..035d797f48d 100644
--- a/src/mongo/db/auth/authorization_manager.h
+++ b/src/mongo/db/auth/authorization_manager.h
@@ -49,8 +49,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/server_options.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
diff --git a/src/mongo/db/auth/authorization_manager_impl.cpp b/src/mongo/db/auth/authorization_manager_impl.cpp
index 219f07d5cc0..8c94db89c40 100644
--- a/src/mongo/db/auth/authorization_manager_impl.cpp
+++ b/src/mongo/db/auth/authorization_manager_impl.cpp
@@ -62,7 +62,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/mongod_options.h"
#include "mongo/platform/compiler.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -115,7 +115,7 @@ class PinnedUserSetParameter {
public:
void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) const {
BSONArrayBuilder sub(b.subarrayStart(name));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& username : _pinnedUsersList) {
BSONObjBuilder nameObj(sub.subobjStart());
nameObj << AuthorizationManager::USER_NAME_FIELD_NAME << username.getUser()
@@ -138,7 +138,7 @@ public:
return status;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_pinnedUsersList = out;
auto authzManager = _authzManager;
if (!authzManager) {
@@ -171,7 +171,7 @@ public:
return status;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_pinnedUsersList = out;
auto authzManager = _authzManager;
if (!authzManager) {
@@ -183,7 +183,7 @@ public:
}
void setAuthzManager(AuthorizationManager* authzManager) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_authzManager = authzManager;
_authzManager->updatePinnedUsersList(std::move(_pinnedUsersList));
}
@@ -200,7 +200,7 @@ private:
}
AuthorizationManager* _authzManager = nullptr;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("PinnedUserSetParameter::_mutex");
std::vector<UserName> _pinnedUsersList;
} authorizationManagerPinnedUsers;
@@ -339,7 +339,7 @@ private:
bool _isThisGuardInFetchPhase;
AuthorizationManagerImpl* _authzManager;
- stdx::unique_lock<stdx::mutex> _cacheLock;
+ stdx::unique_lock<Latch> _cacheLock;
};
AuthorizationManagerImpl::AuthorizationManagerImpl()
@@ -396,7 +396,7 @@ Status AuthorizationManagerImpl::getAuthorizationVersion(OperationContext* opCtx
}
OID AuthorizationManagerImpl::getCacheGeneration() {
- stdx::lock_guard<stdx::mutex> lk(_cacheWriteMutex);
+ stdx::lock_guard<Latch> lk(_cacheWriteMutex);
return _fetchGeneration;
}
@@ -641,7 +641,7 @@ Status AuthorizationManagerImpl::_fetchUserV2(OperationContext* opCtx,
}
void AuthorizationManagerImpl::updatePinnedUsersList(std::vector<UserName> names) {
- stdx::unique_lock<stdx::mutex> lk(_pinnedUsersMutex);
+ stdx::unique_lock<Latch> lk(_pinnedUsersMutex);
_usersToPin = std::move(names);
bool noUsersToPin = _usersToPin->empty();
_pinnedUsersCond.notify_one();
@@ -664,7 +664,7 @@ void AuthorizationManagerImpl::_pinnedUsersThreadRoutine() noexcept try {
while (true) {
auto opCtx = cc().makeOperationContext();
- stdx::unique_lock<stdx::mutex> lk(_pinnedUsersMutex);
+ stdx::unique_lock<Latch> lk(_pinnedUsersMutex);
const Milliseconds timeout(authorizationManagerPinnedUsersRefreshIntervalMillis.load());
auto waitRes = opCtx->waitForConditionOrInterruptFor(
_pinnedUsersCond, lk, timeout, [&] { return _usersToPin.has_value(); });
diff --git a/src/mongo/db/auth/authorization_manager_impl.h b/src/mongo/db/auth/authorization_manager_impl.h
index d18b6e7724a..725bb27b7d8 100644
--- a/src/mongo/db/auth/authorization_manager_impl.h
+++ b/src/mongo/db/auth/authorization_manager_impl.h
@@ -50,8 +50,8 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/server_options.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/invalidating_lru_cache.h"
@@ -232,7 +232,7 @@ private:
InvalidatingLRUCache<UserName, User, UserCacheInvalidator> _userCache;
- stdx::mutex _pinnedUsersMutex;
+ Mutex _pinnedUsersMutex = MONGO_MAKE_LATCH("AuthorizationManagerImpl::_pinnedUsersMutex");
stdx::condition_variable _pinnedUsersCond;
std::once_flag _pinnedThreadTrackerStarted;
boost::optional<std::vector<UserName>> _usersToPin;
@@ -241,7 +241,7 @@ private:
* Protects _cacheGeneration, _version and _isFetchPhaseBusy. Manipulated
* via CacheGuard.
*/
- stdx::mutex _cacheWriteMutex;
+ Mutex _cacheWriteMutex = MONGO_MAKE_LATCH("AuthorizationManagerImpl::_cacheWriteMutex");
/**
* Current generation of cached data. Updated every time part of the cache gets
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index 311e68ad533..923acc650fa 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -204,7 +204,7 @@ void AuthzManagerExternalStateLocal::resolveUserRoles(mutablebson::Document* use
bool isRoleGraphConsistent = false;
{
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_roleGraphMutex);
isRoleGraphConsistent = _roleGraphState == roleGraphStateConsistent;
for (const auto& role : directRoles) {
indirectRoles.insert(role);
@@ -306,7 +306,7 @@ Status AuthzManagerExternalStateLocal::getRoleDescription(
*result = resultDoc.getObject();
return Status::OK();
}
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_roleGraphMutex);
return _getRoleDescription_inlock(roleName, showPrivileges, showRestrictions, result);
}
@@ -326,7 +326,7 @@ Status AuthzManagerExternalStateLocal::getRolesDescription(
return Status::OK();
}
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_roleGraphMutex);
BSONArrayBuilder resultBuilder;
for (const RoleName& role : roles) {
BSONObj roleDoc;
@@ -441,7 +441,7 @@ Status AuthzManagerExternalStateLocal::getRoleDescriptionsForDB(
"Cannot get user fragment for all roles in a database");
}
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_roleGraphMutex);
for (RoleNameIterator it = _roleGraph.getRolesForDatabase(dbname); it.more(); it.next()) {
if (!showBuiltinRoles && _roleGraph.isBuiltinRole(it.get())) {
continue;
@@ -476,7 +476,7 @@ void addRoleFromDocumentOrWarn(RoleGraph* roleGraph, const BSONObj& doc) {
} // namespace
Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lkInitialzeRoleGraph(_roleGraphMutex);
+ stdx::lock_guard<Latch> lkInitialzeRoleGraph(_roleGraphMutex);
_roleGraphState = roleGraphStateInitial;
_roleGraph = RoleGraph();
@@ -562,7 +562,7 @@ private:
void _refreshRoleGraph() {
- stdx::lock_guard<stdx::mutex> lk(_externalState->_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_externalState->_roleGraphMutex);
Status status = _externalState->_roleGraph.handleLogOp(
_opCtx, _op.c_str(), _nss, _o, _o2 ? &*_o2 : nullptr);
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.h b/src/mongo/db/auth/authz_manager_external_state_local.h
index 31b3869f114..a5965abf3c2 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.h
+++ b/src/mongo/db/auth/authz_manager_external_state_local.h
@@ -37,7 +37,7 @@
#include "mongo/db/auth/role_graph.h"
#include "mongo/db/auth/role_name.h"
#include "mongo/db/auth/user_name.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -161,7 +161,7 @@ private:
/**
* Guards _roleGraphState and _roleGraph.
*/
- stdx::mutex _roleGraphMutex;
+ Mutex _roleGraphMutex = MONGO_MAKE_LATCH("AuthzManagerExternalStateLocal::_roleGraphMutex");
};
} // namespace mongo
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index 893cc5ddae7..2b0c89e4f1b 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -43,9 +43,9 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/platform/compiler.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/grid.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/background.h"
#include "mongo/util/concurrency/idle_thread_block.h"
#include "mongo/util/duration.h"
@@ -63,7 +63,7 @@ public:
void setInterval(Seconds interval) {
{
- stdx::lock_guard<stdx::mutex> twiddle(_mutex);
+ stdx::lock_guard<Latch> twiddle(_mutex);
MONGO_LOG(5) << "setInterval: old=" << _interval << ", new=" << interval;
_interval = interval;
}
@@ -75,7 +75,7 @@ public:
}
void wait() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (true) {
Date_t now = Date_t::now();
Date_t expiry = _last + _interval;
@@ -95,7 +95,7 @@ public:
private:
Seconds _interval;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ThreadSleepInterval::_mutex");
stdx::condition_variable _condition;
Date_t _last;
};
diff --git a/src/mongo/db/background.cpp b/src/mongo/db/background.cpp
index 0f7b9cd7eea..62afb3e099b 100644
--- a/src/mongo/db/background.cpp
+++ b/src/mongo/db/background.cpp
@@ -35,8 +35,8 @@
#include <string>
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/map_util.h"
@@ -56,13 +56,13 @@ public:
void recordBegin();
int recordEnd();
- void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk);
+ void awaitNoBgOps(stdx::unique_lock<Latch>& lk);
int getOpsInProgCount() const {
return _opsInProgCount;
}
- void waitForAnOpRemoval(stdx::unique_lock<stdx::mutex>& lk, OperationContext* opCtx);
+ void waitForAnOpRemoval(stdx::unique_lock<Latch>& lk, OperationContext* opCtx);
private:
int _opsInProgCount;
@@ -75,7 +75,7 @@ typedef StringMap<std::shared_ptr<BgInfo>> BgInfoMap;
typedef BgInfoMap::const_iterator BgInfoMapIterator;
// Static data for this file is never destroyed.
-stdx::mutex& m = *(new stdx::mutex());
+Mutex& m = *(new Mutex());
BgInfoMap& dbsInProg = *(new BgInfoMap());
BgInfoMap& nsInProg = *(new BgInfoMap());
@@ -94,12 +94,12 @@ int BgInfo::recordEnd() {
return _opsInProgCount;
}
-void BgInfo::awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk) {
+void BgInfo::awaitNoBgOps(stdx::unique_lock<Latch>& lk) {
while (_opsInProgCount > 0)
_noOpsInProg.wait(lk);
}
-void BgInfo::waitForAnOpRemoval(stdx::unique_lock<stdx::mutex>& lk, OperationContext* opCtx) {
+void BgInfo::waitForAnOpRemoval(stdx::unique_lock<Latch>& lk, OperationContext* opCtx) {
int startOpRemovalsCount = _opRemovalsCount;
// Wait for an index build to finish.
@@ -122,7 +122,7 @@ void recordEndAndRemove(BgInfoMap& bgiMap, StringData key) {
}
}
-void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk, BgInfoMap* bgiMap, StringData key) {
+void awaitNoBgOps(stdx::unique_lock<Latch>& lk, BgInfoMap* bgiMap, StringData key) {
std::shared_ptr<BgInfo> bgInfo = mapFindWithDefault(*bgiMap, key, std::shared_ptr<BgInfo>());
if (!bgInfo)
return;
@@ -132,7 +132,7 @@ void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk, BgInfoMap* bgiMap, StringD
} // namespace
void BackgroundOperation::waitUntilAnIndexBuildFinishes(OperationContext* opCtx, StringData ns) {
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
std::shared_ptr<BgInfo> bgInfo = mapFindWithDefault(nsInProg, ns, std::shared_ptr<BgInfo>());
if (!bgInfo) {
// There are no index builds in progress on the collection, so no need to wait.
@@ -142,12 +142,12 @@ void BackgroundOperation::waitUntilAnIndexBuildFinishes(OperationContext* opCtx,
}
bool BackgroundOperation::inProgForDb(StringData db) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
return dbsInProg.find(db) != dbsInProg.end();
}
int BackgroundOperation::numInProgForDb(StringData db) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
std::shared_ptr<BgInfo> bgInfo = mapFindWithDefault(dbsInProg, db, std::shared_ptr<BgInfo>());
if (!bgInfo)
return 0;
@@ -155,7 +155,7 @@ int BackgroundOperation::numInProgForDb(StringData db) {
}
bool BackgroundOperation::inProgForNs(StringData ns) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
return nsInProg.find(ns) != nsInProg.end();
}
@@ -189,29 +189,29 @@ void BackgroundOperation::assertNoBgOpInProgForNs(StringData ns) {
}
void BackgroundOperation::awaitNoBgOpInProgForDb(StringData db) {
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
awaitNoBgOps(lk, &dbsInProg, db);
}
void BackgroundOperation::awaitNoBgOpInProgForNs(StringData ns) {
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
awaitNoBgOps(lk, &nsInProg, ns);
}
BackgroundOperation::BackgroundOperation(StringData ns) : _ns(ns) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
recordBeginAndInsert(dbsInProg, _ns.db());
recordBeginAndInsert(nsInProg, _ns.ns());
}
BackgroundOperation::~BackgroundOperation() {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
recordEndAndRemove(dbsInProg, _ns.db());
recordEndAndRemove(nsInProg, _ns.ns());
}
void BackgroundOperation::dump(std::ostream& ss) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
if (nsInProg.size()) {
ss << "\n<b>Background Jobs in Progress</b>\n";
for (BgInfoMapIterator i = nsInProg.begin(); i != nsInProg.end(); ++i)
diff --git a/src/mongo/db/baton.cpp b/src/mongo/db/baton.cpp
index f648c3e13ed..937a8a61664 100644
--- a/src/mongo/db/baton.cpp
+++ b/src/mongo/db/baton.cpp
@@ -36,7 +36,7 @@
#include "mongo/db/baton.h"
#include "mongo/base/status.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
namespace mongo {
@@ -81,7 +81,7 @@ public:
}
_baton->schedule([this, anchor = shared_from_this()](Status status) {
- _runJobs(stdx::unique_lock(_mutex), status);
+ _runJobs(stdx::unique_lock<Latch>(_mutex), status);
});
}
@@ -114,14 +114,14 @@ public:
}
void detachImpl() noexcept override {
- stdx::unique_lock lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_isDead = true;
_runJobs(std::move(lk), kDetached);
}
private:
- void _runJobs(stdx::unique_lock<stdx::mutex> lk, Status status) {
+ void _runJobs(stdx::unique_lock<Latch> lk, Status status) {
if (status.isOK() && _isDead) {
status = kDetached;
}
@@ -140,7 +140,7 @@ private:
BatonHandle _baton;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SubBaton::_mutex");
bool _isDead = false;
std::vector<Task> _scheduled;
};
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 0d86d5c1572..f46d636240f 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -42,13 +42,13 @@ namespace mongo {
//
void CappedInsertNotifier::notifyAll() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
++_version;
_notifier.notify_all();
}
void CappedInsertNotifier::waitUntil(uint64_t prevVersion, Date_t deadline) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (!_dead && prevVersion == _version) {
if (stdx::cv_status::timeout == _notifier.wait_until(lk, deadline.toSystemTimePoint())) {
return;
@@ -57,13 +57,13 @@ void CappedInsertNotifier::waitUntil(uint64_t prevVersion, Date_t deadline) cons
}
void CappedInsertNotifier::kill() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dead = true;
_notifier.notify_all();
}
bool CappedInsertNotifier::isDead() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _dead;
}
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 4247900f0f4..ee12d90656d 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -51,8 +51,8 @@
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/snapshot.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/decorable.h"
namespace mongo {
@@ -136,7 +136,7 @@ private:
mutable stdx::condition_variable _notifier;
// Mutex used with '_notifier'. Protects access to '_version'.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("CappedInsertNotifier::_mutex");
// A counter, incremented on insertion of new data into the capped collection.
//
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index 0513e37b9cb..4e7dc82f9a5 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -73,7 +73,7 @@ CollectionCatalog::iterator::iterator(StringData dbName,
: _dbName(dbName), _genNum(genNum), _catalog(&catalog) {
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
- stdx::lock_guard<stdx::mutex> lock(_catalog->_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
_mapIter = _catalog->_orderedCollections.lower_bound(std::make_pair(_dbName, minUuid));
if (_mapIter != _catalog->_orderedCollections.end() && _mapIter->first.first == _dbName) {
@@ -86,7 +86,7 @@ CollectionCatalog::iterator::iterator(
: _mapIter(mapIter) {}
const CollectionCatalog::iterator::value_type CollectionCatalog::iterator::operator*() {
- stdx::lock_guard<stdx::mutex> lock(_catalog->_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
_repositionIfNeeded();
if (_exhausted()) {
return _nullCollection;
@@ -100,7 +100,7 @@ boost::optional<CollectionUUID> CollectionCatalog::iterator::uuid() {
}
CollectionCatalog::iterator CollectionCatalog::iterator::operator++() {
- stdx::lock_guard<stdx::mutex> lock(_catalog->_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
if (!_repositionIfNeeded()) {
_mapIter++; // If the position was not updated, increment iterator to next element.
@@ -125,7 +125,7 @@ CollectionCatalog::iterator CollectionCatalog::iterator::operator++(int) {
}
bool CollectionCatalog::iterator::operator==(const iterator& other) {
- stdx::lock_guard<stdx::mutex> lock(_catalog->_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
if (other._mapIter == _catalog->_orderedCollections.end()) {
return _uuid == boost::none;
@@ -183,7 +183,7 @@ void CollectionCatalog::setCollectionNamespace(OperationContext* opCtx,
// manager locks) are held. The purpose of this function is ensure that we write to the
// Collection's namespace string under '_catalogLock'.
invariant(coll);
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
coll->setNs(toCollection);
@@ -197,7 +197,7 @@ void CollectionCatalog::setCollectionNamespace(OperationContext* opCtx,
addResource(newRid, toCollection.ns());
opCtx->recoveryUnit()->onRollback([this, coll, fromCollection, toCollection] {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
coll->setNs(std::move(fromCollection));
_collections[fromCollection] = _collections[toCollection];
@@ -219,7 +219,7 @@ void CollectionCatalog::onCloseDatabase(OperationContext* opCtx, std::string dbN
void CollectionCatalog::onCloseCatalog(OperationContext* opCtx) {
invariant(opCtx->lockState()->isW());
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
invariant(!_shadowCatalog);
_shadowCatalog.emplace();
for (auto& entry : _catalog)
@@ -228,13 +228,13 @@ void CollectionCatalog::onCloseCatalog(OperationContext* opCtx) {
void CollectionCatalog::onOpenCatalog(OperationContext* opCtx) {
invariant(opCtx->lockState()->isW());
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
invariant(_shadowCatalog);
_shadowCatalog.reset();
}
Collection* CollectionCatalog::lookupCollectionByUUID(CollectionUUID uuid) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
return _lookupCollectionByUUID(lock, uuid);
}
@@ -244,13 +244,13 @@ Collection* CollectionCatalog::_lookupCollectionByUUID(WithLock, CollectionUUID
}
Collection* CollectionCatalog::lookupCollectionByNamespace(const NamespaceString& nss) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto it = _collections.find(nss);
return it == _collections.end() ? nullptr : it->second;
}
boost::optional<NamespaceString> CollectionCatalog::lookupNSSByUUID(CollectionUUID uuid) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto foundIt = _catalog.find(uuid);
if (foundIt != _catalog.end()) {
NamespaceString ns = foundIt->second->ns();
@@ -271,7 +271,7 @@ boost::optional<NamespaceString> CollectionCatalog::lookupNSSByUUID(CollectionUU
boost::optional<CollectionUUID> CollectionCatalog::lookupUUIDByNSS(
const NamespaceString& nss) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
auto it = _orderedCollections.lower_bound(std::make_pair(nss.db().toString(), minUuid));
@@ -312,7 +312,7 @@ bool CollectionCatalog::checkIfCollectionSatisfiable(CollectionUUID uuid,
CollectionInfoFn predicate) const {
invariant(predicate);
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto collection = _lookupCollectionByUUID(lock, uuid);
if (!collection) {
@@ -324,7 +324,7 @@ bool CollectionCatalog::checkIfCollectionSatisfiable(CollectionUUID uuid,
std::vector<CollectionUUID> CollectionCatalog::getAllCollectionUUIDsFromDb(
StringData dbName) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
auto it = _orderedCollections.lower_bound(std::make_pair(dbName.toString(), minUuid));
@@ -340,7 +340,7 @@ std::vector<NamespaceString> CollectionCatalog::getAllCollectionNamesFromDb(
OperationContext* opCtx, StringData dbName) const {
invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_S));
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
std::vector<NamespaceString> ret;
@@ -354,7 +354,7 @@ std::vector<NamespaceString> CollectionCatalog::getAllCollectionNamesFromDb(
std::vector<std::string> CollectionCatalog::getAllDbNames() const {
std::vector<std::string> ret;
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto maxUuid = UUID::parse("FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF").getValue();
auto iter = _orderedCollections.upper_bound(std::make_pair("", maxUuid));
while (iter != _orderedCollections.end()) {
@@ -366,7 +366,7 @@ std::vector<std::string> CollectionCatalog::getAllDbNames() const {
}
void CollectionCatalog::registerCollection(CollectionUUID uuid, std::unique_ptr<Collection> coll) {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
LOG(1) << "Registering collection " << coll->ns() << " with UUID " << uuid;
@@ -391,7 +391,7 @@ void CollectionCatalog::registerCollection(CollectionUUID uuid, std::unique_ptr<
}
std::unique_ptr<Collection> CollectionCatalog::deregisterCollection(CollectionUUID uuid) {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
invariant(_catalog.find(uuid) != _catalog.end());
@@ -426,7 +426,7 @@ std::unique_ptr<RecoveryUnit::Change> CollectionCatalog::makeFinishDropCollectio
}
void CollectionCatalog::deregisterAllCollections() {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
LOG(0) << "Deregistering all the collections";
for (auto& entry : _catalog) {
@@ -444,7 +444,7 @@ void CollectionCatalog::deregisterAllCollections() {
_orderedCollections.clear();
_catalog.clear();
- stdx::lock_guard<stdx::mutex> resourceLock(_resourceLock);
+ stdx::lock_guard<Latch> resourceLock(_resourceLock);
_resourceInformation.clear();
_generationNumber++;
@@ -460,7 +460,7 @@ CollectionCatalog::iterator CollectionCatalog::end() const {
boost::optional<std::string> CollectionCatalog::lookupResourceName(const ResourceId& rid) {
invariant(rid.getType() == RESOURCE_DATABASE || rid.getType() == RESOURCE_COLLECTION);
- stdx::lock_guard<stdx::mutex> lock(_resourceLock);
+ stdx::lock_guard<Latch> lock(_resourceLock);
auto search = _resourceInformation.find(rid);
if (search == _resourceInformation.end()) {
@@ -480,7 +480,7 @@ boost::optional<std::string> CollectionCatalog::lookupResourceName(const Resourc
void CollectionCatalog::removeResource(const ResourceId& rid, const std::string& entry) {
invariant(rid.getType() == RESOURCE_DATABASE || rid.getType() == RESOURCE_COLLECTION);
- stdx::lock_guard<stdx::mutex> lock(_resourceLock);
+ stdx::lock_guard<Latch> lock(_resourceLock);
auto search = _resourceInformation.find(rid);
if (search == _resourceInformation.end()) {
@@ -498,7 +498,7 @@ void CollectionCatalog::removeResource(const ResourceId& rid, const std::string&
void CollectionCatalog::addResource(const ResourceId& rid, const std::string& entry) {
invariant(rid.getType() == RESOURCE_DATABASE || rid.getType() == RESOURCE_COLLECTION);
- stdx::lock_guard<stdx::mutex> lock(_resourceLock);
+ stdx::lock_guard<Latch> lock(_resourceLock);
auto search = _resourceInformation.find(rid);
if (search == _resourceInformation.end()) {
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index e54c6141a13..66a12a92ae5 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -248,8 +248,8 @@ private:
Collection* _lookupCollectionByUUID(WithLock, CollectionUUID uuid) const;
const std::vector<CollectionUUID>& _getOrdering_inlock(const StringData& db,
- const stdx::lock_guard<stdx::mutex>&);
- mutable mongo::stdx::mutex _catalogLock;
+ const stdx::lock_guard<Latch>&);
+ mutable mongo::Mutex _catalogLock;
/**
* When present, indicates that the catalog is in closed state, and contains a map from UUID
@@ -273,7 +273,7 @@ private:
uint64_t _generationNumber;
// Protects _resourceInformation.
- mutable stdx::mutex _resourceLock;
+ mutable Mutex _resourceLock = MONGO_MAKE_LATCH("CollectionCatalog::_resourceLock");
// Mapping from ResourceId to a set of strings that contains collection and database namespaces.
std::map<ResourceId, std::set<std::string>> _resourceInformation;
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index 2720ffd09c7..56493b64610 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -253,7 +253,7 @@ Status IndexBuildsManager::commitIndexBuild(OperationContext* opCtx,
}
bool IndexBuildsManager::abortIndexBuild(const UUID& buildUUID, const std::string& reason) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto builderIt = _builders.find(buildUUID);
if (builderIt == _builders.end()) {
@@ -270,7 +270,7 @@ bool IndexBuildsManager::abortIndexBuild(const UUID& buildUUID, const std::strin
bool IndexBuildsManager::interruptIndexBuild(OperationContext* opCtx,
const UUID& buildUUID,
const std::string& reason) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto builderIt = _builders.find(buildUUID);
if (builderIt == _builders.end()) {
@@ -305,14 +305,14 @@ void IndexBuildsManager::verifyNoIndexBuilds_forTestOnly() {
}
void IndexBuildsManager::_registerIndexBuild(UUID buildUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
std::shared_ptr<MultiIndexBlock> mib = std::make_shared<MultiIndexBlock>();
invariant(_builders.insert(std::make_pair(buildUUID, mib)).second);
}
void IndexBuildsManager::_unregisterIndexBuild(const UUID& buildUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto builderIt = _builders.find(buildUUID);
invariant(builderIt != _builders.end());
@@ -320,7 +320,7 @@ void IndexBuildsManager::_unregisterIndexBuild(const UUID& buildUUID) {
}
std::shared_ptr<MultiIndexBlock> IndexBuildsManager::_getBuilder(const UUID& buildUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto builderIt = _builders.find(buildUUID);
invariant(builderIt != _builders.end());
return builderIt->second;
diff --git a/src/mongo/db/catalog/index_builds_manager.h b/src/mongo/db/catalog/index_builds_manager.h
index 21678546061..55f2fe73211 100644
--- a/src/mongo/db/catalog/index_builds_manager.h
+++ b/src/mongo/db/catalog/index_builds_manager.h
@@ -36,7 +36,7 @@
#include "mongo/db/catalog/multi_index_block.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -205,7 +205,7 @@ private:
std::shared_ptr<MultiIndexBlock> _getBuilder(const UUID& buildUUID);
// Protects the map data structures below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("IndexBuildsManager::_mutex");
// Map of index builders by build UUID. Allows access to the builders so that actions can be
// taken on and information passed to and from index builds.
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index f4d55c60880..073b4d1b8dc 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -40,7 +40,7 @@
#include "mongo/db/record_id.h"
#include "mongo/db/storage/kv/kv_prefix.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/debug_util.h"
namespace mongo {
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index 69da31c0ae2..0e4796ca6c4 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -74,7 +74,7 @@ IndexCatalogEntryImpl::IndexCatalogEntryImpl(OperationContext* const opCtx,
_isReady = _catalogIsReady(opCtx);
{
- stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
+ stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
_isMultikey.store(_catalogIsMultikey(opCtx, &_indexMultikeyPaths));
_indexTracksPathLevelMultikeyInfo = !_indexMultikeyPaths.empty();
}
@@ -148,7 +148,7 @@ bool IndexCatalogEntryImpl::isMultikey() const {
}
MultikeyPaths IndexCatalogEntryImpl::getMultikeyPaths(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
+ stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
return _indexMultikeyPaths;
}
@@ -173,7 +173,7 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
}
if (_indexTracksPathLevelMultikeyInfo) {
- stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
+ stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
invariant(multikeyPaths.size() == _indexMultikeyPaths.size());
bool newPathIsMultikey = false;
@@ -241,7 +241,7 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
_isMultikey.store(true);
if (_indexTracksPathLevelMultikeyInfo) {
- stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
+ stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
for (size_t i = 0; i < multikeyPaths.size(); ++i) {
_indexMultikeyPaths[i].insert(multikeyPaths[i].begin(), multikeyPaths[i].end());
}
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.h b/src/mongo/db/catalog/index_catalog_entry_impl.h
index df7f053537f..5ec961f65af 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.h
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.h
@@ -41,7 +41,7 @@
#include "mongo/db/record_id.h"
#include "mongo/db/storage/kv/kv_prefix.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -223,7 +223,8 @@ private:
// Controls concurrent access to '_indexMultikeyPaths'. We acquire this mutex rather than the
// RESOURCE_METADATA lock as a performance optimization so that it is cheaper to detect whether
// there is actually any path-level multikey information to update or not.
- mutable stdx::mutex _indexMultikeyPathsMutex;
+ mutable Mutex _indexMultikeyPathsMutex =
+ MONGO_MAKE_LATCH("IndexCatalogEntryImpl::_indexMultikeyPathsMutex");
// Non-empty only if '_indexTracksPathLevelMultikeyInfo' is true.
//
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index ee75f9ed64e..10c4fe6d485 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -884,18 +884,18 @@ MultiIndexBlock::State MultiIndexBlock::getState_forTest() const {
}
MultiIndexBlock::State MultiIndexBlock::_getState() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _state;
}
void MultiIndexBlock::_setState(State newState) {
invariant(State::kAborted != newState);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_state = newState;
}
void MultiIndexBlock::_setStateToAbortedIfNotCommitted(StringData reason) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (State::kCommitted == _state) {
return;
}
diff --git a/src/mongo/db/catalog/multi_index_block.h b/src/mongo/db/catalog/multi_index_block.h
index 69aa9cd4a34..df940d2121e 100644
--- a/src/mongo/db/catalog/multi_index_block.h
+++ b/src/mongo/db/catalog/multi_index_block.h
@@ -46,7 +46,7 @@
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/record_id.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/fail_point_service.h"
namespace mongo {
@@ -344,7 +344,7 @@ private:
bool _constraintsChecked = false;
// Protects member variables of this class declared below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MultiIndexBlock::_mutex");
State _state = State::kUninitialized;
std::string _abortReason;
diff --git a/src/mongo/db/catalog/util/partitioned.h b/src/mongo/db/catalog/util/partitioned.h
index c449932f653..e6966e30ce3 100644
--- a/src/mongo/db/catalog/util/partitioned.h
+++ b/src/mongo/db/catalog/util/partitioned.h
@@ -39,7 +39,7 @@
#include <boost/align/aligned_allocator.hpp>
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/with_alignment.h"
diff --git a/src/mongo/db/collection_index_builds_tracker.cpp b/src/mongo/db/collection_index_builds_tracker.cpp
index 17f62d1a644..40b4f292c27 100644
--- a/src/mongo/db/collection_index_builds_tracker.cpp
+++ b/src/mongo/db/collection_index_builds_tracker.cpp
@@ -104,8 +104,7 @@ int CollectionIndexBuildsTracker::getNumberOfIndexBuilds(WithLock) const {
return _buildStateByBuildUUID.size();
}
-void CollectionIndexBuildsTracker::waitUntilNoIndexBuildsRemain(
- stdx::unique_lock<stdx::mutex>& lk) {
+void CollectionIndexBuildsTracker::waitUntilNoIndexBuildsRemain(stdx::unique_lock<Latch>& lk) {
_noIndexBuildsRemainCondVar.wait(lk, [&] {
if (_buildStateByBuildUUID.empty()) {
return true;
diff --git a/src/mongo/db/collection_index_builds_tracker.h b/src/mongo/db/collection_index_builds_tracker.h
index 5a4ab1eb5f9..8d43dbd96bc 100644
--- a/src/mongo/db/collection_index_builds_tracker.h
+++ b/src/mongo/db/collection_index_builds_tracker.h
@@ -33,7 +33,7 @@
#include <string>
#include "mongo/db/repl_index_build_state.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/uuid.h"
@@ -96,7 +96,7 @@ public:
/**
* Returns when no index builds remain on this collection.
*/
- void waitUntilNoIndexBuildsRemain(stdx::unique_lock<stdx::mutex>& lk);
+ void waitUntilNoIndexBuildsRemain(stdx::unique_lock<Latch>& lk);
private:
// Maps of index build states on the collection, by build UUID and index name.
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index a8ac6e5c022..751ac181371 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -50,7 +50,7 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/transaction_participant.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/md5.hpp"
#include "mongo/util/net/socket_utils.h"
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 66535156467..45fde032004 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -49,7 +49,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/storage/backup_cursor_hooks.h"
#include "mongo/db/storage/storage_engine.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/background.h"
#include "mongo/util/exit.h"
@@ -95,7 +95,7 @@ public:
virtual ~FSyncCommand() {
// The FSyncLockThread is owned by the FSyncCommand and accesses FsyncCommand state. It must
// be shut down prior to FSyncCommand destruction.
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
if (_lockCount > 0) {
_lockCount = 0;
releaseFsyncLockSyncCV.notify_one();
@@ -166,7 +166,7 @@ public:
Status status = Status::OK();
{
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
threadStatus = Status::OK();
threadStarted = false;
_lockThread = std::make_unique<FSyncLockThread>(allowFsyncFailure);
@@ -199,13 +199,13 @@ public:
// Returns whether we are currently fsyncLocked. For use by callers not holding lockStateMutex.
bool fsyncLocked() {
- stdx::unique_lock<stdx::mutex> lkFsyncLocked(_fsyncLockedMutex);
+ stdx::unique_lock<Latch> lkFsyncLocked(_fsyncLockedMutex);
return _fsyncLocked;
}
// For callers not already holding 'lockStateMutex'.
int64_t getLockCount() {
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
return getLockCount_inLock();
}
@@ -215,17 +215,17 @@ public:
}
void releaseLock() {
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
releaseLock_inLock(lk);
}
- void releaseLock_inLock(stdx::unique_lock<stdx::mutex>& lk) {
+ void releaseLock_inLock(stdx::unique_lock<Latch>& lk) {
invariant(_lockCount >= 1);
_lockCount--;
if (_lockCount == 0) {
{
- stdx::unique_lock<stdx::mutex> lkFsyncLocked(_fsyncLockedMutex);
+ stdx::unique_lock<Latch> lkFsyncLocked(_fsyncLockedMutex);
_fsyncLocked = false;
}
releaseFsyncLockSyncCV.notify_one();
@@ -237,7 +237,7 @@ public:
// Allows for control of lock state change between the fsyncLock and fsyncUnlock commands and
// the FSyncLockThread that maintains the global read lock.
- stdx::mutex lockStateMutex;
+ Mutex lockStateMutex = MONGO_MAKE_LATCH("FSyncCommand::lockStateMutex");
stdx::condition_variable acquireFsyncLockSyncCV;
stdx::condition_variable releaseFsyncLockSyncCV;
@@ -248,11 +248,11 @@ public:
private:
void acquireLock() {
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
_lockCount++;
if (_lockCount == 1) {
- stdx::unique_lock<stdx::mutex> lkFsyncLocked(_fsyncLockedMutex);
+ stdx::unique_lock<Latch> lkFsyncLocked(_fsyncLockedMutex);
_fsyncLocked = true;
}
}
@@ -263,7 +263,7 @@ private:
// number is decremented to 0. May only be accessed while 'lockStateMutex' is held.
int64_t _lockCount = 0;
- stdx::mutex _fsyncLockedMutex;
+ Mutex _fsyncLockedMutex = MONGO_MAKE_LATCH("FSyncCommand::_fsyncLockedMutex");
bool _fsyncLocked = false;
} fsyncCmd;
@@ -302,7 +302,7 @@ public:
Lock::ExclusiveLock lk(opCtx->lockState(), commandMutex);
- stdx::unique_lock<stdx::mutex> stateLock(fsyncCmd.lockStateMutex);
+ stdx::unique_lock<Latch> stateLock(fsyncCmd.lockStateMutex);
auto lockCount = fsyncCmd.getLockCount_inLock();
if (lockCount == 0) {
@@ -340,7 +340,7 @@ bool FSyncLockThread::_shutdownTaskRegistered = false;
void FSyncLockThread::run() {
ThreadClient tc("fsyncLockWorker", getGlobalServiceContext());
stdx::lock_guard<SimpleMutex> lkf(filesLockedFsync);
- stdx::unique_lock<stdx::mutex> lk(fsyncCmd.lockStateMutex);
+ stdx::unique_lock<Latch> lk(fsyncCmd.lockStateMutex);
invariant(fsyncCmd.getLockCount_inLock() == 1);
@@ -357,7 +357,7 @@ void FSyncLockThread::run() {
if (!_shutdownTaskRegistered) {
_shutdownTaskRegistered = true;
registerShutdownTask([&] {
- stdx::unique_lock<stdx::mutex> stateLock(fsyncCmd.lockStateMutex);
+ stdx::unique_lock<Latch> stateLock(fsyncCmd.lockStateMutex);
if (fsyncCmd.getLockCount_inLock() > 0) {
warning() << "Interrupting fsync because the server is shutting down.";
while (fsyncCmd.getLockCount_inLock()) {
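The fsync.cpp hunks above show the idiom this commit applies throughout: the bare stdx::mutex member becomes a Mutex built with MONGO_MAKE_LATCH and a diagnostic label, every guard is re-templated on Latch, and the stdx::condition_variable members are left as they were. A minimal sketch of the resulting shape (the Counter class is illustrative, not code from this commit):

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"

    class Counter {
    public:
        void increment() {
            stdx::lock_guard<Latch> lk(_mutex);  // Latch is the lockable type behind mongo::Mutex
            ++_value;
            _cv.notify_one();
        }

        void waitForNonZero() {
            stdx::unique_lock<Latch> lk(_mutex);
            _cv.wait(lk, [&] { return _value > 0; });  // condition variables take the new lock unchanged
        }

    private:
        // The label names the latch for diagnostics such as currentOp's waitingForLatch.
        Mutex _mutex = MONGO_MAKE_LATCH("Counter::_mutex");
        stdx::condition_variable _cv;
        int _value = 0;
    };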
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 3319caa2fcf..80045f62d90 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -64,6 +64,7 @@
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/durable_catalog.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/parallel.h"
#include "mongo/s/client/shard_connection.h"
@@ -72,7 +73,6 @@
#include "mongo/s/shard_key_pattern.h"
#include "mongo/s/stale_exception.h"
#include "mongo/scripting/engine.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index 4f2306ad1b2..625b3aaee76 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -185,7 +185,7 @@ Status setLogComponentVerbosity(const BSONObj& bsonSettings) {
}
// for automationServiceDescription
-stdx::mutex autoServiceDescriptorMutex;
+Mutex autoServiceDescriptorMutex;
std::string autoServiceDescriptorValue;
} // namespace
@@ -436,7 +436,7 @@ Status LogComponentVerbosityServerParameter::setFromString(const std::string& st
void AutomationServiceDescriptorServerParameter::append(OperationContext*,
BSONObjBuilder& builder,
const std::string& name) {
- const stdx::lock_guard<stdx::mutex> lock(autoServiceDescriptorMutex);
+ const stdx::lock_guard<Latch> lock(autoServiceDescriptorMutex);
if (!autoServiceDescriptorValue.empty()) {
builder.append(name, autoServiceDescriptorValue);
}
@@ -458,7 +458,7 @@ Status AutomationServiceDescriptorServerParameter::setFromString(const std::stri
<< " must be no more than " << kMaxSize << " bytes"};
{
- const stdx::lock_guard<stdx::mutex> lock(autoServiceDescriptorMutex);
+ const stdx::lock_guard<Latch> lock(autoServiceDescriptorMutex);
autoServiceDescriptorValue = str;
}
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index ae199964060..90bec146f42 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -67,9 +67,9 @@
#include "mongo/db/ops/write_ops.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/write_ops/batched_command_response.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/util/icu.h"
#include "mongo/util/log.h"
@@ -558,7 +558,7 @@ Status writeAuthSchemaVersionIfNeeded(OperationContext* opCtx,
return status;
}
-auto getUMCMutex = ServiceContext::declareDecoration<stdx::mutex>();
+auto getUMCMutex = ServiceContext::declareDecoration<Mutex>();
class AuthzLockGuard {
AuthzLockGuard(AuthzLockGuard&) = delete;
@@ -590,7 +590,7 @@ public:
private:
OperationContext* _opCtx;
AuthorizationManager* _authzManager;
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
InvalidationMode _mode;
OID _cacheGeneration;
};
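The declareDecoration<Mutex> change above relies on Mutex being default-constructible, which is why this declaration needs no MONGO_MAKE_LATCH: the latch lives on each ServiceContext and is fetched through the decoration's call operator. A sketch of the access pattern under that assumption (the function and its body are illustrative; only declareDecoration, Mutex, and Latch appear in the diff):

    auto getMyMutex = ServiceContext::declareDecoration<Mutex>();

    void doSerializedWork(OperationContext* opCtx) {
        // Look the latch up on the owning ServiceContext, then lock it like any Mutex.
        stdx::unique_lock<Latch> lk(getMyMutex(opCtx->getServiceContext()));
        // ... work that must be serialized per service context ...
    }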
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index f90a3ace4fe..482baff8b74 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -50,7 +50,7 @@ MONGO_FAIL_POINT_DEFINE(validateCmdCollectionNotValid);
namespace {
// Protects the state below.
-stdx::mutex _validationMutex;
+Mutex _validationMutex;
// Holds the set of full `databaseName.collectionName` namespace strings in progress. Validation
// commands register themselves in this data structure so that subsequent commands on the same
@@ -140,7 +140,7 @@ public:
// Only one validation per collection can be in progress; the rest wait.
{
- stdx::unique_lock<stdx::mutex> lock(_validationMutex);
+ stdx::unique_lock<Latch> lock(_validationMutex);
try {
while (_validationsInProgress.find(nss.ns()) != _validationsInProgress.end()) {
opCtx->waitForConditionOrInterrupt(_validationNotifier, lock);
@@ -157,7 +157,7 @@ public:
}
ON_BLOCK_EXIT([&] {
- stdx::lock_guard<stdx::mutex> lock(_validationMutex);
+ stdx::lock_guard<Latch> lock(_validationMutex);
_validationsInProgress.erase(nss.ns());
_validationNotifier.notify_all();
});
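The validate.cpp hunks also show how interruptible waits pick up the new type: the lock handed to OperationContext::waitForConditionOrInterrupt is now a stdx::unique_lock<Latch>, while the wait loop itself is untouched. A condensed sketch of that pattern, with the namespace bookkeeping reduced to a plain set (names here are illustrative):

    Mutex validationMutex;                       // file-scope latch, as in the diff
    stdx::condition_variable validationNotifier;
    std::set<std::string> inProgress;            // stand-in for _validationsInProgress

    void waitForTurn(OperationContext* opCtx, const std::string& ns) {
        stdx::unique_lock<Latch> lock(validationMutex);
        while (inProgress.count(ns)) {
            // Throws if the operation is interrupted or killed while blocked.
            opCtx->waitForConditionOrInterrupt(validationNotifier, lock);
        }
        inProgress.insert(ns);
    }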
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 5daed970104..28d5017f6ae 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -41,7 +41,7 @@
#include "mongo/db/concurrency/flow_control_ticketholder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/stacktrace.h"
@@ -77,7 +77,7 @@ public:
}
static std::string nameForId(ResourceId resourceId) {
- stdx::lock_guard<stdx::mutex> lk(resourceIdFactory->labelsMutex);
+ stdx::lock_guard<Latch> lk(resourceIdFactory->labelsMutex);
return resourceIdFactory->labels.at(resourceId.getHashId());
}
@@ -93,7 +93,7 @@ public:
private:
ResourceId _newResourceIdForMutex(std::string resourceLabel) {
- stdx::lock_guard<stdx::mutex> lk(labelsMutex);
+ stdx::lock_guard<Latch> lk(labelsMutex);
invariant(nextId == labels.size());
labels.push_back(std::move(resourceLabel));
@@ -104,7 +104,7 @@ private:
std::uint64_t nextId = 0;
std::vector<std::string> labels;
- stdx::mutex labelsMutex;
+ Mutex labelsMutex = MONGO_MAKE_LATCH("ResourceIdFactory::labelsMutex");
};
ResourceIdFactory* ResourceIdFactory::resourceIdFactory;
diff --git a/src/mongo/db/concurrency/d_concurrency_bm.cpp b/src/mongo/db/concurrency/d_concurrency_bm.cpp
index 95c6771badf..a13df7a3ea4 100644
--- a/src/mongo/db/concurrency/d_concurrency_bm.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_bm.cpp
@@ -34,7 +34,7 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/lock_manager_test_help.h"
#include "mongo/db/storage/recovery_unit_noop.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -67,10 +67,10 @@ protected:
};
BENCHMARK_DEFINE_F(DConcurrencyTest, BM_StdMutex)(benchmark::State& state) {
- static stdx::mutex mtx;
+ static auto mtx = MONGO_MAKE_LATCH();
for (auto keepRunning : state) {
- stdx::unique_lock<stdx::mutex> lk(mtx);
+ stdx::unique_lock<Latch> lk(mtx);
}
}
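As BM_StdMutex shows, MONGO_MAKE_LATCH can also be invoked with no argument when a diagnostic label is not worth recording; static auto mtx = MONGO_MAKE_LATCH(); deduces the Mutex type, and the guard is still declared over Latch. Presumably such latches are simply registered without a caller-supplied name.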
diff --git a/src/mongo/db/concurrency/deferred_writer.cpp b/src/mongo/db/concurrency/deferred_writer.cpp
index 6f7c7df6ea5..4bedbe1995c 100644
--- a/src/mongo/db/concurrency/deferred_writer.cpp
+++ b/src/mongo/db/concurrency/deferred_writer.cpp
@@ -118,7 +118,7 @@ void DeferredWriter::_worker(InsertStatement stmt) {
return Status::OK();
});
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_numBytes -= stmt.doc.objsize();
@@ -166,7 +166,7 @@ bool DeferredWriter::insertDocument(BSONObj obj) {
// We can't insert documents if we haven't been started up.
invariant(_pool);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Check if we're allowed to insert this object.
if (_numBytes + obj.objsize() >= _maxNumBytes) {
diff --git a/src/mongo/db/concurrency/deferred_writer.h b/src/mongo/db/concurrency/deferred_writer.h
index d573f497851..0ac8238fa8d 100644
--- a/src/mongo/db/concurrency/deferred_writer.h
+++ b/src/mongo/db/concurrency/deferred_writer.h
@@ -32,7 +32,7 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -158,7 +158,7 @@ private:
/**
* Guards all non-const, non-thread-safe members.
*/
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DeferredWriter::_mutex");
/**
* The number of bytes currently in the in-memory buffer.
diff --git a/src/mongo/db/concurrency/flow_control_ticketholder.cpp b/src/mongo/db/concurrency/flow_control_ticketholder.cpp
index 8055a7597bd..6bb95797502 100644
--- a/src/mongo/db/concurrency/flow_control_ticketholder.cpp
+++ b/src/mongo/db/concurrency/flow_control_ticketholder.cpp
@@ -80,7 +80,7 @@ void FlowControlTicketholder::set(ServiceContext* service,
void FlowControlTicketholder::refreshTo(int numTickets) {
invariant(numTickets >= 0);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
LOG(4) << "Refreshing tickets. Before: " << _tickets << " Now: " << numTickets;
_tickets = numTickets;
_cv.notify_all();
@@ -88,7 +88,7 @@ void FlowControlTicketholder::refreshTo(int numTickets) {
void FlowControlTicketholder::getTicket(OperationContext* opCtx,
FlowControlTicketholder::CurOp* stats) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown) {
return;
}
@@ -135,7 +135,7 @@ void FlowControlTicketholder::getTicket(OperationContext* opCtx,
// Should only be called once, during shutdown.
void FlowControlTicketholder::setInShutdown() {
LOG(0) << "Stopping further Flow Control ticket acquisitions.";
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
_cv.notify_all();
}
diff --git a/src/mongo/db/concurrency/flow_control_ticketholder.h b/src/mongo/db/concurrency/flow_control_ticketholder.h
index 599779ddd15..39413477937 100644
--- a/src/mongo/db/concurrency/flow_control_ticketholder.h
+++ b/src/mongo/db/concurrency/flow_control_ticketholder.h
@@ -31,8 +31,8 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -93,7 +93,7 @@ private:
// Use an int64_t as this is serialized to bson which does not support unsigned 64-bit numbers.
AtomicWord<std::int64_t> _totalTimeAcquiringMicros;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FlowControlTicketHolder::_mutex");
stdx::condition_variable _cv;
int _tickets;
diff --git a/src/mongo/db/concurrency/lock_manager.h b/src/mongo/db/concurrency/lock_manager.h
index 50b2116d953..e8cbfd39054 100644
--- a/src/mongo/db/concurrency/lock_manager.h
+++ b/src/mongo/db/concurrency/lock_manager.h
@@ -40,8 +40,8 @@
#include "mongo/db/concurrency/lock_request_list.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/platform/compiler.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/mutex.h"
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index fd840437c89..5f5171e6129 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -220,7 +220,7 @@ void CondVarLockGrantNotification::clear() {
}
LockResult CondVarLockGrantNotification::wait(Milliseconds timeout) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return _cond.wait_for(
lock, timeout.toSystemDuration(), [this] { return _result != LOCK_INVALID; })
? _result
@@ -229,7 +229,7 @@ LockResult CondVarLockGrantNotification::wait(Milliseconds timeout) {
LockResult CondVarLockGrantNotification::wait(OperationContext* opCtx, Milliseconds timeout) {
invariant(opCtx);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (opCtx->waitForConditionOrInterruptFor(
_cond, lock, timeout, [this] { return _result != LOCK_INVALID; })) {
// Because waitForConditionOrInterruptFor evaluates the predicate before checking for
@@ -243,7 +243,7 @@ LockResult CondVarLockGrantNotification::wait(OperationContext* opCtx, Milliseco
}
void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
invariant(_result == LOCK_INVALID);
_result = result;
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 9371ba0ae13..9994e25f7be 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -75,7 +75,7 @@ private:
virtual void notify(ResourceId resId, LockResult result);
// These two go together to implement the condition variable pattern.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CondVarLockGrantNotification::_mutex");
stdx::condition_variable _cond;
// Result from the last call to notify
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index cef1fcd7449..945dbfc9bdb 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -305,12 +305,11 @@ void CurOp::reportCurrentOpForClient(OperationContext* opCtx,
CurOp::get(clientOpCtx)->reportState(infoBuilder, truncateOps);
}
- std::shared_ptr<DiagnosticInfo> diagnostic = DiagnosticInfo::Diagnostic::get(client);
- if (diagnostic && backtraceMode) {
+ if (auto diagnostic = DiagnosticInfo::get(*client)) {
BSONObjBuilder waitingForLatchBuilder(infoBuilder->subobjStart("waitingForLatch"));
waitingForLatchBuilder.append("timestamp", diagnostic->getTimestamp());
waitingForLatchBuilder.append("captureName", diagnostic->getCaptureName());
- {
+ if (backtraceMode) {
BSONArrayBuilder backtraceBuilder(waitingForLatchBuilder.subarrayStart("backtrace"));
for (const auto& frame : diagnostic->makeStackTrace().frames) {
BSONObjBuilder backtraceObj(backtraceBuilder.subobjStart());
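The curop.cpp hunk is the one behavioral change in this stretch of the diff rather than a pure type swap: the accessor moves from DiagnosticInfo::Diagnostic::get(client) to DiagnosticInfo::get(*client), the waitingForLatch timestamp and captureName are now reported whenever a DiagnosticInfo is present, and only the backtrace array stays gated on backtraceMode. Previously nothing was emitted unless backtraceMode was also set.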
diff --git a/src/mongo/db/database_index_builds_tracker.cpp b/src/mongo/db/database_index_builds_tracker.cpp
index 03097a4844a..4f7bb13a472 100644
--- a/src/mongo/db/database_index_builds_tracker.cpp
+++ b/src/mongo/db/database_index_builds_tracker.cpp
@@ -74,7 +74,7 @@ int DatabaseIndexBuildsTracker::getNumberOfIndexBuilds(WithLock) const {
return _allIndexBuilds.size();
}
-void DatabaseIndexBuildsTracker::waitUntilNoIndexBuildsRemain(stdx::unique_lock<stdx::mutex>& lk) {
+void DatabaseIndexBuildsTracker::waitUntilNoIndexBuildsRemain(stdx::unique_lock<Latch>& lk) {
_noIndexBuildsRemainCondVar.wait(lk, [&] {
if (_allIndexBuilds.empty()) {
return true;
diff --git a/src/mongo/db/database_index_builds_tracker.h b/src/mongo/db/database_index_builds_tracker.h
index b91ab826527..372f5f08210 100644
--- a/src/mongo/db/database_index_builds_tracker.h
+++ b/src/mongo/db/database_index_builds_tracker.h
@@ -33,7 +33,7 @@
#include <string>
#include "mongo/db/repl_index_build_state.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/uuid.h"
@@ -87,7 +87,7 @@ public:
/**
* Returns when no index builds remain on this database.
*/
- void waitUntilNoIndexBuildsRemain(stdx::unique_lock<stdx::mutex>& lk);
+ void waitUntilNoIndexBuildsRemain(stdx::unique_lock<Latch>& lk);
private:
// Map of index build states on the database, by build UUID.
diff --git a/src/mongo/db/default_baton.cpp b/src/mongo/db/default_baton.cpp
index 8ae455226cf..cd9332cb92e 100644
--- a/src/mongo/db/default_baton.cpp
+++ b/src/mongo/db/default_baton.cpp
@@ -61,7 +61,7 @@ void DefaultBaton::detachImpl() noexcept {
decltype(_scheduled) scheduled;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_opCtx->getBaton().get() == this);
_opCtx->setBaton(nullptr);
@@ -79,7 +79,7 @@ void DefaultBaton::detachImpl() noexcept {
}
void DefaultBaton::schedule(Task func) noexcept {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_opCtx) {
lk.unlock();
@@ -97,14 +97,14 @@ void DefaultBaton::schedule(Task func) noexcept {
}
void DefaultBaton::notify() noexcept {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_notified = true;
_cv.notify_one();
}
Waitable::TimeoutState DefaultBaton::run_until(ClockSource* clkSource,
Date_t oldDeadline) noexcept {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// We'll fulfill promises and run jobs on the way out, ensuring we don't hold any locks
const auto guard = makeGuard([&] {
diff --git a/src/mongo/db/default_baton.h b/src/mongo/db/default_baton.h
index 063b12edd07..c406741e332 100644
--- a/src/mongo/db/default_baton.h
+++ b/src/mongo/db/default_baton.h
@@ -32,8 +32,8 @@
#include <vector>
#include "mongo/db/baton.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/functional.h"
namespace mongo {
@@ -62,7 +62,7 @@ public:
private:
void detachImpl() noexcept override;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DefaultBaton::_mutex");
stdx::condition_variable _cv;
bool _notified = false;
bool _sleeping = false;
diff --git a/src/mongo/db/free_mon/free_mon_controller.cpp b/src/mongo/db/free_mon/free_mon_controller.cpp
index 057ae5ecd7a..a9de7ca4c49 100644
--- a/src/mongo/db/free_mon/free_mon_controller.cpp
+++ b/src/mongo/db/free_mon/free_mon_controller.cpp
@@ -61,7 +61,7 @@ FreeMonNetworkInterface::~FreeMonNetworkInterface() = default;
void FreeMonController::addRegistrationCollector(
std::unique_ptr<FreeMonCollectorInterface> collector) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_registrationCollectors.add(std::move(collector));
@@ -70,7 +70,7 @@ void FreeMonController::addRegistrationCollector(
void FreeMonController::addMetricsCollector(std::unique_ptr<FreeMonCollectorInterface> collector) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_metricCollectors.add(std::move(collector));
@@ -128,7 +128,7 @@ void FreeMonController::notifyOnRollback() {
void FreeMonController::_enqueue(std::shared_ptr<FreeMonMessage> msg) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kStarted);
}
@@ -139,7 +139,7 @@ void FreeMonController::start(RegistrationType registrationType,
std::vector<std::string>& tags,
Seconds gatherMetricsInterval) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
}
@@ -154,7 +154,7 @@ void FreeMonController::start(RegistrationType registrationType,
_thread = stdx::thread([this] { _processor->run(); });
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_state = State::kStarted;
@@ -170,7 +170,7 @@ void FreeMonController::stop() {
log() << "Shutting down free monitoring";
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
bool started = (_state == State::kStarted);
@@ -194,7 +194,7 @@ void FreeMonController::stop() {
void FreeMonController::turnCrankForTest(size_t countMessagesToIgnore) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kStarted);
}
@@ -205,7 +205,7 @@ void FreeMonController::turnCrankForTest(size_t countMessagesToIgnore) {
void FreeMonController::getStatus(OperationContext* opCtx, BSONObjBuilder* status) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kStarted) {
status->append("state", "disabled");
@@ -218,7 +218,7 @@ void FreeMonController::getStatus(OperationContext* opCtx, BSONObjBuilder* statu
void FreeMonController::getServerStatus(OperationContext* opCtx, BSONObjBuilder* status) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kStarted) {
status->append("state", "disabled");
diff --git a/src/mongo/db/free_mon/free_mon_controller.h b/src/mongo/db/free_mon/free_mon_controller.h
index 9307ab7570c..5c74a8a4b5f 100644
--- a/src/mongo/db/free_mon/free_mon_controller.h
+++ b/src/mongo/db/free_mon/free_mon_controller.h
@@ -191,7 +191,7 @@ private:
State _state{State::kNotStarted};
// Mutex to protect internal state
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FreeMonController::_mutex");
// Set of registration collectors
FreeMonCollectorCollection _registrationCollectors;
diff --git a/src/mongo/db/free_mon/free_mon_controller_test.cpp b/src/mongo/db/free_mon/free_mon_controller_test.cpp
index 090eed90cc9..4db1bfa3c44 100644
--- a/src/mongo/db/free_mon/free_mon_controller_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_controller_test.cpp
@@ -86,7 +86,7 @@ public:
builder.append("mock", "some data");
{
- stdx::lock_guard<stdx::mutex> lck(_mutex);
+ stdx::lock_guard<Latch> lck(_mutex);
++_counter;
@@ -105,12 +105,12 @@ public:
}
std::uint32_t count() {
- stdx::lock_guard<stdx::mutex> lck(_mutex);
+ stdx::lock_guard<Latch> lck(_mutex);
return _counter;
}
void wait() {
- stdx::unique_lock<stdx::mutex> lck(_mutex);
+ stdx::unique_lock<Latch> lck(_mutex);
while (_counter < _wait) {
_condvar.wait(lck);
}
@@ -130,7 +130,7 @@ private:
std::uint32_t _counter{0};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FreeMonMetricsCollectorMock::_mutex");
stdx::condition_variable _condvar;
std::uint32_t _wait{0};
};
@@ -158,7 +158,7 @@ public:
* Set the count of events to wait for.
*/
void reset(uint32_t count) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
ASSERT_EQ(_count, 0UL);
ASSERT_GT(count, 0UL);
@@ -170,7 +170,7 @@ public:
* Set the payload and signal waiter.
*/
void set(T payload) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_count > 0) {
--_count;
@@ -187,7 +187,7 @@ public:
* Returns boost::none on timeout.
*/
boost::optional<T> wait_for(Milliseconds duration) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (!_condvar.wait_for(
lock, duration.toSystemDuration(), [this]() { return _count == 0; })) {
@@ -202,7 +202,7 @@ private:
stdx::condition_variable _condvar;
// Lock for condition variable and to protect state
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CountdownLatchResult::_mutex");
// Count to wait for
uint32_t _count;
@@ -309,7 +309,7 @@ public:
auto cdr = req.getMetrics();
{
- stdx::lock_guard<stdx::mutex> lock(_metricsLock);
+ stdx::lock_guard<Latch> lock(_metricsLock);
auto metrics = decompressMetrics(cdr);
_lastMetrics = metrics;
_countdownMetrics.set(metrics);
@@ -354,7 +354,7 @@ public:
}
BSONArray getLastMetrics() {
- stdx::lock_guard<stdx::mutex> lock(_metricsLock);
+ stdx::lock_guard<Latch> lock(_metricsLock);
return _lastMetrics;
}
@@ -365,7 +365,7 @@ private:
executor::ThreadPoolTaskExecutor* _threadPool;
- stdx::mutex _metricsLock;
+ Mutex _metricsLock = MONGO_MAKE_LATCH("FreeMonNetworkInterfaceMock::_metricsLock");
BSONArray _lastMetrics;
Options _options;
diff --git a/src/mongo/db/free_mon/free_mon_message.h b/src/mongo/db/free_mon/free_mon_message.h
index 71a34dd84b4..11fab7c8501 100644
--- a/src/mongo/db/free_mon/free_mon_message.h
+++ b/src/mongo/db/free_mon/free_mon_message.h
@@ -33,8 +33,8 @@
#include <vector>
#include "mongo/db/free_mon/free_mon_protocol_gen.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/duration.h"
#include "mongo/util/time_support.h"
@@ -292,7 +292,7 @@ public:
* Set Status and signal waiter.
*/
void set(Status status) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(!_set);
if (!_set) {
@@ -308,7 +308,7 @@ public:
* Returns boost::none on timeout.
*/
boost::optional<Status> wait_for(Milliseconds duration) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (!_condvar.wait_for(lock, duration.toSystemDuration(), [this]() { return _set; })) {
return {};
@@ -322,7 +322,7 @@ private:
stdx::condition_variable _condvar;
// Lock for condition variable and to protect state
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WaitableResult::_mutex");
// Indicates whether _status has been set
bool _set{false};
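WaitableResult::wait_for above is the timed-wait form of the same idiom: the condition variable's wait_for takes the Latch-typed unique_lock, a system duration obtained via toSystemDuration(), and a predicate, and returns false on timeout so the caller can map that to boost::none. A condensed sketch of the shape (the Event class is illustrative):

    class Event {
    public:
        void set() {
            stdx::lock_guard<Latch> lock(_mutex);
            _set = true;
            _condvar.notify_one();
        }

        // Returns false if set() has not run within 'duration'.
        bool waitFor(Milliseconds duration) {
            stdx::unique_lock<Latch> lock(_mutex);
            return _condvar.wait_for(
                lock, duration.toSystemDuration(), [this] { return _set; });
        }

    private:
        Mutex _mutex = MONGO_MAKE_LATCH("Event::_mutex");
        stdx::condition_variable _condvar;
        bool _set = false;
    };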
diff --git a/src/mongo/db/free_mon/free_mon_processor.h b/src/mongo/db/free_mon/free_mon_processor.h
index 11584595147..ab519bfb84d 100644
--- a/src/mongo/db/free_mon/free_mon_processor.h
+++ b/src/mongo/db/free_mon/free_mon_processor.h
@@ -235,7 +235,7 @@ public:
* Reset the countdown latch to wait for N events.
*/
void reset(uint32_t count) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
dassert(_count == 0);
dassert(count > 0);
_count = count;
@@ -245,7 +245,7 @@ public:
* Count down an event.
*/
void countDown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_count > 0) {
--_count;
@@ -259,13 +259,13 @@ public:
* Wait until the N events specified in reset have occurred.
*/
void wait() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condvar.wait(lock, [&] { return _count == 0; });
}
private:
// Mutex to protect the count and cond var
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FreeMonCountdownLatch::_mutex");
// cond var to signal and wait on
stdx::condition_variable _condvar;
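The reset/countDown/wait trio above forms a small countdown latch over the new Mutex. A hedged usage sketch — FreeMonCountdownLatch is the type name implied by the MONGO_MAKE_LATCH label, and scheduleWork is an illustrative stand-in for whatever runs the events:

    FreeMonCountdownLatch latch;
    latch.reset(3);  // arm the latch: wait() blocks until three countDown() calls
    for (int i = 0; i < 3; ++i) {
        scheduleWork([&latch] { latch.countDown(); });  // each event decrements _count
    }
    latch.wait();  // returns once _count reaches zero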
diff --git a/src/mongo/db/free_mon/free_mon_queue.cpp b/src/mongo/db/free_mon/free_mon_queue.cpp
index d2cc1115ef5..56b01eade93 100644
--- a/src/mongo/db/free_mon/free_mon_queue.cpp
+++ b/src/mongo/db/free_mon/free_mon_queue.cpp
@@ -74,7 +74,7 @@ FreeMonMessage::~FreeMonMessage() {}
void FreeMonMessageQueue::enqueue(std::shared_ptr<FreeMonMessage> msg) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// If we were stopped, drop messages
if (_stop) {
@@ -98,7 +98,7 @@ void FreeMonMessageQueue::enqueue(std::shared_ptr<FreeMonMessage> msg) {
boost::optional<std::shared_ptr<FreeMonMessage>> FreeMonMessageQueue::dequeue(
ClockSource* clockSource) {
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_stop) {
return {};
}
@@ -188,7 +188,7 @@ boost::optional<std::shared_ptr<FreeMonMessage>> FreeMonMessageQueue::dequeue(
void FreeMonMessageQueue::stop() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// We can be stopped twice in some situations:
// 1. Stop on unexpected error
@@ -204,7 +204,7 @@ void FreeMonMessageQueue::turnCrankForTest(size_t countMessagesToIgnore) {
invariant(_useCrank);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_waitable = std::make_unique<WaitableResult>();
diff --git a/src/mongo/db/free_mon/free_mon_queue.h b/src/mongo/db/free_mon/free_mon_queue.h
index 18be1b7a330..6e7bb85dcbf 100644
--- a/src/mongo/db/free_mon/free_mon_queue.h
+++ b/src/mongo/db/free_mon/free_mon_queue.h
@@ -133,7 +133,7 @@ private:
stdx::condition_variable _condvar;
// Lock for condition variable and to protect state
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FreeMonMessageQueue::_mutex");
// Indicates whether queue has been stopped.
bool _stop{false};
diff --git a/src/mongo/db/ftdc/controller.cpp b/src/mongo/db/ftdc/controller.cpp
index db944bae91d..11f80503acd 100644
--- a/src/mongo/db/ftdc/controller.cpp
+++ b/src/mongo/db/ftdc/controller.cpp
@@ -39,8 +39,8 @@
#include "mongo/db/ftdc/collector.h"
#include "mongo/db/ftdc/util.h"
#include "mongo/db/jsobj.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/idle_thread_block.h"
#include "mongo/util/exit.h"
@@ -50,7 +50,7 @@
namespace mongo {
Status FTDCController::setEnabled(bool enabled) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_path.empty()) {
return Status(ErrorCodes::FTDCPathNotSet,
@@ -65,37 +65,37 @@ Status FTDCController::setEnabled(bool enabled) {
}
void FTDCController::setPeriod(Milliseconds millis) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.period = millis;
_condvar.notify_one();
}
void FTDCController::setMaxDirectorySizeBytes(std::uint64_t size) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.maxDirectorySizeBytes = size;
_condvar.notify_one();
}
void FTDCController::setMaxFileSizeBytes(std::uint64_t size) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.maxFileSizeBytes = size;
_condvar.notify_one();
}
void FTDCController::setMaxSamplesPerArchiveMetricChunk(size_t size) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.maxSamplesPerArchiveMetricChunk = size;
_condvar.notify_one();
}
void FTDCController::setMaxSamplesPerInterimMetricChunk(size_t size) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.maxSamplesPerInterimMetricChunk = size;
_condvar.notify_one();
}
Status FTDCController::setDirectory(const boost::filesystem::path& path) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (!_path.empty()) {
return Status(ErrorCodes::FTDCPathAlreadySet,
@@ -113,7 +113,7 @@ Status FTDCController::setDirectory(const boost::filesystem::path& path) {
void FTDCController::addPeriodicCollector(std::unique_ptr<FTDCCollectorInterface> collector) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_periodicCollectors.add(std::move(collector));
@@ -122,7 +122,7 @@ void FTDCController::addPeriodicCollector(std::unique_ptr<FTDCCollectorInterface
void FTDCController::addOnRotateCollector(std::unique_ptr<FTDCCollectorInterface> collector) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_rotateCollectors.add(std::move(collector));
@@ -131,7 +131,7 @@ void FTDCController::addOnRotateCollector(std::unique_ptr<FTDCCollectorInterface
BSONObj FTDCController::getMostRecentPeriodicDocument() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _mostRecentPeriodicDocument.getOwned();
}
}
@@ -144,7 +144,7 @@ void FTDCController::start() {
_thread = stdx::thread([this] { doLoop(); });
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_state = State::kStarted;
@@ -155,7 +155,7 @@ void FTDCController::stop() {
log() << "Shutting down full-time diagnostic data capture";
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
bool started = (_state == State::kStarted);
@@ -189,7 +189,7 @@ void FTDCController::doLoop() {
try {
// Update config
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_config = _configTemp;
}
@@ -206,7 +206,7 @@ void FTDCController::doLoop() {
// Wait for the next run or signal to shutdown
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
MONGO_IDLE_THREAD_BLOCK;
// We ignore spurious wakeups by just doing an iteration of the loop
@@ -252,7 +252,7 @@ void FTDCController::doLoop() {
// Store a reference to the most recent document from the periodic collectors
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_mostRecentPeriodicDocument = std::get<0>(collectSample);
}
}
diff --git a/src/mongo/db/ftdc/controller.h b/src/mongo/db/ftdc/controller.h
index 5d1f2f5487a..949117cbc79 100644
--- a/src/mongo/db/ftdc/controller.h
+++ b/src/mongo/db/ftdc/controller.h
@@ -37,8 +37,8 @@
#include "mongo/db/ftdc/config.h"
#include "mongo/db/ftdc/file_manager.h"
#include "mongo/db/jsobj.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
namespace mongo {
@@ -187,7 +187,7 @@ private:
boost::filesystem::path _path;
// Mutex to protect the condvar, configuration changes, and most recent periodic document.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FTDCController::_mutex");
stdx::condition_variable _condvar;
// Config settings that are used by controller, file manager, and all other classes.
diff --git a/src/mongo/db/ftdc/controller_test.cpp b/src/mongo/db/ftdc/controller_test.cpp
index 43872f1ccd5..6788f66881b 100644
--- a/src/mongo/db/ftdc/controller_test.cpp
+++ b/src/mongo/db/ftdc/controller_test.cpp
@@ -107,7 +107,7 @@ public:
}
void wait() {
- stdx::unique_lock<stdx::mutex> lck(_mutex);
+ stdx::unique_lock<Latch> lck(_mutex);
while (_counter < _wait) {
_condvar.wait(lck);
}
@@ -133,7 +133,7 @@ private:
std::vector<BSONObj> _docs;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FTDCMetricsCollectorMockTee::_mutex");
stdx::condition_variable _condvar;
std::uint32_t _wait{0};
};
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index 048163d6c6e..e8b88e91d1f 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -388,7 +388,7 @@ bool IndexBuildInterceptor::areAllWritesApplied(OperationContext* opCtx) const {
}
boost::optional<MultikeyPaths> IndexBuildInterceptor::getMultikeyPaths() const {
- stdx::unique_lock<stdx::mutex> lk(_multikeyPathMutex);
+ stdx::unique_lock<Latch> lk(_multikeyPathMutex);
return _multikeyPaths;
}
@@ -414,7 +414,7 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
// SERVER-39705: It's worth noting that a document may not generate any keys, but be
// described as being multikey. This step must be done to maintain parity with `validate`s
// expectations.
- stdx::unique_lock<stdx::mutex> lk(_multikeyPathMutex);
+ stdx::unique_lock<Latch> lk(_multikeyPathMutex);
if (_multikeyPaths) {
MultikeyPathTracker::mergeMultikeyPaths(&_multikeyPaths.get(), multikeyPaths);
} else {
diff --git a/src/mongo/db/index/index_build_interceptor.h b/src/mongo/db/index/index_build_interceptor.h
index 97dca244576..01c9ba40326 100644
--- a/src/mongo/db/index/index_build_interceptor.h
+++ b/src/mongo/db/index/index_build_interceptor.h
@@ -167,7 +167,8 @@ private:
// shared resource.
std::shared_ptr<AtomicWord<long long>> _sideWritesCounter;
- mutable stdx::mutex _multikeyPathMutex;
+ mutable Mutex _multikeyPathMutex =
+ MONGO_MAKE_LATCH("IndexBuildInterceptor::_multikeyPathMutex");
boost::optional<MultikeyPaths> _multikeyPaths;
};
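Marking the latch mutable, as _multikeyPathMutex is above, keeps const readers such as getMultikeyPaths() lockable. A minimal sketch of that pattern (the PathTracker class is illustrative):

    #include <boost/optional.hpp>

    #include "mongo/platform/mutex.h"

    class PathTracker {
    public:
        // Const reader: locking compiles because the latch is declared mutable.
        boost::optional<std::string> current() const {
            stdx::unique_lock<Latch> lk(_mutex);
            return _current;
        }

    private:
        mutable Mutex _mutex = MONGO_MAKE_LATCH("PathTracker::_mutex");
        boost::optional<std::string> _current;
    };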
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index d4e6da3ba2b..92127b4253a 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -240,7 +240,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::startIndexRe
/*commitQuorum=*/boost::none);
Status status = [&]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _registerIndexBuild(lk, replIndexBuildState);
}();
if (!status.isOK()) {
@@ -276,7 +276,7 @@ Future<void> IndexBuildsCoordinator::joinIndexBuilds(const NamespaceString& nss,
}
void IndexBuildsCoordinator::waitForAllIndexBuildsToStopForShutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// All index builds should have been signaled to stop via the ServiceContext.
@@ -291,7 +291,7 @@ void IndexBuildsCoordinator::waitForAllIndexBuildsToStopForShutdown() {
void IndexBuildsCoordinator::abortCollectionIndexBuilds(const UUID& collectionUUID,
const std::string& reason) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Ensure the caller correctly stopped any new index builds on the collection.
auto it = _disallowedCollections.find(collectionUUID);
@@ -311,7 +311,7 @@ void IndexBuildsCoordinator::abortCollectionIndexBuilds(const UUID& collectionUU
}
void IndexBuildsCoordinator::abortDatabaseIndexBuilds(StringData db, const std::string& reason) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Ensure the caller correctly stopped any new index builds on the database.
auto it = _disallowedDbs.find(db);
@@ -343,7 +343,7 @@ void IndexBuildsCoordinator::recoverIndexBuilds() {
}
int IndexBuildsCoordinator::numInProgForDb(StringData db) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto dbIndexBuildsIt = _databaseIndexBuilds.find(db);
if (dbIndexBuildsIt == _databaseIndexBuilds.end()) {
@@ -353,7 +353,7 @@ int IndexBuildsCoordinator::numInProgForDb(StringData db) const {
}
void IndexBuildsCoordinator::dump(std::ostream& ss) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_collectionIndexBuilds.size()) {
ss << "\n<b>Background Jobs in Progress</b>\n";
@@ -370,17 +370,17 @@ void IndexBuildsCoordinator::dump(std::ostream& ss) const {
}
bool IndexBuildsCoordinator::inProgForCollection(const UUID& collectionUUID) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _collectionIndexBuilds.find(collectionUUID) != _collectionIndexBuilds.end();
}
bool IndexBuildsCoordinator::inProgForDb(StringData db) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _databaseIndexBuilds.find(db) != _databaseIndexBuilds.end();
}
void IndexBuildsCoordinator::assertNoIndexBuildInProgress() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
uassert(ErrorCodes::BackgroundOperationInProgressForDatabase,
str::stream() << "cannot perform operation: there are currently "
<< _allIndexBuilds.size() << " index builds running.",
@@ -406,7 +406,7 @@ void IndexBuildsCoordinator::assertNoBgOpInProgForDb(StringData db) const {
void IndexBuildsCoordinator::awaitNoIndexBuildInProgressForCollection(
const UUID& collectionUUID) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto collIndexBuildsIt = _collectionIndexBuilds.find(collectionUUID);
if (collIndexBuildsIt == _collectionIndexBuilds.end()) {
@@ -420,7 +420,7 @@ void IndexBuildsCoordinator::awaitNoIndexBuildInProgressForCollection(
}
void IndexBuildsCoordinator::awaitNoBgOpInProgForDb(StringData db) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto dbIndexBuildsIt = _databaseIndexBuilds.find(db);
if (dbIndexBuildsIt == _databaseIndexBuilds.end()) {
@@ -438,7 +438,7 @@ void IndexBuildsCoordinator::onReplicaSetReconfig() {
}
void IndexBuildsCoordinator::sleepIndexBuilds_forTestOnly(bool sleep) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sleepForTest = sleep;
}
@@ -576,7 +576,7 @@ IndexBuildsCoordinator::_registerAndSetUpIndexBuild(
// Lock from when we ascertain what indexes to build through to when the build is registered
// on the Coordinator and durably set up in the catalog. This serializes setting up an
// index build so that no attempts are made to register the same build twice.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
std::vector<BSONObj> filteredSpecs;
try {
@@ -694,7 +694,7 @@ void IndexBuildsCoordinator::_runIndexBuild(OperationContext* opCtx,
const UUID& buildUUID,
const IndexBuildOptions& indexBuildOptions) noexcept {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_sleepForTest) {
lk.unlock();
sleepmillis(100);
@@ -703,7 +703,7 @@ void IndexBuildsCoordinator::_runIndexBuild(OperationContext* opCtx,
}
auto replState = [&] {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _allIndexBuilds.find(buildUUID);
invariant(it != _allIndexBuilds.end());
return it->second;
@@ -735,7 +735,7 @@ void IndexBuildsCoordinator::_runIndexBuild(OperationContext* opCtx,
// Ensure the index build is unregistered from the Coordinator and the Promise is set with
// the build's result so that callers are notified of the outcome.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_unregisterIndexBuild(lk, replState);
@@ -1040,7 +1040,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
invariant(opCtx->lockState()->isW());
auto replState = [&] {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _allIndexBuilds.find(buildUUID);
invariant(it != _allIndexBuilds.end());
return it->second;
@@ -1102,7 +1102,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
invariant(indexCatalogStats.numIndexesBefore == indexCatalogStats.numIndexesAfter);
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_unregisterIndexBuild(lk, replState);
}
@@ -1113,7 +1113,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
}
void IndexBuildsCoordinator::_stopIndexBuildsOnDatabase(StringData dbName) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _disallowedDbs.find(dbName);
if (it != _disallowedDbs.end()) {
@@ -1124,7 +1124,7 @@ void IndexBuildsCoordinator::_stopIndexBuildsOnDatabase(StringData dbName) {
}
void IndexBuildsCoordinator::_stopIndexBuildsOnCollection(const UUID& collectionUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _disallowedCollections.find(collectionUUID);
if (it != _disallowedCollections.end()) {
@@ -1135,7 +1135,7 @@ void IndexBuildsCoordinator::_stopIndexBuildsOnCollection(const UUID& collection
}
void IndexBuildsCoordinator::_allowIndexBuildsOnDatabase(StringData dbName) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _disallowedDbs.find(dbName);
invariant(it != _disallowedDbs.end());
@@ -1146,7 +1146,7 @@ void IndexBuildsCoordinator::_allowIndexBuildsOnDatabase(StringData dbName) {
}
void IndexBuildsCoordinator::_allowIndexBuildsOnCollection(const UUID& collectionUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _disallowedCollections.find(collectionUUID);
invariant(it != _disallowedCollections.end());
diff --git a/src/mongo/db/index_builds_coordinator.h b/src/mongo/db/index_builds_coordinator.h
index 7ab40ef1e85..19bf083689a 100644
--- a/src/mongo/db/index_builds_coordinator.h
+++ b/src/mongo/db/index_builds_coordinator.h
@@ -43,8 +43,8 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl_index_build_state.h"
#include "mongo/db/storage/durable_catalog.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/future.h"
@@ -414,7 +414,7 @@ protected:
const UUID& buildUUID) noexcept;
// Protects the below state.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("IndexBuildsCoordinator::_mutex");
// New index builds are not allowed on a collection or database if the collection or database is
// in either of these maps. These are used when concurrent operations need to abort index builds
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 31a9859b38c..5a2f3686c81 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -111,7 +111,7 @@ IndexBuildsCoordinatorMongod::startIndexBuild(OperationContext* opCtx,
}
auto replState = [&]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _allIndexBuilds.find(buildUUID);
invariant(it != _allIndexBuilds.end());
return it->second;
@@ -172,7 +172,7 @@ IndexBuildsCoordinatorMongod::startIndexBuild(OperationContext* opCtx,
](auto status) noexcept {
// Clean up the index build if we failed to schedule it.
if (!status.isOK()) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Unregister the index build before setting the promises,
// so callers do not see the build again.
@@ -249,7 +249,7 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
UUID collectionUUID = collection->uuid();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto collectionIt = _collectionIndexBuilds.find(collectionUUID);
if (collectionIt == _collectionIndexBuilds.end()) {
return Status(ErrorCodes::IndexNotFound,
diff --git a/src/mongo/db/keys_collection_cache.cpp b/src/mongo/db/keys_collection_cache.cpp
index c97697aea41..0e57d6b091a 100644
--- a/src/mongo/db/keys_collection_cache.cpp
+++ b/src/mongo/db/keys_collection_cache.cpp
@@ -47,7 +47,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::refresh(OperationContext
decltype(_cache)::size_type originalSize = 0;
{
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
auto iter = _cache.crbegin();
if (iter != _cache.crend()) {
newerThanThis = iter->second.getExpiresAt();
@@ -73,7 +73,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::refresh(OperationContext
auto& newKeys = refreshStatus.getValue();
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
if (originalSize > _cache.size()) {
// _cache was cleared while we were getting the new keys; just return the newest key without
// touching the _cache so the next refresh will populate it properly.
@@ -96,7 +96,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::refresh(OperationContext
StatusWith<KeysCollectionDocument> KeysCollectionCache::getKeyById(long long keyId,
const LogicalTime& forThisTime) {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
for (auto iter = _cache.lower_bound(forThisTime); iter != _cache.cend(); ++iter) {
if (iter->second.getKeyId() == keyId) {
@@ -111,7 +111,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::getKeyById(long long key
}
StatusWith<KeysCollectionDocument> KeysCollectionCache::getKey(const LogicalTime& forThisTime) {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
auto iter = _cache.upper_bound(forThisTime);
@@ -126,7 +126,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::getKey(const LogicalTime
void KeysCollectionCache::resetCache() {
// Keys read with a non-majority readConcern level can be rolled back.
if (!_client->supportsMajorityReads()) {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
_cache.clear();
}
}
diff --git a/src/mongo/db/keys_collection_cache.h b/src/mongo/db/keys_collection_cache.h
index 28d72892277..61989d6ae5b 100644
--- a/src/mongo/db/keys_collection_cache.h
+++ b/src/mongo/db/keys_collection_cache.h
@@ -34,7 +34,7 @@
#include "mongo/base/status_with.h"
#include "mongo/db/keys_collection_document.h"
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -68,7 +68,7 @@ private:
const std::string _purpose;
KeysCollectionClient* const _client;
- stdx::mutex _cacheMutex;
+ Mutex _cacheMutex = MONGO_MAKE_LATCH("KeysCollectionCache::_cacheMutex");
std::map<LogicalTime, KeysCollectionDocument> _cache; // expiresAt -> KeysDocument
};
diff --git a/src/mongo/db/keys_collection_manager.cpp b/src/mongo/db/keys_collection_manager.cpp
index bbad4f450c6..155c52b163c 100644
--- a/src/mongo/db/keys_collection_manager.cpp
+++ b/src/mongo/db/keys_collection_manager.cpp
@@ -193,7 +193,7 @@ void KeysCollectionManager::clearCache() {
void KeysCollectionManager::PeriodicRunner::refreshNow(OperationContext* opCtx) {
auto refreshRequest = [this]() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_inShutdown) {
uasserted(ErrorCodes::ShutdownInProgress,
@@ -227,7 +227,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
unsigned errorCount = 0;
std::shared_ptr<RefreshFunc> doRefresh;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_inShutdown) {
break;
@@ -250,7 +250,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
auto currentTime = LogicalClock::get(service)->getClusterTime();
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_hasSeenKeys = true;
}
@@ -269,7 +269,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
nextWakeup = std::min(nextWakeup, Milliseconds(data["overrideMS"].numberInt()));
});
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_refreshRequest) {
if (!hasRefreshRequestInitially) {
@@ -297,7 +297,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
}
}
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_refreshRequest) {
_refreshRequest->set();
_refreshRequest.reset();
@@ -305,7 +305,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
}
void KeysCollectionManager::PeriodicRunner::setFunc(RefreshFunc newRefreshStrategy) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_doRefresh = std::make_shared<RefreshFunc>(std::move(newRefreshStrategy));
_refreshNeededCV.notify_all();
}
@@ -318,7 +318,7 @@ void KeysCollectionManager::PeriodicRunner::switchFunc(OperationContext* opCtx,
void KeysCollectionManager::PeriodicRunner::start(ServiceContext* service,
const std::string& threadName,
Milliseconds refreshInterval) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(!_backgroundThread.joinable());
invariant(!_inShutdown);
@@ -329,7 +329,7 @@ void KeysCollectionManager::PeriodicRunner::start(ServiceContext* service,
void KeysCollectionManager::PeriodicRunner::stop() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (!_backgroundThread.joinable()) {
return;
}
@@ -343,7 +343,7 @@ void KeysCollectionManager::PeriodicRunner::stop() {
}
bool KeysCollectionManager::PeriodicRunner::hasSeenKeys() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _hasSeenKeys;
}
diff --git a/src/mongo/db/keys_collection_manager.h b/src/mongo/db/keys_collection_manager.h
index c220e1ba990..1131b7c3612 100644
--- a/src/mongo/db/keys_collection_manager.h
+++ b/src/mongo/db/keys_collection_manager.h
@@ -37,7 +37,7 @@
#include "mongo/db/keys_collection_cache.h"
#include "mongo/db/keys_collection_document.h"
#include "mongo/db/keys_collection_manager_gen.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/duration.h"
@@ -169,7 +169,8 @@ private:
std::string threadName,
Milliseconds refreshInterval);
- stdx::mutex _mutex; // protects all the member variables below.
+ // protects all the member variables below.
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicRunner::_mutex");
std::shared_ptr<Notification<void>> _refreshRequest;
stdx::condition_variable _refreshNeededCV;
diff --git a/src/mongo/db/logical_clock.cpp b/src/mongo/db/logical_clock.cpp
index 415566094d2..5cc982465d9 100644
--- a/src/mongo/db/logical_clock.cpp
+++ b/src/mongo/db/logical_clock.cpp
@@ -76,12 +76,12 @@ void LogicalClock::set(ServiceContext* service, std::unique_ptr<LogicalClock> cl
LogicalClock::LogicalClock(ServiceContext* service) : _service(service) {}
LogicalTime LogicalClock::getClusterTime() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _clusterTime;
}
Status LogicalClock::advanceClusterTime(const LogicalTime newTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto rateLimitStatus = _passesRateLimiter_inlock(newTime);
if (!rateLimitStatus.isOK()) {
@@ -99,7 +99,7 @@ LogicalTime LogicalClock::reserveTicks(uint64_t nTicks) {
invariant(nTicks > 0 && nTicks <= kMaxSignedInt);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
LogicalTime clusterTime = _clusterTime;
@@ -142,7 +142,7 @@ LogicalTime LogicalClock::reserveTicks(uint64_t nTicks) {
}
void LogicalClock::setClusterTimeFromTrustedSource(LogicalTime newTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Rate limit checks are skipped here so a server with no activity for longer than
// maxAcceptableLogicalClockDriftSecs seconds can still have its cluster time initialized.
@@ -177,12 +177,12 @@ Status LogicalClock::_passesRateLimiter_inlock(LogicalTime newTime) {
}
bool LogicalClock::isEnabled() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isEnabled;
}
void LogicalClock::disable() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_isEnabled = false;
}
diff --git a/src/mongo/db/logical_clock.h b/src/mongo/db/logical_clock.h
index c6cebe983d8..28191be87f6 100644
--- a/src/mongo/db/logical_clock.h
+++ b/src/mongo/db/logical_clock.h
@@ -30,7 +30,7 @@
#pragma once
#include "mongo/db/logical_time.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
class ServiceContext;
@@ -107,7 +107,7 @@ private:
ServiceContext* const _service;
// The mutex protects _clusterTime and _isEnabled.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("LogicalClock::_mutex");
LogicalTime _clusterTime;
bool _isEnabled{true};
};
diff --git a/src/mongo/db/logical_session_cache_impl.cpp b/src/mongo/db/logical_session_cache_impl.cpp
index 8afd9f6889f..17b136f566a 100644
--- a/src/mongo/db/logical_session_cache_impl.cpp
+++ b/src/mongo/db/logical_session_cache_impl.cpp
@@ -116,7 +116,7 @@ Status LogicalSessionCacheImpl::reapNow(Client* client) {
}
size_t LogicalSessionCacheImpl::size() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _activeSessions.size();
}
@@ -140,7 +140,7 @@ void LogicalSessionCacheImpl::_periodicReap(Client* client) {
Status LogicalSessionCacheImpl::_reap(Client* client) {
// Take the lock to update some stats.
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Clear the last set of stats for our new run.
_stats.setLastTransactionReaperJobDurationMillis(0);
@@ -187,7 +187,7 @@ Status LogicalSessionCacheImpl::_reap(Client* client) {
Minutes(gTransactionRecordMinimumLifetimeMinutes));
} catch (const DBException& ex) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto millis = _service->now() - _stats.getLastTransactionReaperJobTimestamp();
_stats.setLastTransactionReaperJobDurationMillis(millis.count());
}
@@ -196,7 +196,7 @@ Status LogicalSessionCacheImpl::_reap(Client* client) {
}
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto millis = _service->now() - _stats.getLastTransactionReaperJobTimestamp();
_stats.setLastTransactionReaperJobDurationMillis(millis.count());
_stats.setLastTransactionReaperJobEntriesCleanedUp(numReaped);
@@ -208,7 +208,7 @@ Status LogicalSessionCacheImpl::_reap(Client* client) {
void LogicalSessionCacheImpl::_refresh(Client* client) {
// Stats for serverStatus:
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Clear the refresh-related stats with the beginning of our run.
_stats.setLastSessionsCollectionJobDurationMillis(0);
@@ -223,7 +223,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
// This will finish timing _refresh for our stats no matter when we return.
const auto timeRefreshJob = makeGuard([this] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto millis = _service->now() - _stats.getLastSessionsCollectionJobTimestamp();
_stats.setLastSessionsCollectionJobDurationMillis(millis.count());
});
@@ -255,7 +255,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
{
using std::swap;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
swap(explicitlyEndingSessions, _endingSessions);
swap(activeSessions, _activeSessions);
}
@@ -264,7 +264,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
// swapped out of LogicalSessionCache, and merges in any records that had been added since we
// swapped them out.
auto backSwap = [this](auto& member, auto& temp) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
using std::swap;
swap(member, temp);
for (const auto& it : temp) {
@@ -300,7 +300,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
uassertStatusOK(_sessionsColl->refreshSessions(opCtx, activeSessionRecords));
activeSessionsBackSwapper.dismiss();
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stats.setLastSessionsCollectionJobEntriesRefreshed(activeSessionRecords.size());
}
@@ -308,7 +308,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
uassertStatusOK(_sessionsColl->removeRecords(opCtx, explicitlyEndingSessions));
explicitlyEndingBackSwaper.dismiss();
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stats.setLastSessionsCollectionJobEntriesEnded(explicitlyEndingSessions.size());
}
@@ -321,7 +321,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
// Exclude sessions added to _activeSessions from openCursorSessions to avoid a race between
// killing cursors on the removed sessions and creating sessions.
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& it : _activeSessions) {
auto newSessionIt = openCursorSessions.find(it.first);
@@ -351,18 +351,18 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
SessionKiller::Matcher matcher(std::move(patterns));
auto killRes = _service->killCursorsWithMatchingSessions(opCtx, std::move(matcher));
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stats.setLastSessionsCollectionJobCursorsClosed(killRes.second);
}
}
void LogicalSessionCacheImpl::endSessions(const LogicalSessionIdSet& sessions) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_endingSessions.insert(begin(sessions), end(sessions));
}
LogicalSessionCacheStats LogicalSessionCacheImpl::getStats() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stats.setActiveSessionsCount(_activeSessions.size());
return _stats;
}
@@ -380,7 +380,7 @@ Status LogicalSessionCacheImpl::_addToCache(WithLock, LogicalSessionRecord recor
}
std::vector<LogicalSessionId> LogicalSessionCacheImpl::listIds() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
std::vector<LogicalSessionId> ret;
ret.reserve(_activeSessions.size());
for (const auto& id : _activeSessions) {
@@ -391,7 +391,7 @@ std::vector<LogicalSessionId> LogicalSessionCacheImpl::listIds() const {
std::vector<LogicalSessionId> LogicalSessionCacheImpl::listIds(
const std::vector<SHA256Block>& userDigests) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
std::vector<LogicalSessionId> ret;
for (const auto& it : _activeSessions) {
if (std::find(userDigests.cbegin(), userDigests.cend(), it.first.getUid()) !=
@@ -404,7 +404,7 @@ std::vector<LogicalSessionId> LogicalSessionCacheImpl::listIds(
boost::optional<LogicalSessionRecord> LogicalSessionCacheImpl::peekCached(
const LogicalSessionId& id) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const auto it = _activeSessions.find(id);
if (it == _activeSessions.end()) {
return boost::none;
diff --git a/src/mongo/db/logical_session_cache_impl.h b/src/mongo/db/logical_session_cache_impl.h
index dcc827a98ef..c92e45fee4a 100644
--- a/src/mongo/db/logical_session_cache_impl.h
+++ b/src/mongo/db/logical_session_cache_impl.h
@@ -109,7 +109,7 @@ private:
const std::shared_ptr<SessionsCollection> _sessionsColl;
const ReapSessionsOlderThanFn _reapSessionsOlderThanFn;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("LogicalSessionCacheImpl::_mutex");
LogicalSessionIdMap<LogicalSessionRecord> _activeSessions;
diff --git a/src/mongo/db/logical_time_validator.cpp b/src/mongo/db/logical_time_validator.cpp
index 66136950343..df814663ff6 100644
--- a/src/mongo/db/logical_time_validator.cpp
+++ b/src/mongo/db/logical_time_validator.cpp
@@ -51,7 +51,7 @@ namespace {
const auto getLogicalClockValidator =
ServiceContext::declareDecoration<std::unique_ptr<LogicalTimeValidator>>();
-stdx::mutex validatorMutex; // protects access to decoration instance of LogicalTimeValidator.
+Mutex validatorMutex; // protects access to decoration instance of LogicalTimeValidator.
std::vector<Privilege> advanceClusterTimePrivilege;
@@ -67,7 +67,7 @@ Milliseconds kRefreshIntervalIfErrored(200);
} // unnamed namespace
LogicalTimeValidator* LogicalTimeValidator::get(ServiceContext* service) {
- stdx::lock_guard<stdx::mutex> lk(validatorMutex);
+ stdx::lock_guard<Latch> lk(validatorMutex);
return getLogicalClockValidator(service).get();
}
@@ -77,7 +77,7 @@ LogicalTimeValidator* LogicalTimeValidator::get(OperationContext* ctx) {
void LogicalTimeValidator::set(ServiceContext* service,
std::unique_ptr<LogicalTimeValidator> newValidator) {
- stdx::lock_guard<stdx::mutex> lk(validatorMutex);
+ stdx::lock_guard<Latch> lk(validatorMutex);
auto& validator = getLogicalClockValidator(service);
validator = std::move(newValidator);
}
@@ -91,7 +91,7 @@ SignedLogicalTime LogicalTimeValidator::_getProof(const KeysCollectionDocument&
// Compare and calculate HMAC inside the mutex to prevent multiple threads computing HMAC for the
// same cluster time.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Note: _lastSeenValidTime will initially not have a proof set.
if (newTime == _lastSeenValidTime.getTime() && _lastSeenValidTime.getProof()) {
return _lastSeenValidTime;
@@ -143,7 +143,7 @@ SignedLogicalTime LogicalTimeValidator::signLogicalTime(OperationContext* opCtx,
Status LogicalTimeValidator::validate(OperationContext* opCtx, const SignedLogicalTime& newTime) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (newTime.getTime() <= _lastSeenValidTime.getTime()) {
return Status::OK();
}
@@ -173,7 +173,7 @@ void LogicalTimeValidator::init(ServiceContext* service) {
}
void LogicalTimeValidator::shutDown() {
- stdx::lock_guard<stdx::mutex> lk(_mutexKeyManager);
+ stdx::lock_guard<Latch> lk(_mutexKeyManager);
if (_keyManager) {
_keyManager->stopMonitoring();
}
@@ -198,23 +198,23 @@ bool LogicalTimeValidator::shouldGossipLogicalTime() {
void LogicalTimeValidator::resetKeyManagerCache() {
log() << "Resetting key manager cache";
{
- stdx::lock_guard<stdx::mutex> keyManagerLock(_mutexKeyManager);
+ stdx::lock_guard<Latch> keyManagerLock(_mutexKeyManager);
invariant(_keyManager);
_keyManager->clearCache();
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lastSeenValidTime = SignedLogicalTime();
_timeProofService.resetCache();
}
void LogicalTimeValidator::stopKeyManager() {
- stdx::lock_guard<stdx::mutex> keyManagerLock(_mutexKeyManager);
+ stdx::lock_guard<Latch> keyManagerLock(_mutexKeyManager);
if (_keyManager) {
log() << "Stopping key manager";
_keyManager->stopMonitoring();
_keyManager->clearCache();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lastSeenValidTime = SignedLogicalTime();
_timeProofService.resetCache();
} else {
@@ -223,7 +223,7 @@ void LogicalTimeValidator::stopKeyManager() {
}
std::shared_ptr<KeysCollectionManager> LogicalTimeValidator::_getKeyManagerCopy() {
- stdx::lock_guard<stdx::mutex> lk(_mutexKeyManager);
+ stdx::lock_guard<Latch> lk(_mutexKeyManager);
invariant(_keyManager);
return _keyManager;
}
diff --git a/src/mongo/db/logical_time_validator.h b/src/mongo/db/logical_time_validator.h
index b87ff47436e..e639b4435d8 100644
--- a/src/mongo/db/logical_time_validator.h
+++ b/src/mongo/db/logical_time_validator.h
@@ -33,7 +33,7 @@
#include "mongo/db/signed_logical_time.h"
#include "mongo/db/time_proof_service.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -122,8 +122,9 @@ private:
SignedLogicalTime _getProof(const KeysCollectionDocument& keyDoc, LogicalTime newTime);
- stdx::mutex _mutex; // protects _lastSeenValidTime
- stdx::mutex _mutexKeyManager; // protects _keyManager
+ Mutex _mutex = MONGO_MAKE_LATCH("LogicalTimeValidator::_mutex"); // protects _lastSeenValidTime
+ Mutex _mutexKeyManager =
+ MONGO_MAKE_LATCH("LogicalTimevalidator::_mutexKeyManager"); // protects _keyManager
SignedLogicalTime _lastSeenValidTime;
TimeProofService _timeProofService;
std::shared_ptr<KeysCollectionManager> _keyManager;
diff --git a/src/mongo/db/operation_context.cpp b/src/mongo/db/operation_context.cpp
index 8a2fbca0e7b..c471c870fe1 100644
--- a/src/mongo/db/operation_context.cpp
+++ b/src/mongo/db/operation_context.cpp
@@ -35,8 +35,8 @@
#include "mongo/db/client.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/transport/baton.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/clock_source.h"
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index 025e8f7c1f3..a834f89ae1d 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -41,8 +41,8 @@
#include "mongo/db/storage/write_unit_of_work.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/transport/session.h"
#include "mongo/util/decorable.h"
#include "mongo/util/interruptible.h"
diff --git a/src/mongo/db/operation_context_group.cpp b/src/mongo/db/operation_context_group.cpp
index c3f46ea9f9f..bb215d21095 100644
--- a/src/mongo/db/operation_context_group.cpp
+++ b/src/mongo/db/operation_context_group.cpp
@@ -61,7 +61,7 @@ OperationContextGroup::Context::Context(OperationContext& ctx, OperationContextG
void OperationContextGroup::Context::discard() {
if (!_movedFrom) {
- stdx::lock_guard<stdx::mutex> lk(_ctxGroup._lock);
+ stdx::lock_guard<Latch> lk(_ctxGroup._lock);
auto it = find(_ctxGroup._contexts, &_opCtx);
_ctxGroup._contexts.erase(it);
_movedFrom = true;
@@ -77,7 +77,7 @@ auto OperationContextGroup::makeOperationContext(Client& client) -> Context {
auto OperationContextGroup::adopt(UniqueOperationContext opCtx) -> Context {
auto cp = opCtx.get();
invariant(cp);
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
_contexts.emplace_back(std::move(opCtx));
return Context(*cp, *this);
}
@@ -87,7 +87,7 @@ auto OperationContextGroup::take(Context ctx) -> Context {
return ctx;
}
{
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
auto it = find(ctx._ctxGroup._contexts, &ctx._opCtx);
_contexts.emplace_back(std::move(*it));
ctx._ctxGroup._contexts.erase(it);
@@ -98,14 +98,14 @@ auto OperationContextGroup::take(Context ctx) -> Context {
void OperationContextGroup::interrupt(ErrorCodes::Error code) {
invariant(code);
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
for (auto&& uniqueOperationContext : _contexts) {
interruptOne(uniqueOperationContext.get(), code);
}
}
bool OperationContextGroup::isEmpty() {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
return _contexts.empty();
}
diff --git a/src/mongo/db/operation_context_group.h b/src/mongo/db/operation_context_group.h
index 189069cdb17..0de0792e269 100644
--- a/src/mongo/db/operation_context_group.h
+++ b/src/mongo/db/operation_context_group.h
@@ -32,7 +32,7 @@
#include "mongo/db/client.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -96,7 +96,7 @@ public:
private:
friend class Context;
- stdx::mutex _lock;
+ Mutex _lock = MONGO_MAKE_LATCH("OperationContextGroup::_lock");
std::vector<UniqueOperationContext> _contexts;
}; // class OperationContextGroup
diff --git a/src/mongo/db/operation_context_test.cpp b/src/mongo/db/operation_context_test.cpp
index 43b4a51df5d..d805541218c 100644
--- a/src/mongo/db/operation_context_test.cpp
+++ b/src/mongo/db/operation_context_test.cpp
@@ -254,9 +254,9 @@ public:
}
void checkForInterruptForTimeout(OperationContext* opCtx) {
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
opCtx->waitForConditionOrInterrupt(cv, lk);
}
@@ -334,18 +334,18 @@ TEST_F(OperationDeadlineTests, VeryLargeRelativeDeadlinesNanoseconds) {
TEST_F(OperationDeadlineTests, WaitForMaxTimeExpiredCV) {
auto opCtx = client->makeOperationContext();
opCtx->setDeadlineByDate(mockClock->now(), ErrorCodes::ExceededTimeLimit);
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT_EQ(ErrorCodes::ExceededTimeLimit, opCtx->waitForConditionOrInterruptNoAssert(cv, lk));
}
TEST_F(OperationDeadlineTests, WaitForMaxTimeExpiredCVWithWaitUntilSet) {
auto opCtx = client->makeOperationContext();
opCtx->setDeadlineByDate(mockClock->now(), ErrorCodes::ExceededTimeLimit);
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT_EQ(
ErrorCodes::ExceededTimeLimit,
opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now() + Seconds{10})
@@ -598,17 +598,17 @@ TEST_F(OperationDeadlineTests, DeadlineAfterRunWithoutInterruptDoesntSeeUnviolat
TEST_F(OperationDeadlineTests, WaitForKilledOpCV) {
auto opCtx = client->makeOperationContext();
opCtx->markKilled();
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT_EQ(ErrorCodes::Interrupted, opCtx->waitForConditionOrInterruptNoAssert(cv, lk));
}
TEST_F(OperationDeadlineTests, WaitForUntilExpiredCV) {
auto opCtx = client->makeOperationContext();
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT(stdx::cv_status::timeout ==
unittest::assertGet(
opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now())));
@@ -617,9 +617,9 @@ TEST_F(OperationDeadlineTests, WaitForUntilExpiredCV) {
TEST_F(OperationDeadlineTests, WaitForUntilExpiredCVWithMaxTimeSet) {
auto opCtx = client->makeOperationContext();
opCtx->setDeadlineByDate(mockClock->now() + Seconds{10}, ErrorCodes::ExceededTimeLimit);
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT(stdx::cv_status::timeout ==
unittest::assertGet(
opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now())));
@@ -627,9 +627,9 @@ TEST_F(OperationDeadlineTests, WaitForUntilExpiredCVWithMaxTimeSet) {
TEST_F(OperationDeadlineTests, WaitForDurationExpired) {
auto opCtx = client->makeOperationContext();
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT_FALSE(opCtx->waitForConditionOrInterruptFor(
cv, lk, Milliseconds(-1000), []() -> bool { return false; }));
}
@@ -637,9 +637,9 @@ TEST_F(OperationDeadlineTests, WaitForDurationExpired) {
TEST_F(OperationDeadlineTests, DuringWaitMaxTimeExpirationDominatesUntilExpiration) {
auto opCtx = client->makeOperationContext();
opCtx->setDeadlineByDate(mockClock->now(), ErrorCodes::ExceededTimeLimit);
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT(ErrorCodes::ExceededTimeLimit ==
opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now()));
}
@@ -648,17 +648,17 @@ class ThreadedOperationDeadlineTests : public OperationDeadlineTests {
public:
using CvPred = std::function<bool()>;
using WaitFn = std::function<bool(
- OperationContext*, stdx::condition_variable&, stdx::unique_lock<stdx::mutex>&, CvPred)>;
+ OperationContext*, stdx::condition_variable&, stdx::unique_lock<Latch>&, CvPred)>;
struct WaitTestState {
void signal() {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
invariant(!isSignaled);
isSignaled = true;
cv.notify_all();
}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("WaitTestState::mutex");
stdx::condition_variable cv;
bool isSignaled = false;
};
@@ -674,7 +674,7 @@ public:
opCtx->setDeadlineByDate(maxTime, ErrorCodes::ExceededTimeLimit);
}
auto predicate = [state] { return state->isSignaled; };
- stdx::unique_lock<stdx::mutex> lk(state->mutex);
+ stdx::unique_lock<Latch> lk(state->mutex);
barrier->countDownAndWait();
return waitFn(opCtx, state->cv, lk, predicate);
});
@@ -684,7 +684,7 @@ public:
// Now we know that the waiter task must own the mutex, because it does not signal the
// barrier until it does.
- stdx::lock_guard<stdx::mutex> lk(state->mutex);
+ stdx::lock_guard<Latch> lk(state->mutex);
// Assuming that opCtx has not already been interrupted and that maxTime and until are
// unexpired, we know that the waiter must be blocked in the condition variable, because it
@@ -699,7 +699,7 @@ public:
Date_t maxTime) {
const auto waitFn = [until](OperationContext* opCtx,
stdx::condition_variable& cv,
- stdx::unique_lock<stdx::mutex>& lk,
+ stdx::unique_lock<Latch>& lk,
CvPred predicate) {
if (until < Date_t::max()) {
return opCtx->waitForConditionOrInterruptUntil(cv, lk, until, predicate);
@@ -718,7 +718,7 @@ public:
Date_t maxTime) {
const auto waitFn = [duration](OperationContext* opCtx,
stdx::condition_variable& cv,
- stdx::unique_lock<stdx::mutex>& lk,
+ stdx::unique_lock<Latch>& lk,
CvPred predicate) {
return opCtx->waitForConditionOrInterruptFor(cv, lk, duration, predicate);
};
@@ -735,7 +735,7 @@ public:
Date_t maxTime) {
auto waitFn = [sleepUntil](OperationContext* opCtx,
stdx::condition_variable& cv,
- stdx::unique_lock<stdx::mutex>& lk,
+ stdx::unique_lock<Latch>& lk,
CvPred predicate) {
lk.unlock();
opCtx->sleepUntil(sleepUntil);
@@ -752,7 +752,7 @@ public:
Date_t maxTime) {
auto waitFn = [sleepFor](OperationContext* opCtx,
stdx::condition_variable& cv,
- stdx::unique_lock<stdx::mutex>& lk,
+ stdx::unique_lock<Latch>& lk,
CvPred predicate) {
lk.unlock();
opCtx->sleepFor(sleepFor);
@@ -956,9 +956,9 @@ TEST(OperationContextTest, TestWaitForConditionOrInterruptNoAssertUntilAPI) {
auto client = serviceCtx->makeClient("OperationContextTest");
auto opCtx = client->makeOperationContext();
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
// Case (2). Expect a Status::OK with a cv_status::timeout.
Date_t deadline = Date_t::now() + Milliseconds(500);
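As the test hunks above show, block-scoped mutexes use the unnamed form of the macro. A sketch of that local-latch idiom, assuming an OperationContext* opCtx as in the fixtures above:

auto m = MONGO_MAKE_LATCH();  // unnamed form, fine for short-lived locals
stdx::condition_variable cv;
stdx::unique_lock<Latch> lk(m);
// Unlike a bare cv.wait(), this returns a non-OK status (rather than
// blocking forever) if the operation is killed or its deadline expires,
// e.g. ErrorCodes::Interrupted or ErrorCodes::ExceededTimeLimit above.
auto status = opCtx->waitForConditionOrInterruptNoAssert(cv, lk);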
diff --git a/src/mongo/db/operation_time_tracker.cpp b/src/mongo/db/operation_time_tracker.cpp
index 27832209b69..2d45b49747c 100644
--- a/src/mongo/db/operation_time_tracker.cpp
+++ b/src/mongo/db/operation_time_tracker.cpp
@@ -30,7 +30,7 @@
#include "mongo/platform/basic.h"
#include "mongo/db/operation_time_tracker.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace {
@@ -51,12 +51,12 @@ std::shared_ptr<OperationTimeTracker> OperationTimeTracker::get(OperationContext
}
LogicalTime OperationTimeTracker::getMaxOperationTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _maxOperationTime;
}
void OperationTimeTracker::updateOperationTime(LogicalTime newTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (newTime > _maxOperationTime) {
_maxOperationTime = std::move(newTime);
}
diff --git a/src/mongo/db/operation_time_tracker.h b/src/mongo/db/operation_time_tracker.h
index 45b06ccac6e..a259ee22d37 100644
--- a/src/mongo/db/operation_time_tracker.h
+++ b/src/mongo/db/operation_time_tracker.h
@@ -31,7 +31,7 @@
#include "mongo/db/logical_time.h"
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -57,7 +57,7 @@ public:
private:
// protects _maxOperationTime
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("OperationTimeTracker::_mutex");
LogicalTime _maxOperationTime;
};
diff --git a/src/mongo/db/periodic_runner_job_abort_expired_transactions.h b/src/mongo/db/periodic_runner_job_abort_expired_transactions.h
index 88bf08d7ee5..f372db87226 100644
--- a/src/mongo/db/periodic_runner_job_abort_expired_transactions.h
+++ b/src/mongo/db/periodic_runner_job_abort_expired_transactions.h
@@ -32,7 +32,7 @@
#include <memory>
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
namespace mongo {
@@ -55,7 +55,7 @@ private:
inline static const auto _serviceDecoration =
ServiceContext::declareDecoration<PeriodicThreadToAbortExpiredTransactions>();
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("PeriodicThreadToAbortExpiredTransactions::_mutex");
std::shared_ptr<PeriodicJobAnchor> _anchor;
};
diff --git a/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h b/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h
index 81c46260f34..1705d2d01d1 100644
--- a/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h
+++ b/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h
@@ -32,7 +32,7 @@
#include <memory>
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
namespace mongo {
@@ -59,7 +59,8 @@ private:
inline static const auto _serviceDecoration =
ServiceContext::declareDecoration<PeriodicThreadToDecreaseSnapshotHistoryCachePressure>();
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex =
+ MONGO_MAKE_LATCH("PeriodicThreadToDecreaseSnapshotHistoryCachePressure::_mutex");
std::shared_ptr<PeriodicJobAnchor> _anchor;
};
diff --git a/src/mongo/db/pipeline/document_source_exchange.cpp b/src/mongo/db/pipeline/document_source_exchange.cpp
index 76ca6839e96..9e8e37e97cd 100644
--- a/src/mongo/db/pipeline/document_source_exchange.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange.cpp
@@ -48,13 +48,13 @@ MONGO_FAIL_POINT_DEFINE(exchangeFailLoadNextBatch);
class MutexAndResourceLock {
OperationContext* _opCtx;
ResourceYielder* _resourceYielder;
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
public:
// Must be constructed with the mutex held. 'yielder' may be null if there are no resources
// which need to be yielded while waiting.
MutexAndResourceLock(OperationContext* opCtx,
- stdx::unique_lock<stdx::mutex> m,
+ stdx::unique_lock<Latch> m,
ResourceYielder* yielder)
: _opCtx(opCtx), _resourceYielder(yielder), _lock(std::move(m)) {
invariant(_lock.owns_lock());
@@ -78,7 +78,7 @@ public:
* Releases ownership of the lock to the caller. May only be called when the mutex is held
* (after a call to unlock(), for example).
*/
- stdx::unique_lock<stdx::mutex> releaseLockOwnership() {
+ stdx::unique_lock<Latch> releaseLockOwnership() {
invariant(_lock.owns_lock());
return std::move(_lock);
}
@@ -280,7 +280,7 @@ DocumentSource::GetNextResult Exchange::getNext(OperationContext* opCtx,
size_t consumerId,
ResourceYielder* resourceYielder) {
// Grab a lock.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
for (;;) {
// Guard against some of the trickiness we do with moving the lock to/from the
@@ -438,7 +438,7 @@ size_t Exchange::getTargetConsumer(const Document& input) {
}
void Exchange::dispose(OperationContext* opCtx, size_t consumerId) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_disposeRunDown < getConsumers());
diff --git a/src/mongo/db/pipeline/document_source_exchange.h b/src/mongo/db/pipeline/document_source_exchange.h
index e30d66698d1..df423ff28bb 100644
--- a/src/mongo/db/pipeline/document_source_exchange.h
+++ b/src/mongo/db/pipeline/document_source_exchange.h
@@ -36,8 +36,8 @@
#include "mongo/db/pipeline/document_source.h"
#include "mongo/db/pipeline/exchange_spec_gen.h"
#include "mongo/db/pipeline/field_path.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -170,8 +170,8 @@ private:
std::unique_ptr<Pipeline, PipelineDeleter> _pipeline;
// Synchronization.
- stdx::mutex _mutex;
- stdx::condition_variable_any _haveBufferSpace;
+ Mutex _mutex = MONGO_MAKE_LATCH("Exchange::_mutex");
+ stdx::condition_variable _haveBufferSpace;
// A thread that is currently loading the exchange buffers.
size_t _loadingThreadId{kInvalidThreadId};
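Note the paired change above: once the mutex is a Mutex, the waiters can move from stdx::condition_variable_any back to the plain stdx::condition_variable, since the condition variable supplied by mongo/platform/condition_variable.h waits on stdx::unique_lock<Latch> directly. A sketch of a waiter under that arrangement; _hasSpace() is a hypothetical predicate, not a member of Exchange:

Mutex _mutex = MONGO_MAKE_LATCH("Exchange::_mutex");
stdx::condition_variable _haveBufferSpace;

void waitForSpace(stdx::unique_lock<Latch>& lk) {
    // The plain condition_variable accepts unique_lock<Latch>, so the
    // heavier condition_variable_any (which takes any BasicLockable)
    // is no longer needed here.
    _haveBufferSpace.wait(lk, [this] { return _hasSpace(); });
}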
diff --git a/src/mongo/db/pipeline/document_source_exchange_test.cpp b/src/mongo/db/pipeline/document_source_exchange_test.cpp
index c6a5c4945fd..38145832b22 100644
--- a/src/mongo/db/pipeline/document_source_exchange_test.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange_test.cpp
@@ -65,7 +65,7 @@ namespace {
*/
class MutexYielder : public ResourceYielder {
public:
- MutexYielder(stdx::mutex* mutex) : _lock(*mutex, stdx::defer_lock) {}
+ MutexYielder(Mutex* mutex) : _lock(*mutex, stdx::defer_lock) {}
void yield(OperationContext* opCtx) override {
_lock.unlock();
@@ -75,12 +75,12 @@ public:
_lock.lock();
}
- stdx::unique_lock<stdx::mutex>& getLock() {
+ stdx::unique_lock<Latch>& getLock() {
return _lock;
}
private:
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
};
/**
@@ -523,11 +523,10 @@ TEST_F(DocumentSourceExchangeTest, RandomExchangeNConsumerResourceYielding) {
// thread holds this while it calls getNext(). This is to simulate the case where a thread may
// hold some "real" resources which need to be yielded while waiting, such as the Session, or
// the locks held in a transaction.
- stdx::mutex artificalGlobalMutex;
+ auto artificalGlobalMutex = MONGO_MAKE_LATCH();
boost::intrusive_ptr<Exchange> ex =
new Exchange(std::move(spec), unittest::assertGet(Pipeline::create({source}, getExpCtx())));
-
std::vector<ThreadInfo> threads;
for (size_t idx = 0; idx < nConsumers; ++idx) {
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index 5dc3778ecc6..9d03e0b1012 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -553,7 +553,7 @@ Status PlanCache::set(const CanonicalQuery& query,
const auto key = computeKey(query);
const size_t newWorks = why->stats[0]->common.works;
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
bool isNewEntryActive = false;
uint32_t queryHash;
uint32_t planCacheKey;
@@ -608,7 +608,7 @@ void PlanCache::deactivate(const CanonicalQuery& query) {
}
PlanCacheKey key = computeKey(query);
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
PlanCacheEntry* entry = nullptr;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
@@ -625,7 +625,7 @@ PlanCache::GetResult PlanCache::get(const CanonicalQuery& query) const {
}
PlanCache::GetResult PlanCache::get(const PlanCacheKey& key) const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
PlanCacheEntry* entry = nullptr;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
@@ -642,7 +642,7 @@ PlanCache::GetResult PlanCache::get(const PlanCacheKey& key) const {
Status PlanCache::feedback(const CanonicalQuery& cq, double score) {
PlanCacheKey ck = computeKey(cq);
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
PlanCacheEntry* entry;
Status cacheStatus = _cache.get(ck, &entry);
if (!cacheStatus.isOK()) {
@@ -659,12 +659,12 @@ Status PlanCache::feedback(const CanonicalQuery& cq, double score) {
}
Status PlanCache::remove(const CanonicalQuery& canonicalQuery) {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
return _cache.remove(computeKey(canonicalQuery));
}
void PlanCache::clear() {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
_cache.clear();
}
@@ -679,7 +679,7 @@ PlanCacheKey PlanCache::computeKey(const CanonicalQuery& cq) const {
StatusWith<std::unique_ptr<PlanCacheEntry>> PlanCache::getEntry(const CanonicalQuery& query) const {
PlanCacheKey key = computeKey(query);
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
PlanCacheEntry* entry;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
@@ -691,7 +691,7 @@ StatusWith<std::unique_ptr<PlanCacheEntry>> PlanCache::getEntry(const CanonicalQ
}
std::vector<std::unique_ptr<PlanCacheEntry>> PlanCache::getAllEntries() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
std::vector<std::unique_ptr<PlanCacheEntry>> entries;
for (auto&& cacheEntry : _cache) {
@@ -703,7 +703,7 @@ std::vector<std::unique_ptr<PlanCacheEntry>> PlanCache::getAllEntries() const {
}
size_t PlanCache::size() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
return _cache.size();
}
@@ -715,7 +715,7 @@ std::vector<BSONObj> PlanCache::getMatchingStats(
const std::function<BSONObj(const PlanCacheEntry&)>& serializationFunc,
const std::function<bool(const BSONObj&)>& filterFunc) const {
std::vector<BSONObj> results;
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
for (auto&& cacheEntry : _cache) {
const auto entry = cacheEntry.second;
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 8fc9b6bf3fe..06e648be653 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -39,7 +39,7 @@
#include "mongo/db/query/plan_cache_indexability.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/container_size_helper.h"
namespace mongo {
@@ -605,7 +605,7 @@ private:
LRUKeyValue<PlanCacheKey, PlanCacheEntry, PlanCacheKeyHasher> _cache;
// Protects _cache.
- mutable stdx::mutex _cacheMutex;
+ mutable Mutex _cacheMutex = MONGO_MAKE_LATCH("PlanCache::_cacheMutex");
// Holds computed information about the collection's indexes. Used for generating plan
// cache keys.
diff --git a/src/mongo/db/query/query_planner_wildcard_index_test.cpp b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
index 7a5fa236f75..794a959fa1f 100644
--- a/src/mongo/db/query/query_planner_wildcard_index_test.cpp
+++ b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
@@ -557,7 +557,6 @@ TEST_F(QueryPlannerWildcardTest, OrEqualityWithTwoPredicatesUsesTwoPaths) {
"bounds: {'$_path': [['a','a',true,true]], a: [[5,5,true,true]]}}}, "
"{ixscan: {filter: null, pattern: {'$_path': 1, b: 1},"
"bounds: {'$_path': [['b','b',true,true]], b: [[10,10,true,true]]}}}]}}}}");
- ;
}
TEST_F(QueryPlannerWildcardTest, OrWithOneRegularAndOneWildcardIndexPathUsesTwoIndexes) {
@@ -572,7 +571,6 @@ TEST_F(QueryPlannerWildcardTest, OrWithOneRegularAndOneWildcardIndexPathUsesTwoI
"bounds: {'$_path': [['a','a',true,true]], a: [[5,5,true,true]]}}}, "
"{ixscan: {filter: null, pattern: {b: 1},"
"bounds: {b: [[10,10,true,true]]}}}]}}}}");
- ;
}
TEST_F(QueryPlannerWildcardTest, BasicSkip) {
diff --git a/src/mongo/db/query/query_settings.cpp b/src/mongo/db/query/query_settings.cpp
index da477a862e1..5060d6d9ac8 100644
--- a/src/mongo/db/query/query_settings.cpp
+++ b/src/mongo/db/query/query_settings.cpp
@@ -78,7 +78,7 @@ AllowedIndexEntry::AllowedIndexEntry(const BSONObj& query,
boost::optional<AllowedIndicesFilter> QuerySettings::getAllowedIndicesFilter(
const CanonicalQuery::QueryShapeString& key) const {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
AllowedIndexEntryMap::const_iterator cacheIter = _allowedIndexEntryMap.find(key);
// Nothing to do if key does not exist in query settings.
@@ -90,7 +90,7 @@ boost::optional<AllowedIndicesFilter> QuerySettings::getAllowedIndicesFilter(
}
std::vector<AllowedIndexEntry> QuerySettings::getAllAllowedIndices() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
std::vector<AllowedIndexEntry> entries;
for (const auto& entryPair : _allowedIndexEntryMap) {
entries.push_back(entryPair.second);
@@ -109,7 +109,7 @@ void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery,
const BSONObj collation =
canonicalQuery.getCollator() ? canonicalQuery.getCollator()->getSpec().toBSON() : BSONObj();
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
_allowedIndexEntryMap.erase(key);
_allowedIndexEntryMap.emplace(
std::piecewise_construct,
@@ -118,7 +118,7 @@ void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery,
}
void QuerySettings::removeAllowedIndices(const CanonicalQuery::QueryShapeString& key) {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
// Nothing to do if key does not exist in query settings.
@@ -130,7 +130,7 @@ void QuerySettings::removeAllowedIndices(const CanonicalQuery::QueryShapeString&
}
void QuerySettings::clearAllowedIndices() {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
_allowedIndexEntryMap.clear();
}
diff --git a/src/mongo/db/query/query_settings.h b/src/mongo/db/query/query_settings.h
index 4fac8e39161..f317a2780c2 100644
--- a/src/mongo/db/query/query_settings.h
+++ b/src/mongo/db/query/query_settings.h
@@ -37,7 +37,7 @@
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/index_entry.h"
#include "mongo/db/query/plan_cache.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
@@ -152,7 +152,7 @@ private:
/**
* Protects data in query settings.
*/
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("QuerySettings::_mutex");
};
} // namespace mongo
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index 8207e58fed4..5cd75844f95 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -73,7 +73,7 @@ public:
*/
std::tuple<bool, std::shared_ptr<Notification<Status>>> getOrCreateWriteRequest(
LogicalTime clusterTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto lastEl = _writeRequests.rbegin();
if (lastEl != _writeRequests.rend() && lastEl->first >= clusterTime.asTimestamp()) {
return std::make_tuple(false, lastEl->second);
@@ -88,7 +88,7 @@ public:
* Erases writeRequest that happened at clusterTime
*/
void deleteWriteRequest(LogicalTime clusterTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto el = _writeRequests.find(clusterTime.asTimestamp());
invariant(el != _writeRequests.end());
invariant(el->second);
@@ -97,7 +97,7 @@ public:
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WriteRequestSynchronizer::_mutex");
std::map<Timestamp, std::shared_ptr<Notification<Status>>> _writeRequests;
};
diff --git a/src/mongo/db/repl/abstract_async_component.cpp b/src/mongo/db/repl/abstract_async_component.cpp
index 1b99507fc5c..77b086af97e 100644
--- a/src/mongo/db/repl/abstract_async_component.cpp
+++ b/src/mongo/db/repl/abstract_async_component.cpp
@@ -52,7 +52,7 @@ std::string AbstractAsyncComponent::_getComponentName() const {
}
bool AbstractAsyncComponent::isActive() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
return _isActive_inlock();
}
@@ -61,7 +61,7 @@ bool AbstractAsyncComponent::_isActive_inlock() noexcept {
}
bool AbstractAsyncComponent::_isShuttingDown() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
return _isShuttingDown_inlock();
}
@@ -70,7 +70,7 @@ bool AbstractAsyncComponent::_isShuttingDown_inlock() noexcept {
}
Status AbstractAsyncComponent::startup() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
switch (_state) {
case State::kPreStart:
_state = State::kRunning;
@@ -97,7 +97,7 @@ Status AbstractAsyncComponent::startup() noexcept {
}
void AbstractAsyncComponent::shutdown() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -116,17 +116,17 @@ void AbstractAsyncComponent::shutdown() noexcept {
}
void AbstractAsyncComponent::join() noexcept {
- stdx::unique_lock<stdx::mutex> lk(*_getMutex());
+ stdx::unique_lock<Latch> lk(*_getMutex());
_stateCondition.wait(lk, [this]() { return !_isActive_inlock(); });
}
AbstractAsyncComponent::State AbstractAsyncComponent::getState_forTest() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
return _state;
}
void AbstractAsyncComponent::_transitionToComplete() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
_transitionToComplete_inlock();
}
@@ -138,13 +138,13 @@ void AbstractAsyncComponent::_transitionToComplete_inlock() noexcept {
Status AbstractAsyncComponent::_checkForShutdownAndConvertStatus(
const executor::TaskExecutor::CallbackArgs& callbackArgs, const std::string& message) {
- stdx::unique_lock<stdx::mutex> lk(*_getMutex());
+ stdx::unique_lock<Latch> lk(*_getMutex());
return _checkForShutdownAndConvertStatus_inlock(callbackArgs, message);
}
Status AbstractAsyncComponent::_checkForShutdownAndConvertStatus(const Status& status,
const std::string& message) {
- stdx::unique_lock<stdx::mutex> lk(*_getMutex());
+ stdx::unique_lock<Latch> lk(*_getMutex());
return _checkForShutdownAndConvertStatus_inlock(status, message);
}
diff --git a/src/mongo/db/repl/abstract_async_component.h b/src/mongo/db/repl/abstract_async_component.h
index 64d88ad41e8..c5ce2da5afa 100644
--- a/src/mongo/db/repl/abstract_async_component.h
+++ b/src/mongo/db/repl/abstract_async_component.h
@@ -37,8 +37,8 @@
#include "mongo/base/static_assert.h"
#include "mongo/base/status.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -207,7 +207,7 @@ private:
/**
* Returns mutex to guard this component's state variable.
*/
- virtual stdx::mutex* _getMutex() noexcept = 0;
+ virtual Mutex* _getMutex() noexcept = 0;
private:
// All member variables are labeled with one of the following codes indicating the
@@ -259,7 +259,7 @@ Status AbstractAsyncComponent::_startupComponent_inlock(std::unique_ptr<T>& comp
template <typename T>
Status AbstractAsyncComponent::_startupComponent(std::unique_ptr<T>& component) {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
return _startupComponent_inlock(component);
}
@@ -275,7 +275,7 @@ void AbstractAsyncComponent::_shutdownComponent_inlock(const std::unique_ptr<T>&
template <typename T>
void AbstractAsyncComponent::_shutdownComponent(const std::unique_ptr<T>& component) {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
_shutdownComponent_inlock(component);
}
diff --git a/src/mongo/db/repl/abstract_async_component_test.cpp b/src/mongo/db/repl/abstract_async_component_test.cpp
index c6f3703b960..cdf892e4885 100644
--- a/src/mongo/db/repl/abstract_async_component_test.cpp
+++ b/src/mongo/db/repl/abstract_async_component_test.cpp
@@ -35,7 +35,7 @@
#include "mongo/db/repl/abstract_async_component.h"
#include "mongo/db/repl/task_executor_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
@@ -95,10 +95,10 @@ public:
private:
Status _doStartup_inlock() noexcept override;
void _doShutdown_inlock() noexcept override;
- stdx::mutex* _getMutex() noexcept override;
+ Mutex* _getMutex() noexcept override;
// Used by AbstractAsyncComponent to guard state changes.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MockAsyncComponent::_mutex");
public:
// Returned by _doStartup_inlock(). Override for testing.
@@ -125,7 +125,7 @@ Status MockAsyncComponent::scheduleWorkAndSaveHandle_forTest(
executor::TaskExecutor::CallbackFn work,
executor::TaskExecutor::CallbackHandle* handle,
const std::string& name) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _scheduleWorkAndSaveHandle_inlock(std::move(work), handle, name);
}
@@ -134,12 +134,12 @@ Status MockAsyncComponent::scheduleWorkAtAndSaveHandle_forTest(
executor::TaskExecutor::CallbackFn work,
executor::TaskExecutor::CallbackHandle* handle,
const std::string& name) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _scheduleWorkAtAndSaveHandle_inlock(when, std::move(work), handle, name);
}
void MockAsyncComponent::cancelHandle_forTest(executor::TaskExecutor::CallbackHandle handle) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_cancelHandle_inlock(handle);
}
@@ -160,7 +160,7 @@ Status MockAsyncComponent::_doStartup_inlock() noexcept {
void MockAsyncComponent::_doShutdown_inlock() noexcept {}
-stdx::mutex* MockAsyncComponent::_getMutex() noexcept {
+Mutex* MockAsyncComponent::_getMutex() noexcept {
return &_mutex;
}
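The pattern retyped above has the base class lock a latch owned by its subclass: AbstractAsyncComponent calls the pure-virtual _getMutex() and guards its state through the returned pointer. A minimal sketch of that arrangement, with illustrative names:

class ExampleBase {
public:
    virtual ~ExampleBase() = default;

    bool isActive() {
        // The base class locks whatever latch the subclass owns.
        stdx::lock_guard<Latch> lk(*_getMutex());
        return _active;
    }

protected:
    virtual Mutex* _getMutex() noexcept = 0;
    bool _active = false;
};

class ExampleComponent : public ExampleBase {
private:
    Mutex* _getMutex() noexcept override {
        return &_mutex;
    }

    Mutex _mutex = MONGO_MAKE_LATCH("ExampleComponent::_mutex");
};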
diff --git a/src/mongo/db/repl/abstract_oplog_fetcher.cpp b/src/mongo/db/repl/abstract_oplog_fetcher.cpp
index 4bb2762219a..0cdf534cfe7 100644
--- a/src/mongo/db/repl/abstract_oplog_fetcher.cpp
+++ b/src/mongo/db/repl/abstract_oplog_fetcher.cpp
@@ -40,7 +40,7 @@
#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/repl_server_parameters_gen.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -93,7 +93,7 @@ Milliseconds AbstractOplogFetcher::_getGetMoreMaxTime() const {
}
std::string AbstractOplogFetcher::toString() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
str::stream msg;
msg << _getComponentName() << " -"
<< " last optime fetched: " << _lastFetched.toString();
@@ -118,7 +118,7 @@ void AbstractOplogFetcher::_makeAndScheduleFetcherCallback(
Status scheduleStatus = Status::OK();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_fetcher = _makeFetcher(findCommandObj, metadataObj, _getInitialFindMaxTime());
scheduleStatus = _scheduleFetcher_inlock();
}
@@ -144,7 +144,7 @@ void AbstractOplogFetcher::_doShutdown_inlock() noexcept {
}
}
-stdx::mutex* AbstractOplogFetcher::_getMutex() noexcept {
+Mutex* AbstractOplogFetcher::_getMutex() noexcept {
return &_mutex;
}
@@ -158,12 +158,12 @@ OpTime AbstractOplogFetcher::getLastOpTimeFetched_forTest() const {
}
OpTime AbstractOplogFetcher::_getLastOpTimeFetched() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _lastFetched;
}
BSONObj AbstractOplogFetcher::getCommandObject_forTest() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _fetcher->getCommandObject();
}
@@ -198,7 +198,7 @@ void AbstractOplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
_makeFindCommandObject(_nss, _getLastOpTimeFetched(), _getRetriedFindMaxTime());
BSONObj metadataObj = _makeMetadataObject();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_fetcherRestarts == _maxFetcherRestarts) {
log() << "Error returned from oplog query (no more query restarts left): "
<< redact(responseStatus);
@@ -230,7 +230,7 @@ void AbstractOplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
// Reset fetcher restart counter on successful response.
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_isActive_inlock());
_fetcherRestarts = 0;
}
@@ -275,7 +275,7 @@ void AbstractOplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
LOG(3) << _getComponentName()
<< " setting last fetched optime ahead after batch: " << lastDoc;
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_lastFetched = lastDoc;
}
@@ -296,7 +296,7 @@ void AbstractOplogFetcher::_finishCallback(Status status) {
_onShutdownCallbackFn(status);
decltype(_onShutdownCallbackFn) onShutdownCallbackFn;
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_transitionToComplete_inlock();
// Release any resources that might be held by the '_onShutdownCallbackFn' function object.
diff --git a/src/mongo/db/repl/abstract_oplog_fetcher.h b/src/mongo/db/repl/abstract_oplog_fetcher.h
index 81497bf1258..19f9873a6e3 100644
--- a/src/mongo/db/repl/abstract_oplog_fetcher.h
+++ b/src/mongo/db/repl/abstract_oplog_fetcher.h
@@ -36,7 +36,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/abstract_async_component.h"
#include "mongo/db/repl/optime_with.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -148,7 +148,7 @@ protected:
virtual void _doShutdown_inlock() noexcept override;
private:
- stdx::mutex* _getMutex() noexcept override;
+ Mutex* _getMutex() noexcept override;
/**
* This function must be overriden by subclass oplog fetchers to specify what `find` command
@@ -214,7 +214,7 @@ private:
const std::size_t _maxFetcherRestarts;
// Protects member data of this AbstractOplogFetcher.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("AbstractOplogFetcher::_mutex");
// Function to call when the oplog fetcher shuts down.
OnShutdownCallbackFn _onShutdownCallbackFn;
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.cpp b/src/mongo/db/repl/base_cloner_test_fixture.cpp
index 24841605f83..b3e5870aa2c 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.cpp
+++ b/src/mongo/db/repl/base_cloner_test_fixture.cpp
@@ -148,13 +148,13 @@ void BaseClonerTest::clear() {
}
void BaseClonerTest::setStatus(const Status& status) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_status = status;
_setStatusCondition.notify_all();
}
const Status& BaseClonerTest::getStatus() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _status;
}
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.h b/src/mongo/db/repl/base_cloner_test_fixture.h
index c4d56c00397..d0e5a9b9289 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.h
+++ b/src/mongo/db/repl/base_cloner_test_fixture.h
@@ -41,8 +41,8 @@
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
@@ -135,7 +135,7 @@ protected:
private:
// Protects member data of this base cloner fixture.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("BaseClonerTest::_mutex");
stdx::condition_variable _setStatusCondition;
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 870f9d54e45..3a0cb62de62 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -134,7 +134,7 @@ void BackgroundSync::startup(OperationContext* opCtx) {
}
void BackgroundSync::shutdown(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_state = ProducerState::Stopped;
@@ -158,7 +158,7 @@ void BackgroundSync::join(OperationContext* opCtx) {
}
bool BackgroundSync::inShutdown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _inShutdown_inlock();
}
@@ -241,7 +241,7 @@ void BackgroundSync::_produce() {
HostAndPort source;
SyncSourceResolverResponse syncSourceResp;
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_lastOpTimeFetched.isNull()) {
// then we're initial syncing and we're still waiting for this to be set
lock.unlock();
@@ -264,7 +264,7 @@ void BackgroundSync::_produce() {
auto opCtx = cc().makeOperationContext();
minValidSaved = _replicationProcess->getConsistencyMarkers()->getMinValid(opCtx.get());
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return;
}
@@ -294,7 +294,7 @@ void BackgroundSync::_produce() {
fassert(40349, status);
_syncSourceResolver->join();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_syncSourceResolver.reset();
}
@@ -340,7 +340,7 @@ void BackgroundSync::_produce() {
return;
} else if (syncSourceResp.isOK() && !syncSourceResp.getSyncSource().empty()) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_syncSourceHost = syncSourceResp.getSyncSource();
source = _syncSourceHost;
}
@@ -378,7 +378,7 @@ void BackgroundSync::_produce() {
}
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return;
}
@@ -426,7 +426,7 @@ void BackgroundSync::_produce() {
},
onOplogFetcherShutdownCallbackFn,
bgSyncOplogFetcherBatchSize);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return;
}
@@ -502,7 +502,7 @@ Status BackgroundSync::_enqueueDocuments(Fetcher::Documents::const_iterator begi
// are done to prevent going into shutdown. This avoids a race where shutdown() clears the
// buffer between the time we check _inShutdown and the point where we finish writing to the
// buffer.
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return Status::OK();
}
@@ -554,7 +554,7 @@ void BackgroundSync::_runRollback(OperationContext* opCtx,
OpTime lastOpTimeFetched;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
lastOpTimeFetched = _lastOpTimeFetched;
}
@@ -631,7 +631,7 @@ void BackgroundSync::_runRollbackViaRecoverToCheckpoint(
rollbackRemoteOplogQueryBatchSize.load());
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return;
}
@@ -668,18 +668,18 @@ void BackgroundSync::_fallBackOnRollbackViaRefetch(
}
HostAndPort BackgroundSync::getSyncTarget() const {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return _syncSourceHost;
}
void BackgroundSync::clearSyncTarget() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
log() << "Resetting sync source to empty, which was " << _syncSourceHost;
_syncSourceHost = HostAndPort();
}
void BackgroundSync::stop(bool resetLastFetchedOptime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_state = ProducerState::Stopped;
log() << "Stopping replication producer";
@@ -709,7 +709,7 @@ void BackgroundSync::start(OperationContext* opCtx) {
do {
lastAppliedOpTime = _readLastAppliedOpTime(opCtx);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Double check the state after acquiring the mutex.
if (_state != ProducerState::Starting) {
return;
@@ -779,12 +779,12 @@ bool BackgroundSync::shouldStopFetching() const {
}
BackgroundSync::ProducerState BackgroundSync::getState() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _state;
}
void BackgroundSync::startProducerIfStopped() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Let producer run if it's already running.
if (_state == ProducerState::Stopped) {
_state = ProducerState::Starting;
diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h
index e44427a656d..0bacdc71d29 100644
--- a/src/mongo/db/repl/bgsync.h
+++ b/src/mongo/db/repl/bgsync.h
@@ -43,8 +43,8 @@
#include "mongo/db/repl/rollback_impl.h"
#include "mongo/db/repl/sync_source_resolver.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/net/hostandport.h"
@@ -236,7 +236,7 @@ private:
// Protects member data of BackgroundSync.
// Never hold the BackgroundSync mutex when trying to acquire the ReplicationCoordinator mutex.
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("BackgroundSync::_mutex"); // (S)
OpTime _lastOpTimeFetched; // (M)
diff --git a/src/mongo/db/repl/callback_completion_guard.h b/src/mongo/db/repl/callback_completion_guard.h
index 4effb49c1f2..4ed13f55dff 100644
--- a/src/mongo/db/repl/callback_completion_guard.h
+++ b/src/mongo/db/repl/callback_completion_guard.h
@@ -33,7 +33,7 @@
#include <boost/optional.hpp>
#include <functional>
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
namespace mongo {
@@ -83,9 +83,9 @@ public:
* Requires either a unique_lock or lock_guard to be passed in to ensure that we call
* _cancelRemainingWork_inlock() while we have a lock on the caller's mutex.
*/
- void setResultAndCancelRemainingWork_inlock(const stdx::lock_guard<stdx::mutex>& lock,
+ void setResultAndCancelRemainingWork_inlock(const stdx::lock_guard<Latch>& lock,
const Result& result);
- void setResultAndCancelRemainingWork_inlock(const stdx::unique_lock<stdx::mutex>& lock,
+ void setResultAndCancelRemainingWork_inlock(const stdx::unique_lock<Latch>& lock,
const Result& result);
private:
@@ -124,13 +124,13 @@ CallbackCompletionGuard<Result>::~CallbackCompletionGuard() {
template <typename Result>
void CallbackCompletionGuard<Result>::setResultAndCancelRemainingWork_inlock(
- const stdx::lock_guard<stdx::mutex>& lock, const Result& result) {
+ const stdx::lock_guard<Latch>& lock, const Result& result) {
_setResultAndCancelRemainingWork_inlock(result);
}
template <typename Result>
void CallbackCompletionGuard<Result>::setResultAndCancelRemainingWork_inlock(
- const stdx::unique_lock<stdx::mutex>& lock, const Result& result) {
+ const stdx::unique_lock<Latch>& lock, const Result& result) {
invariant(lock.owns_lock());
_setResultAndCancelRemainingWork_inlock(result);
}
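
The two overloads above are a lock-witness idiom: the caller passes its guard by const reference to prove, at the type level, that the mutex is held across the call. A lock_guard cannot be released early, so that overload needs no runtime check; a unique_lock can be, so that overload asserts ownership. A sketch of the same idiom, using a hypothetical function that is not part of the patch:

    // The guard parameter is never used for locking here; it only witnesses that
    // the caller already holds the mutex for the duration of the call.
    void setState_inlock(const stdx::unique_lock<Latch>& lock, int nextState) {
        invariant(lock.owns_lock());  // a unique_lock may have been unlocked early
        // ... mutate state guarded by the caller's mutex ...
    }
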
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index 5bb4fefbc08..d592d3fbfba 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -88,7 +88,7 @@ private:
std::unique_ptr<stdx::thread> _quorumCheckThread;
Status _quorumCheckStatus;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CheckQuorumTest::_mutex");
bool _isQuorumCheckDone;
};
@@ -109,13 +109,13 @@ Status CheckQuorumTest::waitForQuorumCheck() {
}
bool CheckQuorumTest::isQuorumCheckDone() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isQuorumCheckDone;
}
void CheckQuorumTest::_runQuorumCheck(const ReplSetConfig& config, int myIndex) {
_quorumCheckStatus = _runQuorumCheckImpl(config, myIndex);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_isQuorumCheckDone = true;
}
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index c5270c5370d..62f601affd9 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -57,8 +57,8 @@ namespace mongo {
namespace repl {
namespace {
-using LockGuard = stdx::lock_guard<stdx::mutex>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
+using UniqueLock = stdx::unique_lock<Latch>;
using executor::RemoteCommandRequest;
constexpr auto kCountResponseDocumentCountFieldName = "n"_sd;
@@ -199,7 +199,7 @@ bool CollectionCloner::_isActive_inlock() const {
}
bool CollectionCloner::_isShuttingDown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return State::kShuttingDown == _state;
}
@@ -230,7 +230,7 @@ Status CollectionCloner::startup() noexcept {
}
void CollectionCloner::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -263,12 +263,12 @@ void CollectionCloner::_cancelRemainingWork_inlock() {
}
CollectionCloner::Stats CollectionCloner::getStats() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _stats;
}
void CollectionCloner::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() {
return (_queryState == QueryState::kNotStarted || _queryState == QueryState::kFinished) &&
!_isActive_inlock();
@@ -288,7 +288,7 @@ void CollectionCloner::setScheduleDbWorkFn_forTest(ScheduleDbWorkFn scheduleDbWo
}
void CollectionCloner::setCreateClientFn_forTest(const CreateClientFn& createClientFn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_createClientFn = createClientFn;
}
@@ -474,7 +474,7 @@ void CollectionCloner::_beginCollectionCallback(const executor::TaskExecutor::Ca
auto cancelRemainingWorkInLock = [this]() { _cancelRemainingWork_inlock(); };
auto finishCallbackFn = [this](const Status& status) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_queryState = QueryState::kFinished;
_clientConnection.reset();
}
@@ -494,13 +494,13 @@ void CollectionCloner::_beginCollectionCallback(const executor::TaskExecutor::Ca
void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& callbackData,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
if (!callbackData.status.isOK()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, callbackData.status);
return;
}
bool queryStateOK = false;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
queryStateOK = _queryState == QueryState::kNotStarted;
if (queryStateOK) {
_queryState = QueryState::kRunning;
@@ -525,12 +525,12 @@ void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& cal
Status clientConnectionStatus = _clientConnection->connect(_source, StringData());
if (!clientConnectionStatus.isOK()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, clientConnectionStatus);
return;
}
if (!replAuthenticate(_clientConnection.get())) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(
lock,
{ErrorCodes::AuthenticationFailed,
@@ -552,7 +552,7 @@ void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& cal
} catch (const DBException& e) {
auto queryStatus = e.toStatus().withContext(str::stream() << "Error querying collection '"
<< _sourceNss.ns());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (queryStatus.code() == ErrorCodes::OperationFailed ||
queryStatus.code() == ErrorCodes::CursorNotFound ||
queryStatus.code() == ErrorCodes::QueryPlanKilled) {
@@ -572,7 +572,7 @@ void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& cal
}
}
waitForDbWorker();
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, Status::OK());
}
@@ -580,7 +580,7 @@ void CollectionCloner::_handleNextBatch(std::shared_ptr<OnCompletionGuard> onCom
DBClientCursorBatchIterator& iter) {
_stats.receivedBatches++;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
uassert(ErrorCodes::CallbackCanceled,
"Collection cloning cancelled.",
_queryState != QueryState::kCanceling);
@@ -621,7 +621,7 @@ void CollectionCloner::_handleNextBatch(std::shared_ptr<OnCompletionGuard> onCom
}
void CollectionCloner::_verifyCollectionWasDropped(
- const stdx::unique_lock<stdx::mutex>& lk,
+ const stdx::unique_lock<Latch>& lk,
Status batchStatus,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
// If we already have a _verifyCollectionDroppedScheduler, just return; the existing
@@ -684,7 +684,7 @@ void CollectionCloner::_insertDocumentsCallback(
const executor::TaskExecutor::CallbackArgs& cbd,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
if (!cbd.status.isOK()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, cbd.status);
return;
}
diff --git a/src/mongo/db/repl/collection_cloner.h b/src/mongo/db/repl/collection_cloner.h
index 817925ed765..ba8139dd98f 100644
--- a/src/mongo/db/repl/collection_cloner.h
+++ b/src/mongo/db/repl/collection_cloner.h
@@ -48,8 +48,8 @@
#include "mongo/db/repl/storage_interface.h"
#include "mongo/db/repl/task_runner.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/progress_meter.h"
@@ -239,7 +239,7 @@ private:
* Verifies that an error from the query was the result of a collection drop. If
* so, cloning is stopped with no error. Otherwise it is stopped with the given error.
*/
- void _verifyCollectionWasDropped(const stdx::unique_lock<stdx::mutex>& lk,
+ void _verifyCollectionWasDropped(const stdx::unique_lock<Latch>& lk,
Status batchStatus,
std::shared_ptr<OnCompletionGuard> onCompletionGuard);
@@ -259,7 +259,7 @@ private:
// (S) Self-synchronizing; access in any way from any context.
// (RT) Read-only in concurrent operation; synchronized externally by tests
//
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("CollectionCloner::_mutex");
mutable stdx::condition_variable _condition; // (M)
executor::TaskExecutor* _executor; // (R) Not owned by us.
ThreadPool* _dbWorkThreadPool; // (R) Not owned by us.
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index aabb5619894..949d419c746 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -65,7 +65,7 @@ public:
: MockDBClientConnection(remote), _net(net) {}
virtual ~FailableMockDBClientConnection() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_paused = false;
_cond.notify_all();
_cond.wait(lk, [this] { return !_resuming; });
@@ -86,13 +86,13 @@ public:
int batchSize) override {
ON_BLOCK_EXIT([this]() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_queryCount++;
}
_cond.notify_all();
});
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_waiting = _paused;
_cond.notify_all();
while (_paused) {
@@ -118,14 +118,14 @@ public:
void pause() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_paused = true;
}
_cond.notify_all();
}
void resume() {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_resuming = true;
_resume(&lk);
_resuming = false;
@@ -135,13 +135,13 @@ public:
// Waits until the first query issued after pause() has started.
void waitForPausedQuery() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cond.wait(lk, [this] { return _waiting; });
}
// Resumes, then waits for the first query run after resume() to complete.
void resumeAndWaitForResumedQuery() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_resuming = true;
_resume(&lk);
_cond.notify_all(); // This is to wake up the paused thread.
@@ -152,7 +152,7 @@ public:
private:
executor::NetworkInterfaceMock* _net;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FailableMockDBClientConnection::_mutex");
stdx::condition_variable _cond;
bool _paused = false;
bool _waiting = false;
@@ -162,7 +162,7 @@ private:
Status _failureForConnect = Status::OK();
Status _failureForQuery = Status::OK();
- void _resume(stdx::unique_lock<stdx::mutex>* lk) {
+ void _resume(stdx::unique_lock<Latch>* lk) {
invariant(lk->owns_lock());
_paused = false;
_resumedQueryCount = _queryCount;
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index 63a00583854..55eab0a0aa4 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -57,8 +57,8 @@ MONGO_FAIL_POINT_DEFINE(initialSyncHangBeforeListCollections);
namespace {
-using LockGuard = stdx::lock_guard<stdx::mutex>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
+using UniqueLock = stdx::unique_lock<Latch>;
using executor::RemoteCommandRequest;
const char* kNameFieldName = "name";
@@ -208,7 +208,7 @@ Status DatabaseCloner::startup() noexcept {
}
void DatabaseCloner::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -256,7 +256,7 @@ void DatabaseCloner::setStartCollectionClonerFn(
}
DatabaseCloner::State DatabaseCloner::getState_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
diff --git a/src/mongo/db/repl/database_cloner.h b/src/mongo/db/repl/database_cloner.h
index e2790956089..666f23610fb 100644
--- a/src/mongo/db/repl/database_cloner.h
+++ b/src/mongo/db/repl/database_cloner.h
@@ -41,8 +41,8 @@
#include "mongo/db/repl/base_cloner.h"
#include "mongo/db/repl/collection_cloner.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
@@ -201,7 +201,7 @@ private:
/**
* Calls the above method after unlocking.
*/
- void _finishCallback_inlock(stdx::unique_lock<stdx::mutex>& lk, const Status& status);
+ void _finishCallback_inlock(stdx::unique_lock<Latch>& lk, const Status& status);
//
// All member variables are labeled with one of the following codes indicating the
@@ -212,7 +212,7 @@ private:
// (S) Self-synchronizing; access in any way from any context.
// (RT) Read-only in concurrent operation; synchronized externally by tests
//
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("DatabaseCloner::_mutex");
mutable stdx::condition_variable _condition; // (M)
executor::TaskExecutor* _executor; // (R)
ThreadPool* _dbWorkThreadPool; // (R)
diff --git a/src/mongo/db/repl/databases_cloner.cpp b/src/mongo/db/repl/databases_cloner.cpp
index 554acab9651..f0d746cec52 100644
--- a/src/mongo/db/repl/databases_cloner.cpp
+++ b/src/mongo/db/repl/databases_cloner.cpp
@@ -56,8 +56,8 @@ namespace {
using Request = executor::RemoteCommandRequest;
using Response = executor::RemoteCommandResponse;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
+using UniqueLock = stdx::unique_lock<Latch>;
} // namespace
diff --git a/src/mongo/db/repl/databases_cloner.h b/src/mongo/db/repl/databases_cloner.h
index 890c6c2a7e1..e5ea692d8bc 100644
--- a/src/mongo/db/repl/databases_cloner.h
+++ b/src/mongo/db/repl/databases_cloner.h
@@ -42,8 +42,8 @@
#include "mongo/db/repl/collection_cloner.h"
#include "mongo/db/repl/database_cloner.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
@@ -138,10 +138,10 @@ private:
void _setStatus_inlock(Status s);
/** Will fail the cloner, call the completion function, and become inactive. */
- void _fail_inlock(stdx::unique_lock<stdx::mutex>* lk, Status s);
+ void _fail_inlock(stdx::unique_lock<Latch>* lk, Status s);
/** Will call the completion function, and become inactive. */
- void _succeed_inlock(stdx::unique_lock<stdx::mutex>* lk);
+ void _succeed_inlock(stdx::unique_lock<Latch>* lk);
/** Called each time a database clone is finished */
void _onEachDBCloneFinish(const Status& status, const std::string& name);
@@ -175,7 +175,7 @@ private:
// (M) Reads and writes guarded by _mutex
// (S) Self-synchronizing; access in any way from any context.
//
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("DatabasesCloner::_mutex"); // (S)
Status _status{ErrorCodes::NotYetInitialized, ""}; // (M) If it is not OK, we stop everything.
executor::TaskExecutor* _exec; // (R) executor to schedule things with
ThreadPool* _dbWorkThreadPool; // (R) db worker thread pool for collection cloning.
diff --git a/src/mongo/db/repl/databases_cloner_test.cpp b/src/mongo/db/repl/databases_cloner_test.cpp
index ba75f96c6e4..fa386d4b2c7 100644
--- a/src/mongo/db/repl/databases_cloner_test.cpp
+++ b/src/mongo/db/repl/databases_cloner_test.cpp
@@ -43,7 +43,7 @@
#include "mongo/dbtests/mock/mock_dbclient_connection.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/task_executor_proxy.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/concurrency/thread_name.h"
@@ -57,9 +57,9 @@ using namespace mongo::repl;
using executor::NetworkInterfaceMock;
using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
-using mutex = stdx::mutex;
+using LockGuard = stdx::lock_guard<Latch>;
+using UniqueLock = stdx::unique_lock<Latch>;
+using mutex = Mutex;
using NetworkGuard = executor::NetworkInterfaceMock::InNetworkGuard;
using namespace unittest;
using Responses = std::vector<std::pair<std::string, BSONObj>>;
@@ -288,7 +288,7 @@ protected:
void runCompleteClone(Responses responses) {
Status result{Status::OK()};
bool done = false;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cvDone;
DatabasesCloner cloner{&getStorage(),
&getExecutor(),
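
Function-local mutexes migrate a little differently from members: instead of a named in-class initializer, the test declares the latch with auto and the no-argument form of MONGO_MAKE_LATCH (presumably identified by source location rather than an explicit name). A sketch of the local latch-plus-condvar shape used by runCompleteClone, with the cloner wiring elided:

    auto mutex = MONGO_MAKE_LATCH();  // unnamed form used for locals in tests
    stdx::condition_variable cvDone;
    bool done = false;

    // Completion callback, invoked from another thread:
    auto onFinish = [&](const Status& status) {
        stdx::lock_guard<Latch> lk(mutex);
        done = true;
        cvDone.notify_all();
    };

    // The test thread waits exactly as it would have with stdx::mutex:
    stdx::unique_lock<Latch> lk(mutex);
    cvDone.wait(lk, [&] { return done; });
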
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper.cpp b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
index cb4c85c5cd4..4c380e4c8c5 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
@@ -80,7 +80,7 @@ void DropPendingCollectionReaper::addDropPendingNamespace(
const OpTime& dropOpTime,
const NamespaceString& dropPendingNamespace) {
invariant(dropPendingNamespace.isDropPendingNamespace());
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
const auto equalRange = _dropPendingNamespaces.equal_range(dropOpTime);
const auto& lowerBound = equalRange.first;
const auto& upperBound = equalRange.second;
@@ -97,7 +97,7 @@ void DropPendingCollectionReaper::addDropPendingNamespace(
_dropPendingNamespaces.insert(std::make_pair(dropOpTime, dropPendingNamespace));
if (opCtx->lockState()->inAWriteUnitOfWork()) {
opCtx->recoveryUnit()->onRollback([this, dropPendingNamespace, dropOpTime]() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
const auto equalRange = _dropPendingNamespaces.equal_range(dropOpTime);
const auto& lowerBound = equalRange.first;
@@ -114,7 +114,7 @@ void DropPendingCollectionReaper::addDropPendingNamespace(
}
boost::optional<OpTime> DropPendingCollectionReaper::getEarliestDropOpTime() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto it = _dropPendingNamespaces.cbegin();
if (it == _dropPendingNamespaces.cend()) {
return boost::none;
@@ -129,7 +129,7 @@ bool DropPendingCollectionReaper::rollBackDropPendingCollection(
const auto pendingNss = collectionNamespace.makeDropPendingNamespace(opTime);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
const auto equalRange = _dropPendingNamespaces.equal_range(opTime);
const auto& lowerBound = equalRange.first;
const auto& upperBound = equalRange.second;
@@ -154,7 +154,7 @@ void DropPendingCollectionReaper::dropCollectionsOlderThan(OperationContext* opC
const OpTime& opTime) {
DropPendingNamespaces toDrop;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (auto it = _dropPendingNamespaces.cbegin();
it != _dropPendingNamespaces.cend() && it->first <= opTime;
++it) {
@@ -194,7 +194,7 @@ void DropPendingCollectionReaper::dropCollectionsOlderThan(OperationContext* opC
{
// Entries must be removed AFTER drops are completed, so that getEarliestDropOpTime()
// returns appropriate results.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto it = _dropPendingNamespaces.cbegin();
while (it != _dropPendingNamespaces.cend() && it->first <= opTime) {
if (toDrop.find(it->first) != toDrop.cend()) {
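
One detail in addDropPendingNamespace above is easy to miss: the onRollback handler captures this and takes the latch again inside the lambda, because the handler runs later, outside the critical section that registered it. A simplified sketch of that shape (the real code walks the equal_range and erases only the matching multimap entry; this version is illustrative):

    opCtx->recoveryUnit()->onRollback([this, dropOpTime, dropPendingNamespace]() {
        // Runs at rollback time, on whatever thread resolves the unit of work,
        // so the latch must be re-acquired here rather than inherited.
        stdx::lock_guard<Latch> lock(_mutex);
        // ... locate and erase the (dropOpTime, dropPendingNamespace) entry ...
    });
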
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper.h b/src/mongo/db/repl/drop_pending_collection_reaper.h
index 48795159066..18c359ada2a 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper.h
+++ b/src/mongo/db/repl/drop_pending_collection_reaper.h
@@ -36,7 +36,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/optime.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -101,7 +101,7 @@ public:
void dropCollectionsOlderThan(OperationContext* opCtx, const OpTime& opTime);
void clearDropPendingState() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_dropPendingNamespaces.clear();
}
@@ -127,7 +127,7 @@ private:
// (M) Reads and writes guarded by _mutex.
// Guards access to member variables.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DropPendingCollectionReaper::_mutex");
// Used to access the storage layer.
StorageInterface* const _storageInterface; // (R)
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index d3f03d1276c..8f19951e265 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -116,8 +116,8 @@ using Event = executor::TaskExecutor::EventHandle;
using Handle = executor::TaskExecutor::CallbackHandle;
using Operations = MultiApplier::Operations;
using QueryResponseStatus = StatusWith<Fetcher::QueryResponse>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
+using LockGuard = stdx::lock_guard<Latch>;
// Used to reset the oldest timestamp during initial sync to a non-null timestamp.
const Timestamp kTimestampOne(0, 1);
@@ -197,7 +197,7 @@ InitialSyncer::~InitialSyncer() {
}
bool InitialSyncer::isActive() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isActive_inlock();
}
@@ -210,7 +210,7 @@ Status InitialSyncer::startup(OperationContext* opCtx,
invariant(opCtx);
invariant(initialSyncMaxAttempts >= 1U);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
_state = State::kRunning;
@@ -243,7 +243,7 @@ Status InitialSyncer::startup(OperationContext* opCtx,
}
Status InitialSyncer::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -281,22 +281,22 @@ void InitialSyncer::_cancelRemainingWork_inlock() {
}
void InitialSyncer::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_stateCondition.wait(lk, [this]() { return !_isActive_inlock(); });
}
InitialSyncer::State InitialSyncer::getState_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
Date_t InitialSyncer::getWallClockTime_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastApplied.wallTime;
}
bool InitialSyncer::_isShuttingDown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isShuttingDown_inlock();
}
@@ -468,7 +468,7 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
// Lock guard must be declared after the completion guard because the completion
// guard's destructor has to run outside the lock.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_oplogApplier = {};
@@ -522,7 +522,7 @@ void InitialSyncer::_chooseSyncSourceCallback(
std::uint32_t chooseSyncSourceAttempt,
std::uint32_t chooseSyncSourceMaxAttempts,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
// Cancellation should be treated the same as other errors. In this case, the most likely cause
// of a failed _chooseSyncSourceCallback() task is a cancellation triggered by
// InitialSyncer::shutdown() or the task executor shutting down.
@@ -678,7 +678,7 @@ Status InitialSyncer::_scheduleGetBeginFetchingOpTime_inlock(
void InitialSyncer::_rollbackCheckerResetCallback(
const RollbackChecker::Result& result, std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(result.getStatus(),
"error while getting base rollback ID");
if (!status.isOK()) {
@@ -696,7 +696,7 @@ void InitialSyncer::_rollbackCheckerResetCallback(
void InitialSyncer::_getBeginFetchingOpTimeCallback(
const StatusWith<Fetcher::QueryResponse>& result,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(),
"error while getting oldest active transaction timestamp for begin fetching timestamp");
@@ -746,7 +746,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForBeginApplyingTimestamp(
const StatusWith<Fetcher::QueryResponse>& result,
std::shared_ptr<OnCompletionGuard> onCompletionGuard,
OpTime& beginFetchingOpTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(), "error while getting last oplog entry for begin timestamp");
if (!status.isOK()) {
@@ -803,7 +803,7 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
std::shared_ptr<OnCompletionGuard> onCompletionGuard,
const OpTime& lastOpTime,
OpTime& beginFetchingOpTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(), "error while getting the remote feature compatibility version");
if (!status.isOK()) {
@@ -983,7 +983,7 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
void InitialSyncer::_oplogFetcherCallback(const Status& oplogFetcherFinishStatus,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
log() << "Finished fetching oplog during initial sync: " << redact(oplogFetcherFinishStatus)
<< ". Last fetched optime: " << _lastFetched.toString();
@@ -1030,7 +1030,7 @@ void InitialSyncer::_databasesClonerCallback(const Status& databaseClonerFinishS
}
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(databaseClonerFinishStatus,
"error cloning databases");
if (!status.isOK()) {
@@ -1055,7 +1055,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
OpTimeAndWallTime resultOpTimeAndWallTime = {OpTime(), Date_t()};
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(), "error fetching last oplog entry for stop timestamp");
if (!status.isOK()) {
@@ -1102,7 +1102,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
TimestampedBSONObj{oplogSeedDoc, resultOpTimeAndWallTime.opTime.getTimestamp()},
resultOpTimeAndWallTime.opTime.getTerm());
if (!status.isOK()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, status);
return;
}
@@ -1111,7 +1111,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
opCtx.get(), resultOpTimeAndWallTime.opTime.getTimestamp(), orderedCommit);
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_lastApplied = resultOpTimeAndWallTime;
log() << "No need to apply operations. (currently at "
<< _initialSyncState->stopTimestamp.toBSON() << ")";
@@ -1123,7 +1123,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
void InitialSyncer::_getNextApplierBatchCallback(
const executor::TaskExecutor::CallbackArgs& callbackArgs,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status =
_checkForShutdownAndConvertStatus_inlock(callbackArgs, "error getting next applier batch");
if (!status.isOK()) {
@@ -1223,7 +1223,7 @@ void InitialSyncer::_multiApplierCallback(const Status& multiApplierStatus,
OpTimeAndWallTime lastApplied,
std::uint32_t numApplied,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status =
_checkForShutdownAndConvertStatus_inlock(multiApplierStatus, "error applying batch");
@@ -1260,7 +1260,7 @@ void InitialSyncer::_multiApplierCallback(const Status& multiApplierStatus,
void InitialSyncer::_rollbackCheckerCheckForRollbackCallback(
const RollbackChecker::Result& result, std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(result.getStatus(),
"error while getting last rollback ID");
if (!status.isOK()) {
@@ -1311,7 +1311,7 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
log() << "Initial sync attempt finishing up.";
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
log() << "Initial Sync Attempt Statistics: " << redact(_getInitialSyncProgress_inlock());
auto runTime = _initialSyncState ? _initialSyncState->timer.millis() : 0;
@@ -1384,7 +1384,7 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
// before we transition the state to Complete.
decltype(_onCompletion) onCompletion;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto opCtx = makeOpCtx();
_tearDown_inlock(opCtx.get(), lastApplied);
@@ -1414,7 +1414,7 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
// before InitialSyncer::join() returns.
onCompletion = {};
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state != State::kComplete);
_state = State::kComplete;
_stateCondition.notify_all();
@@ -1450,8 +1450,7 @@ Status InitialSyncer::_scheduleLastOplogEntryFetcher_inlock(Fetcher::CallbackFn
}
void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
- const stdx::lock_guard<stdx::mutex>& lock,
- std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
+ const stdx::lock_guard<Latch>& lock, std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
// We should check our current state because shutdown() could have been called before
// we re-acquired the lock.
if (_isShuttingDown_inlock()) {
@@ -1506,8 +1505,7 @@ void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
}
void InitialSyncer::_scheduleRollbackCheckerCheckForRollback_inlock(
- const stdx::lock_guard<stdx::mutex>& lock,
- std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
+ const stdx::lock_guard<Latch>& lock, std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
// We should check our current state because shutdown() could have been called before
// we re-acquired the lock.
if (_isShuttingDown_inlock()) {
diff --git a/src/mongo/db/repl/initial_syncer.h b/src/mongo/db/repl/initial_syncer.h
index 6ad23526d85..c83f4c134df 100644
--- a/src/mongo/db/repl/initial_syncer.h
+++ b/src/mongo/db/repl/initial_syncer.h
@@ -52,8 +52,8 @@
#include "mongo/db/repl/rollback_checker.h"
#include "mongo/db/repl/sync_source_selector.h"
#include "mongo/dbtests/mock/mock_dbclient_connection.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/net/hostandport.h"
@@ -510,8 +510,7 @@ private:
* Passes 'lock' through to completion guard.
*/
void _checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
- const stdx::lock_guard<stdx::mutex>& lock,
- std::shared_ptr<OnCompletionGuard> onCompletionGuard);
+ const stdx::lock_guard<Latch>& lock, std::shared_ptr<OnCompletionGuard> onCompletionGuard);
/**
* Schedules a rollback checker to get the rollback ID after data cloning or applying. This
@@ -521,8 +520,7 @@ private:
* Passes 'lock' through to completion guard.
*/
void _scheduleRollbackCheckerCheckForRollback_inlock(
- const stdx::lock_guard<stdx::mutex>& lock,
- std::shared_ptr<OnCompletionGuard> onCompletionGuard);
+ const stdx::lock_guard<Latch>& lock, std::shared_ptr<OnCompletionGuard> onCompletionGuard);
/**
* Checks the given status (or embedded status inside the callback args) and current data
@@ -582,7 +580,7 @@ private:
// (MX) Must hold _mutex and be in a callback in _exec to write; must either hold
// _mutex or be in a callback in _exec to read.
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("InitialSyncer::_mutex"); // (S)
const InitialSyncerOptions _opts; // (R)
std::unique_ptr<DataReplicatorExternalState> _dataReplicatorExternalState; // (R)
executor::TaskExecutor* _exec; // (R)
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index 9abdc1f9b7d..4a6a9176ac7 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -60,7 +60,7 @@
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_name.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/fail_point_service.h"
@@ -104,9 +104,9 @@ using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
using unittest::log;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
using NetworkGuard = executor::NetworkInterfaceMock::InNetworkGuard;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
struct CollectionCloneInfo {
std::shared_ptr<CollectionMockStats> stats = std::make_shared<CollectionMockStats>();
@@ -244,7 +244,9 @@ protected:
int documentsInsertedCount = 0;
};
- stdx::mutex _storageInterfaceWorkDoneMutex; // protects _storageInterfaceWorkDone.
+ // protects _storageInterfaceWorkDone.
+ Mutex _storageInterfaceWorkDoneMutex =
+ MONGO_MAKE_LATCH("InitialSyncerTest::_storageInterfaceWorkDoneMutex");
StorageInterfaceResults _storageInterfaceWorkDone;
void setUp() override {
diff --git a/src/mongo/db/repl/local_oplog_info.cpp b/src/mongo/db/repl/local_oplog_info.cpp
index 069c199def1..b17da6d88c5 100644
--- a/src/mongo/db/repl/local_oplog_info.cpp
+++ b/src/mongo/db/repl/local_oplog_info.cpp
@@ -95,7 +95,7 @@ void LocalOplogInfo::resetCollection() {
}
void LocalOplogInfo::setNewTimestamp(ServiceContext* service, const Timestamp& newTime) {
- stdx::lock_guard<stdx::mutex> lk(_newOpMutex);
+ stdx::lock_guard<Latch> lk(_newOpMutex);
LogicalClock::get(service)->setClusterTimeFromTrustedSource(LogicalTime(newTime));
}
@@ -120,7 +120,7 @@ std::vector<OplogSlot> LocalOplogInfo::getNextOpTimes(OperationContext* opCtx, s
// Allow the storage engine to start the transaction outside the critical section.
opCtx->recoveryUnit()->preallocateSnapshot();
- stdx::lock_guard<stdx::mutex> lk(_newOpMutex);
+ stdx::lock_guard<Latch> lk(_newOpMutex);
ts = LogicalClock::get(opCtx)->reserveTicks(count).asTimestamp();
const bool orderedCommit = false;
diff --git a/src/mongo/db/repl/local_oplog_info.h b/src/mongo/db/repl/local_oplog_info.h
index 67ab7e0560d..96cdb259f36 100644
--- a/src/mongo/db/repl/local_oplog_info.h
+++ b/src/mongo/db/repl/local_oplog_info.h
@@ -92,7 +92,7 @@ private:
// Synchronizes the section where a new Timestamp is generated with the section where it is
// registered in the storage engine.
- mutable stdx::mutex _newOpMutex;
+ mutable Mutex _newOpMutex = MONGO_MAKE_LATCH("LocalOplogInfo::_newOpMutex");
};
} // namespace repl
diff --git a/src/mongo/db/repl/multiapplier.cpp b/src/mongo/db/repl/multiapplier.cpp
index 99f09fa2484..02c993a0e67 100644
--- a/src/mongo/db/repl/multiapplier.cpp
+++ b/src/mongo/db/repl/multiapplier.cpp
@@ -60,7 +60,7 @@ MultiApplier::~MultiApplier() {
}
bool MultiApplier::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isActive_inlock();
}
@@ -69,7 +69,7 @@ bool MultiApplier::_isActive_inlock() const {
}
Status MultiApplier::startup() noexcept {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (_state) {
case State::kPreStart:
@@ -96,7 +96,7 @@ Status MultiApplier::startup() noexcept {
}
void MultiApplier::shutdown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -117,12 +117,12 @@ void MultiApplier::shutdown() {
}
void MultiApplier::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() { return !_isActive_inlock(); });
}
MultiApplier::State MultiApplier::getState_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
@@ -153,14 +153,14 @@ void MultiApplier::_finishCallback(const Status& result) {
// destroyed outside the lock.
decltype(_onCompletion) onCompletion;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_onCompletion);
std::swap(_onCompletion, onCompletion);
}
onCompletion(result);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(State::kComplete != _state);
_state = State::kComplete;
_condition.notify_all();
diff --git a/src/mongo/db/repl/multiapplier.h b/src/mongo/db/repl/multiapplier.h
index 119cd58bc89..406888746b1 100644
--- a/src/mongo/db/repl/multiapplier.h
+++ b/src/mongo/db/repl/multiapplier.h
@@ -43,8 +43,8 @@
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/service_context.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -149,7 +149,7 @@ private:
CallbackFn _onCompletion;
// Protects member data of this MultiApplier.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MultiApplier::_mutex");
stdx::condition_variable _condition;
diff --git a/src/mongo/db/repl/noop_writer.cpp b/src/mongo/db/repl/noop_writer.cpp
index d1bc540ab2c..29ee8017bfb 100644
--- a/src/mongo/db/repl/noop_writer.cpp
+++ b/src/mongo/db/repl/noop_writer.cpp
@@ -71,7 +71,7 @@ public:
: _thread([this, noopWrite, waitTime] { run(waitTime, std::move(noopWrite)); }) {}
~PeriodicNoopRunner() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_inShutdown = true;
_cv.notify_all();
lk.unlock();
@@ -85,7 +85,7 @@ private:
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
MONGO_IDLE_THREAD_BLOCK;
_cv.wait_for(lk, waitTime.toSystemDuration(), [&] { return _inShutdown; });
@@ -104,7 +104,7 @@ private:
/**
* Mutex for the CV
*/
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicNoopRunner::_mutex");
/**
* CV to wait for.
@@ -127,7 +127,7 @@ NoopWriter::~NoopWriter() {
}
Status NoopWriter::startWritingPeriodicNoops(OpTime lastKnownOpTime) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lastKnownOpTime = lastKnownOpTime;
invariant(!_noopRunner);
@@ -140,7 +140,7 @@ Status NoopWriter::startWritingPeriodicNoops(OpTime lastKnownOpTime) {
}
void NoopWriter::stopWritingPeriodicNoops() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_noopRunner.reset();
}
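
The PeriodicNoopRunner hunks above pair the new latch with a condition-variable wait that doubles as a shutdown signal: the destructor flips _inShutdown under the lock, notifies, and unlocks before the thread is joined. A sketch of the waiting side, simplified from the hunks above with the actual noop write elided:

    void run(Seconds waitTime) {
        while (true) {
            {
                stdx::unique_lock<Latch> lk(_mutex);
                // Sleeps one period, but wakes immediately when shutdown is requested.
                _cv.wait_for(lk, waitTime.toSystemDuration(), [&] { return _inShutdown; });
                if (_inShutdown)
                    return;
            }
            // ... perform the periodic noop write outside the lock ...
        }
    }
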
diff --git a/src/mongo/db/repl/noop_writer.h b/src/mongo/db/repl/noop_writer.h
index c9cc5712386..999bc889a1d 100644
--- a/src/mongo/db/repl/noop_writer.h
+++ b/src/mongo/db/repl/noop_writer.h
@@ -32,7 +32,7 @@
#include <functional>
#include "mongo/db/repl/optime.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -75,7 +75,7 @@ private:
* Protects member data of this class during start and stop. There is no need to synchronize
* access once it is running because it is run by a single thread.
*/
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("NoopWriter::_mutex");
std::unique_ptr<PeriodicNoopRunner> _noopRunner;
};
diff --git a/src/mongo/db/repl/oplog_applier.cpp b/src/mongo/db/repl/oplog_applier.cpp
index 1e1d020d6ad..5242a9c917b 100644
--- a/src/mongo/db/repl/oplog_applier.cpp
+++ b/src/mongo/db/repl/oplog_applier.cpp
@@ -75,12 +75,12 @@ Future<void> OplogApplier::startup() {
void OplogApplier::shutdown() {
_shutdown();
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_inShutdown = true;
}
bool OplogApplier::inShutdown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _inShutdown;
}
diff --git a/src/mongo/db/repl/oplog_applier.h b/src/mongo/db/repl/oplog_applier.h
index a75f18f96f3..f6049c71943 100644
--- a/src/mongo/db/repl/oplog_applier.h
+++ b/src/mongo/db/repl/oplog_applier.h
@@ -40,7 +40,7 @@
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/functional.h"
#include "mongo/util/future.h"
@@ -219,7 +219,7 @@ private:
Observer* const _observer;
// Protects member data of OplogApplier.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("OplogApplier::_mutex");
// Set to true if shutdown() has been called.
bool _inShutdown = false;
diff --git a/src/mongo/db/repl/oplog_buffer_collection.cpp b/src/mongo/db/repl/oplog_buffer_collection.cpp
index 642a1db0078..69e25926631 100644
--- a/src/mongo/db/repl/oplog_buffer_collection.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection.cpp
@@ -106,7 +106,7 @@ void OplogBufferCollection::startup(OperationContext* opCtx) {
return;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If we are starting from an existing collection, we must populate the in-memory state of the
// buffer.
auto sizeResult = _storageInterface->getCollectionSize(opCtx, _nss);
@@ -148,7 +148,7 @@ void OplogBufferCollection::startup(OperationContext* opCtx) {
void OplogBufferCollection::shutdown(OperationContext* opCtx) {
if (_options.dropCollectionAtShutdown) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dropCollection(opCtx);
_size = 0;
_count = 0;
@@ -167,7 +167,7 @@ void OplogBufferCollection::push(OperationContext* opCtx,
}
size_t numDocs = std::distance(begin, end);
std::vector<InsertStatement> docsToInsert(numDocs);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto ts = _lastPushedTimestamp;
auto sentinelCount = _sentinelCount;
std::transform(begin, end, docsToInsert.begin(), [&sentinelCount, &ts](const Value& value) {
@@ -193,7 +193,7 @@ void OplogBufferCollection::push(OperationContext* opCtx,
void OplogBufferCollection::waitForSpace(OperationContext* opCtx, std::size_t size) {}
bool OplogBufferCollection::isEmpty() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _count == 0;
}
@@ -202,17 +202,17 @@ std::size_t OplogBufferCollection::getMaxSize() const {
}
std::size_t OplogBufferCollection::getSize() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _size;
}
std::size_t OplogBufferCollection::getCount() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _count;
}
void OplogBufferCollection::clear(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dropCollection(opCtx);
_createCollection(opCtx);
_size = 0;
@@ -224,7 +224,7 @@ void OplogBufferCollection::clear(OperationContext* opCtx) {
}
bool OplogBufferCollection::tryPop(OperationContext* opCtx, Value* value) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_count == 0) {
return false;
}
@@ -232,7 +232,7 @@ bool OplogBufferCollection::tryPop(OperationContext* opCtx, Value* value) {
}
bool OplogBufferCollection::waitForData(Seconds waitDuration) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_cvNoLongerEmpty.wait_for(
lk, waitDuration.toSystemDuration(), [&]() { return _count != 0; })) {
return false;
@@ -241,7 +241,7 @@ bool OplogBufferCollection::waitForData(Seconds waitDuration) {
}
bool OplogBufferCollection::peek(OperationContext* opCtx, Value* value) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_count == 0) {
return false;
}
@@ -251,7 +251,7 @@ bool OplogBufferCollection::peek(OperationContext* opCtx, Value* value) {
boost::optional<OplogBuffer::Value> OplogBufferCollection::lastObjectPushed(
OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto lastDocumentPushed = _lastDocumentPushed_inlock(opCtx);
if (lastDocumentPushed) {
BSONObj entryObj = extractEmbeddedOplogDocument(*lastDocumentPushed);
@@ -356,23 +356,23 @@ void OplogBufferCollection::_dropCollection(OperationContext* opCtx) {
}
std::size_t OplogBufferCollection::getSentinelCount_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _sentinelCount;
}
Timestamp OplogBufferCollection::getLastPushedTimestamp_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastPushedTimestamp;
}
Timestamp OplogBufferCollection::getLastPoppedTimestamp_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastPoppedKey.isEmpty() ? Timestamp()
: _lastPoppedKey[""].Obj()[kTimestampFieldName].timestamp();
}
std::queue<BSONObj> OplogBufferCollection::getPeekCache_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _peekCache;
}
diff --git a/src/mongo/db/repl/oplog_buffer_collection.h b/src/mongo/db/repl/oplog_buffer_collection.h
index 112f7dd71a6..40356c834be 100644
--- a/src/mongo/db/repl/oplog_buffer_collection.h
+++ b/src/mongo/db/repl/oplog_buffer_collection.h
@@ -34,7 +34,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/oplog_buffer.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/queue.h"
namespace mongo {
@@ -177,7 +177,7 @@ private:
stdx::condition_variable _cvNoLongerEmpty;
// Protects member data below and synchronizes it with the underlying collection.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("OplogBufferCollection::_mutex");
// Number of documents in buffer.
std::size_t _count = 0;
diff --git a/src/mongo/db/repl/oplog_buffer_proxy.cpp b/src/mongo/db/repl/oplog_buffer_proxy.cpp
index 45b6803abcf..3e2705511bb 100644
--- a/src/mongo/db/repl/oplog_buffer_proxy.cpp
+++ b/src/mongo/db/repl/oplog_buffer_proxy.cpp
@@ -51,8 +51,8 @@ void OplogBufferProxy::startup(OperationContext* opCtx) {
void OplogBufferProxy::shutdown(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex);
- stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex);
+ stdx::lock_guard<Latch> backLock(_lastPushedMutex);
+ stdx::lock_guard<Latch> frontLock(_lastPeekedMutex);
_lastPushed.reset();
_lastPeeked.reset();
}
@@ -65,7 +65,7 @@ void OplogBufferProxy::push(OperationContext* opCtx,
if (begin == end) {
return;
}
- stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex);
+ stdx::lock_guard<Latch> lk(_lastPushedMutex);
_lastPushed = *(end - 1);
_target->push(opCtx, begin, end);
}
@@ -91,16 +91,16 @@ std::size_t OplogBufferProxy::getCount() const {
}
void OplogBufferProxy::clear(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex);
- stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex);
+ stdx::lock_guard<Latch> backLock(_lastPushedMutex);
+ stdx::lock_guard<Latch> frontLock(_lastPeekedMutex);
_lastPushed.reset();
_lastPeeked.reset();
_target->clear(opCtx);
}
bool OplogBufferProxy::tryPop(OperationContext* opCtx, Value* value) {
- stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex);
- stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex);
+ stdx::lock_guard<Latch> backLock(_lastPushedMutex);
+ stdx::lock_guard<Latch> frontLock(_lastPeekedMutex);
if (!_target->tryPop(opCtx, value)) {
return false;
}
@@ -114,7 +114,7 @@ bool OplogBufferProxy::tryPop(OperationContext* opCtx, Value* value) {
bool OplogBufferProxy::waitForData(Seconds waitDuration) {
{
- stdx::unique_lock<stdx::mutex> lk(_lastPushedMutex);
+ stdx::unique_lock<Latch> lk(_lastPushedMutex);
if (_lastPushed) {
return true;
}
@@ -123,7 +123,7 @@ bool OplogBufferProxy::waitForData(Seconds waitDuration) {
}
bool OplogBufferProxy::peek(OperationContext* opCtx, Value* value) {
- stdx::lock_guard<stdx::mutex> lk(_lastPeekedMutex);
+ stdx::lock_guard<Latch> lk(_lastPeekedMutex);
if (_lastPeeked) {
*value = *_lastPeeked;
return true;
@@ -137,7 +137,7 @@ bool OplogBufferProxy::peek(OperationContext* opCtx, Value* value) {
boost::optional<OplogBuffer::Value> OplogBufferProxy::lastObjectPushed(
OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex);
+ stdx::lock_guard<Latch> lk(_lastPushedMutex);
if (!_lastPushed) {
return boost::none;
}
@@ -145,7 +145,7 @@ boost::optional<OplogBuffer::Value> OplogBufferProxy::lastObjectPushed(
}
boost::optional<OplogBuffer::Value> OplogBufferProxy::getLastPeeked_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_lastPeekedMutex);
+ stdx::lock_guard<Latch> lk(_lastPeekedMutex);
return _lastPeeked;
}
diff --git a/src/mongo/db/repl/oplog_buffer_proxy.h b/src/mongo/db/repl/oplog_buffer_proxy.h
index 3fdcec8a27b..5effffd815c 100644
--- a/src/mongo/db/repl/oplog_buffer_proxy.h
+++ b/src/mongo/db/repl/oplog_buffer_proxy.h
@@ -33,7 +33,7 @@
#include <memory>
#include "mongo/db/repl/oplog_buffer.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -80,10 +80,10 @@ private:
std::unique_ptr<OplogBuffer> _target;
// If both mutexes have to be acquired, acquire _lastPushedMutex first.
- mutable stdx::mutex _lastPushedMutex;
+ mutable Mutex _lastPushedMutex = MONGO_MAKE_LATCH("OplogBufferProxy::_lastPushedMutex");
boost::optional<Value> _lastPushed;
- mutable stdx::mutex _lastPeekedMutex;
+ mutable Mutex _lastPeekedMutex = MONGO_MAKE_LATCH("OplogBufferProxy::_lastPeekedMutex");
boost::optional<Value> _lastPeeked;
};
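
OplogBufferProxy keeps two latches, and the comment above fixes their acquisition order: _lastPushedMutex always before _lastPeekedMutex. Every multi-lock path in the .cpp hunks (shutdown, clear, tryPop) follows that order, which is what rules out deadlock between the pair. In sketch form, condensed from the clear() path without the delegation to _target:

    // Any path that needs both latches takes them in the documented order.
    stdx::lock_guard<Latch> backLock(_lastPushedMutex);   // first, always
    stdx::lock_guard<Latch> frontLock(_lastPeekedMutex);  // second, always
    _lastPushed.reset();
    _lastPeeked.reset();
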
diff --git a/src/mongo/db/repl/oplog_test.cpp b/src/mongo/db/repl/oplog_test.cpp
index a39208720ce..d6e23d6c3ac 100644
--- a/src/mongo/db/repl/oplog_test.cpp
+++ b/src/mongo/db/repl/oplog_test.cpp
@@ -45,7 +45,7 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/barrier.h"
#include "mongo/util/concurrency/thread_pool.h"
@@ -165,7 +165,7 @@ void _testConcurrentLogOp(const F& makeTaskFunction,
// Run 2 concurrent logOp() requests using the thread pool.
// Use a barrier with a thread count of 3 to ensure both logOp() tasks are complete before this
// test thread can proceed with shutting the thread pool down.
- stdx::mutex mtx;
+ auto mtx = MONGO_MAKE_LATCH();
unittest::Barrier barrier(3U);
const NamespaceString nss1("test1.coll");
const NamespaceString nss2("test2.coll");
@@ -200,7 +200,7 @@ void _testConcurrentLogOp(const F& makeTaskFunction,
std::reverse(oplogEntries->begin(), oplogEntries->end());
// Look up namespaces and their respective optimes (returned by logOp()) in the map.
- stdx::lock_guard<stdx::mutex> lock(mtx);
+ stdx::lock_guard<Latch> lock(mtx);
ASSERT_EQUALS(2U, opTimeNssMap->size());
}
@@ -210,10 +210,10 @@ void _testConcurrentLogOp(const F& makeTaskFunction,
* Returns optime of generated oplog entry.
*/
OpTime _logOpNoopWithMsg(OperationContext* opCtx,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lock(*mtx);
+ stdx::lock_guard<Latch> lock(*mtx);
// logOp() must be called while holding the lock because the ephemeralForTest storage engine
// does not support concurrent updates to its internal state.
@@ -239,7 +239,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithoutDocLockingSupport) {
_testConcurrentLogOp(
[](const NamespaceString& nss,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
unittest::Barrier* barrier) {
return [=] {
@@ -272,7 +272,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupport) {
ForceSupportsDocLocking support(true);
_testConcurrentLogOp(
[](const NamespaceString& nss,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
unittest::Barrier* barrier) {
return [=] {
@@ -304,7 +304,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertFirstOplogEntry) {
ForceSupportsDocLocking support(true);
_testConcurrentLogOp(
[](const NamespaceString& nss,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
unittest::Barrier* barrier) {
return [=] {
@@ -322,7 +322,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertFirstOplogEntry) {
// Revert the first logOp() call and confirm that there are no holes in the
// oplog after committing the oplog entry with the more recent optime.
{
- stdx::lock_guard<stdx::mutex> lock(*mtx);
+ stdx::lock_guard<Latch> lock(*mtx);
auto firstOpTimeAndNss = *(opTimeNssMap->cbegin());
if (opTime == firstOpTimeAndNss.first) {
ASSERT_EQUALS(nss, firstOpTimeAndNss.second)
@@ -351,7 +351,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertLastOplogEntry) {
ForceSupportsDocLocking support(true);
_testConcurrentLogOp(
[](const NamespaceString& nss,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
unittest::Barrier* barrier) {
return [=] {
@@ -369,7 +369,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertLastOplogEntry) {
// Revert the last logOp() call and confirm that there are no holes in the
// oplog after committing the oplog entry with the earlier optime.
{
- stdx::lock_guard<stdx::mutex> lock(*mtx);
+ stdx::lock_guard<Latch> lock(*mtx);
auto lastOpTimeAndNss = *(opTimeNssMap->crbegin());
if (opTime == lastOpTimeAndNss.first) {
ASSERT_EQUALS(nss, lastOpTimeAndNss.second)
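
In the oplog tests the latch is a stack local shared across tasks, so the helpers now take Mutex* instead of stdx::mutex* and lock through the dereferenced pointer. A sketch of the post-migration helper shape, condensed from _logOpNoopWithMsg above (the logOp() call and map bookkeeping are elided; the function name is illustrative):

    OpTime logNoopLocked(OperationContext* opCtx, Mutex* mtx, const NamespaceString& nss) {
        // Lock through the dereferenced pointer; the guard type is otherwise unchanged.
        stdx::lock_guard<Latch> lock(*mtx);
        // ... call logOp() under the lock and record the returned OpTime ...
        return {};
    }
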
diff --git a/src/mongo/db/repl/replication_consistency_markers_mock.cpp b/src/mongo/db/repl/replication_consistency_markers_mock.cpp
index 61f46bf0bef..5c698190445 100644
--- a/src/mongo/db/repl/replication_consistency_markers_mock.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_mock.cpp
@@ -36,12 +36,12 @@ namespace repl {
void ReplicationConsistencyMarkersMock::initializeMinValidDocument(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
+ stdx::lock_guard<Latch> lock(_initialSyncFlagMutex);
_initialSyncFlag = false;
}
{
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_minValid = {};
_oplogTruncateAfterPoint = {};
_appliedThrough = {};
@@ -49,64 +49,64 @@ void ReplicationConsistencyMarkersMock::initializeMinValidDocument(OperationCont
}
bool ReplicationConsistencyMarkersMock::getInitialSyncFlag(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
+ stdx::lock_guard<Latch> lock(_initialSyncFlagMutex);
return _initialSyncFlag;
}
void ReplicationConsistencyMarkersMock::setInitialSyncFlag(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
+ stdx::lock_guard<Latch> lock(_initialSyncFlagMutex);
_initialSyncFlag = true;
}
void ReplicationConsistencyMarkersMock::clearInitialSyncFlag(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
+ stdx::lock_guard<Latch> lock(_initialSyncFlagMutex);
_initialSyncFlag = false;
}
OpTime ReplicationConsistencyMarkersMock::getMinValid(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
return _minValid;
}
void ReplicationConsistencyMarkersMock::setMinValid(OperationContext* opCtx,
const OpTime& minValid) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_minValid = minValid;
}
void ReplicationConsistencyMarkersMock::setMinValidToAtLeast(OperationContext* opCtx,
const OpTime& minValid) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_minValid = std::max(_minValid, minValid);
}
void ReplicationConsistencyMarkersMock::setOplogTruncateAfterPoint(OperationContext* opCtx,
const Timestamp& timestamp) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_oplogTruncateAfterPoint = timestamp;
}
Timestamp ReplicationConsistencyMarkersMock::getOplogTruncateAfterPoint(
OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
return _oplogTruncateAfterPoint;
}
void ReplicationConsistencyMarkersMock::setAppliedThrough(OperationContext* opCtx,
const OpTime& optime,
bool setTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_appliedThrough = optime;
}
void ReplicationConsistencyMarkersMock::clearAppliedThrough(OperationContext* opCtx,
const Timestamp& writeTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_appliedThrough = {};
}
OpTime ReplicationConsistencyMarkersMock::getAppliedThrough(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
return _appliedThrough;
}
diff --git a/src/mongo/db/repl/replication_consistency_markers_mock.h b/src/mongo/db/repl/replication_consistency_markers_mock.h
index 3215264110f..3fe3c2670f5 100644
--- a/src/mongo/db/repl/replication_consistency_markers_mock.h
+++ b/src/mongo/db/repl/replication_consistency_markers_mock.h
@@ -31,7 +31,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_consistency_markers.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -73,10 +73,12 @@ public:
Status createInternalCollections(OperationContext* opCtx) override;
private:
- mutable stdx::mutex _initialSyncFlagMutex;
+ mutable Mutex _initialSyncFlagMutex =
+ MONGO_MAKE_LATCH("ReplicationConsistencyMarkersMock::_initialSyncFlagMutex");
bool _initialSyncFlag = false;
- mutable stdx::mutex _minValidBoundariesMutex;
+ mutable Mutex _minValidBoundariesMutex =
+ MONGO_MAKE_LATCH("ReplicationConsistencyMarkersMock::_minValidBoundariesMutex");
OpTime _appliedThrough;
OpTime _minValid;
Timestamp _oplogTruncateAfterPoint;
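
This header shows the declaration side of the migration: each member mutex keeps its mutable qualifier, so const accessors can still lock it, and gains a default initializer that names the latch "ClassName::_memberName" for diagnostics. A hedged sketch with a hypothetical mock:

    #include "mongo/platform/mutex.h"

    namespace mongo {

    class FlagMock {  // hypothetical, not part of the patch
    public:
        bool flag() const {
            // Locking a mutable Mutex from a const method, as the mock does.
            stdx::lock_guard<Latch> lock(_flagMutex);
            return _flag;
        }
        void setFlag(bool v) {
            stdx::lock_guard<Latch> lock(_flagMutex);
            _flag = v;
        }

    private:
        mutable Mutex _flagMutex = MONGO_MAKE_LATCH("FlagMock::_flagMutex");
        bool _flag = false;
    };

    }  // namespace mongo
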
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 5cf10287b04..d1975168f77 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -198,7 +198,7 @@ bool ReplicationCoordinatorExternalStateImpl::isInitialSyncFlagSet(OperationCont
void ReplicationCoordinatorExternalStateImpl::startSteadyStateReplication(
OperationContext* opCtx, ReplicationCoordinator* replCoord) {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
// We've shut down the external state, don't start again.
if (_inShutdown)
@@ -248,12 +248,12 @@ void ReplicationCoordinatorExternalStateImpl::startSteadyStateReplication(
}
void ReplicationCoordinatorExternalStateImpl::stopDataReplication(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_threadMutex);
+ stdx::unique_lock<Latch> lk(_threadMutex);
_stopDataReplication_inlock(opCtx, lk);
}
void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(
- OperationContext* opCtx, stdx::unique_lock<stdx::mutex>& lock) {
+ OperationContext* opCtx, stdx::unique_lock<Latch>& lock) {
// Make sure no other _stopDataReplication calls are in progress.
_dataReplicationStopped.wait(lock, [this]() { return !_stoppingDataReplication; });
_stoppingDataReplication = true;
@@ -308,7 +308,7 @@ void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(
void ReplicationCoordinatorExternalStateImpl::startThreads(const ReplSettings& settings) {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_startedThreads) {
return;
}
@@ -331,7 +331,7 @@ void ReplicationCoordinatorExternalStateImpl::startThreads(const ReplSettings& s
}
void ReplicationCoordinatorExternalStateImpl::shutdown(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_threadMutex);
+ stdx::unique_lock<Latch> lk(_threadMutex);
_inShutdown = true;
if (!_startedThreads) {
return;
@@ -772,28 +772,28 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
}
void ReplicationCoordinatorExternalStateImpl::signalApplierToChooseNewSyncSource() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_bgSync) {
_bgSync->clearSyncTarget();
}
}
void ReplicationCoordinatorExternalStateImpl::stopProducer() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_bgSync) {
_bgSync->stop(false);
}
}
void ReplicationCoordinatorExternalStateImpl::startProducerIfStopped() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_bgSync) {
_bgSync->startProducerIfStopped();
}
}
bool ReplicationCoordinatorExternalStateImpl::tooStale() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_bgSync) {
return _bgSync->tooStale();
}
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index 645ac39e28b..1469635a97f 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -39,7 +39,7 @@
#include "mongo/db/repl/task_runner.h"
#include "mongo/db/storage/journal_listener.h"
#include "mongo/db/storage/snapshot_manager.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
namespace mongo {
@@ -121,7 +121,7 @@ private:
/**
* Stops data replication and returns with 'lock' locked.
*/
- void _stopDataReplication_inlock(OperationContext* opCtx, stdx::unique_lock<stdx::mutex>& lock);
+ void _stopDataReplication_inlock(OperationContext* opCtx, stdx::unique_lock<Latch>& lock);
/**
* Called when the instance transitions to primary in order to notify a potentially sharded host
@@ -142,7 +142,7 @@ private:
ServiceContext* _service;
// Guards starting threads and setting _startedThreads
- stdx::mutex _threadMutex;
+ Mutex _threadMutex = MONGO_MAKE_LATCH("ReplicationCoordinatorExternalStateImpl::_threadMutex");
// Flag for guarding against concurrent data replication stopping.
bool _stoppingDataReplication = false;
@@ -188,7 +188,8 @@ private:
Future<void> _oplogApplierShutdownFuture;
// Mutex guarding the _nextThreadId value to prevent concurrent incrementing.
- stdx::mutex _nextThreadIdMutex;
+ Mutex _nextThreadIdMutex =
+ MONGO_MAKE_LATCH("ReplicationCoordinatorExternalStateImpl::_nextThreadIdMutex");
// Number used to uniquely name threads.
long long _nextThreadId = 0;
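
The _inlock suffix used throughout this header marks the convention that the public entry point owns the stdx::unique_lock<Latch> and private helpers borrow it by reference, possibly blocking on a condition variable with it, as _stopDataReplication_inlock does with _dataReplicationStopped. A rough sketch under hypothetical names:

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"

    namespace mongo {

    class StopOnce {  // hypothetical
    public:
        void stop() {
            stdx::unique_lock<Latch> lk(_mutex);
            _stop_inlock(lk);
        }

    private:
        void _stop_inlock(stdx::unique_lock<Latch>& lk) {
            // wait() releases and reacquires the caller's lock, so only one
            // stop() runs its critical section at a time.
            _stopped.wait(lk, [this] { return !_stopping; });
            _stopping = true;
            // ... perform the actual stop work here ...
            _stopping = false;
            _stopped.notify_all();
        }

        Mutex _mutex = MONGO_MAKE_LATCH("StopOnce::_mutex");
        stdx::condition_variable _stopped;
        bool _stopping = false;
    };

    }  // namespace mongo
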
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
index c6167b82e1d..75bdac91439 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
@@ -146,7 +146,7 @@ StatusWith<LastVote> ReplicationCoordinatorExternalStateMock::loadLocalLastVoteD
Status ReplicationCoordinatorExternalStateMock::storeLocalLastVoteDocument(
OperationContext* opCtx, const LastVote& lastVote) {
{
- stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
+ stdx::unique_lock<Latch> lock(_shouldHangLastVoteMutex);
while (_storeLocalLastVoteDocumentShouldHang) {
_shouldHangLastVoteCondVar.wait(lock);
}
@@ -211,7 +211,7 @@ void ReplicationCoordinatorExternalStateMock::setStoreLocalLastVoteDocumentStatu
}
void ReplicationCoordinatorExternalStateMock::setStoreLocalLastVoteDocumentToHang(bool hang) {
- stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
+ stdx::unique_lock<Latch> lock(_shouldHangLastVoteMutex);
_storeLocalLastVoteDocumentShouldHang = hang;
if (!hang) {
_shouldHangLastVoteCondVar.notify_all();
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.h b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
index 0ce548743b0..5cebab1e820 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
@@ -37,8 +37,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/last_vote.h"
#include "mongo/db/repl/replication_coordinator_external_state.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/net/hostandport.h"
@@ -197,7 +197,8 @@ private:
Status _storeLocalConfigDocumentStatus;
Status _storeLocalLastVoteDocumentStatus;
// mutex and cond var for controlling storeLocalLastVoteDocument()'s hanging
- stdx::mutex _shouldHangLastVoteMutex;
+ Mutex _shouldHangLastVoteMutex =
+ MONGO_MAKE_LATCH("ReplicationCoordinatorExternalStateMock::_shouldHangLastVoteMutex");
stdx::condition_variable _shouldHangLastVoteCondVar;
bool _storeLocalLastVoteDocumentShouldHang;
bool _connectionsClosed;
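
The mock above uses the usual hang-control idiom: a boolean guarded by the latch plus a condition variable, letting a test park storeLocalLastVoteDocument() and later release it via notify_all(). A minimal standalone sketch (names hypothetical):

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"

    namespace mongo {

    class HangPoint {  // hypothetical
    public:
        void waitIfHanging() {
            stdx::unique_lock<Latch> lock(_mutex);
            while (_shouldHang) {
                _cv.wait(lock);  // releases _mutex while parked
            }
        }
        void setHang(bool hang) {
            stdx::unique_lock<Latch> lock(_mutex);
            _shouldHang = hang;
            if (!hang) {
                _cv.notify_all();  // wake every thread parked above
            }
        }

    private:
        Mutex _mutex = MONGO_MAKE_LATCH("HangPoint::_mutex");
        stdx::condition_variable _cv;
        bool _shouldHang = false;
    };

    }  // namespace mongo
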
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index ba1739049fb..8587b3a56a0 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -82,9 +82,9 @@
#include "mongo/db/write_concern_options.h"
#include "mongo/executor/connection_pool_stats.h"
#include "mongo/executor/network_interface.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/metadata/oplog_query_metadata.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
@@ -154,7 +154,7 @@ private:
const bool _initialState;
};
-void lockAndCall(stdx::unique_lock<stdx::mutex>* lk, const std::function<void()>& fn) {
+void lockAndCall(stdx::unique_lock<Latch>* lk, const std::function<void()>& fn) {
if (!lk->owns_lock()) {
lk->lock();
}
@@ -233,7 +233,7 @@ public:
* _list is guarded by ReplicationCoordinatorImpl::_mutex; thus it is illegal to construct one
* of these without holding _mutex.
*/
- WaiterGuard(const stdx::unique_lock<stdx::mutex>& lock, WaiterList* list, Waiter* waiter)
+ WaiterGuard(const stdx::unique_lock<Latch>& lock, WaiterList* list, Waiter* waiter)
: _lock(lock), _list(list), _waiter(waiter) {
invariant(_lock.owns_lock());
list->add_inlock(_waiter);
@@ -245,7 +245,7 @@ public:
}
private:
- const stdx::unique_lock<stdx::mutex>& _lock;
+ const stdx::unique_lock<Latch>& _lock;
WaiterList* _list;
Waiter* _waiter;
};
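
WaiterGuard's comment states the contract: it is illegal to construct one without holding ReplicationCoordinatorImpl::_mutex. Since the guard never locks anything itself, it takes the caller's unique_lock by const reference and enforces the contract with invariant(owns_lock()). A hedged sketch of the same shape (ListGuard is hypothetical):

    #include <algorithm>
    #include <vector>

    #include "mongo/platform/mutex.h"
    #include "mongo/util/assert_util.h"  // invariant()

    namespace mongo {

    class ListGuard {  // hypothetical
    public:
        ListGuard(const stdx::unique_lock<Latch>& lock, std::vector<int>* list, int id)
            : _lock(lock), _list(list), _id(id) {
            invariant(_lock.owns_lock());  // caller must already hold the mutex
            _list->push_back(_id);
        }
        ~ListGuard() {
            // Assumes the caller still holds the same lock at destruction.
            _list->erase(std::remove(_list->begin(), _list->end(), _id), _list->end());
        }

    private:
        const stdx::unique_lock<Latch>& _lock;
        std::vector<int>* _list;
        int _id;
    };

    }  // namespace mongo
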
@@ -374,7 +374,7 @@ void ReplicationCoordinatorImpl::waitForStartUpComplete_forTest() {
void ReplicationCoordinatorImpl::_waitForStartUpComplete() {
CallbackHandle handle;
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
_rsConfigStateChange.wait(lk);
}
@@ -386,12 +386,12 @@ void ReplicationCoordinatorImpl::_waitForStartUpComplete() {
}
ReplSetConfig ReplicationCoordinatorImpl::getReplicaSetConfig_forTest() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _rsConfig;
}
Date_t ReplicationCoordinatorImpl::getElectionTimeout_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_handleElectionTimeoutCbh.isValid()) {
return Date_t();
}
@@ -399,12 +399,12 @@ Date_t ReplicationCoordinatorImpl::getElectionTimeout_forTest() const {
}
Milliseconds ReplicationCoordinatorImpl::getRandomizedElectionOffset_forTest() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _getRandomizedElectionOffset_inlock();
}
boost::optional<Date_t> ReplicationCoordinatorImpl::getPriorityTakeover_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_priorityTakeoverCbh.isValid()) {
return boost::none;
}
@@ -412,7 +412,7 @@ boost::optional<Date_t> ReplicationCoordinatorImpl::getPriorityTakeover_forTest(
}
boost::optional<Date_t> ReplicationCoordinatorImpl::getCatchupTakeover_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_catchupTakeoverCbh.isValid()) {
return boost::none;
}
@@ -425,12 +425,12 @@ executor::TaskExecutor::CallbackHandle ReplicationCoordinatorImpl::getCatchupTak
}
OpTime ReplicationCoordinatorImpl::getCurrentCommittedSnapshotOpTime() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _getCurrentCommittedSnapshotOpTime_inlock();
}
OpTimeAndWallTime ReplicationCoordinatorImpl::getCurrentCommittedSnapshotOpTimeAndWallTime() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _getCurrentCommittedSnapshotOpTimeAndWallTime_inlock();
}
@@ -481,7 +481,7 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
log() << "Did not find local initialized voted for document at startup.";
}
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_topCoord->loadLastVote(lastVote.getValue());
}
@@ -542,7 +542,7 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
handle = CallbackHandle{};
}
fassert(40446, handle);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_finishLoadLocalConfigCbh = std::move(handle.getValue());
return false;
@@ -644,7 +644,7 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
// applied optime is never greater than the latest cluster time in the logical clock.
_externalState->setGlobalTimestamp(getServiceContext(), lastOpTime.getTimestamp());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
invariant(_rsConfigState == kConfigStartingUp);
const PostMemberStateUpdateAction action =
_setCurrentRSConfig(lock, opCtx.get(), localConfig, myIndex.getValue());
@@ -661,7 +661,7 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
}
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Step down is impossible, so we don't need to wait for the returned event.
_updateTerm_inlock(term);
}
@@ -677,7 +677,7 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
void ReplicationCoordinatorImpl::_stopDataReplication(OperationContext* opCtx) {
std::shared_ptr<InitialSyncer> initialSyncerCopy;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_initialSyncer.swap(initialSyncerCopy);
}
if (initialSyncerCopy) {
@@ -719,7 +719,7 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx,
auto onCompletion = [this, startCompleted](const StatusWith<OpTimeAndWallTime>& opTimeStatus) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (opTimeStatus == ErrorCodes::CallbackCanceled) {
log() << "Initial Sync has been cancelled: " << opTimeStatus.getStatus();
return;
@@ -760,11 +760,7 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx,
try {
{
// Must take the lock to set _initialSyncer, but not call it.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- if (_inShutdown) {
- log() << "Initial Sync not starting because replication is shutting down.";
- return;
- }
+ stdx::lock_guard<Latch> lock(_mutex);
initialSyncerCopy = std::make_shared<InitialSyncer>(
createInitialSyncerOptions(this, _externalState.get()),
std::make_unique<DataReplicatorExternalStateInitialSync>(this,
@@ -817,7 +813,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* opCtx) {
storageGlobalParams.readOnly = true;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_setConfigState_inlock(kConfigReplicationDisabled);
return;
}
@@ -828,7 +824,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* opCtx) {
_storage->initializeStorageControlsForReplication(opCtx->getServiceContext());
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
fassert(18822, !_inShutdown);
_setConfigState_inlock(kConfigStartingUp);
_topCoord->setStorageEngineSupportsReadCommitted(
@@ -844,7 +840,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* opCtx) {
if (doneLoadingConfig) {
// If we're not done loading the config, then the config state will be set by
// _finishLoadLocalConfig.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_rsConfig.isInitialized());
_setConfigState_inlock(kConfigUninitialized);
}
@@ -870,7 +866,7 @@ void ReplicationCoordinatorImpl::shutdown(OperationContext* opCtx) {
// Used to shut down outside of the lock.
std::shared_ptr<InitialSyncer> initialSyncerCopy;
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
fassert(28533, !_inShutdown);
_inShutdown = true;
if (_rsConfigState == kConfigPreStart) {
@@ -918,12 +914,12 @@ ReplicationCoordinator::Mode ReplicationCoordinatorImpl::getReplicationMode() co
}
MemberState ReplicationCoordinatorImpl::getMemberState() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _getMemberState_inlock();
}
std::vector<MemberData> ReplicationCoordinatorImpl::getMemberData() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _topCoord->getMemberData();
}
@@ -937,7 +933,7 @@ Status ReplicationCoordinatorImpl::waitForMemberState(MemberState expectedState,
return Status(ErrorCodes::BadValue, "Timeout duration cannot be negative");
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto pred = [this, expectedState]() { return _memberState == expectedState; };
if (!_memberStateChange.wait_for(lk, timeout.toSystemDuration(), pred)) {
return Status(ErrorCodes::ExceededTimeLimit,
@@ -949,7 +945,7 @@ Status ReplicationCoordinatorImpl::waitForMemberState(MemberState expectedState,
}
Seconds ReplicationCoordinatorImpl::getSlaveDelaySecs() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_rsConfig.isInitialized());
if (_selfIndex == -1) {
// We aren't currently in the set. Return 0 seconds so we can clear out the applier's
@@ -960,7 +956,7 @@ Seconds ReplicationCoordinatorImpl::getSlaveDelaySecs() const {
}
void ReplicationCoordinatorImpl::clearSyncSourceBlacklist() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_topCoord->clearSyncSourceBlacklist();
}
@@ -977,7 +973,7 @@ Status ReplicationCoordinatorImpl::setFollowerMode(const MemberState& newState)
Status ReplicationCoordinatorImpl::_setFollowerMode(OperationContext* opCtx,
const MemberState& newState) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (newState == _topCoord->getMemberState()) {
return Status::OK();
}
@@ -1008,7 +1004,7 @@ Status ReplicationCoordinatorImpl::_setFollowerMode(OperationContext* opCtx,
}
ReplicationCoordinator::ApplierState ReplicationCoordinatorImpl::getApplierState() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _applierState;
}
@@ -1040,7 +1036,7 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx,
// When we go to drop all temp collections, we must replicate the drops.
invariant(opCtx->writesAreReplicated());
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_applierState != ApplierState::Draining) {
return;
}
@@ -1101,7 +1097,7 @@ Status ReplicationCoordinatorImpl::waitForDrainFinish(Milliseconds timeout) {
return Status(ErrorCodes::BadValue, "Timeout duration cannot be negative");
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto pred = [this]() { return _applierState != ApplierState::Draining; };
if (!_drainFinishedCond.wait_for(lk, timeout.toSystemDuration(), pred)) {
return Status(ErrorCodes::ExceededTimeLimit,
@@ -1116,7 +1112,7 @@ void ReplicationCoordinatorImpl::signalUpstreamUpdater() {
}
void ReplicationCoordinatorImpl::setMyHeartbeatMessage(const std::string& msg) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_topCoord->setMyHeartbeatMessage(_replExecutor->now(), msg);
}
@@ -1127,7 +1123,7 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTimeForward(
const auto opTime = opTimeAndWallTime.opTime;
_externalState->setGlobalTimestamp(getServiceContext(), opTime.getTimestamp());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto myLastAppliedOpTime = _getMyLastAppliedOpTime_inlock();
if (opTime > myLastAppliedOpTime) {
_setMyLastAppliedOpTimeAndWallTime(lock, opTimeAndWallTime, false, consistency);
@@ -1153,7 +1149,7 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTimeForward(
void ReplicationCoordinatorImpl::setMyLastDurableOpTimeAndWallTimeForward(
const OpTimeAndWallTime& opTimeAndWallTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (opTimeAndWallTime.opTime > _getMyLastDurableOpTime_inlock()) {
_setMyLastDurableOpTimeAndWallTime(lock, opTimeAndWallTime, false);
_reportUpstream_inlock(std::move(lock));
@@ -1167,7 +1163,7 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTime(
// applied optime is never greater than the latest cluster time in the logical clock.
_externalState->setGlobalTimestamp(getServiceContext(), opTime.getTimestamp());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
// The optime passed to this function is required to represent a consistent database state.
_setMyLastAppliedOpTimeAndWallTime(lock, opTimeAndWallTime, false, DataConsistency::Consistent);
_reportUpstream_inlock(std::move(lock));
@@ -1175,13 +1171,13 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTime(
void ReplicationCoordinatorImpl::setMyLastDurableOpTimeAndWallTime(
const OpTimeAndWallTime& opTimeAndWallTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_setMyLastDurableOpTimeAndWallTime(lock, opTimeAndWallTime, false);
_reportUpstream_inlock(std::move(lock));
}
void ReplicationCoordinatorImpl::resetMyLastOpTimes() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_resetMyLastOpTimes(lock);
_reportUpstream_inlock(std::move(lock));
}
@@ -1196,7 +1192,7 @@ void ReplicationCoordinatorImpl::_resetMyLastOpTimes(WithLock lk) {
_stableOpTimeCandidates.clear();
}
-void ReplicationCoordinatorImpl::_reportUpstream_inlock(stdx::unique_lock<stdx::mutex> lock) {
+void ReplicationCoordinatorImpl::_reportUpstream_inlock(stdx::unique_lock<Latch> lock) {
invariant(lock.owns_lock());
if (getReplicationMode() != modeReplSet) {
@@ -1283,22 +1279,22 @@ void ReplicationCoordinatorImpl::_setMyLastDurableOpTimeAndWallTime(
}
OpTime ReplicationCoordinatorImpl::getMyLastAppliedOpTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyLastAppliedOpTime_inlock();
}
OpTimeAndWallTime ReplicationCoordinatorImpl::getMyLastAppliedOpTimeAndWallTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyLastAppliedOpTimeAndWallTime_inlock();
}
OpTimeAndWallTime ReplicationCoordinatorImpl::getMyLastDurableOpTimeAndWallTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyLastDurableOpTimeAndWallTime_inlock();
}
OpTime ReplicationCoordinatorImpl::getMyLastDurableOpTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyLastDurableOpTime_inlock();
}
@@ -1405,7 +1401,7 @@ Status ReplicationCoordinatorImpl::_waitUntilOpTime(OperationContext* opCtx,
}
}
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (isMajorityCommittedRead && !_externalState->snapshotsEnabled()) {
return {ErrorCodes::CommandNotSupported,
@@ -1572,7 +1568,7 @@ Status ReplicationCoordinatorImpl::setLastDurableOptime_forTest(long long cfgVer
long long memberId,
const OpTime& opTime,
Date_t wallTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(getReplicationMode() == modeReplSet);
if (wallTime == Date_t()) {
@@ -1591,7 +1587,7 @@ Status ReplicationCoordinatorImpl::setLastAppliedOptime_forTest(long long cfgVer
long long memberId,
const OpTime& opTime,
Date_t wallTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(getReplicationMode() == modeReplSet);
if (wallTime == Date_t()) {
@@ -1691,7 +1687,7 @@ ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitRepli
OperationContext* opCtx, const OpTime& opTime, const WriteConcernOptions& writeConcern) {
Timer timer;
WriteConcernOptions fixedWriteConcern = populateUnsetWriteConcernOptionsSyncMode(writeConcern);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto status = _awaitReplication_inlock(&lock, opCtx, opTime, fixedWriteConcern);
return {std::move(status), duration_cast<Milliseconds>(timer.elapsed())};
}
@@ -1714,7 +1710,7 @@ BSONObj ReplicationCoordinatorImpl::_getReplicationProgress(WithLock wl) const {
return progress.obj();
}
Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
- stdx::unique_lock<stdx::mutex>* lock,
+ stdx::unique_lock<Latch>* lock,
OperationContext* opCtx,
const OpTime& opTime,
const WriteConcernOptions& writeConcern) {
@@ -1834,7 +1830,7 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
void ReplicationCoordinatorImpl::waitForStepDownAttempt_forTest() {
auto isSteppingDown = [&]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// If true, we know that a stepdown is underway.
return (_topCoord->isSteppingDown());
};
@@ -1933,7 +1929,7 @@ void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::_killOpThreadFn()
// X mode for the first time. This ensures that no writing operations will continue
// after the node's term change.
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_stopKillingOps.wait_for(
lock, Milliseconds(10).toSystemDuration(), [this] { return _killSignaled; })) {
log() << "Stopped killing user operations";
@@ -1949,7 +1945,7 @@ void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::_stopAndWaitForKi
return;
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_killSignaled = true;
_stopKillingOps.notify_all();
}
@@ -2009,7 +2005,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
auto deadline = force ? stepDownUntil : waitUntil;
AutoGetRstlForStepUpStepDown arsd(this, opCtx, deadline);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
opCtx->checkForInterrupt();
@@ -2043,7 +2039,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
stepdownHangBeforePerformingPostMemberStateUpdateActions.shouldFail())) {
mongo::sleepsecs(1);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_inShutdown) {
break;
}
@@ -2149,7 +2145,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
}
void ReplicationCoordinatorImpl::_performElectionHandoff() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto candidateIndex = _topCoord->chooseElectionHandoffCandidate();
if (candidateIndex < 0) {
@@ -2198,7 +2194,7 @@ bool ReplicationCoordinatorImpl::isMasterForReportingPurposes() {
return true;
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(getReplicationMode() == modeReplSet);
return _getMemberState_inlock().primary();
}
@@ -2227,7 +2223,7 @@ bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationCont
}
bool ReplicationCoordinatorImpl::canAcceptNonLocalWrites() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _readWriteAbility->canAcceptNonLocalWrites(lk);
}
@@ -2259,7 +2255,7 @@ bool ReplicationCoordinatorImpl::canAcceptWritesFor_UNSAFE(OperationContext* opC
return true;
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_memberState.rollback()) {
return false;
}
@@ -2287,7 +2283,7 @@ Status ReplicationCoordinatorImpl::checkCanServeReadsFor_UNSAFE(OperationContext
// Oplog reads are not allowed during STARTUP state, but we make an exception for internal
// reads. Internal reads are required for cleaning up unfinished apply batches.
if (!isPrimaryOrSecondary && getReplicationMode() == modeReplSet && ns.isOplog()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if ((_memberState.startup() && client->isFromUserConnection()) || _memberState.startup2() ||
_memberState.rollback()) {
return Status{ErrorCodes::NotMasterOrSecondary,
@@ -2331,17 +2327,17 @@ bool ReplicationCoordinatorImpl::shouldRelaxIndexConstraints(OperationContext* o
}
OID ReplicationCoordinatorImpl::getElectionId() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _electionId;
}
int ReplicationCoordinatorImpl::getMyId() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyId_inlock();
}
HostAndPort ReplicationCoordinatorImpl::getMyHostAndPort() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _rsConfig.getMemberAt(_selfIndex).getHostAndPort();
}
@@ -2358,7 +2354,7 @@ Status ReplicationCoordinatorImpl::resyncData(OperationContext* opCtx, bool wait
f = [&finishedEvent, this]() { _replExecutor->signalEvent(finishedEvent); };
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_resetMyLastOpTimes(lk);
}
// unlock before calling _startDataReplication().
@@ -2370,7 +2366,7 @@ Status ReplicationCoordinatorImpl::resyncData(OperationContext* opCtx, bool wait
}
StatusWith<BSONObj> ReplicationCoordinatorImpl::prepareReplSetUpdatePositionCommand() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _topCoord->prepareReplSetUpdatePositionCommand(
_getCurrentCommittedSnapshotOpTime_inlock());
}
@@ -2382,7 +2378,7 @@ Status ReplicationCoordinatorImpl::processReplSetGetStatus(
if (responseStyle == ReplSetGetStatusResponseStyle::kInitialSync) {
std::shared_ptr<InitialSyncer> initialSyncerCopy;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
initialSyncerCopy = _initialSyncer;
}
@@ -2397,7 +2393,7 @@ Status ReplicationCoordinatorImpl::processReplSetGetStatus(
BSONObj electionCandidateMetrics =
ReplicationMetrics::get(getServiceContext()).getElectionCandidateMetricsBSON();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
Status result(ErrorCodes::InternalError, "didn't set status in prepareStatusResponse");
_topCoord->prepareStatusResponse(
TopologyCoordinator::ReplSetStatusArgs{
@@ -2417,7 +2413,7 @@ void ReplicationCoordinatorImpl::fillIsMasterForReplSet(
IsMasterResponse* response, const SplitHorizon::Parameters& horizonParams) {
invariant(getSettings().usingReplSets());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_topCoord->fillIsMasterForReplSet(response, horizonParams);
OpTime lastOpTime = _getMyLastAppliedOpTime_inlock();
@@ -2440,17 +2436,17 @@ void ReplicationCoordinatorImpl::fillIsMasterForReplSet(
}
void ReplicationCoordinatorImpl::appendSlaveInfoData(BSONObjBuilder* result) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_topCoord->fillMemberData(result);
}
ReplSetConfig ReplicationCoordinatorImpl::getConfig() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _rsConfig;
}
void ReplicationCoordinatorImpl::processReplSetGetConfig(BSONObjBuilder* result) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
result->append("config", _rsConfig.toBSON());
}
@@ -2458,7 +2454,7 @@ void ReplicationCoordinatorImpl::processReplSetMetadata(const rpc::ReplSetMetada
EventHandle evh;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
evh = _processReplSetMetadata_inlock(replMetadata);
}
@@ -2468,7 +2464,7 @@ void ReplicationCoordinatorImpl::processReplSetMetadata(const rpc::ReplSetMetada
}
void ReplicationCoordinatorImpl::cancelAndRescheduleElectionTimeout() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_cancelAndRescheduleElectionTimeout_inlock();
}
@@ -2481,7 +2477,7 @@ EventHandle ReplicationCoordinatorImpl::_processReplSetMetadata_inlock(
}
bool ReplicationCoordinatorImpl::getMaintenanceMode() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _topCoord->getMaintenanceCount() > 0;
}
@@ -2491,7 +2487,7 @@ Status ReplicationCoordinatorImpl::setMaintenanceMode(bool activate) {
"can only set maintenance mode on replica set members");
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_topCoord->getRole() == TopologyCoordinator::Role::kCandidate) {
return Status(ErrorCodes::NotSecondary, "currently running for election");
}
@@ -2530,7 +2526,7 @@ Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* opCt
Status result(ErrorCodes::InternalError, "didn't set status in prepareSyncFromResponse");
auto doResync = false;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_topCoord->prepareSyncFromResponse(target, resultObj, &result);
// If we are in the middle of an initial sync, do a resync.
doResync = result.isOK() && _initialSyncer && _initialSyncer->isActive();
@@ -2545,7 +2541,7 @@ Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* opCt
Status ReplicationCoordinatorImpl::processReplSetFreeze(int secs, BSONObjBuilder* resultObj) {
auto result = [=]() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _topCoord->prepareFreezeResponse(_replExecutor->now(), secs, resultObj);
}();
if (!result.isOK()) {
@@ -2569,7 +2565,7 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCt
log() << "replSetReconfig admin command received from client; new config: "
<< args.newConfigObj;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
_rsConfigStateChange.wait(lk);
@@ -2625,7 +2621,6 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCt
if (!status.isOK()) {
error() << "replSetReconfig got " << status << " while parsing " << newConfigObj;
return Status(ErrorCodes::InvalidReplicaSetConfig, status.reason());
- ;
}
if (newConfig.getReplSetName() != _settings.ourSetName()) {
str::stream errmsg;
@@ -2674,7 +2669,7 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(OperationContext* opCtx,
// Do not conduct an election during a reconfig, as the node may not be electable post-reconfig.
executor::TaskExecutor::EventHandle electionFinishedEvent;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
electionFinishedEvent = _cancelElectionIfNeeded_inlock();
}
@@ -2689,7 +2684,7 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(OperationContext* opCtx,
}
boost::optional<AutoGetRstlForStepUpStepDown> arsd;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (isForceReconfig && _shouldStepDownOnReconfig(lk, newConfig, myIndex)) {
_topCoord->prepareForUnconditionalStepDown();
lk.unlock();
@@ -2748,7 +2743,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
log() << "replSetInitiate admin command received from client";
const auto replEnabled = _settings.usingReplSets();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!replEnabled) {
return Status(ErrorCodes::NoReplicationEnabled, "server is not running with --replSet");
}
@@ -2837,7 +2832,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
void ReplicationCoordinatorImpl::_finishReplSetInitiate(OperationContext* opCtx,
const ReplSetConfig& newConfig,
int myIndex) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_rsConfigState == kConfigInitiating);
invariant(!_rsConfig.isInitialized());
auto action = _setCurrentRSConfig(lk, opCtx, newConfig, myIndex);
@@ -3065,7 +3060,7 @@ void ReplicationCoordinatorImpl::CatchupState::start_inlock() {
if (!cbData.status.isOK()) {
return;
}
- stdx::lock_guard<stdx::mutex> lk(*mutex);
+ stdx::lock_guard<Latch> lk(*mutex);
// Check whether the callback has been cancelled while holding the mutex.
if (cbData.myHandle.isCanceled()) {
return;
@@ -3177,7 +3172,7 @@ void ReplicationCoordinatorImpl::CatchupState::incrementNumCatchUpOps_inlock(int
}
Status ReplicationCoordinatorImpl::abortCatchupIfNeeded(PrimaryCatchUpConclusionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_catchupState) {
_catchupState->abort_inlock(reason);
return Status::OK();
@@ -3186,14 +3181,14 @@ Status ReplicationCoordinatorImpl::abortCatchupIfNeeded(PrimaryCatchUpConclusion
}
void ReplicationCoordinatorImpl::incrementNumCatchUpOpsIfCatchingUp(int numOps) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_catchupState) {
_catchupState->incrementNumCatchUpOps_inlock(numOps);
}
}
void ReplicationCoordinatorImpl::signalDropPendingCollectionsRemovedFromStorage() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_wakeReadyWaiters(lock);
}
@@ -3310,7 +3305,7 @@ void ReplicationCoordinatorImpl::_wakeReadyWaiters(WithLock lk) {
Status ReplicationCoordinatorImpl::processReplSetUpdatePosition(const UpdatePositionArgs& updates,
long long* configVersion) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
Status status = Status::OK();
bool somethingChanged = false;
for (UpdatePositionArgs::UpdateIterator update = updates.updatesBegin();
@@ -3332,7 +3327,7 @@ Status ReplicationCoordinatorImpl::processReplSetUpdatePosition(const UpdatePosi
}
bool ReplicationCoordinatorImpl::buildsIndexes() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_selfIndex == -1) {
return true;
}
@@ -3342,12 +3337,12 @@ bool ReplicationCoordinatorImpl::buildsIndexes() {
std::vector<HostAndPort> ReplicationCoordinatorImpl::getHostsWrittenTo(const OpTime& op,
bool durablyWritten) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _topCoord->getHostsWrittenTo(op, durablyWritten);
}
std::vector<HostAndPort> ReplicationCoordinatorImpl::getOtherNodesInReplSet() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_settings.usingReplSets());
std::vector<HostAndPort> nodes;
@@ -3366,7 +3361,7 @@ std::vector<HostAndPort> ReplicationCoordinatorImpl::getOtherNodesInReplSet() co
Status ReplicationCoordinatorImpl::checkIfWriteConcernCanBeSatisfied(
const WriteConcernOptions& writeConcern) const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
}
@@ -3383,7 +3378,7 @@ Status ReplicationCoordinatorImpl::_checkIfWriteConcernCanBeSatisfied_inlock(
Status ReplicationCoordinatorImpl::checkIfCommitQuorumCanBeSatisfied(
const CommitQuorumOptions& commitQuorum) const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _checkIfCommitQuorumCanBeSatisfied(lock, commitQuorum);
}
@@ -3416,7 +3411,7 @@ StatusWith<bool> ReplicationCoordinatorImpl::checkIfCommitQuorumIsSatisfied(
// If the 'commitQuorum' cannot be satisfied with all the members of this replica set, we
// need to inform the caller to avoid hanging while waiting for satisfiability of the
// 'commitQuorum' with 'commitReadyMembers' due to replica set reconfigurations.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
Status status = _checkIfCommitQuorumCanBeSatisfied(lock, commitQuorum);
if (!status.isOK()) {
return status;
@@ -3427,7 +3422,7 @@ StatusWith<bool> ReplicationCoordinatorImpl::checkIfCommitQuorumIsSatisfied(
}
WriteConcernOptions ReplicationCoordinatorImpl::getGetLastErrorDefault() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_rsConfig.isInitialized()) {
return _rsConfig.getDefaultWriteConcern();
}
@@ -3455,7 +3450,7 @@ bool ReplicationCoordinatorImpl::isReplEnabled() const {
}
HostAndPort ReplicationCoordinatorImpl::chooseNewSyncSource(const OpTime& lastOpTimeFetched) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
HostAndPort oldSyncSource = _topCoord->getSyncSourceAddress();
// Always allow chaining while in catchup and drain mode.
@@ -3480,12 +3475,12 @@ void ReplicationCoordinatorImpl::_unblacklistSyncSource(
if (cbData.status == ErrorCodes::CallbackCanceled)
return;
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_topCoord->unblacklistSyncSource(host, _replExecutor->now());
}
void ReplicationCoordinatorImpl::blacklistSyncSource(const HostAndPort& host, Date_t until) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_topCoord->blacklistSyncSource(host, until);
_scheduleWorkAt(until, [=](const executor::TaskExecutor::CallbackArgs& cbData) {
_unblacklistSyncSource(cbData, host);
@@ -3509,7 +3504,7 @@ void ReplicationCoordinatorImpl::resetLastOpTimesFromOplog(OperationContext* opC
_externalState->setGlobalTimestamp(opCtx->getServiceContext(),
lastOpTimeAndWallTime.opTime.getTimestamp());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
bool isRollbackAllowed = true;
_setMyLastAppliedOpTimeAndWallTime(lock, lastOpTimeAndWallTime, isRollbackAllowed, consistency);
_setMyLastDurableOpTimeAndWallTime(lock, lastOpTimeAndWallTime, isRollbackAllowed);
@@ -3520,7 +3515,7 @@ bool ReplicationCoordinatorImpl::shouldChangeSyncSource(
const HostAndPort& currentSource,
const rpc::ReplSetMetadata& replMetadata,
boost::optional<rpc::OplogQueryMetadata> oqMetadata) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _topCoord->shouldChangeSyncSource(
currentSource, replMetadata, oqMetadata, _replExecutor->now());
}
@@ -3615,7 +3610,7 @@ void ReplicationCoordinatorImpl::_cleanupStableOpTimeCandidates(
boost::optional<OpTimeAndWallTime>
ReplicationCoordinatorImpl::chooseStableOpTimeFromCandidates_forTest(
const std::set<OpTimeAndWallTime>& candidates, const OpTimeAndWallTime& maximumStableOpTime) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _chooseStableOpTimeFromCandidates(lk, candidates, maximumStableOpTime);
}
void ReplicationCoordinatorImpl::cleanupStableOpTimeCandidates_forTest(
@@ -3624,12 +3619,12 @@ void ReplicationCoordinatorImpl::cleanupStableOpTimeCandidates_forTest(
}
std::set<OpTimeAndWallTime> ReplicationCoordinatorImpl::getStableOpTimeCandidates_forTest() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _stableOpTimeCandidates;
}
void ReplicationCoordinatorImpl::attemptToAdvanceStableTimestamp() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_setStableTimestampForStorage(lk);
}
@@ -3757,7 +3752,7 @@ void ReplicationCoordinatorImpl::finishRecoveryIfEligible(OperationContext* opCt
void ReplicationCoordinatorImpl::advanceCommitPoint(
const OpTimeAndWallTime& committedOpTimeAndWallTime, bool fromSyncSource) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_advanceCommitPoint(lk, committedOpTimeAndWallTime, fromSyncSource);
}
@@ -3779,12 +3774,12 @@ void ReplicationCoordinatorImpl::_advanceCommitPoint(
}
OpTime ReplicationCoordinatorImpl::getLastCommittedOpTime() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _topCoord->getLastCommittedOpTime();
}
OpTimeAndWallTime ReplicationCoordinatorImpl::getLastCommittedOpTimeAndWallTime() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _topCoord->getLastCommittedOpTimeAndWallTime();
}
@@ -3798,7 +3793,7 @@ Status ReplicationCoordinatorImpl::processReplSetRequestVotes(
return termStatus;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// We should only enter terminal shutdown from global terminal exit. In that case, rather
// than voting in a term we don't plan to stay alive in, refuse to vote.
@@ -3839,7 +3834,7 @@ void ReplicationCoordinatorImpl::prepareReplMetadata(const BSONObj& metadataRequ
invariant(-1 != rbid);
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (hasReplSetMetadata) {
_prepareReplSetMetadata_inlock(lastOpTimeFromClient, builder);
@@ -3874,7 +3869,7 @@ bool ReplicationCoordinatorImpl::getWriteConcernMajorityShouldJournal_inlock() c
Status ReplicationCoordinatorImpl::processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
ReplSetHeartbeatResponse* response) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
return Status(ErrorCodes::NotYetInitialized,
"Received heartbeat while still initializing replication system");
@@ -3882,7 +3877,7 @@ Status ReplicationCoordinatorImpl::processHeartbeatV1(const ReplSetHeartbeatArgs
}
Status result(ErrorCodes::InternalError, "didn't set status in prepareHeartbeatResponse");
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto senderHost(args.getSenderHost());
const Date_t now = _replExecutor->now();
@@ -3915,7 +3910,7 @@ long long ReplicationCoordinatorImpl::getTerm() {
EventHandle ReplicationCoordinatorImpl::updateTerm_forTest(
long long term, TopologyCoordinator::UpdateTermResult* updateResult) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
EventHandle finishEvh;
finishEvh = _updateTerm_inlock(term, updateResult);
@@ -3934,7 +3929,7 @@ Status ReplicationCoordinatorImpl::updateTerm(OperationContext* opCtx, long long
EventHandle finishEvh;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
finishEvh = _updateTerm_inlock(term, &updateTermResult);
}
@@ -3983,7 +3978,7 @@ EventHandle ReplicationCoordinatorImpl::_updateTerm_inlock(
void ReplicationCoordinatorImpl::waitUntilSnapshotCommitted(OperationContext* opCtx,
const Timestamp& untilSnapshot) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
uassert(ErrorCodes::NotYetInitialized,
"Cannot use snapshots until replica set is finished initializing.",
@@ -3999,7 +3994,7 @@ size_t ReplicationCoordinatorImpl::getNumUncommittedSnapshots() {
}
void ReplicationCoordinatorImpl::createWMajorityWriteAvailabilityDateWaiter(OpTime opTime) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto opTimeCB = [this, opTime]() {
ReplicationMetrics::get(getServiceContext())
.setWMajorityWriteAvailabilityDate(_replExecutor->now());
@@ -4045,7 +4040,7 @@ bool ReplicationCoordinatorImpl::_updateCommittedSnapshot(
}
void ReplicationCoordinatorImpl::dropAllSnapshots() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_dropAllSnapshots_inlock();
}
@@ -4091,7 +4086,7 @@ EventHandle ReplicationCoordinatorImpl::_makeEvent() {
WriteConcernOptions ReplicationCoordinatorImpl::populateUnsetWriteConcernOptionsSyncMode(
WriteConcernOptions wc) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _populateUnsetWriteConcernOptionsSyncMode(lock, wc);
}
@@ -4127,7 +4122,7 @@ Status ReplicationCoordinatorImpl::stepUpIfEligible(bool skipDryRun) {
EventHandle finishEvent;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
finishEvent = _electionFinishedEvent;
}
if (finishEvent.isValid()) {
@@ -4137,7 +4132,7 @@ Status ReplicationCoordinatorImpl::stepUpIfEligible(bool skipDryRun) {
// Step up is considered successful only if we are currently a primary and we are not in the
// process of stepping down. If we know we are going to step down, we should fail the
// replSetStepUp command so the caller can retry if necessary.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_getMemberState_inlock().primary())
return Status(ErrorCodes::CommandFailed, "Election failed.");
else if (_topCoord->isSteppingDown())
@@ -4160,7 +4155,7 @@ int64_t ReplicationCoordinatorImpl::_nextRandomInt64_inlock(int64_t limit) {
}
bool ReplicationCoordinatorImpl::setContainsArbiter() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _rsConfig.containsArbiter();
}
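
One signature in this file deserves a note: _reportUpstream_inlock(stdx::unique_lock<Latch> lock) takes the lock by value, so ownership moves into the helper and the mutex is guaranteed to be released by the time it returns, letting the tail of the work run unlocked. A hedged sketch of that by-value idiom (Reporter is hypothetical):

    #include <utility>

    #include "mongo/platform/mutex.h"
    #include "mongo/util/assert_util.h"

    namespace mongo {

    class Reporter {  // hypothetical
    public:
        void report() {
            stdx::unique_lock<Latch> lock(_mutex);
            _report_inlock(std::move(lock));  // ownership transfers here
        }

    private:
        void _report_inlock(stdx::unique_lock<Latch> lock) {
            invariant(lock.owns_lock());
            const auto snapshot = _value;  // read shared state while locked
            lock.unlock();                 // released before any external call
            // ... forward 'snapshot' upstream without holding _mutex ...
        }

        Mutex _mutex = MONGO_MAKE_LATCH("Reporter::_mutex");
        long long _value = 0;
    };

    }  // namespace mongo
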
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 8a19e09562a..6cfb21a22df 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -571,7 +571,7 @@ private:
// Tracks number of operations left running on step down.
size_t _userOpsRunning = 0;
// Protects killSignaled and stopKillingOps cond. variable.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AutoGetRstlForStepUpStepDown::_mutex");
// Signals thread about the change of killSignaled value.
stdx::condition_variable _stopKillingOps;
// Once this is set to true, the killOpThreadFn method will terminate.
@@ -802,7 +802,7 @@ private:
* Helper method for _awaitReplication that takes an already locked unique_lock, but leaves
* operation timing to the caller.
*/
- Status _awaitReplication_inlock(stdx::unique_lock<stdx::mutex>* lock,
+ Status _awaitReplication_inlock(stdx::unique_lock<Latch>* lock,
OperationContext* opCtx,
const OpTime& opTime,
const WriteConcernOptions& writeConcern);
@@ -854,7 +854,7 @@ private:
*
* Lock will be released after this method finishes.
*/
- void _reportUpstream_inlock(stdx::unique_lock<stdx::mutex> lock);
+ void _reportUpstream_inlock(stdx::unique_lock<Latch> lock);
/**
* Helpers to set the last applied and durable OpTime.
@@ -1141,10 +1141,10 @@ private:
*
* Requires "lock" to own _mutex, and returns the same unique_lock.
*/
- stdx::unique_lock<stdx::mutex> _handleHeartbeatResponseAction_inlock(
+ stdx::unique_lock<Latch> _handleHeartbeatResponseAction_inlock(
const HeartbeatResponseAction& action,
const StatusWith<ReplSetHeartbeatResponse>& responseStatus,
- stdx::unique_lock<stdx::mutex> lock);
+ stdx::unique_lock<Latch> lock);
/**
* Updates the last committed OpTime to be 'committedOpTime' if it is more recent than the
@@ -1366,7 +1366,7 @@ private:
// (I) Independently synchronized, see member variable comment.
// Protects member data of this ReplicationCoordinator.
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ReplicationCoordinatorImpl::_mutex"); // (S)
// Handles to actively queued heartbeats.
HeartbeatHandles _heartbeatHandles; // (M)
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index f49ecec21be..8330b7b30ed 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/repl/replication_metrics.h"
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/repl/vote_requester.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -95,7 +95,7 @@ public:
void ReplicationCoordinatorImpl::_startElectSelfV1(
TopologyCoordinator::StartElectionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_startElectSelfV1_inlock(reason);
}
@@ -187,7 +187,7 @@ void ReplicationCoordinatorImpl::_startElectSelfV1_inlock(
void ReplicationCoordinatorImpl::_processDryRunResult(
long long originalTerm, TopologyCoordinator::StartElectionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
LoseElectionDryRunGuardV1 lossGuard(this);
invariant(_voteRequester);
@@ -269,7 +269,7 @@ void ReplicationCoordinatorImpl::_writeLastVoteForMyElection(
return _externalState->storeLocalLastVoteDocument(opCtx.get(), lastVote);
}();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
LoseElectionDryRunGuardV1 lossGuard(this);
if (status == ErrorCodes::CallbackCanceled) {
return;
@@ -315,7 +315,7 @@ MONGO_FAIL_POINT_DEFINE(electionHangsBeforeUpdateMemberState);
void ReplicationCoordinatorImpl::_onVoteRequestComplete(
long long newTerm, TopologyCoordinator::StartElectionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
LoseElectionGuardV1 lossGuard(this);
invariant(_voteRequester);
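
_writeLastVoteForMyElection above illustrates the other recurring shape in this file: do the slow storage write with no latch held, then take _mutex to apply the outcome, first checking whether the work was cancelled while unlocked. A hedged sketch (Elector and _applyResult_inlock are hypothetical):

    #include "mongo/base/error_codes.h"
    #include "mongo/base/status.h"
    #include "mongo/platform/mutex.h"

    namespace mongo {

    class Elector {  // hypothetical
    public:
        void finishWrite(Status status) {
            // The storage write completed above with no locks held.
            stdx::lock_guard<Latch> lk(_mutex);
            if (status == ErrorCodes::CallbackCanceled) {
                return;  // cancelled while unlocked; discard the result
            }
            _applyResult_inlock(status);
        }

    private:
        void _applyResult_inlock(const Status& status) {
            // ... update election state under _mutex ...
        }
        Mutex _mutex = MONGO_MAKE_LATCH("Elector::_mutex");
    };

    }  // namespace mongo
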
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 6097df4f6e0..4be3daac838 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -54,9 +54,9 @@
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/repl/vote_requester.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
@@ -91,7 +91,7 @@ Milliseconds ReplicationCoordinatorImpl::_getRandomizedElectionOffset_inlock() {
void ReplicationCoordinatorImpl::_doMemberHeartbeat(executor::TaskExecutor::CallbackArgs cbData,
const HostAndPort& target,
int targetIndex) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_untrackHeartbeatHandle_inlock(cbData.myHandle);
if (cbData.status == ErrorCodes::CallbackCanceled) {
@@ -131,7 +131,7 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatToTarget_inlock(const HostAnd
void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData, int targetIndex) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// remove handle from queued heartbeats
_untrackHeartbeatHandle_inlock(cbData.myHandle);
@@ -246,10 +246,10 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
_handleHeartbeatResponseAction_inlock(action, hbStatusResponse, std::move(lk));
}
-stdx::unique_lock<stdx::mutex> ReplicationCoordinatorImpl::_handleHeartbeatResponseAction_inlock(
+stdx::unique_lock<Latch> ReplicationCoordinatorImpl::_handleHeartbeatResponseAction_inlock(
const HeartbeatResponseAction& action,
const StatusWith<ReplSetHeartbeatResponse>& responseStatus,
- stdx::unique_lock<stdx::mutex> lock) {
+ stdx::unique_lock<Latch> lock) {
invariant(lock.owns_lock());
switch (action.getAction()) {
case HeartbeatResponseAction::NoAction:
@@ -376,7 +376,7 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
"Blocking until fail point is disabled.";
auto inShutdown = [&] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _inShutdown;
};
@@ -391,7 +391,7 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
// have taken global lock in S mode and operations blocked on prepare conflict will be killed to
// avoid 3-way deadlock between read, prepared transaction and step down thread.
AutoGetRstlForStepUpStepDown arsd(this, opCtx.get());
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// This node has already stepped down due to reconfig. So, signal anyone who is waiting on the
// step down event.
@@ -497,7 +497,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
_externalState.get(), newConfig, getGlobalServiceContext());
if (myIndex.getStatus() == ErrorCodes::NodeNotFound) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If this node is absent in newConfig, and this node was not previously initialized,
// return to kConfigUninitialized immediately, rather than storing the config and
// transitioning into the RS_REMOVED state. See SERVER-15740.
@@ -523,7 +523,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
auto status = _externalState->storeLocalConfigDocument(opCtx.get(), newConfig.toBSON());
bool isFirstConfig;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
isFirstConfig = !_rsConfig.isInitialized();
if (!status.isOK()) {
error() << "Ignoring new configuration in heartbeat response because we failed to"
@@ -594,7 +594,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
// we have already set our ReplicationCoordinatorImpl::_rsConfigState state to
// "kConfigReconfiguring" which prevents new elections from happening.
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (auto electionFinishedEvent = _cancelElectionIfNeeded_inlock()) {
LOG_FOR_HEARTBEATS(0)
<< "Waiting for election to complete before finishing reconfig to version "
@@ -613,7 +613,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
auto opCtx = cc().makeOperationContext();
boost::optional<AutoGetRstlForStepUpStepDown> arsd;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_shouldStepDownOnReconfig(lk, newConfig, myIndex)) {
_topCoord->prepareForUnconditionalStepDown();
lk.unlock();
@@ -740,7 +740,7 @@ void ReplicationCoordinatorImpl::_startHeartbeats_inlock() {
void ReplicationCoordinatorImpl::_handleLivenessTimeout(
const executor::TaskExecutor::CallbackArgs& cbData) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Only reset the callback handle if it matches, otherwise more will be coming through
if (cbData.myHandle == _handleLivenessTimeoutCbh) {
_handleLivenessTimeoutCbh = CallbackHandle();
@@ -864,7 +864,7 @@ void ReplicationCoordinatorImpl::_cancelAndRescheduleElectionTimeout_inlock() {
void ReplicationCoordinatorImpl::_startElectSelfIfEligibleV1(
TopologyCoordinator::StartElectionReason reason) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// If it is not a single node replica set, no need to start an election after stepdown timeout.
if (reason == TopologyCoordinator::StartElectionReason::kSingleNodePromptElection &&
_rsConfig.getNumMembers() != 1) {
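Besides the mechanical lock-site edits, this file changes the signature of _handleHeartbeatResponseAction_inlock so the held stdx::unique_lock<Latch> is moved by value. A sketch of that ownership-handoff idiom, with hypothetical helper names:

    stdx::unique_lock<Latch> takeAction(stdx::unique_lock<Latch> lock) {
        invariant(lock.owns_lock());  // caller must pass the lock held
        lock.unlock();
        doWorkWithoutLock();  // hypothetical helper that must not hold the latch
        lock.lock();
        return lock;  // hand ownership back to the caller
    }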
diff --git a/src/mongo/db/repl/replication_metrics.cpp b/src/mongo/db/repl/replication_metrics.cpp
index 55508674562..14f01452775 100644
--- a/src/mongo/db/repl/replication_metrics.cpp
+++ b/src/mongo/db/repl/replication_metrics.cpp
@@ -58,7 +58,7 @@ ReplicationMetrics::~ReplicationMetrics() {}
void ReplicationMetrics::incrementNumElectionsCalledForReason(
TopologyCoordinator::StartElectionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (reason) {
case TopologyCoordinator::StartElectionReason::kStepUpRequest:
case TopologyCoordinator::StartElectionReason::kStepUpRequestSkipDryRun: {
@@ -91,7 +91,7 @@ void ReplicationMetrics::incrementNumElectionsCalledForReason(
void ReplicationMetrics::incrementNumElectionsSuccessfulForReason(
TopologyCoordinator::StartElectionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (reason) {
case TopologyCoordinator::StartElectionReason::kStepUpRequest:
case TopologyCoordinator::StartElectionReason::kStepUpRequestSkipDryRun: {
@@ -123,20 +123,20 @@ void ReplicationMetrics::incrementNumElectionsSuccessfulForReason(
}
void ReplicationMetrics::incrementNumStepDownsCausedByHigherTerm() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionMetrics.setNumStepDownsCausedByHigherTerm(
_electionMetrics.getNumStepDownsCausedByHigherTerm() + 1);
}
void ReplicationMetrics::incrementNumCatchUps() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionMetrics.setNumCatchUps(_electionMetrics.getNumCatchUps() + 1);
_updateAverageCatchUpOps(lk);
}
void ReplicationMetrics::incrementNumCatchUpsConcludedForReason(
ReplicationCoordinator::PrimaryCatchUpConclusionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (reason) {
case ReplicationCoordinator::PrimaryCatchUpConclusionReason::kSucceeded:
_electionMetrics.setNumCatchUpsSucceeded(_electionMetrics.getNumCatchUpsSucceeded() +
@@ -169,140 +169,140 @@ void ReplicationMetrics::incrementNumCatchUpsConcludedForReason(
}
long ReplicationMetrics::getNumStepUpCmdsCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getStepUpCmd().getCalled();
}
long ReplicationMetrics::getNumPriorityTakeoversCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getPriorityTakeover().getCalled();
}
long ReplicationMetrics::getNumCatchUpTakeoversCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getCatchUpTakeover().getCalled();
}
long ReplicationMetrics::getNumElectionTimeoutsCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getElectionTimeout().getCalled();
}
long ReplicationMetrics::getNumFreezeTimeoutsCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getFreezeTimeout().getCalled();
}
long ReplicationMetrics::getNumStepUpCmdsSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getStepUpCmd().getSuccessful();
}
long ReplicationMetrics::getNumPriorityTakeoversSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getPriorityTakeover().getSuccessful();
}
long ReplicationMetrics::getNumCatchUpTakeoversSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getCatchUpTakeover().getSuccessful();
}
long ReplicationMetrics::getNumElectionTimeoutsSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getElectionTimeout().getSuccessful();
}
long ReplicationMetrics::getNumFreezeTimeoutsSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getFreezeTimeout().getSuccessful();
}
long ReplicationMetrics::getNumStepDownsCausedByHigherTerm_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumStepDownsCausedByHigherTerm();
}
long ReplicationMetrics::getNumCatchUps_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUps();
}
long ReplicationMetrics::getNumCatchUpsSucceeded_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsSucceeded();
}
long ReplicationMetrics::getNumCatchUpsAlreadyCaughtUp_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsAlreadyCaughtUp();
}
long ReplicationMetrics::getNumCatchUpsSkipped_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsSkipped();
}
long ReplicationMetrics::getNumCatchUpsTimedOut_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsTimedOut();
}
long ReplicationMetrics::getNumCatchUpsFailedWithError_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsFailedWithError();
}
long ReplicationMetrics::getNumCatchUpsFailedWithNewTerm_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsFailedWithNewTerm();
}
long ReplicationMetrics::getNumCatchUpsFailedWithReplSetAbortPrimaryCatchUpCmd_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsFailedWithReplSetAbortPrimaryCatchUpCmd();
}
void ReplicationMetrics::setElectionCandidateMetrics(Date_t lastElectionDate) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setLastElectionDate(lastElectionDate);
_nodeIsCandidateOrPrimary = true;
}
void ReplicationMetrics::setTargetCatchupOpTime(OpTime opTime) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setTargetCatchupOpTime(opTime);
}
void ReplicationMetrics::setNumCatchUpOps(int numCatchUpOps) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setNumCatchUpOps(numCatchUpOps);
_totalNumCatchUpOps += numCatchUpOps;
_updateAverageCatchUpOps(lk);
}
void ReplicationMetrics::setNewTermStartDate(Date_t newTermStartDate) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setNewTermStartDate(newTermStartDate);
}
void ReplicationMetrics::setWMajorityWriteAvailabilityDate(Date_t wMajorityWriteAvailabilityDate) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setWMajorityWriteAvailabilityDate(wMajorityWriteAvailabilityDate);
}
boost::optional<OpTime> ReplicationMetrics::getTargetCatchupOpTime_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionCandidateMetrics.getTargetCatchupOpTime();
}
BSONObj ReplicationMetrics::getElectionMetricsBSON() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.toBSON();
}
BSONObj ReplicationMetrics::getElectionCandidateMetricsBSON() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_nodeIsCandidateOrPrimary) {
return _electionCandidateMetrics.toBSON();
}
@@ -310,7 +310,7 @@ BSONObj ReplicationMetrics::getElectionCandidateMetricsBSON() {
}
void ReplicationMetrics::clearElectionCandidateMetrics() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setTargetCatchupOpTime(boost::none);
_electionCandidateMetrics.setNumCatchUpOps(boost::none);
_electionCandidateMetrics.setNewTermStartDate(boost::none);
diff --git a/src/mongo/db/repl/replication_metrics.h b/src/mongo/db/repl/replication_metrics.h
index 816b1fc39bc..a07f84e48cf 100644
--- a/src/mongo/db/repl/replication_metrics.h
+++ b/src/mongo/db/repl/replication_metrics.h
@@ -32,7 +32,7 @@
#include "mongo/db/repl/replication_metrics_gen.h"
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -98,7 +98,7 @@ private:
void _updateAverageCatchUpOps(WithLock lk);
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ReplicationMetrics::_mutex");
ElectionMetrics _electionMetrics;
ElectionCandidateMetrics _electionCandidateMetrics;
ElectionParticipantMetrics _electionParticipantMetrics;
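ReplicationMetrics also relies on the WithLock token declared above: a private helper takes WithLock to document that its caller already holds _mutex, and a held stdx::lock_guard converts to it implicitly. A sketch of that pairing, abridged from the hunks above:

    void ReplicationMetrics::_updateAverageCatchUpOps(WithLock) {
        // The WithLock parameter is proof that the caller holds _mutex;
        // no locking happens here.
    }

    void ReplicationMetrics::setNumCatchUpOps(int numCatchUpOps) {
        stdx::lock_guard<Latch> lk(_mutex);
        _electionCandidateMetrics.setNumCatchUpOps(numCatchUpOps);
        _totalNumCatchUpOps += numCatchUpOps;
        _updateAverageCatchUpOps(lk);  // lock_guard converts to WithLock
    }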
diff --git a/src/mongo/db/repl/replication_process.cpp b/src/mongo/db/repl/replication_process.cpp
index d3e77314cd3..117972289af 100644
--- a/src/mongo/db/repl/replication_process.cpp
+++ b/src/mongo/db/repl/replication_process.cpp
@@ -84,7 +84,7 @@ ReplicationProcess::ReplicationProcess(
_rbid(kUninitializedRollbackId) {}
Status ReplicationProcess::refreshRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto rbidResult = _storageInterface->getRollbackID(opCtx);
if (!rbidResult.isOK()) {
@@ -102,7 +102,7 @@ Status ReplicationProcess::refreshRollbackID(OperationContext* opCtx) {
}
int ReplicationProcess::getRollbackID() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (kUninitializedRollbackId == _rbid) {
// This may happen when serverStatus is called by an internal client before we have a chance
// to read the rollback ID from storage.
@@ -112,7 +112,7 @@ int ReplicationProcess::getRollbackID() const {
}
Status ReplicationProcess::initializeRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(kUninitializedRollbackId == _rbid);
@@ -132,7 +132,7 @@ Status ReplicationProcess::initializeRollbackID(OperationContext* opCtx) {
}
Status ReplicationProcess::incrementRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _storageInterface->incrementRollbackID(opCtx);
diff --git a/src/mongo/db/repl/replication_process.h b/src/mongo/db/repl/replication_process.h
index 849ac7df8c4..82c298d363d 100644
--- a/src/mongo/db/repl/replication_process.h
+++ b/src/mongo/db/repl/replication_process.h
@@ -38,7 +38,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_consistency_markers.h"
#include "mongo/db/repl/replication_recovery.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -103,7 +103,7 @@ private:
// (M) Reads and writes guarded by _mutex.
// Guards access to member variables.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ReplicationProcess::_mutex");
// Used to access the storage layer.
StorageInterface* const _storageInterface; // (R)
diff --git a/src/mongo/db/repl/replication_recovery_test.cpp b/src/mongo/db/repl/replication_recovery_test.cpp
index ca678ebce17..3e97d2a56c4 100644
--- a/src/mongo/db/repl/replication_recovery_test.cpp
+++ b/src/mongo/db/repl/replication_recovery_test.cpp
@@ -64,47 +64,47 @@ const NamespaceString testNs("a.a");
class StorageInterfaceRecovery : public StorageInterfaceImpl {
public:
boost::optional<Timestamp> getRecoveryTimestamp(ServiceContext* serviceCtx) const override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _recoveryTimestamp;
}
void setRecoveryTimestamp(Timestamp recoveryTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_recoveryTimestamp = recoveryTimestamp;
}
bool supportsRecoverToStableTimestamp(ServiceContext* serviceCtx) const override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _supportsRecoverToStableTimestamp;
}
void setSupportsRecoverToStableTimestamp(bool supports) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_supportsRecoverToStableTimestamp = supports;
}
bool supportsRecoveryTimestamp(ServiceContext* serviceCtx) const override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _supportsRecoveryTimestamp;
}
void setSupportsRecoveryTimestamp(bool supports) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_supportsRecoveryTimestamp = supports;
}
void setPointInTimeReadTimestamp(Timestamp pointInTimeReadTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_pointInTimeReadTimestamp = pointInTimeReadTimestamp;
}
Timestamp getPointInTimeReadTimestamp(OperationContext* opCtx) const override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _pointInTimeReadTimestamp;
}
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("StorageInterfaceRecovery::_mutex");
Timestamp _initialDataTimestamp = Timestamp::min();
boost::optional<Timestamp> _recoveryTimestamp = boost::none;
Timestamp _pointInTimeReadTimestamp = {};
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index 5e7c852d211..d659ee83965 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -118,17 +118,17 @@ std::string Reporter::toString() const {
}
HostAndPort Reporter::getTarget() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _target;
}
Milliseconds Reporter::getKeepAliveInterval() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _keepAliveInterval;
}
void Reporter::shutdown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_status = Status(ErrorCodes::CallbackCanceled, "Reporter no longer valid");
@@ -152,13 +152,13 @@ void Reporter::shutdown() {
}
Status Reporter::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() { return !_isActive_inlock(); });
return _status;
}
Status Reporter::trigger() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If there was a previous error then the reporter is dead, so return that error.
if (!_status.isOK()) {
@@ -196,7 +196,7 @@ Status Reporter::trigger() {
StatusWith<BSONObj> Reporter::_prepareCommand() {
auto prepareResult = _prepareReplSetUpdatePositionCommandFn();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Reporter could have been canceled while preparing the command.
if (!_status.isOK()) {
@@ -239,7 +239,7 @@ void Reporter::_sendCommand_inlock(BSONObj commandRequest, Milliseconds netTimeo
void Reporter::_processResponseCallback(
const executor::TaskExecutor::RemoteCommandCallbackArgs& rcbd) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If the reporter was shut down before this callback is invoked,
// return the canceled "_status".
@@ -299,7 +299,7 @@ void Reporter::_processResponseCallback(
// Must call without holding the lock.
auto prepareResult = _prepareCommand();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_status.isOK()) {
_onShutdown_inlock();
return;
@@ -318,7 +318,7 @@ void Reporter::_processResponseCallback(
void Reporter::_prepareAndSendCommandCallback(const executor::TaskExecutor::CallbackArgs& args,
bool fromTrigger) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_status.isOK()) {
_onShutdown_inlock();
return;
@@ -341,7 +341,7 @@ void Reporter::_prepareAndSendCommandCallback(const executor::TaskExecutor::Call
// Must call without holding the lock.
auto prepareResult = _prepareCommand();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_status.isOK()) {
_onShutdown_inlock();
return;
@@ -367,7 +367,7 @@ void Reporter::_onShutdown_inlock() {
}
bool Reporter::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isActive_inlock();
}
@@ -376,12 +376,12 @@ bool Reporter::_isActive_inlock() const {
}
bool Reporter::isWaitingToSendReport() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isWaitingToSendReporter;
}
Date_t Reporter::getKeepAliveTimeoutWhen_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _keepAliveTimeoutWhen;
}
diff --git a/src/mongo/db/repl/reporter.h b/src/mongo/db/repl/reporter.h
index f6cc0ea8cea..caa67aaa528 100644
--- a/src/mongo/db/repl/reporter.h
+++ b/src/mongo/db/repl/reporter.h
@@ -36,8 +36,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -188,7 +188,7 @@ private:
const Milliseconds _updatePositionTimeout;
// Protects member data of this Reporter declared below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("Reporter::_mutex");
mutable stdx::condition_variable _condition;
diff --git a/src/mongo/db/repl/rollback_checker.cpp b/src/mongo/db/repl/rollback_checker.cpp
index cb5e57f6ae9..9089163aae5 100644
--- a/src/mongo/db/repl/rollback_checker.cpp
+++ b/src/mongo/db/repl/rollback_checker.cpp
@@ -33,14 +33,13 @@
#include "mongo/db/repl/rollback_checker.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
namespace mongo {
namespace repl {
using RemoteCommandCallbackArgs = executor::TaskExecutor::RemoteCommandCallbackArgs;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
RollbackChecker::RollbackChecker(executor::TaskExecutor* executor, HostAndPort syncSource)
: _executor(executor), _syncSource(syncSource), _baseRBID(-1), _lastRBID(-1) {
@@ -121,12 +120,12 @@ Status RollbackChecker::reset_sync() {
}
int RollbackChecker::getBaseRBID() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _baseRBID;
}
int RollbackChecker::getLastRBID_forTest() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastRBID;
}
diff --git a/src/mongo/db/repl/rollback_checker.h b/src/mongo/db/repl/rollback_checker.h
index 768dd47bf63..75a948af1cb 100644
--- a/src/mongo/db/repl/rollback_checker.h
+++ b/src/mongo/db/repl/rollback_checker.h
@@ -31,12 +31,11 @@
#include "mongo/base/status_with.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
-class Mutex;
-
/**
* The RollbackChecker maintains a sync source and its baseline rollback ID (rbid). It
* contains methods to check if a rollback occurred by checking if the rbid has changed since
@@ -119,7 +118,7 @@ private:
executor::TaskExecutor* const _executor;
// Protects member data of this RollbackChecker.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RollbackChecker::_mutex");
// The sync source to check for rollbacks against.
HostAndPort _syncSource;
diff --git a/src/mongo/db/repl/rollback_checker_test.cpp b/src/mongo/db/repl/rollback_checker_test.cpp
index 46d366645cb..3c4bbdd0941 100644
--- a/src/mongo/db/repl/rollback_checker_test.cpp
+++ b/src/mongo/db/repl/rollback_checker_test.cpp
@@ -46,7 +46,7 @@ using namespace mongo::repl;
using executor::NetworkInterfaceMock;
using executor::RemoteCommandResponse;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
class RollbackCheckerTest : public executor::ThreadPoolExecutorTest {
public:
@@ -58,7 +58,7 @@ protected:
std::unique_ptr<RollbackChecker> _rollbackChecker;
RollbackChecker::Result _hasRolledBackResult = {ErrorCodes::NotYetInitialized, ""};
bool _hasCalledCallback;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RollbackCheckerTest::_mutex");
};
void RollbackCheckerTest::setUp() {
@@ -66,7 +66,7 @@ void RollbackCheckerTest::setUp() {
launchExecutorThread();
getNet()->enterNetwork();
_rollbackChecker = std::make_unique<RollbackChecker>(&getExecutor(), HostAndPort());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_hasRolledBackResult = {ErrorCodes::NotYetInitialized, ""};
_hasCalledCallback = false;
}
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index ecb73b66573..4c670a82a2b 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -258,12 +258,12 @@ Status RollbackImpl::runRollback(OperationContext* opCtx) {
}
void RollbackImpl::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_inShutdown = true;
}
bool RollbackImpl::_isInShutdown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _inShutdown;
}
diff --git a/src/mongo/db/repl/rollback_impl.h b/src/mongo/db/repl/rollback_impl.h
index 5b32d6abb32..69dbb520161 100644
--- a/src/mongo/db/repl/rollback_impl.h
+++ b/src/mongo/db/repl/rollback_impl.h
@@ -449,7 +449,7 @@ private:
void _resetDropPendingState(OperationContext* opCtx);
// Guards access to member variables.
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RollbackImpl::_mutex"); // (S)
// Set to true when RollbackImpl should shut down.
bool _inShutdown = false; // (M)
diff --git a/src/mongo/db/repl/rollback_test_fixture.h b/src/mongo/db/repl/rollback_test_fixture.h
index f4c4ce5a13a..10f4b51d566 100644
--- a/src/mongo/db/repl/rollback_test_fixture.h
+++ b/src/mongo/db/repl/rollback_test_fixture.h
@@ -119,7 +119,7 @@ protected:
class RollbackTest::StorageInterfaceRollback : public StorageInterfaceImpl {
public:
void setStableTimestamp(ServiceContext* serviceCtx, Timestamp snapshotName) override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_stableTimestamp = snapshotName;
}
@@ -129,7 +129,7 @@ public:
* of '_currTimestamp'.
*/
StatusWith<Timestamp> recoverToStableTimestamp(OperationContext* opCtx) override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_recoverToTimestampStatus) {
return _recoverToTimestampStatus.get();
} else {
@@ -152,17 +152,17 @@ public:
}
void setRecoverToTimestampStatus(Status status) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_recoverToTimestampStatus = status;
}
void setCurrentTimestamp(Timestamp ts) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_currTimestamp = ts;
}
Timestamp getCurrentTimestamp() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _currTimestamp;
}
@@ -172,7 +172,7 @@ public:
Status setCollectionCount(OperationContext* opCtx,
const NamespaceStringOrUUID& nsOrUUID,
long long newCount) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_setCollectionCountStatus && _setCollectionCountStatusUUID &&
nsOrUUID.uuid() == _setCollectionCountStatusUUID) {
return *_setCollectionCountStatus;
@@ -182,18 +182,18 @@ public:
}
void setSetCollectionCountStatus(UUID uuid, Status status) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_setCollectionCountStatus = status;
_setCollectionCountStatusUUID = uuid;
}
long long getFinalCollectionCount(const UUID& uuid) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _newCounts[uuid];
}
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("StorageInterfaceRollback::_mutex");
Timestamp _stableTimestamp;
diff --git a/src/mongo/db/repl/scatter_gather_runner.cpp b/src/mongo/db/repl/scatter_gather_runner.cpp
index 20d392acf44..18e3bc761b9 100644
--- a/src/mongo/db/repl/scatter_gather_runner.cpp
+++ b/src/mongo/db/repl/scatter_gather_runner.cpp
@@ -46,7 +46,7 @@ namespace mongo {
namespace repl {
using executor::RemoteCommandRequest;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
using CallbackHandle = executor::TaskExecutor::CallbackHandle;
using EventHandle = executor::TaskExecutor::EventHandle;
using RemoteCommandCallbackArgs = executor::TaskExecutor::RemoteCommandCallbackArgs;
diff --git a/src/mongo/db/repl/scatter_gather_runner.h b/src/mongo/db/repl/scatter_gather_runner.h
index d38bdc4862b..90f20bc20b5 100644
--- a/src/mongo/db/repl/scatter_gather_runner.h
+++ b/src/mongo/db/repl/scatter_gather_runner.h
@@ -33,7 +33,7 @@
#include <vector>
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -134,7 +134,7 @@ private:
executor::TaskExecutor::EventHandle _sufficientResponsesReceived;
std::vector<executor::TaskExecutor::CallbackHandle> _callbacks;
bool _started = false;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("RunnerImpl::_mutex");
};
executor::TaskExecutor* _executor; // Not owned here.
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 88f4e2f36c7..dde8249541d 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -89,7 +89,7 @@ const char StorageInterfaceImpl::kRollbackIdFieldName[] = "rollbackId";
const char StorageInterfaceImpl::kRollbackIdDocumentId[] = "rollbackId";
namespace {
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
const auto kIdIndexName = "_id_"_sd;
diff --git a/src/mongo/db/repl/storage_interface_mock.cpp b/src/mongo/db/repl/storage_interface_mock.cpp
index 77936b4453d..e9fa17504be 100644
--- a/src/mongo/db/repl/storage_interface_mock.cpp
+++ b/src/mongo/db/repl/storage_interface_mock.cpp
@@ -41,7 +41,7 @@ namespace mongo {
namespace repl {
StatusWith<int> StorageInterfaceMock::getRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (!_rbidInitialized) {
return Status(ErrorCodes::NamespaceNotFound, "Rollback ID not initialized");
}
@@ -49,7 +49,7 @@ StatusWith<int> StorageInterfaceMock::getRollbackID(OperationContext* opCtx) {
}
StatusWith<int> StorageInterfaceMock::initializeRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_rbidInitialized) {
return Status(ErrorCodes::NamespaceExists, "Rollback ID already initialized");
}
@@ -61,7 +61,7 @@ StatusWith<int> StorageInterfaceMock::initializeRollbackID(OperationContext* opC
}
StatusWith<int> StorageInterfaceMock::incrementRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (!_rbidInitialized) {
return Status(ErrorCodes::NamespaceNotFound, "Rollback ID not initialized");
}
@@ -70,23 +70,23 @@ StatusWith<int> StorageInterfaceMock::incrementRollbackID(OperationContext* opCt
}
void StorageInterfaceMock::setStableTimestamp(ServiceContext* serviceCtx, Timestamp snapshotName) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_stableTimestamp = snapshotName;
}
void StorageInterfaceMock::setInitialDataTimestamp(ServiceContext* serviceCtx,
Timestamp snapshotName) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_initialDataTimestamp = snapshotName;
}
Timestamp StorageInterfaceMock::getStableTimestamp() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _stableTimestamp;
}
Timestamp StorageInterfaceMock::getInitialDataTimestamp() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _initialDataTimestamp;
}
diff --git a/src/mongo/db/repl/storage_interface_mock.h b/src/mongo/db/repl/storage_interface_mock.h
index ec32c6dc059..19bd3c69186 100644
--- a/src/mongo/db/repl/storage_interface_mock.h
+++ b/src/mongo/db/repl/storage_interface_mock.h
@@ -43,7 +43,7 @@
#include "mongo/bson/timestamp.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/storage_interface.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -408,7 +408,7 @@ public:
Timestamp oldestOpenReadTimestamp = Timestamp::min();
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("StorageInterfaceMock::_mutex");
int _rbid;
bool _rbidInitialized = false;
Timestamp _stableTimestamp = Timestamp::min();
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 012bad86797..03b5af98376 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -98,7 +98,7 @@ Reporter::PrepareReplSetUpdatePositionCommandFn makePrepareReplSetUpdatePosition
void SyncSourceFeedback::forwardSlaveProgress() {
{
- stdx::unique_lock<stdx::mutex> lock(_mtx);
+ stdx::unique_lock<Latch> lock(_mtx);
_positionChanged = true;
_cond.notify_all();
if (_reporter) {
@@ -133,7 +133,7 @@ Status SyncSourceFeedback::_updateUpstream(Reporter* reporter) {
}
void SyncSourceFeedback::shutdown() {
- stdx::unique_lock<stdx::mutex> lock(_mtx);
+ stdx::unique_lock<Latch> lock(_mtx);
if (_reporter) {
_reporter->shutdown();
}
@@ -161,7 +161,7 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor,
// Take SyncSourceFeedback lock before calling into ReplicationCoordinator
// to avoid deadlock, because ReplicationCoordinator could conceivably call back into
// this class.
- stdx::unique_lock<stdx::mutex> lock(_mtx);
+ stdx::unique_lock<Latch> lock(_mtx);
while (!_positionChanged && !_shutdownSignaled) {
{
MONGO_IDLE_THREAD_BLOCK;
@@ -184,7 +184,7 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor,
}
{
- stdx::lock_guard<stdx::mutex> lock(_mtx);
+ stdx::lock_guard<Latch> lock(_mtx);
MemberState state = replCoord->getMemberState();
if (state.primary() || state.startup()) {
continue;
@@ -220,14 +220,14 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor,
keepAliveInterval,
syncSourceFeedbackNetworkTimeoutSecs);
{
- stdx::lock_guard<stdx::mutex> lock(_mtx);
+ stdx::lock_guard<Latch> lock(_mtx);
if (_shutdownSignaled) {
break;
}
_reporter = &reporter;
}
ON_BLOCK_EXIT([this]() {
- stdx::lock_guard<stdx::mutex> lock(_mtx);
+ stdx::lock_guard<Latch> lock(_mtx);
_reporter = nullptr;
});
diff --git a/src/mongo/db/repl/sync_source_feedback.h b/src/mongo/db/repl/sync_source_feedback.h
index a75cb23ad64..3688de9a0ed 100644
--- a/src/mongo/db/repl/sync_source_feedback.h
+++ b/src/mongo/db/repl/sync_source_feedback.h
@@ -32,8 +32,8 @@
#include "mongo/base/status.h"
#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
struct HostAndPort;
@@ -79,7 +79,7 @@ private:
Status _updateUpstream(Reporter* reporter);
// Protects _cond, _shutdownSignaled, _keepAliveInterval, and _positionChanged.
- stdx::mutex _mtx;
+ Mutex _mtx = MONGO_MAKE_LATCH("SyncSourceFeedback::_mtx");
// used to alert our thread of changes which need to be passed up the chain
stdx::condition_variable _cond;
// used to indicate a position change which has not yet been pushed along
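Condition variables pair with the converted type unchanged: waits still take a stdx::unique_lock over the latch. A minimal sketch of the signal/wait shape SyncSourceFeedback uses (field names as in this file, bodies abridged, and _waitForPositionChange is a hypothetical helper):

    void SyncSourceFeedback::forwardSlaveProgress() {
        stdx::unique_lock<Latch> lock(_mtx);
        _positionChanged = true;
        _cond.notify_all();
    }

    void SyncSourceFeedback::_waitForPositionChange() {
        stdx::unique_lock<Latch> lock(_mtx);
        _cond.wait(lock, [&] { return _positionChanged || _shutdownSignaled; });
    }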
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index 45364e05bf7..0b371f9359d 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -85,7 +85,7 @@ SyncSourceResolver::~SyncSourceResolver() {
}
bool SyncSourceResolver::isActive() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isActive_inlock();
}
@@ -95,7 +95,7 @@ bool SyncSourceResolver::_isActive_inlock() const {
Status SyncSourceResolver::startup() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
_state = State::kRunning;
@@ -113,7 +113,7 @@ Status SyncSourceResolver::startup() {
}
void SyncSourceResolver::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Transition directly from PreStart to Complete if not started yet.
if (State::kPreStart == _state) {
_state = State::kComplete;
@@ -137,12 +137,12 @@ void SyncSourceResolver::shutdown() {
}
void SyncSourceResolver::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() { return !_isActive_inlock(); });
}
bool SyncSourceResolver::_isShuttingDown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return State::kShuttingDown == _state;
}
@@ -206,7 +206,7 @@ std::unique_ptr<Fetcher> SyncSourceResolver::_makeRequiredOpTimeFetcher(HostAndP
}
Status SyncSourceResolver::_scheduleFetcher(std::unique_ptr<Fetcher> fetcher) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// TODO SERVER-27499 need to check if _state is kShuttingDown inside the mutex.
// Must schedule fetcher inside lock in case fetcher's callback gets invoked immediately by task
// executor.
@@ -341,7 +341,7 @@ Status SyncSourceResolver::_scheduleRBIDRequest(HostAndPort candidate, OpTime ea
// Once work is scheduled, nothing prevents it from finishing. We need the mutex to protect
// access to member variables after scheduling, because otherwise the scheduled callback could
// finish and allow the destructor to fire before we access the member variables.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state == State::kShuttingDown) {
return Status(
ErrorCodes::CallbackCanceled,
@@ -530,7 +530,7 @@ Status SyncSourceResolver::_finishCallback(const SyncSourceResolverResponse& res
<< exceptionToStatus();
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state != State::kComplete);
_state = State::kComplete;
_condition.notify_all();
diff --git a/src/mongo/db/repl/sync_source_resolver.h b/src/mongo/db/repl/sync_source_resolver.h
index abe6396e650..2b2734d2c70 100644
--- a/src/mongo/db/repl/sync_source_resolver.h
+++ b/src/mongo/db/repl/sync_source_resolver.h
@@ -38,8 +38,8 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/optime.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
@@ -234,7 +234,7 @@ private:
const OnCompletionFn _onCompletion;
// Protects members of this sync source resolver defined below.
- mutable stdx::mutex _mutex;
+    mutable Mutex _mutex = MONGO_MAKE_LATCH("SyncSourceResolver::_mutex");
mutable stdx::condition_variable _condition;
// State transitions:
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 755ab860a07..c8de5ed6e80 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -151,7 +151,7 @@ private:
void _run();
// Protects _cond, _shutdownSignaled, and _latestOpTime.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ApplyBatchFinalizerForJournal::_mutex");
// Used to alert our thread of a new OpTime.
stdx::condition_variable _cond;
// The next OpTime to set as the ReplicationCoordinator's lastOpTime after flushing.
@@ -163,7 +163,7 @@ private:
};
ApplyBatchFinalizerForJournal::~ApplyBatchFinalizerForJournal() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_shutdownSignaled = true;
_cond.notify_all();
lock.unlock();
@@ -175,7 +175,7 @@ void ApplyBatchFinalizerForJournal::record(const OpTimeAndWallTime& newOpTimeAnd
ReplicationCoordinator::DataConsistency consistency) {
_recordApplied(newOpTimeAndWallTime, consistency);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_latestOpTimeAndWallTime = newOpTimeAndWallTime;
_cond.notify_all();
}
@@ -187,7 +187,7 @@ void ApplyBatchFinalizerForJournal::_run() {
OpTimeAndWallTime latestOpTimeAndWallTime = {OpTime(), Date_t()};
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (_latestOpTimeAndWallTime.opTime.isNull() && !_shutdownSignaled) {
_cond.wait(lock);
}
@@ -491,7 +491,7 @@ public:
}
OpQueue getNextBatch(Seconds maxWaitTime) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// _ops can indicate the following cases:
// 1. A new batch is ready to consume.
// 2. Shutdown.
@@ -604,7 +604,7 @@ private:
}
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Block until the previous batch has been taken.
_cv.wait(lk, [&] { return _ops.empty() && !_ops.termWhenExhausted(); });
_ops = std::move(ops);
@@ -621,7 +621,7 @@ private:
OplogBuffer* const _oplogBuffer;
OplogApplier::GetNextApplierBatchFn const _getNextApplierBatchFn;
- stdx::mutex _mutex; // Guards _ops.
+ Mutex _mutex = MONGO_MAKE_LATCH("OpQueueBatcher::_mutex"); // Guards _ops.
stdx::condition_variable _cv;
OpQueue _ops;
@@ -756,12 +756,12 @@ void SyncTail::runLoop(OplogBuffer* oplogBuffer,
}
void SyncTail::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_inShutdown = true;
}
bool SyncTail::inShutdown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _inShutdown;
}
diff --git a/src/mongo/db/repl/sync_tail.h b/src/mongo/db/repl/sync_tail.h
index 770663e9dab..364df797cad 100644
--- a/src/mongo/db/repl/sync_tail.h
+++ b/src/mongo/db/repl/sync_tail.h
@@ -43,7 +43,7 @@
#include "mongo/db/repl/replication_consistency_markers.h"
#include "mongo/db/repl/session_update_tracker.h"
#include "mongo/db/repl/storage_interface.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
namespace mongo {
@@ -239,7 +239,7 @@ private:
const OplogApplier::Options _options;
// Protects member data of SyncTail.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("SyncTail::_mutex");
// Set to true if shutdown() has been called.
bool _inShutdown = false;
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index 4c8f44ad8d8..c2a18035ea7 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -65,7 +65,7 @@
#include "mongo/db/session_txn_record_gen.h"
#include "mongo/db/stats/counters.h"
#include "mongo/db/transaction_participant_gen.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
@@ -435,7 +435,7 @@ protected:
_insertOp2->getOpTime());
_opObserver->onInsertsFn =
[&](OperationContext*, const NamespaceString& nss, const std::vector<BSONObj>& docs) {
- stdx::lock_guard<stdx::mutex> lock(_insertMutex);
+ stdx::lock_guard<Latch> lock(_insertMutex);
if (nss.isOplog() || nss == _nss1 || nss == _nss2 ||
nss == NamespaceString::kSessionTransactionsTableNamespace) {
_insertedDocs[nss].insert(_insertedDocs[nss].end(), docs.begin(), docs.end());
@@ -482,7 +482,7 @@ protected:
std::unique_ptr<ThreadPool> _writerPool;
private:
- stdx::mutex _insertMutex;
+ Mutex _insertMutex = MONGO_MAKE_LATCH("MultiOplogEntrySyncTailTest::_insertMutex");
};
TEST_F(MultiOplogEntrySyncTailTest, MultiApplyUnpreparedTransactionSeparate) {
@@ -816,7 +816,7 @@ protected:
_abortSinglePrepareApplyOp;
private:
- stdx::mutex _insertMutex;
+ Mutex _insertMutex = MONGO_MAKE_LATCH("MultiOplogEntryPreparedTransactionTest::_insertMutex");
};
TEST_F(MultiOplogEntryPreparedTransactionTest, MultiApplyPreparedTransactionSteadyState) {
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index 4c53b558aa1..86edc6da9c5 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -50,8 +50,8 @@ namespace mongo {
namespace repl {
namespace {
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
+using LockGuard = stdx::lock_guard<Latch>;
/**
@@ -87,7 +87,7 @@ TaskRunner::~TaskRunner() {
}
std::string TaskRunner::getDiagnosticString() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
str::stream output;
output << "TaskRunner";
output << " scheduled tasks: " << _tasks.size();
@@ -97,14 +97,14 @@ std::string TaskRunner::getDiagnosticString() const {
}
bool TaskRunner::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _active;
}
void TaskRunner::schedule(Task task) {
invariant(task);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_tasks.push_back(std::move(task));
_condition.notify_all();
@@ -123,7 +123,7 @@ void TaskRunner::schedule(Task task) {
}
void TaskRunner::cancel() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_cancelRequested = true;
_condition.notify_all();
}
@@ -159,7 +159,7 @@ void TaskRunner::_runTasks() {
// Release thread back to pool after disposing if no scheduled tasks in queue.
if (nextAction == NextAction::kDisposeOperationContext ||
nextAction == NextAction::kInvalid) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_tasks.empty()) {
_finishRunTasks_inlock();
return;
diff --git a/src/mongo/db/repl/task_runner.h b/src/mongo/db/repl/task_runner.h
index 9b15ed3d629..c1db72bdba5 100644
--- a/src/mongo/db/repl/task_runner.h
+++ b/src/mongo/db/repl/task_runner.h
@@ -33,8 +33,8 @@
#include <list>
#include "mongo/db/service_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/functional.h"
@@ -151,7 +151,7 @@ private:
ThreadPool* _threadPool;
// Protects member data of this TaskRunner.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TaskRunner::_mutex");
stdx::condition_variable _condition;
diff --git a/src/mongo/db/repl/task_runner_test.cpp b/src/mongo/db/repl/task_runner_test.cpp
index 6953f4900ec..96ad44916aa 100644
--- a/src/mongo/db/repl/task_runner_test.cpp
+++ b/src/mongo/db/repl/task_runner_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/repl/task_runner.h"
#include "mongo/db/repl/task_runner_test_fixture.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/barrier.h"
#include "mongo/util/concurrency/thread_pool.h"
@@ -57,12 +57,12 @@ TEST_F(TaskRunnerTest, GetDiagnosticString) {
}
TEST_F(TaskRunnerTest, CallbackValues) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
bool called = false;
OperationContext* opCtx = nullptr;
Status status = getDetectableErrorStatus();
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
called = true;
opCtx = theTxn;
status = theStatus;
@@ -72,7 +72,7 @@ TEST_F(TaskRunnerTest, CallbackValues) {
getThreadPool().waitForIdle();
ASSERT_FALSE(getTaskRunner().isActive());
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_TRUE(called);
ASSERT(opCtx);
ASSERT_OK(status);
@@ -84,11 +84,11 @@ OpIdVector _testRunTaskTwice(TaskRunnerTest& test,
TaskRunner::NextAction nextAction,
unique_function<void(Task task)> schedule) {
unittest::Barrier barrier(2U);
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
std::vector<OperationContext*> txns;
OpIdVector txnIds;
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
if (txns.size() >= 2U) {
return TaskRunner::NextAction::kInvalid;
}
@@ -111,7 +111,7 @@ OpIdVector _testRunTaskTwice(TaskRunnerTest& test,
test.getThreadPool().waitForIdle();
ASSERT_FALSE(test.getTaskRunner().isActive());
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQUALS(2U, txns.size());
ASSERT(txns[0]);
ASSERT(txns[1]);
@@ -148,14 +148,14 @@ TEST_F(TaskRunnerTest, RunTaskTwiceKeepOperationContext) {
}
TEST_F(TaskRunnerTest, SkipSecondTask) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
int i = 0;
OperationContext* opCtx[2] = {nullptr, nullptr};
Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()};
stdx::condition_variable condition;
bool schedulingDone = false;
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
int j = i++;
if (j >= 2) {
return TaskRunner::NextAction::kCancel;
@@ -174,14 +174,14 @@ TEST_F(TaskRunnerTest, SkipSecondTask) {
ASSERT_TRUE(getTaskRunner().isActive());
getTaskRunner().schedule(task);
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
schedulingDone = true;
condition.notify_all();
}
getThreadPool().waitForIdle();
ASSERT_FALSE(getTaskRunner().isActive());
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQUALS(2, i);
ASSERT(opCtx[0]);
ASSERT_OK(status[0]);
@@ -190,14 +190,14 @@ TEST_F(TaskRunnerTest, SkipSecondTask) {
}
TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
int i = 0;
OperationContext* opCtx[2] = {nullptr, nullptr};
Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()};
stdx::condition_variable condition;
bool schedulingDone = false;
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
int j = i++;
if (j >= 2) {
return TaskRunner::NextAction::kCancel;
@@ -223,14 +223,14 @@ TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
ASSERT_TRUE(getTaskRunner().isActive());
getTaskRunner().schedule(task);
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
schedulingDone = true;
condition.notify_all();
}
getThreadPool().waitForIdle();
ASSERT_FALSE(getTaskRunner().isActive());
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQUALS(2, i);
ASSERT(opCtx[0]);
ASSERT_OK(status[0]);
@@ -239,7 +239,7 @@ TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
}
TEST_F(TaskRunnerTest, Cancel) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable condition;
Status status = getDetectableErrorStatus();
bool taskRunning = false;
@@ -247,7 +247,7 @@ TEST_F(TaskRunnerTest, Cancel) {
// Running this task causes the task runner to wait for another task that
// is never scheduled.
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
status = theStatus;
taskRunning = true;
condition.notify_all();
@@ -261,7 +261,7 @@ TEST_F(TaskRunnerTest, Cancel) {
getTaskRunner().schedule(task);
ASSERT_TRUE(getTaskRunner().isActive());
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
while (!taskRunning) {
condition.wait(lk);
}
@@ -276,13 +276,13 @@ TEST_F(TaskRunnerTest, Cancel) {
// This status will not be OK if canceling the task runner
// before scheduling the task results in the task being canceled.
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_OK(status);
}
TEST_F(TaskRunnerTest, JoinShouldWaitForTasksToComplete) {
unittest::Barrier barrier(2U);
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
Status status1 = getDetectableErrorStatus();
Status status2 = getDetectableErrorStatus();
@@ -290,7 +290,7 @@ TEST_F(TaskRunnerTest, JoinShouldWaitForTasksToComplete) {
// Upon completion, "task1" requests the task runner to retain the operation context. This has
// effect of keeping the task runner active.
auto task1 = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
barrier.countDownAndWait();
status1 = theStatus;
return TaskRunner::NextAction::kKeepOperationContext;
@@ -300,7 +300,7 @@ TEST_F(TaskRunnerTest, JoinShouldWaitForTasksToComplete) {
// Upon completion, "task2" requests the task runner to dispose the operation context. After the
// operation context is destroyed, the task runner will go into an inactive state.
auto task2 = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
status2 = theStatus;
return TaskRunner::NextAction::kDisposeOperationContext;
};
@@ -314,13 +314,13 @@ TEST_F(TaskRunnerTest, JoinShouldWaitForTasksToComplete) {
// This status should be OK because we ensured that the task
// was scheduled and invoked before we called cancel().
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_OK(status1);
ASSERT_OK(status2);
}
TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable condition;
Status status = getDetectableErrorStatus();
bool taskRunning = false;
@@ -328,7 +328,7 @@ TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
// Running this task causes the task runner to wait for another task that
// is never scheduled.
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
status = theStatus;
taskRunning = true;
condition.notify_all();
@@ -338,7 +338,7 @@ TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
getTaskRunner().schedule(task);
ASSERT_TRUE(getTaskRunner().isActive());
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
while (!taskRunning) {
condition.wait(lk);
}
@@ -350,7 +350,7 @@ TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
// This status will not be OK if canceling the task runner
// before scheduling the task results in the task being canceled.
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_OK(status);
}
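Test-local mutexes use the zero-argument MONGO_MAKE_LATCH(), which yields an unnamed latch; the guard type is still Latch. A minimal sketch of the test-side pattern (the assertion body is hypothetical):

    auto mutex = MONGO_MAKE_LATCH();  // unnamed local latch guarding test state
    bool called = false;

    auto task = [&](OperationContext* opCtx, const Status& status) {
        stdx::lock_guard<Latch> lk(mutex);
        called = true;
        return TaskRunner::NextAction::kDisposeOperationContext;
    };

    // ... schedule `task` on the runner, then drain the thread pool ...

    stdx::lock_guard<Latch> lk(mutex);
    ASSERT_TRUE(called);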
diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
index 8060e7f9488..2a9eb7932f3 100644
--- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
@@ -1529,7 +1529,6 @@ TEST_F(TopoCoordTest, ReplSetGetStatus) {
Date_t appliedWallTime = Date_t() + Seconds(oplogProgress.getSecs());
OpTime oplogDurable(Timestamp(1, 1), 19);
Date_t durableWallTime = Date_t() + Seconds(oplogDurable.getSecs());
- ;
OpTime lastCommittedOpTime(Timestamp(5, 1), 20);
Date_t lastCommittedWallTime = Date_t() + Seconds(lastCommittedOpTime.getSecs());
OpTime readConcernMajorityOpTime(Timestamp(4, 1), 20);
diff --git a/src/mongo/db/repl_index_build_state.h b/src/mongo/db/repl_index_build_state.h
index 94d88ce78c4..fbd2639e2b9 100644
--- a/src/mongo/db/repl_index_build_state.h
+++ b/src/mongo/db/repl_index_build_state.h
@@ -38,7 +38,7 @@
#include "mongo/db/catalog/commit_quorum_options.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/future.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/uuid.h"
@@ -104,7 +104,7 @@ struct ReplIndexBuildState {
IndexBuildProtocol protocol;
// Protects the state below.
- mutable stdx::mutex mutex;
+ mutable Mutex mutex = MONGO_MAKE_LATCH("ReplIndexBuildState::mutex");
// Secondaries do not set this information, so it is only set on primaries or on
// transition to primary.
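
For member mutexes, as in the ReplIndexBuildState hunk above, the patch additionally gives each latch a diagnostic name following the "ClassName::memberName" convention. A minimal sketch of that pattern under the same assumptions; ExampleState and its members are hypothetical:

#include "mongo/platform/mutex.h"

namespace mongo {

struct ExampleState {
    // const readers may still take the lock because the latch is mutable,
    // matching ReplIndexBuildState above.
    int read() const {
        stdx::lock_guard<Latch> lk(mutex);
        return counter;
    }

    // Protects the state below.
    mutable Mutex mutex = MONGO_MAKE_LATCH("ExampleState::mutex");
    int counter = 0;
};

}  // namespace mongo
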
diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp
index a3854cb9038..def2a02bac2 100644
--- a/src/mongo/db/s/active_migrations_registry.cpp
+++ b/src/mongo/db/s/active_migrations_registry.cpp
@@ -60,7 +60,7 @@ ActiveMigrationsRegistry& ActiveMigrationsRegistry::get(OperationContext* opCtx)
StatusWith<ScopedDonateChunk> ActiveMigrationsRegistry::registerDonateChunk(
const MoveChunkRequest& args) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeReceiveChunkState) {
return _activeReceiveChunkState->constructErrorStatus();
}
@@ -80,7 +80,7 @@ StatusWith<ScopedDonateChunk> ActiveMigrationsRegistry::registerDonateChunk(
StatusWith<ScopedReceiveChunk> ActiveMigrationsRegistry::registerReceiveChunk(
const NamespaceString& nss, const ChunkRange& chunkRange, const ShardId& fromShardId) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeReceiveChunkState) {
return _activeReceiveChunkState->constructErrorStatus();
}
@@ -95,7 +95,7 @@ StatusWith<ScopedReceiveChunk> ActiveMigrationsRegistry::registerReceiveChunk(
}
boost::optional<NamespaceString> ActiveMigrationsRegistry::getActiveDonateChunkNss() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeMoveChunkState) {
return _activeMoveChunkState->args.getNss();
}
@@ -106,7 +106,7 @@ boost::optional<NamespaceString> ActiveMigrationsRegistry::getActiveDonateChunkN
BSONObj ActiveMigrationsRegistry::getActiveMigrationStatusReport(OperationContext* opCtx) {
boost::optional<NamespaceString> nss;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeMoveChunkState) {
nss = _activeMoveChunkState->args.getNss();
@@ -132,13 +132,13 @@ BSONObj ActiveMigrationsRegistry::getActiveMigrationStatusReport(OperationContex
}
void ActiveMigrationsRegistry::_clearDonateChunk() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_activeMoveChunkState);
_activeMoveChunkState.reset();
}
void ActiveMigrationsRegistry::_clearReceiveChunk() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_activeReceiveChunkState);
_activeReceiveChunkState.reset();
}
diff --git a/src/mongo/db/s/active_migrations_registry.h b/src/mongo/db/s/active_migrations_registry.h
index 2f5dc3b56ae..e885bc23b91 100644
--- a/src/mongo/db/s/active_migrations_registry.h
+++ b/src/mongo/db/s/active_migrations_registry.h
@@ -33,8 +33,8 @@
#include <memory>
#include "mongo/db/s/migration_session_id.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/move_chunk_request.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/notification.h"
namespace mongo {
@@ -152,7 +152,7 @@ private:
void _clearReceiveChunk();
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ActiveMigrationsRegistry::_mutex");
// If there is an active moveChunk operation, this field contains the original request
boost::optional<ActiveMoveChunkState> _activeMoveChunkState;
diff --git a/src/mongo/db/s/active_move_primaries_registry.cpp b/src/mongo/db/s/active_move_primaries_registry.cpp
index fa383581038..4f4a5cf945e 100644
--- a/src/mongo/db/s/active_move_primaries_registry.cpp
+++ b/src/mongo/db/s/active_move_primaries_registry.cpp
@@ -56,7 +56,7 @@ ActiveMovePrimariesRegistry& ActiveMovePrimariesRegistry::get(OperationContext*
StatusWith<ScopedMovePrimary> ActiveMovePrimariesRegistry::registerMovePrimary(
const ShardMovePrimary& requestArgs) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeMovePrimaryState) {
if (_activeMovePrimaryState->requestArgs == requestArgs) {
return {ScopedMovePrimary(nullptr, false, _activeMovePrimaryState->notification)};
@@ -71,7 +71,7 @@ StatusWith<ScopedMovePrimary> ActiveMovePrimariesRegistry::registerMovePrimary(
}
boost::optional<NamespaceString> ActiveMovePrimariesRegistry::getActiveMovePrimaryNss() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeMovePrimaryState) {
return _activeMovePrimaryState->requestArgs.get_shardsvrMovePrimary();
}
@@ -80,7 +80,7 @@ boost::optional<NamespaceString> ActiveMovePrimariesRegistry::getActiveMovePrima
}
void ActiveMovePrimariesRegistry::_clearMovePrimary() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_activeMovePrimaryState);
_activeMovePrimaryState.reset();
}
diff --git a/src/mongo/db/s/active_move_primaries_registry.h b/src/mongo/db/s/active_move_primaries_registry.h
index 38b19a6c94f..94f55657cba 100644
--- a/src/mongo/db/s/active_move_primaries_registry.h
+++ b/src/mongo/db/s/active_move_primaries_registry.h
@@ -99,7 +99,7 @@ private:
void _clearMovePrimary();
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ActiveMovePrimariesRegistry::_mutex");
// If there is an active movePrimary operation going on, this field contains the request that
// initiated it.
diff --git a/src/mongo/db/s/active_rename_collection_registry.cpp b/src/mongo/db/s/active_rename_collection_registry.cpp
index ae9b50b4dcf..6d4e1e533b9 100644
--- a/src/mongo/db/s/active_rename_collection_registry.cpp
+++ b/src/mongo/db/s/active_rename_collection_registry.cpp
@@ -97,14 +97,14 @@ StatusWith<ScopedRenameCollection> ActiveRenameCollectionRegistry::registerRenam
}
void ActiveRenameCollectionRegistry::_clearRenameCollection(std::string nss) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto iter = _activeRenameCollectionMap.find(nss);
invariant(iter != _activeRenameCollectionMap.end());
_activeRenameCollectionMap.erase(nss);
}
void ActiveRenameCollectionRegistry::_setEmptyOrError(std::string nss, Status status) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto iter = _activeRenameCollectionMap.find(nss);
invariant(iter != _activeRenameCollectionMap.end());
auto activeRenameCollectionState = iter->second;
diff --git a/src/mongo/db/s/active_rename_collection_registry.h b/src/mongo/db/s/active_rename_collection_registry.h
index 50028b21f81..765f7627169 100644
--- a/src/mongo/db/s/active_rename_collection_registry.h
+++ b/src/mongo/db/s/active_rename_collection_registry.h
@@ -101,7 +101,7 @@ private:
void _setEmptyOrError(std::string nss, Status status);
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ActiveRenameCollectionRegistry::_mutex");
// Map containing any collections currently being renamed
StringMap<std::shared_ptr<ActiveRenameCollectionState>> _activeRenameCollectionMap;
diff --git a/src/mongo/db/s/active_shard_collection_registry.cpp b/src/mongo/db/s/active_shard_collection_registry.cpp
index 6a01fdd90ee..d2bda7ece20 100644
--- a/src/mongo/db/s/active_shard_collection_registry.cpp
+++ b/src/mongo/db/s/active_shard_collection_registry.cpp
@@ -91,7 +91,7 @@ ActiveShardCollectionRegistry& ActiveShardCollectionRegistry::get(OperationConte
StatusWith<ScopedShardCollection> ActiveShardCollectionRegistry::registerShardCollection(
const ShardsvrShardCollection& request) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
std::string nss = request.get_shardsvrShardCollection().get().ns();
auto iter = _activeShardCollectionMap.find(nss);
@@ -114,7 +114,7 @@ StatusWith<ScopedShardCollection> ActiveShardCollectionRegistry::registerShardCo
}
void ActiveShardCollectionRegistry::_clearShardCollection(std::string nss) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto iter = _activeShardCollectionMap.find(nss);
invariant(iter != _activeShardCollectionMap.end());
_activeShardCollectionMap.erase(nss);
@@ -122,7 +122,7 @@ void ActiveShardCollectionRegistry::_clearShardCollection(std::string nss) {
void ActiveShardCollectionRegistry::_setUUIDOrError(std::string nss,
StatusWith<boost::optional<UUID>> swUUID) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto iter = _activeShardCollectionMap.find(nss);
invariant(iter != _activeShardCollectionMap.end());
auto activeShardCollectionState = iter->second;
diff --git a/src/mongo/db/s/active_shard_collection_registry.h b/src/mongo/db/s/active_shard_collection_registry.h
index da734aee1c9..91423d65d7c 100644
--- a/src/mongo/db/s/active_shard_collection_registry.h
+++ b/src/mongo/db/s/active_shard_collection_registry.h
@@ -32,8 +32,8 @@
#include <boost/optional.hpp>
#include <memory>
+#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/shard_collection_gen.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/string_map.h"
@@ -108,7 +108,7 @@ private:
void _setUUIDOrError(std::string nss, StatusWith<boost::optional<UUID>> swUUID);
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ActiveShardCollectionRegistry::_mutex");
// Map containing any collections currently being sharded
StringMap<std::shared_ptr<ActiveShardCollectionState>> _activeShardCollectionMap;
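
The Active*Registry classes converted above all share one shape: a single named latch guards a map of in-progress operations, registration returns a StatusWith, and clearing asserts the entry exists. A minimal sketch of that shape after the mutex swap; ActiveThingRegistry and everything in it are hypothetical names, not the patch's API:

#include <string>

#include "mongo/base/status_with.h"
#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"

namespace mongo {

class ActiveThingRegistry {
public:
    StatusWith<bool> registerThing(const std::string& key) {
        stdx::lock_guard<Latch> lk(_mutex);
        if (_active.count(key)) {
            return Status(ErrorCodes::ConflictingOperationInProgress,
                          "operation already in progress for " + key);
        }
        _active.emplace(key, true);
        return true;
    }

    void clearThing(const std::string& key) {
        stdx::lock_guard<Latch> lk(_mutex);
        auto it = _active.find(key);
        invariant(it != _active.end());
        _active.erase(it);
    }

private:
    // Protects the state below
    Mutex _mutex = MONGO_MAKE_LATCH("ActiveThingRegistry::_mutex");
    stdx::unordered_map<std::string, bool> _active;
};

}  // namespace mongo
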
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index a920dd64ba9..9a2fadb8327 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -164,7 +164,7 @@ Balancer::Balancer(ServiceContext* serviceContext)
Balancer::~Balancer() {
// The balancer thread must have been stopped
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
invariant(_state == kStopped);
}
@@ -182,7 +182,7 @@ Balancer* Balancer::get(OperationContext* operationContext) {
}
void Balancer::initiateBalancer(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
invariant(_state == kStopped);
_state = kRunning;
@@ -194,7 +194,7 @@ void Balancer::initiateBalancer(OperationContext* opCtx) {
}
void Balancer::interruptBalancer() {
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
if (_state != kRunning)
return;
@@ -218,7 +218,7 @@ void Balancer::interruptBalancer() {
void Balancer::waitForBalancerToStop() {
{
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
if (_state == kStopped)
return;
@@ -228,7 +228,7 @@ void Balancer::waitForBalancerToStop() {
_thread.join();
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
_state = kStopped;
_thread = {};
@@ -236,7 +236,7 @@ void Balancer::waitForBalancerToStop() {
}
void Balancer::joinCurrentRound(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> scopedLock(_mutex);
+ stdx::unique_lock<Latch> scopedLock(_mutex);
const auto numRoundsAtStart = _numBalancerRounds;
opCtx->waitForConditionOrInterrupt(_condVar, scopedLock, [&] {
return !_inBalancerRound || _numBalancerRounds != numRoundsAtStart;
@@ -289,7 +289,7 @@ void Balancer::report(OperationContext* opCtx, BSONObjBuilder* builder) {
const auto mode = balancerConfig->getBalancerMode();
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
builder->append("mode", BalancerSettingsType::kBalancerModes[mode]);
builder->append("inBalancerRound", _inBalancerRound);
builder->append("numBalancerRounds", _numBalancerRounds);
@@ -303,7 +303,7 @@ void Balancer::_mainThread() {
log() << "CSRS balancer is starting";
{
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
_threadOperationContext = opCtx.get();
}
@@ -423,7 +423,7 @@ void Balancer::_mainThread() {
}
{
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
invariant(_state == kStopping);
invariant(_migrationManagerInterruptThread.joinable());
}
@@ -432,7 +432,7 @@ void Balancer::_mainThread() {
_migrationManager.drainActiveMigrations();
{
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
_migrationManagerInterruptThread = {};
_threadOperationContext = nullptr;
}
@@ -441,19 +441,19 @@ void Balancer::_mainThread() {
}
bool Balancer::_stopRequested() {
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
return (_state != kRunning);
}
void Balancer::_beginRound(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_inBalancerRound = true;
_condVar.notify_all();
}
void Balancer::_endRound(OperationContext* opCtx, Milliseconds waitTimeout) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_inBalancerRound = false;
_numBalancerRounds++;
_condVar.notify_all();
@@ -464,7 +464,7 @@ void Balancer::_endRound(OperationContext* opCtx, Milliseconds waitTimeout) {
}
void Balancer::_sleepFor(OperationContext* opCtx, Milliseconds waitTimeout) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condVar.wait_for(lock, waitTimeout.toSystemDuration(), [&] { return _state != kRunning; });
}
@@ -672,7 +672,7 @@ void Balancer::_splitOrMarkJumbo(OperationContext* opCtx,
}
void Balancer::notifyPersistedBalancerSettingsChanged() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condVar.notify_all();
}
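
The Balancer::_sleepFor and _endRound hunks above show that condition-variable waits, timed or not, keep their exact protocol across the swap; only the lock's template argument changes. A minimal sketch of the timed, predicate-guarded wait, with hypothetical SleeperExample names:

#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"
#include "mongo/util/duration.h"

namespace mongo {

class SleeperExample {
public:
    // Mirrors Balancer::_sleepFor: block until stopped or the timeout elapses.
    void sleepFor(Milliseconds waitTimeout) {
        stdx::unique_lock<Latch> lock(_mutex);
        _condVar.wait_for(
            lock, waitTimeout.toSystemDuration(), [&] { return _stopped; });
    }

    // Mirrors the notify side: flip the state and wake all sleepers.
    void stop() {
        stdx::lock_guard<Latch> lock(_mutex);
        _stopped = true;
        _condVar.notify_all();
    }

private:
    Mutex _mutex = MONGO_MAKE_LATCH("SleeperExample::_mutex");
    stdx::condition_variable _condVar;
    bool _stopped = false;
};

}  // namespace mongo
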
diff --git a/src/mongo/db/s/balancer/balancer.h b/src/mongo/db/s/balancer/balancer.h
index d33d6c1ddc0..4e22590bf1d 100644
--- a/src/mongo/db/s/balancer/balancer.h
+++ b/src/mongo/db/s/balancer/balancer.h
@@ -32,8 +32,8 @@
#include "mongo/db/s/balancer/balancer_chunk_selection_policy.h"
#include "mongo/db/s/balancer/balancer_random.h"
#include "mongo/db/s/balancer/migration_manager.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
namespace mongo {
@@ -208,7 +208,7 @@ private:
const BSONObj& minKey);
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("Balancer::_mutex");
// Indicates the current state of the balancer
State _state{kStopped};
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 0a988cf1b13..4af124368e4 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -210,7 +210,7 @@ Status MigrationManager::executeManualMigration(
void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kStopped);
invariant(_migrationRecoveryMap.empty());
_state = State::kRecovering;
@@ -285,7 +285,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state == State::kStopping) {
_migrationRecoveryMap.clear();
return;
@@ -367,7 +367,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
scopedGuard.dismiss();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state == State::kRecovering) {
_state = State::kEnabled;
_condVar.notify_all();
@@ -383,7 +383,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
void MigrationManager::interruptAndDisableMigrations() {
auto executor = Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor();
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kEnabled || _state == State::kRecovering);
_state = State::kStopping;
@@ -402,7 +402,7 @@ void MigrationManager::interruptAndDisableMigrations() {
}
void MigrationManager::drainActiveMigrations() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_state == State::kStopped)
return;
@@ -421,7 +421,7 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
// Ensure we are not stopped in order to avoid doing the extra work
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kEnabled && _state != State::kRecovering) {
return std::make_shared<Notification<RemoteCommandResponse>>(
Status(ErrorCodes::BalancerInterrupted,
@@ -457,7 +457,7 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
secondaryThrottle,
waitForDelete);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kEnabled && _state != State::kRecovering) {
return std::make_shared<Notification<RemoteCommandResponse>>(
@@ -522,7 +522,7 @@ void MigrationManager::_schedule(WithLock lock,
ThreadClient tc(getThreadName(), service);
auto opCtx = cc().makeOperationContext();
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_complete(lock, opCtx.get(), itMigration, args.response);
});
@@ -573,12 +573,12 @@ void MigrationManager::_checkDrained(WithLock) {
}
void MigrationManager::_waitForRecovery() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condVar.wait(lock, [this] { return _state != State::kRecovering; });
}
void MigrationManager::_abandonActiveMigrationsAndEnableManager(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_state == State::kStopping) {
// The balancer was interrupted. Let the next balancer recover the state.
return;
@@ -605,7 +605,7 @@ Status MigrationManager::_processRemoteCommandResponse(
const RemoteCommandResponse& remoteCommandResponse,
ScopedMigrationRequest* scopedMigrationRequest) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
Status commandStatus(ErrorCodes::InternalError, "Uninitialized value.");
// Check for local errors sending the remote command caused by stepdown.
diff --git a/src/mongo/db/s/balancer/migration_manager.h b/src/mongo/db/s/balancer/migration_manager.h
index 4f6c1288571..0e517b7e067 100644
--- a/src/mongo/db/s/balancer/migration_manager.h
+++ b/src/mongo/db/s/balancer/migration_manager.h
@@ -38,10 +38,10 @@
#include "mongo/db/s/balancer/balancer_policy.h"
#include "mongo/db/s/balancer/type_migration.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -260,7 +260,7 @@ private:
stdx::unordered_map<NamespaceString, std::list<MigrationType>> _migrationRecoveryMap;
// Protects the class state below.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MigrationManager::_mutex");
// Always start the migration manager in a stopped state.
State _state{State::kStopped};
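
MigrationManager's _schedule(WithLock, ...) and _checkDrained(WithLock) above also demonstrate that the WithLock idiom composes unchanged with the new Mutex type. A minimal sketch, assuming mongo/util/concurrency/with_lock.h as included above; ExampleManager and its members are hypothetical:

#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/with_lock.h"

namespace mongo {

class ExampleManager {
public:
    void enable() {
        stdx::lock_guard<Latch> lk(_mutex);
        _enabled = true;
        _checkInvariants(lk);  // a lock_guard converts implicitly to WithLock
    }

private:
    // WithLock documents that the caller already holds _mutex, without
    // re-locking; the idiom is untouched by the stdx::mutex -> Mutex swap.
    void _checkInvariants(WithLock) {
        invariant(_enabled);
    }

    Mutex _mutex = MONGO_MAKE_LATCH("ExampleManager::_mutex");
    bool _enabled = false;
};

}  // namespace mongo
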
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index 049ab0ae261..c7dd1e22250 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -234,12 +234,12 @@ ChunkSplitter& ChunkSplitter::get(ServiceContext* serviceContext) {
}
void ChunkSplitter::onShardingInitialization(bool isPrimary) {
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
_isPrimary = isPrimary;
}
void ChunkSplitter::onStepUp() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (_isPrimary) {
return;
}
@@ -249,7 +249,7 @@ void ChunkSplitter::onStepUp() {
}
void ChunkSplitter::onStepDown() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_isPrimary) {
return;
}
diff --git a/src/mongo/db/s/chunk_splitter.h b/src/mongo/db/s/chunk_splitter.h
index ef774dc017c..a05683fc6e7 100644
--- a/src/mongo/db/s/chunk_splitter.h
+++ b/src/mongo/db/s/chunk_splitter.h
@@ -107,7 +107,7 @@ private:
long dataWritten);
// Protects the state below.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ChunkSplitter::_mutex");
// The ChunkSplitter is only active on a primary node.
bool _isPrimary{false};
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index 27b0a47a7ef..9fba4d4c1b2 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -134,7 +134,7 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
bool writeOpLog = false;
{
- stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
+ stdx::lock_guard<Latch> scopedLock(csr->_metadataManager->_managerLock);
if (self->isEmpty()) {
LOG(1) << "No further range deletions scheduled on " << nss.ns();
return boost::none;
@@ -181,7 +181,7 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
<< "ns" << nss.ns() << "epoch" << epoch << "min"
<< range->getMin() << "max" << range->getMax()));
} catch (const DBException& e) {
- stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
+ stdx::lock_guard<Latch> scopedLock(csr->_metadataManager->_managerLock);
csr->_metadataManager->_clearAllCleanups(
scopedLock,
e.toStatus("cannot push startRangeDeletion record to Op Log,"
@@ -254,7 +254,7 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
auto* const self = forTestOnly ? forTestOnly : &metadataManager->_rangesToClean;
- stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
+ stdx::lock_guard<Latch> scopedLock(csr->_metadataManager->_managerLock);
if (!replicationStatus.isOK()) {
LOG(0) << "Error when waiting for write concern after removing " << nss << " range "
@@ -304,7 +304,7 @@ bool CollectionRangeDeleter::_checkCollectionMetadataStillValid(
if (!scopedCollectionMetadata) {
LOG(0) << "Abandoning any range deletions because the metadata for " << nss.ns()
<< " was reset";
- stdx::lock_guard<stdx::mutex> lk(metadataManager->_managerLock);
+ stdx::lock_guard<Latch> lk(metadataManager->_managerLock);
metadataManager->_clearAllCleanups(lk);
return false;
}
@@ -319,7 +319,7 @@ bool CollectionRangeDeleter::_checkCollectionMetadataStillValid(
<< nss.ns();
}
- stdx::lock_guard<stdx::mutex> lk(metadataManager->_managerLock);
+ stdx::lock_guard<Latch> lk(metadataManager->_managerLock);
metadataManager->_clearAllCleanups(lk);
return false;
}
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index 7e38463b55e..687c9a877b1 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -58,7 +58,7 @@ public:
: _factory(std::move(factory)) {}
CollectionShardingState& getOrCreate(const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto it = _collections.find(nss.ns());
if (it == _collections.end()) {
@@ -74,7 +74,7 @@ public:
BSONObjBuilder versionB(builder->subobjStart("versions"));
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
for (auto& coll : _collections) {
const auto optMetadata = coll.second->getCurrentMetadataIfKnown();
@@ -93,7 +93,7 @@ private:
std::unique_ptr<CollectionShardingStateFactory> _factory;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CollectionShardingStateMap::_mutex");
CollectionsMap _collections;
};
diff --git a/src/mongo/db/s/collection_sharding_state_factory_shard.cpp b/src/mongo/db/s/collection_sharding_state_factory_shard.cpp
index 49a4f118ce5..b0c800c92cb 100644
--- a/src/mongo/db/s/collection_sharding_state_factory_shard.cpp
+++ b/src/mongo/db/s/collection_sharding_state_factory_shard.cpp
@@ -58,7 +58,7 @@ public:
private:
executor::TaskExecutor* _getExecutor() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_taskExecutor) {
const std::string kExecName("CollectionRangeDeleter-TaskExecutor");
@@ -75,7 +75,7 @@ private:
}
// Serializes the instantiation of the task executor
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CollectionShardingStateFactoryShard::_mutex");
std::unique_ptr<executor::TaskExecutor> _taskExecutor{nullptr};
};
diff --git a/src/mongo/db/s/config/namespace_serializer.cpp b/src/mongo/db/s/config/namespace_serializer.cpp
index c132fe177b2..6c69eaa668d 100644
--- a/src/mongo/db/s/config/namespace_serializer.cpp
+++ b/src/mongo/db/s/config/namespace_serializer.cpp
@@ -49,7 +49,7 @@ NamespaceSerializer::ScopedLock::ScopedLock(StringData ns, NamespaceSerializer&
: _ns(ns.toString()), _nsSerializer(nsSerializer) {}
NamespaceSerializer::ScopedLock::~ScopedLock() {
- stdx::unique_lock<stdx::mutex> lock(_nsSerializer._mutex);
+ stdx::unique_lock<Latch> lock(_nsSerializer._mutex);
auto iter = _nsSerializer._inProgressMap.find(_ns);
iter->second->numWaiting--;
@@ -62,7 +62,7 @@ NamespaceSerializer::ScopedLock::~ScopedLock() {
}
NamespaceSerializer::ScopedLock NamespaceSerializer::lock(OperationContext* opCtx, StringData nss) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto iter = _inProgressMap.find(nss);
if (iter == _inProgressMap.end()) {
diff --git a/src/mongo/db/s/config/namespace_serializer.h b/src/mongo/db/s/config/namespace_serializer.h
index 912171dcdbc..f0e6c4b158c 100644
--- a/src/mongo/db/s/config/namespace_serializer.h
+++ b/src/mongo/db/s/config/namespace_serializer.h
@@ -36,8 +36,8 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/string_map.h"
namespace mongo {
@@ -72,7 +72,7 @@ private:
bool isInProgress = true;
};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("NamespaceSerializer::_mutex");
StringMap<std::shared_ptr<NSLock>> _inProgressMap;
};
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 424db73a9d0..557529099ff 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -100,7 +100,7 @@ ShardingCatalogManager::~ShardingCatalogManager() {
}
void ShardingCatalogManager::startup() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_started) {
return;
}
@@ -114,7 +114,7 @@ void ShardingCatalogManager::startup() {
void ShardingCatalogManager::shutDown() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
@@ -126,7 +126,7 @@ void ShardingCatalogManager::shutDown() {
Status ShardingCatalogManager::initializeConfigDatabaseIfNeeded(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_configInitialized) {
return {ErrorCodes::AlreadyInitialized,
"Config database was previously loaded into memory"};
@@ -146,14 +146,14 @@ Status ShardingCatalogManager::initializeConfigDatabaseIfNeeded(OperationContext
return status;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_configInitialized = true;
return Status::OK();
}
void ShardingCatalogManager::discardCachedConfigDatabaseInitializationState() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_configInitialized = false;
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index 821c2c037fa..0966ecba966 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -34,6 +34,7 @@
#include "mongo/db/repl/optime_with.h"
#include "mongo/db/s/config/namespace_serializer.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
@@ -41,7 +42,6 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/request_types/rename_collection_gen.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -533,7 +533,7 @@ private:
// (S) Self-synchronizing; access in any way from any context.
//
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardingCatalogManager::_mutex");
// True if shutDown() has been called. False, otherwise.
bool _inShutdown{false}; // (M)
diff --git a/src/mongo/db/s/database_sharding_state.cpp b/src/mongo/db/s/database_sharding_state.cpp
index f3557c5791e..643c36d9dcd 100644
--- a/src/mongo/db/s/database_sharding_state.cpp
+++ b/src/mongo/db/s/database_sharding_state.cpp
@@ -53,7 +53,7 @@ public:
DatabaseShardingStateMap() {}
DatabaseShardingState& getOrCreate(const StringData dbName) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto it = _databases.find(dbName);
if (it == _databases.end()) {
@@ -69,7 +69,7 @@ public:
private:
using DatabasesMap = StringMap<std::shared_ptr<DatabaseShardingState>>;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DatabaseShardingStateMap::_mutex");
DatabasesMap _databases;
};
diff --git a/src/mongo/db/s/implicit_create_collection.cpp b/src/mongo/db/s/implicit_create_collection.cpp
index 7ea8c1e1345..b0ccfc17e37 100644
--- a/src/mongo/db/s/implicit_create_collection.cpp
+++ b/src/mongo/db/s/implicit_create_collection.cpp
@@ -46,8 +46,8 @@
#include "mongo/s/grid.h"
#include "mongo/s/request_types/create_collection_gen.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
@@ -73,7 +73,7 @@ public:
invariant(!opCtx->lockState()->isLocked());
{
- stdx::unique_lock<stdx::mutex> lg(_mutex);
+ stdx::unique_lock<Latch> lg(_mutex);
while (_isInProgress) {
auto status = opCtx->waitForConditionOrInterruptNoAssert(_cvIsInProgress, lg);
if (!status.isOK()) {
@@ -85,7 +85,7 @@ public:
}
ON_BLOCK_EXIT([&] {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_isInProgress = false;
_cvIsInProgress.notify_one();
});
@@ -128,7 +128,7 @@ public:
private:
const NamespaceString _ns;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CreateCollectionSerializer::_mutex");
stdx::condition_variable _cvIsInProgress;
bool _isInProgress = false;
};
@@ -136,7 +136,7 @@ private:
class CreateCollectionSerializerMap {
public:
std::shared_ptr<CreateCollectionSerializer> getForNs(const NamespaceString& ns) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto iter = _inProgressMap.find(ns.ns());
if (iter == _inProgressMap.end()) {
std::tie(iter, std::ignore) =
@@ -147,12 +147,12 @@ public:
}
void cleanupNs(const NamespaceString& ns) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_inProgressMap.erase(ns.ns());
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CreateCollectionSerializerMap::_mutex");
std::map<std::string, std::shared_ptr<CreateCollectionSerializer>> _inProgressMap;
};
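
The CreateCollectionSerializer hunks above show that OperationContext's interruptible waits accept the new lock type directly, so interruption (for example killOp or stepdown) keeps working after the swap. A minimal sketch under that assumption; BusyFlag and waitUntilIdle are hypothetical names:

#include "mongo/base/status.h"
#include "mongo/db/operation_context.h"
#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"

namespace mongo {

class BusyFlag {
public:
    // Wait until no other caller is in progress, giving up early if the
    // operation is interrupted.
    Status waitUntilIdle(OperationContext* opCtx) {
        stdx::unique_lock<Latch> lg(_mutex);
        while (_busy) {
            auto status = opCtx->waitForConditionOrInterruptNoAssert(_cv, lg);
            if (!status.isOK()) {
                return status;
            }
        }
        _busy = true;
        return Status::OK();
    }

    void release() {
        stdx::lock_guard<Latch> lg(_mutex);
        _busy = false;
        _cv.notify_one();
    }

private:
    Mutex _mutex = MONGO_MAKE_LATCH("BusyFlag::_mutex");
    stdx::condition_variable _cv;
    bool _busy = false;
};

}  // namespace mongo
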
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 5d832418367..52f606b4031 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -173,7 +173,7 @@ public:
}
~RangePreserver() {
- stdx::lock_guard<stdx::mutex> managerLock(_metadataManager->_managerLock);
+ stdx::lock_guard<Latch> managerLock(_metadataManager->_managerLock);
invariant(_metadataTracker->usageCounter != 0);
if (--_metadataTracker->usageCounter == 0) {
@@ -232,7 +232,7 @@ void MetadataManager::_clearAllCleanups(WithLock, Status status) {
boost::optional<ScopedCollectionMetadata> MetadataManager::getActiveMetadata(
std::shared_ptr<MetadataManager> self, const boost::optional<LogicalTime>& atClusterTime) {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
if (_metadata.empty()) {
return boost::none;
@@ -269,7 +269,7 @@ boost::optional<ScopedCollectionMetadata> MetadataManager::getActiveMetadata(
}
size_t MetadataManager::numberOfMetadataSnapshots() const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
if (_metadata.empty())
return 0;
@@ -277,7 +277,7 @@ size_t MetadataManager::numberOfMetadataSnapshots() const {
}
int MetadataManager::numberOfEmptyMetadataSnapshots() const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
int emptyMetadataSnapshots = 0;
for (const auto& collMetadataTracker : _metadata) {
@@ -289,7 +289,7 @@ int MetadataManager::numberOfEmptyMetadataSnapshots() const {
}
void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
// Collection is becoming sharded
if (_metadata.empty()) {
@@ -352,7 +352,7 @@ void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
}
void MetadataManager::clearFilteringMetadata() {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
_receivingChunks.clear();
_clearAllCleanups(lg);
_metadata.clear();
@@ -394,7 +394,7 @@ void MetadataManager::_retireExpiredMetadata(WithLock lock) {
}
void MetadataManager::toBSONPending(BSONArrayBuilder& bb) const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
for (auto it = _receivingChunks.begin(); it != _receivingChunks.end(); ++it) {
BSONArrayBuilder pendingBB(bb.subarrayStart());
@@ -405,7 +405,7 @@ void MetadataManager::toBSONPending(BSONArrayBuilder& bb) const {
}
void MetadataManager::append(BSONObjBuilder* builder) const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
_rangesToClean.append(builder);
@@ -450,7 +450,7 @@ void MetadataManager::_pushListToClean(WithLock, std::list<Deletion> ranges) {
}
auto MetadataManager::beginReceive(ChunkRange const& range) -> CleanupNotification {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
if (_overlapsInUseChunk(lg, range)) {
@@ -467,7 +467,7 @@ auto MetadataManager::beginReceive(ChunkRange const& range) -> CleanupNotificati
}
void MetadataManager::forgetReceive(ChunkRange const& range) {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
// This is potentially a partially received chunk, which needs to be cleaned up. We know none
@@ -486,7 +486,7 @@ void MetadataManager::forgetReceive(ChunkRange const& range) {
auto MetadataManager::cleanUpRange(ChunkRange const& range, Date_t whenToDelete)
-> CleanupNotification {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
auto* const activeMetadata = _metadata.back().get();
@@ -523,7 +523,7 @@ auto MetadataManager::cleanUpRange(ChunkRange const& range, Date_t whenToDelete)
}
size_t MetadataManager::numberOfRangesToCleanStillInUse() const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
size_t count = 0;
for (auto& tracker : _metadata) {
count += tracker->orphans.size();
@@ -532,13 +532,13 @@ size_t MetadataManager::numberOfRangesToCleanStillInUse() const {
}
size_t MetadataManager::numberOfRangesToClean() const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
return _rangesToClean.size();
}
auto MetadataManager::trackOrphanedDataCleanup(ChunkRange const& range) const
-> boost::optional<CleanupNotification> {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
auto overlaps = _overlapsInUseCleanups(lg, range);
if (overlaps) {
return overlaps;
@@ -591,7 +591,7 @@ auto MetadataManager::_overlapsInUseCleanups(WithLock, ChunkRange const& range)
}
boost::optional<ChunkRange> MetadataManager::getNextOrphanRange(BSONObj const& from) const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
return _metadata.back()->metadata->getNextOrphanRange(_receivingChunks, from);
}
diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h
index 0eb43d529c2..90a0a7e233e 100644
--- a/src/mongo/db/s/metadata_manager.h
+++ b/src/mongo/db/s/metadata_manager.h
@@ -240,7 +240,7 @@ private:
executor::TaskExecutor* const _executor;
// Mutex to protect the state below
- mutable stdx::mutex _managerLock;
+ mutable Mutex _managerLock = MONGO_MAKE_LATCH("MetadataManager::_managerLock");
// Contains a list of collection metadata for the same collection epoch, ordered in
// chronological order based on the refreshes that occurred. The entry at _metadata.back() is
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 1e367cf7aea..e5d3480fa3d 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -42,10 +42,10 @@
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/shard_server_test_fixture.h"
-#include "mongo/stdx/condition_variable.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index b2d8544b21a..7b891cf8e18 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -292,7 +292,7 @@ Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* opCtx) {
// between cancellations for different migration sessions. It is thus possible that a second
// migration from a different donor, but the same recipient, would abort an already
// running migration.
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_state = kCloning;
return Status::OK();
@@ -321,7 +321,7 @@ Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
}
iteration++;
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
const std::size_t cloneLocsRemaining = _cloneLocs.size();
@@ -551,14 +551,14 @@ void MigrationChunkClonerSourceLegacy::_addToTransferModsQueue(
const repl::OpTime& prePostImageOpTime) {
switch (op) {
case 'd': {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_deleted.push_back(idObj);
_memoryUsed += idObj.firstElement().size() + 5;
} break;
case 'i':
case 'u': {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_reload.push_back(idObj);
_memoryUsed += idObj.firstElement().size() + 5;
} break;
@@ -574,7 +574,7 @@ void MigrationChunkClonerSourceLegacy::_addToTransferModsQueue(
}
bool MigrationChunkClonerSourceLegacy::_addedOperationToOutstandingOperationTrackRequests() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_acceptingNewOperationTrackRequests) {
return false;
}
@@ -584,7 +584,7 @@ bool MigrationChunkClonerSourceLegacy::_addedOperationToOutstandingOperationTrac
}
void MigrationChunkClonerSourceLegacy::_drainAllOutstandingOperationTrackRequests(
- stdx::unique_lock<stdx::mutex>& lk) {
+ stdx::unique_lock<Latch>& lk) {
invariant(_state == kDone);
_acceptingNewOperationTrackRequests = false;
_allOutstandingOperationTrackRequestsDrained.wait(
@@ -598,7 +598,7 @@ void MigrationChunkClonerSourceLegacy::_incrementOutstandingOperationTrackReques
}
void MigrationChunkClonerSourceLegacy::_decrementOutstandingOperationTrackRequests() {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
--_outstandingOperationTrackRequests;
if (_outstandingOperationTrackRequests == 0) {
_allOutstandingOperationTrackRequestsDrained.notify_all();
@@ -606,7 +606,7 @@ void MigrationChunkClonerSourceLegacy::_decrementOutstandingOperationTrackReques
}
uint64_t MigrationChunkClonerSourceLegacy::getCloneBatchBufferAllocationSize() {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
return std::min(static_cast<uint64_t>(BSONObjMaxUserSize),
_averageObjectSizeForCloneLocs * _cloneLocs.size());
@@ -621,7 +621,7 @@ Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* opCtx,
internalQueryExecYieldIterations.load(),
Milliseconds(internalQueryExecYieldPeriodMS.load()));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto iter = _cloneLocs.begin();
for (; iter != _cloneLocs.end(); ++iter) {
@@ -666,7 +666,7 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
{
// All clone data must have been drained before starting to fetch the incremental changes.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_cloneLocs.empty());
// The "snapshot" for delete and update list must be taken under a single lock. This is to
@@ -685,7 +685,7 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
builder->append("size", totalDocSize);
// Put back remaining ids we didn't consume
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_deleted.splice(_deleted.cbegin(), deleteList);
_reload.splice(_reload.cbegin(), updateList);
@@ -693,7 +693,7 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
}
void MigrationChunkClonerSourceLegacy::_cleanup(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_state = kDone;
_drainAllOutstandingOperationTrackRequests(lk);
@@ -800,7 +800,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
}
if (!isLargeChunk) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_cloneLocs.insert(recordId);
}
@@ -829,7 +829,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
<< _args.getMaxKey()};
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_averageObjectSizeForCloneLocs = collectionAverageObjectSize + 12;
return Status::OK();
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index e77998d907d..34b41503bec 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -40,10 +40,10 @@
#include "mongo/db/s/migration_chunk_cloner_source.h"
#include "mongo/db/s/migration_session_id.h"
#include "mongo/db/s/session_catalog_migration_source.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/move_chunk_request.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
@@ -285,7 +285,7 @@ private:
* function. Should only be used in the cleanup for this class. Should use a lock wrapped
* around this class's mutex.
*/
- void _drainAllOutstandingOperationTrackRequests(stdx::unique_lock<stdx::mutex>& lk);
+ void _drainAllOutstandingOperationTrackRequests(stdx::unique_lock<Latch>& lk);
/**
* Appends to the builder the list of _id of documents that were deleted during migration.
@@ -325,7 +325,7 @@ private:
std::unique_ptr<SessionCatalogMigrationSource> _sessionCatalogSource;
// Protects the entries below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MigrationChunkClonerSourceLegacy::_mutex");
// The current state of the cloner
State _state{kNew};
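
The _drainAllOutstandingOperationTrackRequests change above is the one place in this file where the lock type leaks into an interface: helpers that wait on the caller's held lock now take stdx::unique_lock<Latch>& instead of stdx::unique_lock<stdx::mutex>&. A minimal sketch of that handoff, with hypothetical DrainableExample names:

#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"

namespace mongo {

class DrainableExample {
public:
    void shutdown() {
        stdx::unique_lock<Latch> lk(_mutex);
        _drainAll(lk);  // hand the held lock to the helper
    }

private:
    // Only the parameter type changes in the migration; the predicate wait on
    // the caller's lock is otherwise identical.
    void _drainAll(stdx::unique_lock<Latch>& lk) {
        _accepting = false;
        _drained.wait(lk, [&] { return _outstanding == 0; });
    }

    Mutex _mutex = MONGO_MAKE_LATCH("DrainableExample::_mutex");
    stdx::condition_variable _drained;
    bool _accepting = true;
    int _outstanding = 0;
};

}  // namespace mongo
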
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 8cbb3b2875c..307512ae2d8 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -225,12 +225,12 @@ MigrationDestinationManager* MigrationDestinationManager::get(OperationContext*
}
MigrationDestinationManager::State MigrationDestinationManager::getState() const {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
return _state;
}
void MigrationDestinationManager::setState(State newState) {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_state = newState;
_stateChangedCV.notify_all();
}
@@ -238,7 +238,7 @@ void MigrationDestinationManager::setState(State newState) {
void MigrationDestinationManager::_setStateFail(StringData msg) {
log() << msg;
{
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_errmsg = msg.toString();
_state = FAIL;
_stateChangedCV.notify_all();
@@ -250,7 +250,7 @@ void MigrationDestinationManager::_setStateFail(StringData msg) {
void MigrationDestinationManager::_setStateFailWarn(StringData msg) {
warning() << msg;
{
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_errmsg = msg.toString();
_state = FAIL;
_stateChangedCV.notify_all();
@@ -260,7 +260,7 @@ void MigrationDestinationManager::_setStateFailWarn(StringData msg) {
}
bool MigrationDestinationManager::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isActive(lk);
}
@@ -272,7 +272,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
OperationContext* opCtx,
bool waitForSteadyOrDone) {
if (waitForSteadyOrDone) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
try {
opCtx->waitForConditionOrInterruptFor(_stateChangedCV, lock, Seconds(1), [&]() -> bool {
return _state != READY && _state != CLONE && _state != CATCHUP;
@@ -283,7 +283,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
}
b.append("waited", true);
}
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
b.appendBool("active", _sessionId.is_initialized());
@@ -314,7 +314,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
}
BSONObj MigrationDestinationManager::getMigrationStatusReport() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_isActive(lk)) {
return migrationutil::makeMigrationStatusDocument(
_nss, _fromShard, _toShard, false, _min, _max);
@@ -329,7 +329,7 @@ Status MigrationDestinationManager::start(OperationContext* opCtx,
const StartChunkCloneRequest cloneRequest,
const OID& epoch,
const WriteConcernOptions& writeConcern) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_sessionId);
invariant(!_scopedReceiveChunk);
@@ -437,7 +437,7 @@ repl::OpTime MigrationDestinationManager::cloneDocumentsFromDonor(
}
Status MigrationDestinationManager::abort(const MigrationSessionId& sessionId) {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
if (!_sessionId) {
return Status::OK();
@@ -458,7 +458,7 @@ Status MigrationDestinationManager::abort(const MigrationSessionId& sessionId) {
}
void MigrationDestinationManager::abortWithoutSessionIdCheck() {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_state = ABORT;
_stateChangedCV.notify_all();
_errmsg = "aborted without session id check";
@@ -466,7 +466,7 @@ void MigrationDestinationManager::abortWithoutSessionIdCheck() {
Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessionId) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_state != STEADY) {
return {ErrorCodes::CommandFailed,
@@ -734,7 +734,7 @@ void MigrationDestinationManager::_migrateThread() {
_forgetPending(opCtx.get(), ChunkRange(_min, _max));
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_sessionId.reset();
_scopedReceiveChunk.reset();
_isActiveCV.notify_all();
@@ -846,7 +846,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
}
{
- stdx::lock_guard<stdx::mutex> statsLock(_mutex);
+ stdx::lock_guard<Latch> statsLock(_mutex);
_numCloned += batchNumCloned;
ShardingStatistics::get(opCtx).countDocsClonedOnRecipient.addAndFetch(
batchNumCloned);
diff --git a/src/mongo/db/s/migration_destination_manager.h b/src/mongo/db/s/migration_destination_manager.h
index 18c008900cc..607eec9a68a 100644
--- a/src/mongo/db/s/migration_destination_manager.h
+++ b/src/mongo/db/s/migration_destination_manager.h
@@ -41,9 +41,9 @@
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/migration_session_id.h"
#include "mongo/db/s/session_catalog_migration_destination.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/shard_id.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/timer.h"
@@ -178,7 +178,7 @@ private:
bool _isActive(WithLock) const;
// Mutex to guard all fields
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MigrationDestinationManager::_mutex");
// Migration session ID uniquely identifies the migration and indicates whether the prepare
// method has been called.
diff --git a/src/mongo/db/s/namespace_metadata_change_notifications.cpp b/src/mongo/db/s/namespace_metadata_change_notifications.cpp
index 6a288834ce7..ecf63039105 100644
--- a/src/mongo/db/s/namespace_metadata_change_notifications.cpp
+++ b/src/mongo/db/s/namespace_metadata_change_notifications.cpp
@@ -36,7 +36,7 @@ namespace mongo {
NamespaceMetadataChangeNotifications::NamespaceMetadataChangeNotifications() = default;
NamespaceMetadataChangeNotifications::~NamespaceMetadataChangeNotifications() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_notificationsList.empty());
}
@@ -44,7 +44,7 @@ NamespaceMetadataChangeNotifications::ScopedNotification
NamespaceMetadataChangeNotifications::createNotification(const NamespaceString& nss) {
auto notifToken = std::make_shared<NotificationToken>(nss);
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto& notifList = _notificationsList[nss];
notifToken->itToErase = notifList.insert(notifList.end(), notifToken);
@@ -53,7 +53,7 @@ NamespaceMetadataChangeNotifications::createNotification(const NamespaceString&
}
void NamespaceMetadataChangeNotifications::notifyChange(const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto mapIt = _notificationsList.find(nss);
if (mapIt == _notificationsList.end()) {
@@ -70,7 +70,7 @@ void NamespaceMetadataChangeNotifications::notifyChange(const NamespaceString& n
void NamespaceMetadataChangeNotifications::_unregisterNotificationToken(
std::shared_ptr<NotificationToken> token) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!token->itToErase) {
return;
diff --git a/src/mongo/db/s/namespace_metadata_change_notifications.h b/src/mongo/db/s/namespace_metadata_change_notifications.h
index ba7c51e86a0..12df62bfb95 100644
--- a/src/mongo/db/s/namespace_metadata_change_notifications.h
+++ b/src/mongo/db/s/namespace_metadata_change_notifications.h
@@ -33,7 +33,7 @@
#include <map>
#include "mongo/db/namespace_string.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/notification.h"
namespace mongo {
@@ -114,7 +114,7 @@ private:
void _unregisterNotificationToken(std::shared_ptr<NotificationToken> token);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("NamespaceMetadataChangeNotifications::_mutex");
std::map<NamespaceString, NotificationsList> _notificationsList;
};
diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp
index ae7ca172c5f..9e55b7e2aad 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination.cpp
@@ -316,7 +316,7 @@ SessionCatalogMigrationDestination::~SessionCatalogMigrationDestination() {
void SessionCatalogMigrationDestination::start(ServiceContext* service) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_state == State::NotStarted);
_state = State::Migrating;
_isStateChanged.notify_all();
@@ -340,7 +340,7 @@ void SessionCatalogMigrationDestination::start(ServiceContext* service) {
}
void SessionCatalogMigrationDestination::finish() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state != State::ErrorOccurred) {
_state = State::Committing;
_isStateChanged.notify_all();
@@ -375,7 +375,7 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service
while (true) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state == State::ErrorOccurred) {
return;
}
@@ -393,7 +393,7 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service
if (oplogArray.isEmpty()) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state == State::Committing) {
// The migration is considered done only when it gets an empty result from
// the source shard while this is in state committing. This is to make sure
@@ -414,7 +414,7 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service
// We depleted the buffer at least once, transition to ready for commit.
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Note: only transition to "ready to commit" if state is not error/force stop.
if (_state == State::Migrating) {
_state = State::ReadyToCommit;
@@ -455,19 +455,19 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service
waitForWriteConcern(uniqueOpCtx.get(), lastResult.oplogTime, kMajorityWC, &unusedWCResult));
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_state = State::Done;
_isStateChanged.notify_all();
}
}
std::string SessionCatalogMigrationDestination::getErrMsg() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _errMsg;
}
void SessionCatalogMigrationDestination::_errorOccurred(StringData errMsg) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_state = State::ErrorOccurred;
_errMsg = errMsg.toString();
@@ -475,7 +475,7 @@ void SessionCatalogMigrationDestination::_errorOccurred(StringData errMsg) {
}
SessionCatalogMigrationDestination::State SessionCatalogMigrationDestination::getState() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
diff --git a/src/mongo/db/s/session_catalog_migration_destination.h b/src/mongo/db/s/session_catalog_migration_destination.h
index 89c43be2e62..185eecbb9ba 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.h
+++ b/src/mongo/db/s/session_catalog_migration_destination.h
@@ -36,9 +36,9 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/s/migration_session_id.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/shard_id.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -116,7 +116,7 @@ private:
stdx::thread _thread;
// Protects _state and _errMsg.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SessionCatalogMigrationDestination::_mutex");
stdx::condition_variable _isStateChanged;
State _state = State::NotStarted;
std::string _errMsg; // valid only if _state == ErrorOccurred.
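Only the lock type and headers change here; the condition-variable handshake on _isStateChanged is untouched. A sketch of how a waiter would block on it under the new guard type, written as a hypothetical helper that is not part of this patch:

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"

    // Hypothetical helper: block until the migration finishes or errors out.
    void SessionCatalogMigrationDestination::waitUntilDone() {
        stdx::unique_lock<Latch> lk(_mutex);  // unique_lock: the condvar must release and retake it
        _isStateChanged.wait(lk, [&] {
            return _state == State::Done || _state == State::ErrorOccurred;
        });
    }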
diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp
index f645174986d..15dd677e0ba 100644
--- a/src/mongo/db/s/session_catalog_migration_source.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source.cpp
@@ -181,12 +181,12 @@ bool SessionCatalogMigrationSource::hasMoreOplog() {
return true;
}
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
return _hasNewWrites(lk);
}
void SessionCatalogMigrationSource::onCommitCloneStarted() {
- stdx::lock_guard<stdx::mutex> _lk(_newOplogMutex);
+ stdx::lock_guard<Latch> _lk(_newOplogMutex);
_state = State::kCommitStarted;
if (_newOplogNotification) {
@@ -196,7 +196,7 @@ void SessionCatalogMigrationSource::onCommitCloneStarted() {
}
void SessionCatalogMigrationSource::onCloneCleanup() {
- stdx::lock_guard<stdx::mutex> _lk(_newOplogMutex);
+ stdx::lock_guard<Latch> _lk(_newOplogMutex);
_state = State::kCleanup;
if (_newOplogNotification) {
@@ -207,14 +207,14 @@ void SessionCatalogMigrationSource::onCloneCleanup() {
SessionCatalogMigrationSource::OplogResult SessionCatalogMigrationSource::getLastFetchedOplog() {
{
- stdx::lock_guard<stdx::mutex> _lk(_sessionCloneMutex);
+ stdx::lock_guard<Latch> _lk(_sessionCloneMutex);
if (_lastFetchedOplog) {
return OplogResult(_lastFetchedOplog, false);
}
}
{
- stdx::lock_guard<stdx::mutex> _lk(_newOplogMutex);
+ stdx::lock_guard<Latch> _lk(_newOplogMutex);
return OplogResult(_lastFetchedNewWriteOplog, true);
}
}
@@ -230,7 +230,7 @@ bool SessionCatalogMigrationSource::fetchNextOplog(OperationContext* opCtx) {
std::shared_ptr<Notification<bool>> SessionCatalogMigrationSource::getNotificationForNewOplog() {
invariant(!_hasMoreOplogFromSessionCatalog());
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
if (_newOplogNotification) {
return _newOplogNotification;
@@ -293,13 +293,13 @@ bool SessionCatalogMigrationSource::_handleWriteHistory(WithLock, OperationConte
}
bool SessionCatalogMigrationSource::_hasMoreOplogFromSessionCatalog() {
- stdx::lock_guard<stdx::mutex> _lk(_sessionCloneMutex);
+ stdx::lock_guard<Latch> _lk(_sessionCloneMutex);
return _lastFetchedOplog || !_lastFetchedOplogBuffer.empty() ||
!_sessionOplogIterators.empty() || _currentOplogIterator;
}
bool SessionCatalogMigrationSource::_fetchNextOplogFromSessionCatalog(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_sessionCloneMutex);
+ stdx::unique_lock<Latch> lk(_sessionCloneMutex);
if (!_lastFetchedOplogBuffer.empty()) {
_lastFetchedOplog = _lastFetchedOplogBuffer.back();
@@ -334,7 +334,7 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op
EntryAtOpTimeType entryAtOpTimeType;
{
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
if (_newWriteOpTimeList.empty()) {
_lastFetchedNewWriteOplog.reset();
@@ -369,7 +369,7 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op
}
{
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
_lastFetchedNewWriteOplog = newWriteOplogEntry;
_newWriteOpTimeList.pop_front();
}
@@ -379,7 +379,7 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op
void SessionCatalogMigrationSource::notifyNewWriteOpTime(repl::OpTime opTime,
EntryAtOpTimeType entryAtOpTimeType) {
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
_newWriteOpTimeList.emplace_back(opTime, entryAtOpTimeType);
if (_newOplogNotification) {
diff --git a/src/mongo/db/s/session_catalog_migration_source.h b/src/mongo/db/s/session_catalog_migration_source.h
index 06093d4c8e8..df0d9d80259 100644
--- a/src/mongo/db/s/session_catalog_migration_source.h
+++ b/src/mongo/db/s/session_catalog_migration_source.h
@@ -37,9 +37,9 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/session_txn_record_gen.h"
#include "mongo/db/transaction_history_iterator.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -231,7 +231,8 @@ private:
// Protects _sessionCatalogCursor, _sessionOplogIterators, _currentOplogIterator,
// _lastFetchedOplogBuffer, _lastFetchedOplog
- stdx::mutex _sessionCloneMutex;
+ Mutex _sessionCloneMutex =
+ MONGO_MAKE_LATCH("SessionCatalogMigrationSource::_sessionCloneMutex");
// List of remaining session records that need to be cloned.
std::vector<std::unique_ptr<SessionOplogIterator>> _sessionOplogIterators;
@@ -248,7 +249,7 @@ private:
boost::optional<repl::OplogEntry> _lastFetchedOplog;
// Protects _newWriteTsList, _lastFetchedNewWriteOplog, _state, _newOplogNotification
- stdx::mutex _newOplogMutex;
+ Mutex _newOplogMutex = MONGO_MAKE_LATCH("SessionCatalogMigrationSource::_newOplogMutex");
// Stores oplog opTime of new writes that are coming in.
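Worth noting above: the source keeps two separately named latches over disjoint fields, so draining the cloned session records and absorbing new incoming writes never contend on a single lock. The layout, reduced to a hypothetical standalone class with invented names:

    #include <deque>
    #include <vector>

    #include "mongo/platform/mutex.h"

    namespace mongo {

    class MigrationSourceSketch {
    private:
        // Guards only the clone-side buffer. It is never held while
        // _newWritesMutex is held, so there is no lock-ordering hazard.
        Mutex _cloneMutex = MONGO_MAKE_LATCH("MigrationSourceSketch::_cloneMutex");
        std::vector<int> _clonedEntries;

        // Guards only the incoming-writes queue.
        Mutex _newWritesMutex = MONGO_MAKE_LATCH("MigrationSourceSketch::_newWritesMutex");
        std::deque<int> _incomingOpTimes;
    };

    }  // namespace mongo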
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 78600e5d488..a974104d1f4 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -374,7 +374,7 @@ void ShardServerCatalogCacheLoader::notifyOfCollectionVersionUpdate(const Namesp
}
void ShardServerCatalogCacheLoader::initializeReplicaSetRole(bool isPrimary) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_role == ReplicaSetRole::None);
if (isPrimary) {
@@ -385,7 +385,7 @@ void ShardServerCatalogCacheLoader::initializeReplicaSetRole(bool isPrimary) {
}
void ShardServerCatalogCacheLoader::onStepDown() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_role != ReplicaSetRole::None);
_contexts.interrupt(ErrorCodes::PrimarySteppedDown);
++_term;
@@ -393,7 +393,7 @@ void ShardServerCatalogCacheLoader::onStepDown() {
}
void ShardServerCatalogCacheLoader::onStepUp() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_role != ReplicaSetRole::None);
++_term;
_role = ReplicaSetRole::Primary;
@@ -401,7 +401,7 @@ void ShardServerCatalogCacheLoader::onStepUp() {
void ShardServerCatalogCacheLoader::shutDown() {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (_inShutdown) {
return;
}
@@ -412,7 +412,7 @@ void ShardServerCatalogCacheLoader::shutDown() {
// Prevent further scheduling, then interrupt ongoing tasks.
_threadPool.shutdown();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_contexts.interrupt(ErrorCodes::InterruptedAtShutdown);
++_term;
}
@@ -430,7 +430,7 @@ std::shared_ptr<Notification<void>> ShardServerCatalogCacheLoader::getChunksSinc
bool isPrimary;
long long term;
std::tie(isPrimary, term) = [&] {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return std::make_tuple(_role == ReplicaSetRole::Primary, _term);
}();
@@ -446,7 +446,7 @@ std::shared_ptr<Notification<void>> ShardServerCatalogCacheLoader::getChunksSinc
// We may have missed an OperationContextGroup interrupt since this operation
// began but before the OperationContext was added to the group. So we'll check
// that we're still in the same _term.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
uassert(ErrorCodes::InterruptedDueToReplStateChange,
"Unable to refresh routing table because replica set state changed or "
"the node is shutting down.",
@@ -473,7 +473,7 @@ void ShardServerCatalogCacheLoader::getDatabase(
bool isPrimary;
long long term;
std::tie(isPrimary, term) = [&] {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return std::make_tuple(_role == ReplicaSetRole::Primary, _term);
}();
@@ -489,7 +489,7 @@ void ShardServerCatalogCacheLoader::getDatabase(
// We may have missed an OperationContextGroup interrupt since this operation began
// but before the OperationContext was added to the group. So we'll check that we're
// still in the same _term.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
uassert(ErrorCodes::InterruptedDueToReplStateChange,
"Unable to refresh database because replica set state changed or the node "
"is shutting down.",
@@ -509,7 +509,7 @@ void ShardServerCatalogCacheLoader::getDatabase(
void ShardServerCatalogCacheLoader::waitForCollectionFlush(OperationContext* opCtx,
const NamespaceString& nss) {
- stdx::unique_lock<stdx::mutex> lg(_mutex);
+ stdx::unique_lock<Latch> lg(_mutex);
const auto initialTerm = _term;
boost::optional<uint64_t> taskNumToWait;
@@ -560,7 +560,7 @@ void ShardServerCatalogCacheLoader::waitForCollectionFlush(OperationContext* opC
void ShardServerCatalogCacheLoader::waitForDatabaseFlush(OperationContext* opCtx,
StringData dbName) {
- stdx::unique_lock<stdx::mutex> lg(_mutex);
+ stdx::unique_lock<Latch> lg(_mutex);
const auto initialTerm = _term;
boost::optional<uint64_t> taskNumToWait;
@@ -636,7 +636,7 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
// Get the max version the loader has.
const ChunkVersion maxLoaderVersion = [&] {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto taskListIt = _collAndChunkTaskLists.find(nss);
if (taskListIt != _collAndChunkTaskLists.end() &&
@@ -707,7 +707,7 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
}
const auto termAfterRefresh = [&] {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _term;
}();
@@ -864,7 +864,7 @@ std::pair<bool, CollectionAndChangedChunks> ShardServerCatalogCacheLoader::_getE
const NamespaceString& nss,
const ChunkVersion& catalogCacheSinceVersion,
const long long term) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto taskListIt = _collAndChunkTaskLists.find(nss);
if (taskListIt == _collAndChunkTaskLists.end()) {
@@ -899,7 +899,7 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleCollAndChun
OperationContext* opCtx, const NamespaceString& nss, collAndChunkTask task) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto& list = _collAndChunkTaskLists[nss];
auto wasEmpty = list.empty();
@@ -921,7 +921,7 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleDbTask(Oper
DBTask task) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto& list = _dbTaskLists[dbName.toString()];
auto wasEmpty = list.empty();
@@ -955,7 +955,7 @@ void ShardServerCatalogCacheLoader::_runCollAndChunksTasks(const NamespaceString
}
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// If task completed successfully, remove it from work queue
if (taskFinished) {
@@ -977,7 +977,7 @@ void ShardServerCatalogCacheLoader::_runCollAndChunksTasks(const NamespaceString
<< " caller to refresh this namespace.";
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_collAndChunkTaskLists.erase(nss);
}
return;
@@ -1004,7 +1004,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
}
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// If task completed successfully, remove it from work queue
if (taskFinished) {
@@ -1026,7 +1026,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
<< " caller to refresh this namespace.";
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_dbTaskLists.erase(name);
}
return;
@@ -1039,7 +1039,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata(
OperationContext* opCtx, const NamespaceString& nss) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
const collAndChunkTask& task = _collAndChunkTaskLists[nss].front();
invariant(task.dropped || !task.collectionAndChangedChunks->changedChunks.empty());
@@ -1076,7 +1076,7 @@ void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata(
void ShardServerCatalogCacheLoader::_updatePersistedDbMetadata(OperationContext* opCtx,
StringData dbName) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
const DBTask& task = _dbTaskLists[dbName.toString()].front();
@@ -1241,7 +1241,7 @@ void ShardServerCatalogCacheLoader::DbTaskList::pop_front() {
}
void ShardServerCatalogCacheLoader::CollAndChunkTaskList::waitForActiveTaskCompletion(
- stdx::unique_lock<stdx::mutex>& lg) {
+ stdx::unique_lock<Latch>& lg) {
// Increase the use_count of the condition variable shared pointer, because the entire task list
// might get deleted during the unlocked interval
auto condVar = _activeTaskCompletedCondVar;
@@ -1249,7 +1249,7 @@ void ShardServerCatalogCacheLoader::CollAndChunkTaskList::waitForActiveTaskCompl
}
void ShardServerCatalogCacheLoader::DbTaskList::waitForActiveTaskCompletion(
- stdx::unique_lock<stdx::mutex>& lg) {
+ stdx::unique_lock<Latch>& lg) {
// Increase the use_count of the condition variable shared pointer, because the entire task list
// might get deleted during the unlocked interval
auto condVar = _activeTaskCompletedCondVar;
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.h b/src/mongo/db/s/shard_server_catalog_cache_loader.h
index 2a456c5a9ef..4cbdc31e3e0 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.h
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.h
@@ -31,8 +31,8 @@
#include "mongo/db/operation_context_group.h"
#include "mongo/db/s/namespace_metadata_change_notifications.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/s/catalog_cache_loader.h"
-#include "mongo/stdx/condition_variable.h"
#include "mongo/util/concurrency/thread_pool.h"
namespace mongo {
@@ -204,7 +204,7 @@ private:
* same task object on which it was called because it might have been deleted during the
* unlocked period.
*/
- void waitForActiveTaskCompletion(stdx::unique_lock<stdx::mutex>& lg);
+ void waitForActiveTaskCompletion(stdx::unique_lock<Latch>& lg);
/**
* Checks whether 'term' matches the term of the latest task in the task list. This is
@@ -314,7 +314,7 @@ private:
* same task object on which it was called because it might have been deleted during the
* unlocked period.
*/
- void waitForActiveTaskCompletion(stdx::unique_lock<stdx::mutex>& lg);
+ void waitForActiveTaskCompletion(stdx::unique_lock<Latch>& lg);
/**
* Checks whether 'term' matches the term of the latest task in the task list. This is
@@ -484,7 +484,7 @@ private:
NamespaceMetadataChangeNotifications _namespaceNotifications;
// Protects the class state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardServerCatalogCacheLoader::_mutex");
// True if shutDown was called.
bool _inShutdown{false};
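A pattern worth calling out from the loader above: snapshot _term under the latch, release it for the slow refresh, then retake the latch and verify the term is unchanged, since any step-up or step-down bumps _term. A condensed, hypothetical rendering of that idiom:

    #include "mongo/platform/mutex.h"
    #include "mongo/util/assert_util.h"

    namespace mongo {

    class TermCheckedLoaderSketch {
    public:
        void refreshOnce() {
            long long termAtStart;
            {
                stdx::lock_guard<Latch> lock(_mutex);
                termAtStart = _term;
            }  // latch released before any slow work

            _doSlowRefresh();  // hypothetical stand-in for the network/disk refresh

            stdx::lock_guard<Latch> lock(_mutex);
            uassert(ErrorCodes::InterruptedDueToReplStateChange,
                    "replica set state changed during refresh",
                    _term == termAtStart);  // a step-up/step-down bumps _term
        }

    private:
        void _doSlowRefresh() {}

        Mutex _mutex = MONGO_MAKE_LATCH("TermCheckedLoaderSketch::_mutex");
        long long _term = 0;
    };

    }  // namespace mongo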
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index 29f9f699848..46631bb89bd 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -313,7 +313,7 @@ void ShardingInitializationMongoD::initializeFromShardIdentity(
auto const shardingState = ShardingState::get(opCtx);
auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
- stdx::unique_lock<stdx::mutex> ul(_initSynchronizationMutex);
+ stdx::unique_lock<Latch> ul(_initSynchronizationMutex);
if (shardingState->enabled()) {
uassert(40371, "", shardingState->shardId() == shardIdentity.getShardName());
diff --git a/src/mongo/db/s/sharding_initialization_mongod.h b/src/mongo/db/s/sharding_initialization_mongod.h
index 241488ae3fe..2eaefd22fbe 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.h
+++ b/src/mongo/db/s/sharding_initialization_mongod.h
@@ -114,7 +114,8 @@ public:
private:
// This mutex ensures that only one thread at a time executes the sharding
// initialization/teardown sequence
- stdx::mutex _initSynchronizationMutex;
+ Mutex _initSynchronizationMutex =
+ MONGO_MAKE_LATCH("ShardingInitializationMongod::_initSynchronizationMutex");
// Function for initializing the sharding environment components (i.e. everything on the Grid)
ShardingEnvironmentInitFunc _initFunc;
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index b9c7e634a53..37e5f8930fa 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -57,7 +57,7 @@ ShardingState* ShardingState::get(OperationContext* operationContext) {
}
void ShardingState::setInitialized(ShardId shardId, OID clusterId) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
invariant(_getInitializationState() == InitializationState::kNew);
_shardId = std::move(shardId);
@@ -71,7 +71,7 @@ void ShardingState::setInitialized(Status failedStatus) {
invariant(!failedStatus.isOK());
log() << "Failed to initialize sharding components" << causedBy(failedStatus);
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
invariant(_getInitializationState() == InitializationState::kNew);
_initializationStatus = std::move(failedStatus);
@@ -79,7 +79,7 @@ void ShardingState::setInitialized(Status failedStatus) {
}
boost::optional<Status> ShardingState::initializationStatus() {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
if (_getInitializationState() == InitializationState::kNew)
return boost::none;
@@ -105,13 +105,13 @@ Status ShardingState::canAcceptShardedCommands() const {
ShardId ShardingState::shardId() {
invariant(enabled());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _shardId;
}
OID ShardingState::clusterId() {
invariant(enabled());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _clusterId;
}
diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h
index 4b78d0bdfb4..ab3430fb5ec 100644
--- a/src/mongo/db/s/sharding_state.h
+++ b/src/mongo/db/s/sharding_state.h
@@ -32,8 +32,8 @@
#include <string>
#include "mongo/bson/oid.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/shard_id.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -136,7 +136,7 @@ private:
}
// Protects state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardingState::_mutex");
// State of the initialization of the sharding state along with any potential errors
AtomicWord<unsigned> _initializationState{static_cast<uint32_t>(InitializationState::kNew)};
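The split above is deliberate: _initializationState stays an AtomicWord so enabled() can be answered without locking, while the latch covers the richer fields (_shardId, _clusterId, the initialization status). A hypothetical sketch of that hybrid, with invented names:

    #include <string>

    #include "mongo/platform/atomic_word.h"
    #include "mongo/platform/mutex.h"
    #include "mongo/util/assert_util.h"

    namespace mongo {

    class InitStateSketch {
    public:
        bool enabled() const {
            return _state.load() == 1;  // lock-free fast path, no latch needed
        }

        std::string shardName() {
            invariant(enabled());                // observe the atomic first...
            stdx::lock_guard<Latch> lk(_mutex);  // ...then take the latch for the rich state
            return _shardName;
        }

    private:
        AtomicWord<unsigned> _state{0};
        Mutex _mutex = MONGO_MAKE_LATCH("InitStateSketch::_mutex");
        std::string _shardName;
    };

    }  // namespace mongo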
diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp
index 7836c63ef62..3d3e392980a 100644
--- a/src/mongo/db/s/transaction_coordinator.cpp
+++ b/src/mongo/db/s/transaction_coordinator.cpp
@@ -139,7 +139,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// _participantsDurable (optional)
// Output: _participantsDurable = true
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_participants);
_step = Step::kWritingParticipantList;
@@ -166,7 +166,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
.thenRunOn(Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor())
.then([this] {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_participantsDurable = true;
}
@@ -177,7 +177,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// _decision (optional)
// Output: _decision is set
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_participantsDurable);
_step = Step::kWaitingForVotes;
@@ -195,7 +195,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
_serviceContext, *_sendPrepareScheduler, _lsid, _txnNumber, *_participants)
.then([this](PrepareVoteConsensus consensus) mutable {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_decision = consensus.decision();
}
@@ -218,7 +218,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// _decisionDurable (optional)
// Output: _decisionDurable = true
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_decision);
_step = Step::kWritingDecision;
@@ -242,7 +242,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
})
.then([this] {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_decisionDurable = true;
}
@@ -250,7 +250,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// Input: _decisionDurable
// Output: (none)
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_decisionDurable);
_step = Step::kWaitingForDecisionAcks;
@@ -291,7 +291,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// Do a best-effort attempt (i.e., writeConcern w:1) to delete the coordinator's durable
// state.
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_step = Step::kDeletingCoordinatorDoc;
@@ -364,7 +364,7 @@ void TransactionCoordinator::cancelIfCommitNotYetStarted() {
}
bool TransactionCoordinator::_reserveKickOffCommitPromise() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (_kickOffCommitPromiseSet)
return false;
@@ -385,7 +385,7 @@ void TransactionCoordinator::_done(Status status) {
LOG(3) << txn::txnIdToString(_lsid, _txnNumber) << " Two-phase commit completed with "
<< redact(status);
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
const auto tickSource = _serviceContext->getTickSource();
@@ -487,7 +487,7 @@ std::string TransactionCoordinator::_twoPhaseCommitInfoForLog(
}
TransactionCoordinator::Step TransactionCoordinator::getStep() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _step;
}
@@ -496,7 +496,7 @@ void TransactionCoordinator::reportState(BSONObjBuilder& parent) const {
TickSource* tickSource = _serviceContext->getTickSource();
TickSource::Tick currentTick = tickSource->getTicks();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
BSONObjBuilder lsidBuilder(doc.subobjStart("lsid"));
_lsid.serialize(&lsidBuilder);
@@ -543,7 +543,7 @@ std::string TransactionCoordinator::toString(Step step) const {
}
void TransactionCoordinator::_updateAssociatedClient(Client* client) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_transactionCoordinatorMetricsObserver->updateLastClientInfo(client);
}
diff --git a/src/mongo/db/s/transaction_coordinator.h b/src/mongo/db/s/transaction_coordinator.h
index 12005613f89..68745a3e540 100644
--- a/src/mongo/db/s/transaction_coordinator.h
+++ b/src/mongo/db/s/transaction_coordinator.h
@@ -166,7 +166,7 @@ private:
std::unique_ptr<txn::AsyncWorkScheduler> _sendPrepareScheduler;
// Protects the state below
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TransactionCoordinator::_mutex");
// Tracks which step of the 2PC coordination is currently (or was most recently) executing
Step _step{Step::kInactive};
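_mutex is declared mutable so const inspection methods such as getStep() and reportState() can still take the guard. The shape in isolation, with invented names:

    #include "mongo/platform/mutex.h"

    namespace mongo {

    class StepTrackerSketch {
    public:
        int getStep() const {
            stdx::lock_guard<Latch> lk(_mutex);  // legal in a const method: _mutex is mutable
            return _step;
        }

    private:
        mutable Mutex _mutex = MONGO_MAKE_LATCH("StepTrackerSketch::_mutex");
        int _step = 0;
    };

    }  // namespace mongo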
diff --git a/src/mongo/db/s/transaction_coordinator_catalog.cpp b/src/mongo/db/s/transaction_coordinator_catalog.cpp
index 5a5c029833b..fc0612515b2 100644
--- a/src/mongo/db/s/transaction_coordinator_catalog.cpp
+++ b/src/mongo/db/s/transaction_coordinator_catalog.cpp
@@ -52,14 +52,14 @@ void TransactionCoordinatorCatalog::exitStepUp(Status status) {
<< causedBy(status);
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_stepUpCompletionStatus);
_stepUpCompletionStatus = std::move(status);
_stepUpCompleteCV.notify_all();
}
void TransactionCoordinatorCatalog::onStepDown() {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
std::vector<std::shared_ptr<TransactionCoordinator>> coordinatorsToCancel;
for (auto&& [sessionId, coordinatorsForSession] : _coordinatorsBySession) {
@@ -83,7 +83,7 @@ void TransactionCoordinatorCatalog::insert(OperationContext* opCtx,
LOG(3) << "Inserting coordinator " << lsid.getId() << ':' << txnNumber
<< " into in-memory catalog";
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
if (!forStepUp) {
_waitForStepUpToComplete(ul, opCtx);
}
@@ -113,7 +113,7 @@ void TransactionCoordinatorCatalog::insert(OperationContext* opCtx,
std::shared_ptr<TransactionCoordinator> TransactionCoordinatorCatalog::get(
OperationContext* opCtx, const LogicalSessionId& lsid, TxnNumber txnNumber) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
_waitForStepUpToComplete(ul, opCtx);
std::shared_ptr<TransactionCoordinator> coordinatorToReturn;
@@ -133,7 +133,7 @@ std::shared_ptr<TransactionCoordinator> TransactionCoordinatorCatalog::get(
boost::optional<std::pair<TxnNumber, std::shared_ptr<TransactionCoordinator>>>
TransactionCoordinatorCatalog::getLatestOnSession(OperationContext* opCtx,
const LogicalSessionId& lsid) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
_waitForStepUpToComplete(ul, opCtx);
const auto& coordinatorsForSessionIter = _coordinatorsBySession.find(lsid);
@@ -156,7 +156,7 @@ void TransactionCoordinatorCatalog::_remove(const LogicalSessionId& lsid, TxnNum
LOG(3) << "Removing coordinator " << lsid.getId() << ':' << txnNumber
<< " from in-memory catalog";
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const auto& coordinatorsForSessionIter = _coordinatorsBySession.find(lsid);
@@ -181,7 +181,7 @@ void TransactionCoordinatorCatalog::_remove(const LogicalSessionId& lsid, TxnNum
}
void TransactionCoordinatorCatalog::join() {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
while (!_noActiveCoordinatorsCV.wait_for(
ul, stdx::chrono::seconds{5}, [this] { return _coordinatorsBySession.empty(); })) {
@@ -192,11 +192,11 @@ void TransactionCoordinatorCatalog::join() {
}
std::string TransactionCoordinatorCatalog::toString() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _toString(lk);
}
-void TransactionCoordinatorCatalog::_waitForStepUpToComplete(stdx::unique_lock<stdx::mutex>& lk,
+void TransactionCoordinatorCatalog::_waitForStepUpToComplete(stdx::unique_lock<Latch>& lk,
OperationContext* opCtx) {
invariant(lk.owns_lock());
opCtx->waitForConditionOrInterrupt(
@@ -219,7 +219,7 @@ std::string TransactionCoordinatorCatalog::_toString(WithLock wl) const {
}
void TransactionCoordinatorCatalog::filter(FilterPredicate predicate, FilterVisitor visitor) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto sessionIt = _coordinatorsBySession.begin(); sessionIt != _coordinatorsBySession.end();
++sessionIt) {
auto& lsid = sessionIt->first;
diff --git a/src/mongo/db/s/transaction_coordinator_catalog.h b/src/mongo/db/s/transaction_coordinator_catalog.h
index 5768c69bb3c..375fc33d1d9 100644
--- a/src/mongo/db/s/transaction_coordinator_catalog.h
+++ b/src/mongo/db/s/transaction_coordinator_catalog.h
@@ -33,7 +33,7 @@
#include <map>
#include "mongo/db/s/transaction_coordinator.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/concurrency/with_lock.h"
namespace mongo {
@@ -125,7 +125,7 @@ private:
* Blocks in an interruptible wait until the catalog is not marked as having a stepup in
* progress.
*/
- void _waitForStepUpToComplete(stdx::unique_lock<stdx::mutex>& lk, OperationContext* opCtx);
+ void _waitForStepUpToComplete(stdx::unique_lock<Latch>& lk, OperationContext* opCtx);
/**
* Removes the coordinator with the given session id and transaction number from the catalog, if
@@ -142,7 +142,7 @@ private:
std::string _toString(WithLock wl) const;
// Protects the state below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TransactionCoordinatorCatalog::_mutex");
// Contains TransactionCoordinator objects by session id and transaction number. May contain
// more than one coordinator per session. All coordinators for a session that do not correspond
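_waitForStepUpToComplete takes the stdx::unique_lock<Latch> by reference and runs the wait through the OperationContext, keeping it interruptible by killOp, stepdown, and shutdown. A hypothetical sketch of that calling convention, assuming waitForConditionOrInterrupt accepts the Latch-typed lock after this change (the test further down uses the NoAssert variant the same way):

    #include "mongo/db/operation_context.h"
    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"

    namespace mongo {

    // Hypothetical free function; 'flag' stands in for the step-up-complete state.
    void waitForFlag(OperationContext* opCtx,
                     stdx::condition_variable& cv,
                     stdx::unique_lock<Latch>& lk,
                     const bool& flag) {
        invariant(lk.owns_lock());  // the caller hands over the lock already held
        opCtx->waitForConditionOrInterrupt(cv, lk, [&] { return flag; });
    }

    }  // namespace mongo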
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.cpp b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
index 5d3cf3bfdd5..05061af7fbe 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
@@ -60,14 +60,14 @@ AsyncWorkScheduler::AsyncWorkScheduler(ServiceContext* serviceContext)
AsyncWorkScheduler::~AsyncWorkScheduler() {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_quiesced(lg));
}
if (!_parent)
return;
- stdx::lock_guard<stdx::mutex> lg(_parent->_mutex);
+ stdx::lock_guard<Latch> lg(_parent->_mutex);
_parent->_childSchedulers.erase(_itToRemove);
_parent->_notifyAllTasksComplete(lg);
_parent = nullptr;
@@ -129,7 +129,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
auto pf = makePromiseFuture<ResponseStatus>();
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
uassertStatusOK(_shutdownStatus);
auto scheduledCommandHandle =
@@ -157,7 +157,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
} else {
promise->setError([&] {
if (status == ErrorCodes::CallbackCanceled) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
return _shutdownStatus.isOK() ? status : _shutdownStatus;
}
return status;
@@ -172,7 +172,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
return std::move(pf.future).tapAll(
[this, it = std::move(it)](StatusWith<ResponseStatus> s) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
});
@@ -182,7 +182,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
std::unique_ptr<AsyncWorkScheduler> AsyncWorkScheduler::makeChildScheduler() {
auto child = std::make_unique<AsyncWorkScheduler>(_serviceContext);
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_shutdownStatus.isOK())
child->shutdown(_shutdownStatus);
@@ -195,7 +195,7 @@ std::unique_ptr<AsyncWorkScheduler> AsyncWorkScheduler::makeChildScheduler() {
void AsyncWorkScheduler::shutdown(Status status) {
invariant(!status.isOK());
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_shutdownStatus.isOK())
return;
@@ -216,7 +216,7 @@ void AsyncWorkScheduler::shutdown(Status status) {
}
void AsyncWorkScheduler::join() {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
_allListsEmptyCV.wait(ul, [&] {
return _activeOpContexts.empty() && _activeHandles.empty() && _childSchedulers.empty();
});
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.h b/src/mongo/db/s/transaction_coordinator_futures_util.h
index eb769319aad..a1f25c84744 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.h
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.h
@@ -78,7 +78,7 @@ public:
auto pf = makePromiseFuture<ReturnType>();
auto taskCompletionPromise = std::make_shared<Promise<ReturnType>>(std::move(pf.promise));
try {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
uassertStatusOK(_shutdownStatus);
auto scheduledWorkHandle = uassertStatusOK(_executor->scheduleWorkAt(
@@ -119,7 +119,7 @@ public:
return std::move(pf.future).tapAll(
[this, it = std::move(it)](StatusOrStatusWith<ReturnType> s) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
});
@@ -210,7 +210,7 @@ private:
ChildIteratorsList::iterator _itToRemove;
// Mutex to protect the shared state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AsyncWorkScheduler::_mutex");
// If shutdown() is called, this contains the first status that was passed to it and is an
// indication that no more operations can be scheduled
@@ -294,7 +294,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
* The first few fields have fixed values. *
******************************************************/
// Protects all state in the SharedBlock.
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("SharedBlock::mutex");
// If any response returns an error prior to a response setting shouldStopIteration to
// ShouldStopIteration::kYes, the promise will be set with that error rather than the global
@@ -332,7 +332,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
for (auto&& localFut : futures) {
std::move(localFut)
.then([sharedBlock](IndividualResult res) {
- stdx::unique_lock<stdx::mutex> lk(sharedBlock->mutex);
+ stdx::unique_lock<Latch> lk(sharedBlock->mutex);
if (sharedBlock->shouldStopIteration == ShouldStopIteration::kNo &&
sharedBlock->status.isOK()) {
sharedBlock->shouldStopIteration =
@@ -340,14 +340,14 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
}
})
.onError([sharedBlock](Status s) {
- stdx::unique_lock<stdx::mutex> lk(sharedBlock->mutex);
+ stdx::unique_lock<Latch> lk(sharedBlock->mutex);
if (sharedBlock->shouldStopIteration == ShouldStopIteration::kNo &&
sharedBlock->status.isOK()) {
sharedBlock->status = s;
}
})
.getAsync([sharedBlock](Status s) {
- stdx::unique_lock<stdx::mutex> lk(sharedBlock->mutex);
+ stdx::unique_lock<Latch> lk(sharedBlock->mutex);
sharedBlock->numOutstandingResponses--;
if (sharedBlock->numOutstandingResponses == 0) {
// Unlock before emplacing the result in case any continuations do expensive
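Every continuation in collect() funnels through sharedBlock->mutex, and the last getAsync callback to decrement numOutstandingResponses completes the result, unlocking first as the comment above explains. A hypothetical standalone reduction of that completion path; mutex, status, and numOutstandingResponses mirror the code above, the rest is invented:

    #include <memory>

    #include "mongo/platform/mutex.h"
    #include "mongo/util/future.h"

    namespace mongo {

    struct SharedBlockSketch {
        SharedBlockSketch(size_t n, Promise<void> p)
            : numOutstandingResponses(n), allDone(std::move(p)) {}

        Mutex mutex = MONGO_MAKE_LATCH("SharedBlockSketch::mutex");
        Status status = Status::OK();
        size_t numOutstandingResponses;
        Promise<void> allDone;  // fulfilled exactly once, by the last response
    };

    void onOneResponse(const std::shared_ptr<SharedBlockSketch>& sharedBlock, Status s) {
        stdx::unique_lock<Latch> lk(sharedBlock->mutex);
        if (!s.isOK() && sharedBlock->status.isOK()) {
            sharedBlock->status = s;  // remember only the first error
        }
        if (--sharedBlock->numOutstandingResponses > 0) {
            return;  // other responses still pending
        }
        lk.unlock();  // unlock before completing, in case continuations are expensive
        if (sharedBlock->status.isOK()) {
            sharedBlock->allDone.emplaceValue();
        } else {
            sharedBlock->allDone.setError(sharedBlock->status);
        }
    }

    }  // namespace mongo

A caller would obtain the promise from makePromiseFuture<void>() and register onOneResponse with each future's getAsync.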
diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp
index a8e72285cd6..3ac1212a468 100644
--- a/src/mongo/db/s/transaction_coordinator_service.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service.cpp
@@ -171,7 +171,7 @@ void TransactionCoordinatorService::onStepUp(OperationContext* opCtx,
Milliseconds recoveryDelayForTesting) {
joinPreviousRound();
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(!_catalogAndScheduler);
_catalogAndScheduler = std::make_shared<CatalogAndScheduler>(opCtx->getServiceContext());
@@ -234,7 +234,7 @@ void TransactionCoordinatorService::onStepUp(OperationContext* opCtx,
void TransactionCoordinatorService::onStepDown() {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_catalogAndScheduler)
return;
@@ -249,7 +249,7 @@ void TransactionCoordinatorService::onShardingInitialization(OperationContext* o
if (!isPrimary)
return;
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(!_catalogAndScheduler);
_catalogAndScheduler = std::make_shared<CatalogAndScheduler>(opCtx->getServiceContext());
@@ -260,7 +260,7 @@ void TransactionCoordinatorService::onShardingInitialization(OperationContext* o
std::shared_ptr<TransactionCoordinatorService::CatalogAndScheduler>
TransactionCoordinatorService::_getCatalogAndScheduler(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
uassert(
ErrorCodes::NotMaster, "Transaction coordinator is not a primary", _catalogAndScheduler);
diff --git a/src/mongo/db/s/transaction_coordinator_service.h b/src/mongo/db/s/transaction_coordinator_service.h
index c200809744f..a4fe1ce16f9 100644
--- a/src/mongo/db/s/transaction_coordinator_service.h
+++ b/src/mongo/db/s/transaction_coordinator_service.h
@@ -146,7 +146,7 @@ private:
std::shared_ptr<CatalogAndScheduler> _catalogAndSchedulerToCleanup;
// Protects the state below
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TransactionCoordinatorService::_mutex");
// The catalog + scheduler instantiated at the last step-up attempt. When nullptr, it means
// onStepUp has not been called yet after the last stepDown (or construction).
diff --git a/src/mongo/db/s/wait_for_majority_service.cpp b/src/mongo/db/s/wait_for_majority_service.cpp
index 0625a84b611..f41ed83c630 100644
--- a/src/mongo/db/s/wait_for_majority_service.cpp
+++ b/src/mongo/db/s/wait_for_majority_service.cpp
@@ -141,7 +141,7 @@ SharedSemiFuture<void> WaitForMajorityService::waitUntilMajority(const repl::OpT
void WaitForMajorityService::_periodicallyWaitForMajority(ServiceContext* service) {
ThreadClient tc("waitForMajority", service);
- stdx::unique_lock lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (!_inShutDown) {
auto opCtx = tc->makeOperationContext();
diff --git a/src/mongo/db/s/wait_for_majority_service.h b/src/mongo/db/s/wait_for_majority_service.h
index 970b475d0d3..90ec771bd40 100644
--- a/src/mongo/db/s/wait_for_majority_service.h
+++ b/src/mongo/db/s/wait_for_majority_service.h
@@ -36,7 +36,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/service_context.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/future.h"
@@ -74,7 +74,7 @@ private:
*/
void _periodicallyWaitForMajority(ServiceContext* service);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WaitForMaorityService::_mutex");
// Contains an ordered list of opTimes to wait to be majority comitted.
OpTimeWaitingMap _queuedOpTimes;
diff --git a/src/mongo/db/s/wait_for_majority_service_test.cpp b/src/mongo/db/s/wait_for_majority_service_test.cpp
index d904d253af1..ca89ac04c8b 100644
--- a/src/mongo/db/s/wait_for_majority_service_test.cpp
+++ b/src/mongo/db/s/wait_for_majority_service_test.cpp
@@ -32,7 +32,7 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/s/wait_for_majority_service.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -64,7 +64,7 @@ public:
}
void finishWaitingOneOpTime() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_isTestReady = true;
_isTestReadyCV.notify_one();
@@ -74,7 +74,7 @@ public:
}
Status waitForWriteConcernStub(OperationContext* opCtx, const repl::OpTime& opTime) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_waitForMajorityCallCount++;
_callCountChangedCV.notify_one();
@@ -97,7 +97,7 @@ public:
}
const repl::OpTime& getLastOpTimeWaited() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastOpTimeWaited;
}
@@ -109,7 +109,7 @@ public:
private:
WaitForMajorityService _waitForMajorityService;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WaitForMajorityServiceTest::_mutex");
stdx::condition_variable _isTestReadyCV;
stdx::condition_variable _finishWaitingOneOpTimeCV;
stdx::condition_variable _callCountChangedCV;
diff --git a/src/mongo/db/server_recovery.cpp b/src/mongo/db/server_recovery.cpp
index f7133127f40..c44515a3358 100644
--- a/src/mongo/db/server_recovery.cpp
+++ b/src/mongo/db/server_recovery.cpp
@@ -48,17 +48,17 @@ bool SizeRecoveryState::collectionNeedsSizeAdjustment(const std::string& ident)
}
bool SizeRecoveryState::collectionAlwaysNeedsSizeAdjustment(const std::string& ident) const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _collectionsAlwaysNeedingSizeAdjustment.count(ident) > 0;
}
void SizeRecoveryState::markCollectionAsAlwaysNeedsSizeAdjustment(const std::string& ident) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_collectionsAlwaysNeedingSizeAdjustment.insert(ident);
}
void SizeRecoveryState::clearStateBeforeRecovery() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_collectionsAlwaysNeedingSizeAdjustment.clear();
}
} // namespace mongo
diff --git a/src/mongo/db/server_recovery.h b/src/mongo/db/server_recovery.h
index fbd89f56360..3b9d87a8065 100644
--- a/src/mongo/db/server_recovery.h
+++ b/src/mongo/db/server_recovery.h
@@ -33,7 +33,7 @@
#include <string>
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
/**
@@ -81,7 +81,7 @@ public:
void clearStateBeforeRecovery();
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("SizeRecoveryState::_mutex");
std::set<std::string> _collectionsAlwaysNeedingSizeAdjustment;
};
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index 350e9eae02e..148c2ebbc83 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -52,6 +52,7 @@
#include "mongo/util/str.h"
#include "mongo/util/system_clock_source.h"
#include "mongo/util/system_tick_source.h"
+#include <iostream>
namespace mongo {
namespace {
@@ -96,7 +97,7 @@ ServiceContext::ServiceContext()
_preciseClockSource(std::make_unique<SystemClockSource>()) {}
ServiceContext::~ServiceContext() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& client : _clients) {
severe() << "Client " << client->desc() << " still exists while destroying ServiceContext@"
<< static_cast<void*>(this);
@@ -161,7 +162,7 @@ ServiceContext::UniqueClient ServiceContext::makeClient(std::string desc,
std::unique_ptr<Client> client(new Client(std::move(desc), this, std::move(session)));
onCreate(client.get(), _clientObservers);
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_clients.insert(client.get()).second);
}
return UniqueClient(client.release());
@@ -225,7 +226,7 @@ void ServiceContext::setServiceExecutor(std::unique_ptr<transport::ServiceExecut
void ServiceContext::ClientDeleter::operator()(Client* client) const {
ServiceContext* const service = client->getServiceContext();
{
- stdx::lock_guard<stdx::mutex> lk(service->_mutex);
+ stdx::lock_guard<Latch> lk(service->_mutex);
invariant(service->_clients.erase(client));
}
onDestroy(client, service->_clientObservers);
@@ -291,7 +292,7 @@ Client* ServiceContext::LockedClientsCursor::next() {
}
void ServiceContext::setKillAllOperations() {
- stdx::lock_guard<stdx::mutex> clientLock(_mutex);
+ stdx::lock_guard<Latch> clientLock(_mutex);
// Ensure that all newly created operation contexts will immediately be in the interrupted state
_globalKill.store(true);
@@ -332,17 +333,17 @@ void ServiceContext::unsetKillAllOperations() {
}
void ServiceContext::registerKillOpListener(KillOpListenerInterface* listener) {
- stdx::lock_guard<stdx::mutex> clientLock(_mutex);
+ stdx::lock_guard<Latch> clientLock(_mutex);
_killOpListeners.push_back(listener);
}
void ServiceContext::waitForStartupComplete() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_startupCompleteCondVar.wait(lk, [this] { return _startupComplete; });
}
void ServiceContext::notifyStartupComplete() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_startupComplete = true;
lk.unlock();
_startupCompleteCondVar.notify_all();
diff --git a/src/mongo/db/service_context.h b/src/mongo/db/service_context.h
index 0aa04389245..3d4fdd1609b 100644
--- a/src/mongo/db/service_context.h
+++ b/src/mongo/db/service_context.h
@@ -39,8 +39,8 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/transport/service_executor.h"
#include "mongo/transport/session.h"
@@ -50,6 +50,8 @@
#include "mongo/util/periodic_runner.h"
#include "mongo/util/tick_source.h"
+#include <iostream>
+
namespace mongo {
class AbstractMessagingPort;
@@ -163,7 +165,7 @@ public:
Client* next();
private:
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
ClientSet::const_iterator _curr;
ClientSet::const_iterator _end;
};
@@ -530,7 +532,7 @@ private:
std::unique_ptr<ClientObserver> _observer;
};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ServiceContext::_mutex");
/**
* The periodic runner.
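LockedClientsCursor stores the stdx::unique_lock<Latch> as a data member, so the client set stays locked for the cursor's whole lifetime rather than per next() call. A hypothetical reduction of that design, with the element type simplified to int:

    #include <set>

    #include "mongo/platform/mutex.h"

    namespace mongo {

    class LockedCursorSketch {
    public:
        LockedCursorSketch(Mutex& m, const std::set<int>& items)
            : _lock(m), _curr(items.begin()), _end(items.end()) {}  // takes the latch here

        const int* next() {
            return _curr == _end ? nullptr : &*_curr++;  // safe to iterate: _lock is still held
        }

    private:
        stdx::unique_lock<Latch> _lock;  // released automatically when the cursor is destroyed
        std::set<int>::const_iterator _curr;
        std::set<int>::const_iterator _end;
    };

    }  // namespace mongo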
diff --git a/src/mongo/db/service_context_test_fixture.cpp b/src/mongo/db/service_context_test_fixture.cpp
index bd422327e37..98bab228070 100644
--- a/src/mongo/db/service_context_test_fixture.cpp
+++ b/src/mongo/db/service_context_test_fixture.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/client.h"
#include "mongo/db/op_observer_registry.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/diagnostic_info.h"
namespace mongo {
diff --git a/src/mongo/db/service_liaison_mock.cpp b/src/mongo/db/service_liaison_mock.cpp
index ab4397f1980..f6c36f9eb51 100644
--- a/src/mongo/db/service_liaison_mock.cpp
+++ b/src/mongo/db/service_liaison_mock.cpp
@@ -43,12 +43,12 @@ MockServiceLiaisonImpl::MockServiceLiaisonImpl() {
}
LogicalSessionIdSet MockServiceLiaisonImpl::getActiveOpSessions() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _activeSessions;
}
LogicalSessionIdSet MockServiceLiaisonImpl::getOpenCursorSessions(OperationContext* opCtx) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _cursorSessions;
}
@@ -65,32 +65,32 @@ void MockServiceLiaisonImpl::scheduleJob(PeriodicRunner::PeriodicJob job) {
void MockServiceLiaisonImpl::addCursorSession(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cursorSessions.insert(std::move(lsid));
}
void MockServiceLiaisonImpl::removeCursorSession(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cursorSessions.erase(lsid);
}
void MockServiceLiaisonImpl::clearCursorSession() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cursorSessions.clear();
}
void MockServiceLiaisonImpl::add(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cursorSessions.insert(std::move(lsid));
}
void MockServiceLiaisonImpl::remove(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_activeSessions.erase(lsid);
}
void MockServiceLiaisonImpl::clear() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_activeSessions.clear();
}
diff --git a/src/mongo/db/service_liaison_mock.h b/src/mongo/db/service_liaison_mock.h
index 6d500ae5682..72512cbb95b 100644
--- a/src/mongo/db/service_liaison_mock.h
+++ b/src/mongo/db/service_liaison_mock.h
@@ -33,8 +33,8 @@
#include "mongo/db/service_liaison.h"
#include "mongo/executor/async_timer_mock.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
#include "mongo/util/time_support.h"
@@ -87,7 +87,7 @@ private:
boost::optional<SessionKiller::Matcher> _matcher;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MockServiceLiaisonImpl::_mutex");
LogicalSessionIdSet _activeSessions;
LogicalSessionIdSet _cursorSessions;
};
diff --git a/src/mongo/db/service_liaison_mongod.cpp b/src/mongo/db/service_liaison_mongod.cpp
index 94e1fbd9217..6e26c6f16e7 100644
--- a/src/mongo/db/service_liaison_mongod.cpp
+++ b/src/mongo/db/service_liaison_mongod.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/cursor_manager.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/service_liaison_mongod.h b/src/mongo/db/service_liaison_mongod.h
index b1060425f6f..3cf8864b5eb 100644
--- a/src/mongo/db/service_liaison_mongod.h
+++ b/src/mongo/db/service_liaison_mongod.h
@@ -69,7 +69,7 @@ protected:
*/
ServiceContext* _context() override;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ServiceLiaisonMongod::_mutex");
std::vector<PeriodicJobAnchor> _jobs;
};
diff --git a/src/mongo/db/service_liaison_mongos.cpp b/src/mongo/db/service_liaison_mongos.cpp
index 666ca06ea68..9abe73ea5c5 100644
--- a/src/mongo/db/service_liaison_mongos.cpp
+++ b/src/mongo/db/service_liaison_mongos.cpp
@@ -34,9 +34,9 @@
#include "mongo/db/service_liaison_mongos.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/grid.h"
#include "mongo/s/query/cluster_cursor_manager.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/service_liaison_mongos.h b/src/mongo/db/service_liaison_mongos.h
index ab40801557d..22fc7032d73 100644
--- a/src/mongo/db/service_liaison_mongos.h
+++ b/src/mongo/db/service_liaison_mongos.h
@@ -69,7 +69,7 @@ protected:
*/
ServiceContext* _context() override;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ServiceLiaisonMongos::_mutex");
std::vector<PeriodicJobAnchor> _jobs;
};
diff --git a/src/mongo/db/session_catalog.cpp b/src/mongo/db/session_catalog.cpp
index f3954651690..97fbff47f89 100644
--- a/src/mongo/db/session_catalog.cpp
+++ b/src/mongo/db/session_catalog.cpp
@@ -49,7 +49,7 @@ const auto operationSessionDecoration =
} // namespace
SessionCatalog::~SessionCatalog() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
for (const auto& entry : _sessions) {
ObservableSession session(lg, entry.second->session);
invariant(!session.currentOperation());
@@ -58,7 +58,7 @@ SessionCatalog::~SessionCatalog() {
}
void SessionCatalog::reset_forTest() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_sessions.clear();
}
@@ -79,7 +79,7 @@ SessionCatalog::ScopedCheckedOutSession SessionCatalog::_checkOutSession(Operati
invariant(!opCtx->lockState()->inAWriteUnitOfWork());
invariant(!opCtx->lockState()->isLocked());
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
auto sri = _getOrCreateSessionRuntimeInfo(ul, opCtx, *opCtx->getLogicalSessionId());
// Wait until the session is no longer checked out and until the previously scheduled kill has
@@ -106,7 +106,7 @@ SessionCatalog::SessionToKill SessionCatalog::checkOutSessionForKill(OperationCo
invariant(!operationSessionDecoration(opCtx));
invariant(!opCtx->getTxnNumber());
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
auto sri = _getOrCreateSessionRuntimeInfo(ul, opCtx, killToken.lsidToKill);
invariant(ObservableSession(ul, sri->session)._killed());
@@ -130,7 +130,7 @@ void SessionCatalog::scanSession(const LogicalSessionId& lsid,
std::unique_ptr<SessionRuntimeInfo> sessionToReap;
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto it = _sessions.find(lsid);
if (it != _sessions.end()) {
auto& sri = it->second;
@@ -151,7 +151,7 @@ void SessionCatalog::scanSessions(const SessionKiller::Matcher& matcher,
std::vector<std::unique_ptr<SessionRuntimeInfo>> sessionsToReap;
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
LOG(2) << "Beginning scanSessions. Scanning " << _sessions.size() << " sessions.";
@@ -173,7 +173,7 @@ void SessionCatalog::scanSessions(const SessionKiller::Matcher& matcher,
}
SessionCatalog::KillToken SessionCatalog::killSession(const LogicalSessionId& lsid) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto it = _sessions.find(lsid);
uassert(ErrorCodes::NoSuchSession, "Session not found", it != _sessions.end());
@@ -182,7 +182,7 @@ SessionCatalog::KillToken SessionCatalog::killSession(const LogicalSessionId& ls
}
size_t SessionCatalog::size() const {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
return _sessions.size();
}
@@ -198,7 +198,7 @@ SessionCatalog::SessionRuntimeInfo* SessionCatalog::_getOrCreateSessionRuntimeIn
void SessionCatalog::_releaseSession(SessionRuntimeInfo* sri,
boost::optional<KillToken> killToken) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
// Make sure we have exactly the same session on the map and that it is still associated with an
// operation context (meaning checked-out)
diff --git a/src/mongo/db/session_catalog.h b/src/mongo/db/session_catalog.h
index b9e5e98049d..ea5226915c7 100644
--- a/src/mongo/db/session_catalog.h
+++ b/src/mongo/db/session_catalog.h
@@ -37,8 +37,8 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/session.h"
#include "mongo/db/session_killer.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -153,7 +153,7 @@ private:
void _releaseSession(SessionRuntimeInfo* sri, boost::optional<KillToken> killToken);
// Protects the state below
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("SessionCatalog::_mutex");
// Owns the Session objects for all current Sessions.
SessionRuntimeInfoMap _sessions;
diff --git a/src/mongo/db/session_catalog_test.cpp b/src/mongo/db/session_catalog_test.cpp
index 2ef67c1f884..58e03aa20b4 100644
--- a/src/mongo/db/session_catalog_test.cpp
+++ b/src/mongo/db/session_catalog_test.cpp
@@ -600,9 +600,9 @@ TEST_F(SessionCatalogTestWithDefaultOpCtx, ConcurrentCheckOutAndKill) {
// The main thread won't check in the session until it's killed.
{
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cond;
- stdx::unique_lock<stdx::mutex> lock(m);
+ stdx::unique_lock<Latch> lock(m);
ASSERT_EQ(ErrorCodes::InternalError,
_opCtx->waitForConditionOrInterruptNoAssert(cond, lock));
}
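The test shows the block-scoped form: MONGO_MAKE_LATCH() with no argument produces an unnamed local latch (presumably identified by source location), held in an auto variable, and the same Latch-typed guards apply. A hypothetical snippet of the local idiom:

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"
    #include "mongo/stdx/chrono.h"

    namespace mongo {

    void waitBriefly() {
        auto m = MONGO_MAKE_LATCH();  // unnamed block-scoped latch
        stdx::condition_variable cond;
        stdx::unique_lock<Latch> lock(m);
        cond.wait_for(lock, stdx::chrono::seconds{1});  // nothing signals it; this times out
    }

    }  // namespace mongo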
diff --git a/src/mongo/db/session_killer.cpp b/src/mongo/db/session_killer.cpp
index 2f92bf6dbf2..c7acd2d074c 100644
--- a/src/mongo/db/session_killer.cpp
+++ b/src/mongo/db/session_killer.cpp
@@ -50,7 +50,7 @@ SessionKiller::SessionKiller(ServiceContext* sc, KillFunc killer)
Client::setCurrent(sc->makeClient("SessionKiller"));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// While we're not in shutdown
while (!_inShutdown) {
@@ -72,7 +72,7 @@ SessionKiller::SessionKiller(ServiceContext* sc, KillFunc killer)
SessionKiller::~SessionKiller() {
DESTRUCTOR_GUARD([&] {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
_killerCV.notify_one();
@@ -138,7 +138,7 @@ SessionKiller* SessionKiller::get(OperationContext* ctx) {
std::shared_ptr<SessionKiller::Result> SessionKiller::kill(
OperationContext* opCtx, const KillAllSessionsByPatternSet& toKill) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Save a shared_ptr to the current reapResults (i.e., the next thing to be killed).
auto reapResults = _reapResults;
@@ -164,7 +164,7 @@ std::shared_ptr<SessionKiller::Result> SessionKiller::kill(
return {reapResults.result, reapResults.result->get_ptr()};
}
-void SessionKiller::_periodicKill(OperationContext* opCtx, stdx::unique_lock<stdx::mutex>& lk) {
+void SessionKiller::_periodicKill(OperationContext* opCtx, stdx::unique_lock<Latch>& lk) {
// Pull our current workload onto the stack. Swap it for empties.
decltype(_nextToReap) nextToReap;
decltype(_reapResults) reapResults;
diff --git a/src/mongo/db/session_killer.h b/src/mongo/db/session_killer.h
index 44f58509d70..8e9cd89cdfa 100644
--- a/src/mongo/db/session_killer.h
+++ b/src/mongo/db/session_killer.h
@@ -37,8 +37,8 @@
#include "mongo/base/status_with.h"
#include "mongo/db/kill_sessions.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/util/net/hostandport.h"
@@ -125,13 +125,13 @@ private:
std::shared_ptr<boost::optional<Result>> result;
};
- void _periodicKill(OperationContext* opCtx, stdx::unique_lock<stdx::mutex>& lk);
+ void _periodicKill(OperationContext* opCtx, stdx::unique_lock<Latch>& lk);
KillFunc _killFunc;
stdx::thread _thread;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SessionKiller::_mutex");
stdx::condition_variable _callerCV;
stdx::condition_variable _killerCV;
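A hedged sketch of the two idioms this header now relies on: a named member latch, and a private helper that takes the already-held lock by reference so the locking requirement is visible in the signature (class and member names below are hypothetical):

class Reaper {
public:
    void reap() {
        stdx::unique_lock<Latch> lk(_mutex);
        _reapAll(lk);  // the lock stays held across the call
    }

private:
    void _reapAll(stdx::unique_lock<Latch>& lk) {
        invariant(lk.owns_lock());
        // ... drain work queues under _mutex ...
    }

    Mutex _mutex = MONGO_MAKE_LATCH("Reaper::_mutex");
};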
diff --git a/src/mongo/db/sessions_collection_config_server.cpp b/src/mongo/db/sessions_collection_config_server.cpp
index 63e16f14321..6e4e8fbc29b 100644
--- a/src/mongo/db/sessions_collection_config_server.cpp
+++ b/src/mongo/db/sessions_collection_config_server.cpp
@@ -96,7 +96,7 @@ Status SessionsCollectionConfigServer::setupSessionsCollection(OperationContext*
return {ErrorCodes::ShardingStateNotInitialized, "sharding state is not yet initialized"};
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
{
auto res = _shardCollectionIfNeeded(opCtx);
if (!res.isOK()) {
diff --git a/src/mongo/db/sessions_collection_config_server.h b/src/mongo/db/sessions_collection_config_server.h
index bdfac76abff..701d055772a 100644
--- a/src/mongo/db/sessions_collection_config_server.h
+++ b/src/mongo/db/sessions_collection_config_server.h
@@ -33,7 +33,7 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/sessions_collection_sharded.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -64,7 +64,7 @@ private:
Status _shardCollectionIfNeeded(OperationContext* opCtx);
Status _generateIndexesIfNeeded(OperationContext* opCtx);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SessionsCollectionConfigServer::_mutex");
};
} // namespace mongo
diff --git a/src/mongo/db/sessions_collection_mock.cpp b/src/mongo/db/sessions_collection_mock.cpp
index 33e75ab842c..00992744589 100644
--- a/src/mongo/db/sessions_collection_mock.cpp
+++ b/src/mongo/db/sessions_collection_mock.cpp
@@ -60,22 +60,22 @@ Status MockSessionsCollectionImpl::removeRecords(const LogicalSessionIdSet& sess
}
void MockSessionsCollectionImpl::add(LogicalSessionRecord record) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sessions.insert({record.getId(), std::move(record)});
}
void MockSessionsCollectionImpl::remove(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sessions.erase(lsid);
}
bool MockSessionsCollectionImpl::has(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _sessions.find(lsid) != _sessions.end();
}
void MockSessionsCollectionImpl::clearSessions() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sessions.clear();
}
@@ -93,7 +93,7 @@ Status MockSessionsCollectionImpl::_refreshSessions(const LogicalSessionRecordSe
}
Status MockSessionsCollectionImpl::_removeRecords(const LogicalSessionIdSet& sessions) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
for (auto& lsid : sessions) {
_sessions.erase(lsid);
}
@@ -104,7 +104,7 @@ Status MockSessionsCollectionImpl::_removeRecords(const LogicalSessionIdSet& ses
StatusWith<LogicalSessionIdSet> MockSessionsCollectionImpl::findRemovedSessions(
OperationContext* opCtx, const LogicalSessionIdSet& sessions) {
LogicalSessionIdSet lsids;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
for (auto& lsid : sessions) {
if (_sessions.find(lsid) == _sessions.end()) {
lsids.emplace(lsid);
diff --git a/src/mongo/db/sessions_collection_mock.h b/src/mongo/db/sessions_collection_mock.h
index a31a4f7fc5a..da6477692fa 100644
--- a/src/mongo/db/sessions_collection_mock.h
+++ b/src/mongo/db/sessions_collection_mock.h
@@ -33,7 +33,7 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/sessions_collection.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
@@ -89,7 +89,7 @@ private:
Status _refreshSessions(const LogicalSessionRecordSet& sessions);
Status _removeRecords(const LogicalSessionIdSet& sessions);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MockSessionsCollectionImpl::_mutex");
SessionMap _sessions;
RefreshHook _refresh;
diff --git a/src/mongo/db/sessions_collection_rs.h b/src/mongo/db/sessions_collection_rs.h
index d073969cdc2..0d52d3f52e4 100644
--- a/src/mongo/db/sessions_collection_rs.h
+++ b/src/mongo/db/sessions_collection_rs.h
@@ -37,7 +37,7 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/sessions_collection.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -108,7 +108,7 @@ private:
LocalCallback&& localCallback,
RemoteCallback&& remoteCallback);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SessionsCollectionRS::_mutex");
std::unique_ptr<RemoteCommandTargeter> _targeter;
};
diff --git a/src/mongo/db/snapshot_window_util.cpp b/src/mongo/db/snapshot_window_util.cpp
index 05a46b42e13..c06bb078d5e 100644
--- a/src/mongo/db/snapshot_window_util.cpp
+++ b/src/mongo/db/snapshot_window_util.cpp
@@ -38,7 +38,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/snapshot_window_options.h"
#include "mongo/db/storage/storage_engine.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
@@ -55,7 +55,7 @@ namespace SnapshotWindowUtil {
// another, since they act on and modify the same storage parameters. Further guards the static
// variables "_snapshotWindowLastDecreasedAt" and "_snapshotWindowLastIncreasedAt" used in
// increaseTargetSnapshotWindowSize() and decreaseTargetSnapshotWindowSize().
-stdx::mutex snapshotWindowMutex;
+Mutex snapshotWindowMutex = MONGO_MAKE_LATCH("snapshotWindowMutex");
namespace {
@@ -92,7 +92,7 @@ void increaseTargetSnapshotWindowSize(OperationContext* opCtx) {
return;
}
- stdx::unique_lock<stdx::mutex> lock(snapshotWindowMutex);
+ stdx::unique_lock<Latch> lock(snapshotWindowMutex);
// Tracks the last time the snapshot window was increased so that it does not grow faster than
// the storage engine can improve snapshot availability.
@@ -150,7 +150,7 @@ void decreaseTargetSnapshotWindowSize(OperationContext* opCtx) {
return;
}
- stdx::unique_lock<stdx::mutex> lock(snapshotWindowMutex);
+ stdx::unique_lock<Latch> lock(snapshotWindowMutex);
StorageEngine* engine = opCtx->getServiceContext()->getStorageEngine();
if (engine && engine->isCacheUnderPressure(opCtx)) {
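A minimal sketch of the file-scope pattern above, assuming the same platform headers: one named latch serializes both adjusters so the function-local statics they share are only read and written under the lock (all names below are illustrative):

Mutex exampleWindowMutex = MONGO_MAKE_LATCH("exampleWindowMutex");

void adjustTargetWindow() {
    stdx::unique_lock<Latch> lock(exampleWindowMutex);
    static Date_t lastAdjustedAt;  // guarded by exampleWindowMutex
    lastAdjustedAt = Date_t::now();
    // ... adjust the target window here ...
}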
diff --git a/src/mongo/db/stats/server_write_concern_metrics.cpp b/src/mongo/db/stats/server_write_concern_metrics.cpp
index c36431ca3f3..bfc14025d73 100644
--- a/src/mongo/db/stats/server_write_concern_metrics.cpp
+++ b/src/mongo/db/stats/server_write_concern_metrics.cpp
@@ -58,7 +58,7 @@ void ServerWriteConcernMetrics::recordWriteConcernForInserts(
return;
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_insertMetrics.recordWriteConcern(writeConcernOptions, numInserts);
}
@@ -68,7 +68,7 @@ void ServerWriteConcernMetrics::recordWriteConcernForUpdate(
return;
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_updateMetrics.recordWriteConcern(writeConcernOptions);
}
@@ -78,7 +78,7 @@ void ServerWriteConcernMetrics::recordWriteConcernForDelete(
return;
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_deleteMetrics.recordWriteConcern(writeConcernOptions);
}
@@ -87,7 +87,7 @@ BSONObj ServerWriteConcernMetrics::toBSON() const {
return BSONObj();
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
BSONObjBuilder builder;
diff --git a/src/mongo/db/stats/server_write_concern_metrics.h b/src/mongo/db/stats/server_write_concern_metrics.h
index 524c4fce917..b1e17f53e38 100644
--- a/src/mongo/db/stats/server_write_concern_metrics.h
+++ b/src/mongo/db/stats/server_write_concern_metrics.h
@@ -97,7 +97,7 @@ private:
StringMap<std::uint64_t> wTagCounts;
};
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ServerWriteConcernMetrics::_mutex");
WriteConcernMetricsForOperationType _insertMetrics;
WriteConcernMetricsForOperationType _updateMetrics;
WriteConcernMetricsForOperationType _deleteMetrics;
diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine.cpp b/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
index 977ba1d6b52..370ececd244 100644
--- a/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
+++ b/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
@@ -94,7 +94,7 @@ std::unique_ptr<mongo::RecordStore> KVEngine::getRecordStore(OperationContext* o
}
bool KVEngine::trySwapMaster(StringStore& newMaster, uint64_t version) {
- stdx::lock_guard<stdx::mutex> lock(_masterLock);
+ stdx::lock_guard<Latch> lock(_masterLock);
invariant(!newMaster.hasBranch() && !_master.hasBranch());
if (_masterVersion != version)
return false;
diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine.h b/src/mongo/db/storage/biggie/biggie_kv_engine.h
index 97c836b523a..a9a3582cfdd 100644
--- a/src/mongo/db/storage/biggie/biggie_kv_engine.h
+++ b/src/mongo/db/storage/biggie/biggie_kv_engine.h
@@ -154,7 +154,7 @@ public:
* Returns a pair of the current version and copy of tree of the master.
*/
std::pair<uint64_t, StringStore> getMasterInfo() {
- stdx::lock_guard<stdx::mutex> lock(_masterLock);
+ stdx::lock_guard<Latch> lock(_masterLock);
return std::make_pair(_masterVersion, _master);
}
@@ -170,7 +170,7 @@ private:
std::map<std::string, bool> _idents; // TODO : replace with a query to _master.
std::unique_ptr<VisibilityManager> _visibilityManager;
- mutable stdx::mutex _masterLock;
+ mutable Mutex _masterLock = MONGO_MAKE_LATCH("KVEngine::_masterLock");
StringStore _master;
uint64_t _masterVersion = 0;
};
diff --git a/src/mongo/db/storage/biggie/biggie_record_store.cpp b/src/mongo/db/storage/biggie/biggie_record_store.cpp
index 4c47df9cd7b..8e2aadc7041 100644
--- a/src/mongo/db/storage/biggie/biggie_record_store.cpp
+++ b/src/mongo/db/storage/biggie/biggie_record_store.cpp
@@ -120,7 +120,7 @@ bool RecordStore::isCapped() const {
}
void RecordStore::setCappedCallback(CappedCallback* cb) {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
_cappedCallback = cb;
}
@@ -264,7 +264,7 @@ void RecordStore::cappedTruncateAfter(OperationContext* opCtx, RecordId end, boo
auto endIt = workingCopy->upper_bound(_postfix);
while (recordIt != endIt) {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
if (_cappedCallback) {
// Documents are guaranteed to have a RecordId at the end of the KeyString, unlike
// unique indexes.
@@ -357,11 +357,11 @@ void RecordStore::_cappedDeleteAsNeeded(OperationContext* opCtx, StringStore* wo
auto recordIt = workingCopy->lower_bound(_prefix);
// Ensure only one thread at a time can do deletes; otherwise they'll conflict.
- stdx::lock_guard<stdx::mutex> cappedDeleterLock(_cappedDeleterMutex);
+ stdx::lock_guard<Latch> cappedDeleterLock(_cappedDeleterMutex);
while (_cappedAndNeedDelete(opCtx, workingCopy)) {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
RecordId rid = RecordId(extractRecordId(recordIt->first));
if (_isOplog && _visibilityManager->isFirstHidden(rid)) {
diff --git a/src/mongo/db/storage/biggie/biggie_record_store.h b/src/mongo/db/storage/biggie/biggie_record_store.h
index e8dee66da1c..005d49ee293 100644
--- a/src/mongo/db/storage/biggie/biggie_record_store.h
+++ b/src/mongo/db/storage/biggie/biggie_record_store.h
@@ -38,7 +38,7 @@
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace biggie {
@@ -138,10 +138,11 @@ private:
std::string _prefix;
std::string _postfix;
- mutable stdx::mutex _cappedCallbackMutex; // Guards _cappedCallback
+ mutable Mutex _cappedCallbackMutex =
+ MONGO_MAKE_LATCH("RecordStore::_cappedCallbackMutex"); // Guards _cappedCallback
CappedCallback* _cappedCallback;
- mutable stdx::mutex _cappedDeleterMutex;
+ mutable Mutex _cappedDeleterMutex = MONGO_MAKE_LATCH("RecordStore::_cappedDeleterMutex");
AtomicWord<long long> _highestRecordId{1};
AtomicWord<long long> _numRecords{0};
diff --git a/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp b/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp
index d9921bc6472..94a869727b0 100644
--- a/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp
+++ b/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp
@@ -56,7 +56,7 @@ public:
virtual void rollback() {
_visibilityManager->dealtWithRecord(_rid);
- stdx::lock_guard<stdx::mutex> lk(_rs->_cappedCallbackMutex);
+ stdx::lock_guard<Latch> lk(_rs->_cappedCallbackMutex);
if (_rs->_cappedCallback)
_rs->_cappedCallback->notifyCappedWaitersIfNeeded();
}
@@ -68,7 +68,7 @@ private:
};
void VisibilityManager::dealtWithRecord(RecordId rid) {
- stdx::lock_guard<stdx::mutex> lock(_stateLock);
+ stdx::lock_guard<Latch> lock(_stateLock);
_uncommittedRecords.erase(rid);
_opsBecameVisibleCV.notify_all();
}
@@ -76,7 +76,7 @@ void VisibilityManager::dealtWithRecord(RecordId rid) {
void VisibilityManager::addUncommittedRecord(OperationContext* opCtx,
RecordStore* rs,
RecordId rid) {
- stdx::lock_guard<stdx::mutex> lock(_stateLock);
+ stdx::lock_guard<Latch> lock(_stateLock);
_uncommittedRecords.insert(rid);
opCtx->recoveryUnit()->registerChange(std::make_unique<VisibilityManagerChange>(this, rs, rid));
@@ -85,13 +85,13 @@ void VisibilityManager::addUncommittedRecord(OperationContext* opCtx,
}
RecordId VisibilityManager::getAllCommittedRecord() {
- stdx::lock_guard<stdx::mutex> lock(_stateLock);
+ stdx::lock_guard<Latch> lock(_stateLock);
return _uncommittedRecords.empty() ? _highestSeen
: RecordId(_uncommittedRecords.begin()->repr() - 1);
}
bool VisibilityManager::isFirstHidden(RecordId rid) {
- stdx::lock_guard<stdx::mutex> lock(_stateLock);
+ stdx::lock_guard<Latch> lock(_stateLock);
if (_uncommittedRecords.empty())
return false;
return *_uncommittedRecords.begin() == rid;
@@ -100,7 +100,7 @@ bool VisibilityManager::isFirstHidden(RecordId rid) {
void VisibilityManager::waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) {
invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork());
- stdx::unique_lock<stdx::mutex> lock(_stateLock);
+ stdx::unique_lock<Latch> lock(_stateLock);
const RecordId waitFor = _highestSeen;
opCtx->waitForConditionOrInterrupt(_opsBecameVisibleCV, lock, [&] {
return _uncommittedRecords.empty() || *_uncommittedRecords.begin() > waitFor;
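The wait above follows a common shape: take the latch, snapshot the visibility horizon, then block on an interruptible predicate wait. A hedged, free-standing sketch (the parameter names are illustrative; the real method works on VisibilityManager's members):

void waitForVisible(OperationContext* opCtx,
                    Mutex& stateLock,
                    stdx::condition_variable& opsBecameVisibleCV,
                    const RecordId& highestSeen,
                    const std::set<RecordId>& uncommitted) {
    stdx::unique_lock<Latch> lock(stateLock);
    const RecordId waitFor = highestSeen;  // snapshot under the lock
    opCtx->waitForConditionOrInterrupt(opsBecameVisibleCV, lock, [&] {
        return uncommitted.empty() || *uncommitted.begin() > waitFor;
    });
}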
diff --git a/src/mongo/db/storage/biggie/biggie_visibility_manager.h b/src/mongo/db/storage/biggie/biggie_visibility_manager.h
index 387b7edc0d0..8370ba0c990 100644
--- a/src/mongo/db/storage/biggie/biggie_visibility_manager.h
+++ b/src/mongo/db/storage/biggie/biggie_visibility_manager.h
@@ -31,7 +31,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/record_id.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
@@ -76,7 +76,8 @@ public:
void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx);
private:
- mutable stdx::mutex _stateLock; // Protects the values below.
+ mutable Mutex _stateLock =
+ MONGO_MAKE_LATCH("VisibilityManager::_stateLock"); // Protects the values below.
RecordId _highestSeen = RecordId();
// Used to wait for all earlier oplog writes to be visible.
diff --git a/src/mongo/db/storage/durable_catalog_impl.cpp b/src/mongo/db/storage/durable_catalog_impl.cpp
index fc88ca957ff..0bc79d049ba 100644
--- a/src/mongo/db/storage/durable_catalog_impl.cpp
+++ b/src/mongo/db/storage/durable_catalog_impl.cpp
@@ -151,7 +151,7 @@ public:
virtual void commit(boost::optional<Timestamp>) {}
virtual void rollback() {
- stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock);
+ stdx::lock_guard<Latch> lk(_catalog->_identsLock);
_catalog->_idents.erase(_ident);
}
@@ -166,7 +166,7 @@ public:
virtual void commit(boost::optional<Timestamp>) {}
virtual void rollback() {
- stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock);
+ stdx::lock_guard<Latch> lk(_catalog->_identsLock);
_catalog->_idents[_ident] = _entry;
}
@@ -471,7 +471,7 @@ void DurableCatalogImpl::init(OperationContext* opCtx) {
}
std::vector<NamespaceString> DurableCatalogImpl::getAllCollections() const {
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
std::vector<NamespaceString> result;
for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) {
result.push_back(NamespaceString(it->first));
@@ -487,7 +487,7 @@ Status DurableCatalogImpl::_addEntry(OperationContext* opCtx,
const string ident = _newUniqueIdent(nss, "collection");
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
Entry& old = _idents[nss.toString()];
if (!old.ident.empty()) {
return Status(ErrorCodes::NamespaceExists, "collection already exists");
@@ -517,7 +517,7 @@ Status DurableCatalogImpl::_addEntry(OperationContext* opCtx,
}
std::string DurableCatalogImpl::getCollectionIdent(const NamespaceString& nss) const {
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
NSToIdentMap::const_iterator it = _idents.find(nss.toString());
invariant(it != _idents.end());
return it->second.ident;
@@ -536,7 +536,7 @@ BSONObj DurableCatalogImpl::_findEntry(OperationContext* opCtx,
RecordId* out) const {
RecordId dl;
{
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
NSToIdentMap::const_iterator it = _idents.find(nss.toString());
invariant(it != _idents.end(), str::stream() << "Did not find collection. Ns: " << nss);
dl = it->second.storedLoc;
@@ -652,7 +652,7 @@ Status DurableCatalogImpl::_replaceEntry(OperationContext* opCtx,
fassert(28522, status);
}
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
const NSToIdentMap::iterator fromIt = _idents.find(fromNss.toString());
invariant(fromIt != _idents.end());
@@ -673,7 +673,7 @@ Status DurableCatalogImpl::_replaceEntry(OperationContext* opCtx,
Status DurableCatalogImpl::_removeEntry(OperationContext* opCtx, const NamespaceString& nss) {
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
const NSToIdentMap::iterator it = _idents.find(nss.toString());
if (it == _idents.end()) {
return Status(ErrorCodes::NamespaceNotFound, "collection not found");
@@ -693,7 +693,7 @@ std::vector<std::string> DurableCatalogImpl::getAllIdentsForDB(StringData db) co
std::vector<std::string> v;
{
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) {
NamespaceString ns(it->first);
if (ns.db() != db)
@@ -761,7 +761,7 @@ StatusWith<std::string> DurableCatalogImpl::newOrphanedIdent(OperationContext* o
NamespaceString::kOrphanCollectionPrefix + identNs)
.ns();
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
Entry& old = _idents[ns];
if (!old.ident.empty()) {
return Status(ErrorCodes::NamespaceExists,
diff --git a/src/mongo/db/storage/durable_catalog_impl.h b/src/mongo/db/storage/durable_catalog_impl.h
index b7683e9da68..f99ff41da5b 100644
--- a/src/mongo/db/storage/durable_catalog_impl.h
+++ b/src/mongo/db/storage/durable_catalog_impl.h
@@ -40,7 +40,7 @@
#include "mongo/db/storage/bson_collection_catalog_entry.h"
#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/kv/kv_prefix.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -261,7 +261,7 @@ private:
};
typedef std::map<std::string, Entry> NSToIdentMap;
NSToIdentMap _idents;
- mutable stdx::mutex _identsLock;
+ mutable Mutex _identsLock = MONGO_MAKE_LATCH("DurableCatalogImpl::_identsLock");
// Manages the feature document that may be present in the DurableCatalogImpl. '_featureTracker'
// is guaranteed to be non-null after DurableCatalogImpl::init() is called.
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
index 1f689ddd607..597bc513d20 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
@@ -43,7 +43,7 @@ namespace mongo {
RecoveryUnit* EphemeralForTestEngine::newRecoveryUnit() {
return new EphemeralForTestRecoveryUnit([this]() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
JournalListener::Token token = _journalListener->getToken();
_journalListener->onDurable(token);
});
@@ -55,14 +55,14 @@ Status EphemeralForTestEngine::createRecordStore(OperationContext* opCtx,
const CollectionOptions& options) {
// Register the ident in the `_dataMap` (for `getAllIdents`). Remainder of work done in
// `getRecordStore`.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dataMap[ident] = {};
return Status::OK();
}
std::unique_ptr<RecordStore> EphemeralForTestEngine::getRecordStore(
OperationContext* opCtx, StringData ns, StringData ident, const CollectionOptions& options) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (options.capped) {
return std::make_unique<EphemeralForTestRecordStore>(
ns,
@@ -77,7 +77,7 @@ std::unique_ptr<RecordStore> EphemeralForTestEngine::getRecordStore(
std::unique_ptr<RecordStore> EphemeralForTestEngine::makeTemporaryRecordStore(
OperationContext* opCtx, StringData ident) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dataMap[ident] = {};
return std::make_unique<EphemeralForTestRecordStore>(ident, &_dataMap[ident]);
}
@@ -88,14 +88,14 @@ Status EphemeralForTestEngine::createSortedDataInterface(OperationContext* opCtx
const IndexDescriptor* desc) {
// Register the ident in `_dataMap` (for `getAllIdents`). Remainder of work done in
// `getSortedDataInterface`.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dataMap[ident] = {};
return Status::OK();
}
std::unique_ptr<SortedDataInterface> EphemeralForTestEngine::getSortedDataInterface(
OperationContext* opCtx, StringData ident, const IndexDescriptor* desc) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return getEphemeralForTestBtreeImpl(Ordering::make(desc->keyPattern()),
desc->unique(),
desc->parentNS(),
@@ -105,7 +105,7 @@ std::unique_ptr<SortedDataInterface> EphemeralForTestEngine::getSortedDataInterf
}
Status EphemeralForTestEngine::dropIdent(OperationContext* opCtx, StringData ident) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dataMap.erase(ident);
return Status::OK();
}
@@ -117,7 +117,7 @@ int64_t EphemeralForTestEngine::getIdentSize(OperationContext* opCtx, StringData
std::vector<std::string> EphemeralForTestEngine::getAllIdents(OperationContext* opCtx) const {
std::vector<std::string> all;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (DataMap::const_iterator it = _dataMap.begin(); it != _dataMap.end(); ++it) {
all.push_back(it->first);
}
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
index a083f9f3a4b..b51b285ef2c 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
@@ -33,7 +33,7 @@
#include "mongo/db/storage/journal_listener.h"
#include "mongo/db/storage/kv/kv_engine.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/string_map.h"
namespace mongo {
@@ -102,13 +102,12 @@ public:
virtual bool hasIdent(OperationContext* opCtx, StringData ident) const {
return _dataMap.find(ident) != _dataMap.end();
- ;
}
std::vector<std::string> getAllIdents(OperationContext* opCtx) const;
void setJournalListener(JournalListener* jl) final {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_journalListener = jl;
}
@@ -127,7 +126,7 @@ public:
private:
typedef StringMap<std::shared_ptr<void>> DataMap;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("EphemeralForTestEngine::_mutex");
DataMap _dataMap; // All actual data is owned in here
// Notified when we write as everything is considered "journalled" since repl depends on it.
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
index 2fdbaaa579e..3bd7ffb0ce5 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
@@ -35,7 +35,7 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
diff --git a/src/mongo/db/storage/flow_control.cpp b/src/mongo/db/storage/flow_control.cpp
index ac010f891ae..c6976fd77b2 100644
--- a/src/mongo/db/storage/flow_control.cpp
+++ b/src/mongo/db/storage/flow_control.cpp
@@ -172,7 +172,7 @@ double FlowControl::_getLocksPerOp() {
Sample backOne;
std::size_t numSamples;
{
- stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex);
+ stdx::lock_guard<Latch> lk(_sampledOpsMutex);
numSamples = _sampledOpsApplied.size();
if (numSamples >= 2) {
backTwo = _sampledOpsApplied[numSamples - 2];
@@ -399,7 +399,7 @@ std::int64_t FlowControl::_approximateOpsBetween(Timestamp prevTs, Timestamp cur
std::int64_t prevApplied = -1;
std::int64_t currApplied = -1;
- stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex);
+ stdx::lock_guard<Latch> lk(_sampledOpsMutex);
for (auto&& sample : _sampledOpsApplied) {
if (prevApplied == -1 && prevTs.asULL() <= std::get<0>(sample)) {
prevApplied = std::get<1>(sample);
@@ -427,7 +427,7 @@ void FlowControl::sample(Timestamp timestamp, std::uint64_t opsApplied) {
return;
}
- stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex);
+ stdx::lock_guard<Latch> lk(_sampledOpsMutex);
_numOpsSinceStartup += opsApplied;
if (_numOpsSinceStartup - _lastSample <
static_cast<std::size_t>(gFlowControlSamplePeriod.load())) {
@@ -469,7 +469,7 @@ void FlowControl::sample(Timestamp timestamp, std::uint64_t opsApplied) {
void FlowControl::_trimSamples(const Timestamp trimTo) {
int numTrimmed = 0;
- stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex);
+ stdx::lock_guard<Latch> lk(_sampledOpsMutex);
// Always leave at least two samples for calculating `locksPerOp`.
while (_sampledOpsApplied.size() > 2 &&
std::get<0>(_sampledOpsApplied.front()) < trimTo.asULL()) {
diff --git a/src/mongo/db/storage/flow_control.h b/src/mongo/db/storage/flow_control.h
index 64f0d0b1d00..17b465b9d21 100644
--- a/src/mongo/db/storage/flow_control.h
+++ b/src/mongo/db/storage/flow_control.h
@@ -37,7 +37,7 @@
#include "mongo/db/repl/replication_coordinator_fwd.h"
#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -125,7 +125,7 @@ private:
// Use an int64_t as this is serialized to bson which does not support unsigned 64-bit numbers.
AtomicWord<std::int64_t> _isLaggedTimeMicros{0};
- mutable stdx::mutex _sampledOpsMutex;
+ mutable Mutex _sampledOpsMutex = MONGO_MAKE_LATCH("FlowControl::_sampledOpsMutex");
std::deque<Sample> _sampledOpsApplied;
// These values are used in the sampling process.
diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
index ef5b441d989..44337fffc49 100644
--- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
+++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
@@ -46,7 +46,7 @@ KVDropPendingIdentReaper::KVDropPendingIdentReaper(KVEngine* engine) : _engine(e
void KVDropPendingIdentReaper::addDropPendingIdent(const Timestamp& dropTimestamp,
const NamespaceString& nss,
StringData ident) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
const auto equalRange = _dropPendingIdents.equal_range(dropTimestamp);
const auto& lowerBound = equalRange.first;
const auto& upperBound = equalRange.second;
@@ -65,7 +65,7 @@ void KVDropPendingIdentReaper::addDropPendingIdent(const Timestamp& dropTimestam
}
boost::optional<Timestamp> KVDropPendingIdentReaper::getEarliestDropTimestamp() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto it = _dropPendingIdents.cbegin();
if (it == _dropPendingIdents.cend()) {
return boost::none;
@@ -74,7 +74,7 @@ boost::optional<Timestamp> KVDropPendingIdentReaper::getEarliestDropTimestamp()
}
std::set<std::string> KVDropPendingIdentReaper::getAllIdents() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
std::set<std::string> idents;
for (const auto& entry : _dropPendingIdents) {
const auto& identInfo = entry.second;
@@ -87,7 +87,7 @@ std::set<std::string> KVDropPendingIdentReaper::getAllIdents() const {
void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, const Timestamp& ts) {
DropPendingIdents toDrop;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (auto it = _dropPendingIdents.cbegin();
it != _dropPendingIdents.cend() && it->first < ts;
++it) {
@@ -125,7 +125,7 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons
{
// Entries must be removed AFTER drops are completed, so that getEarliestDropTimestamp()
// returns appropriate results.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (const auto& timestampAndIdentInfo : toDrop) {
const auto& dropTimestamp = timestampAndIdentInfo.first;
// This may return zero if _dropPendingIdents was cleared using clearDropPendingState().
@@ -135,7 +135,7 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons
}
void KVDropPendingIdentReaper::clearDropPendingState() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_dropPendingIdents.clear();
}
diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h
index c249d9af0ba..75f13690a3d 100644
--- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h
+++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h
@@ -38,7 +38,7 @@
#include "mongo/bson/timestamp.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/storage/kv/kv_engine.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -113,7 +113,7 @@ private:
KVEngine* const _engine;
// Guards access to member variables below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("KVDropPendingIdentReaper::_mutex");
// Drop-pending idents. Ordered by drop timestamp.
DropPendingIdents _dropPendingIdents;
diff --git a/src/mongo/db/storage/kv/kv_prefix.cpp b/src/mongo/db/storage/kv/kv_prefix.cpp
index 6b88dc22c3b..1a54a82f6a1 100644
--- a/src/mongo/db/storage/kv/kv_prefix.cpp
+++ b/src/mongo/db/storage/kv/kv_prefix.cpp
@@ -31,7 +31,7 @@
namespace mongo {
int64_t KVPrefix::_nextValue = 0;
-stdx::mutex KVPrefix::_nextValueMutex;
+Mutex KVPrefix::_nextValueMutex = MONGO_MAKE_LATCH();
const KVPrefix KVPrefix::kNotPrefixed = KVPrefix(-1);
std::string KVPrefix::toString() const {
@@ -54,7 +54,7 @@ std::string KVPrefix::toString() const {
return;
}
- stdx::lock_guard<stdx::mutex> lk(_nextValueMutex);
+ stdx::lock_guard<Latch> lk(_nextValueMutex);
_nextValue = largestPrefix._value + 1;
}
@@ -67,7 +67,7 @@ std::string KVPrefix::toString() const {
}
/* static */ KVPrefix KVPrefix::generateNextPrefix() {
- stdx::lock_guard<stdx::mutex> lk(_nextValueMutex);
+ stdx::lock_guard<Latch> lk(_nextValueMutex);
return KVPrefix(_nextValue++);
}
} // namespace mongo
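For a static member, the header keeps a plain declaration and the .cpp definition supplies the latch; a hedged sketch with a hypothetical class:

// counter.h
class Counter {
private:
    static Mutex _nextValueMutex;  // declared only; defined in the .cpp
    static int64_t _nextValue;
};

// counter.cpp
Mutex Counter::_nextValueMutex = MONGO_MAKE_LATCH("Counter::_nextValueMutex");
int64_t Counter::_nextValue = 0;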
diff --git a/src/mongo/db/storage/kv/kv_prefix.h b/src/mongo/db/storage/kv/kv_prefix.h
index 6a785dc19db..45a1e891c0e 100644
--- a/src/mongo/db/storage/kv/kv_prefix.h
+++ b/src/mongo/db/storage/kv/kv_prefix.h
@@ -33,7 +33,7 @@
#include "mongo/bson/util/builder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/storage/storage_options.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -93,7 +93,7 @@ private:
explicit KVPrefix(int64_t value) : _value(value) {}
int64_t _value;
- static stdx::mutex _nextValueMutex;
+ static Mutex _nextValueMutex;
static int64_t _nextValue;
};
diff --git a/src/mongo/db/storage/kv/storage_engine_test.cpp b/src/mongo/db/storage/kv/storage_engine_test.cpp
index 2aae21eafb4..cf3f7d10b70 100644
--- a/src/mongo/db/storage/kv/storage_engine_test.cpp
+++ b/src/mongo/db/storage/kv/storage_engine_test.cpp
@@ -431,13 +431,13 @@ TEST_F(TimestampKVEngineTest, TimestampListeners) {
}
TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
bool changes[4] = {false, false, false, false};
TimestampListener first(checkpoint, [&](Timestamp timestamp) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
+ stdx::lock_guard<Latch> lock(mutex);
if (!changes[0]) {
changes[0] = true;
cv.notify_all();
@@ -445,7 +445,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
});
TimestampListener second(oldest, [&](Timestamp timestamp) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
+ stdx::lock_guard<Latch> lock(mutex);
if (!changes[1]) {
changes[1] = true;
cv.notify_all();
@@ -453,7 +453,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
});
TimestampListener third(stable, [&](Timestamp timestamp) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
+ stdx::lock_guard<Latch> lock(mutex);
if (!changes[2]) {
changes[2] = true;
cv.notify_all();
@@ -461,7 +461,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
});
TimestampListener fourth(stable, [&](Timestamp timestamp) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
+ stdx::lock_guard<Latch> lock(mutex);
if (!changes[3]) {
changes[3] = true;
cv.notify_all();
@@ -474,7 +474,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
_storageEngine->getTimestampMonitor()->addListener(&fourth);
// Wait until all 4 listeners get notified at least once.
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] {
for (auto const& change : changes) {
if (!change) {
diff --git a/src/mongo/db/storage/mobile/mobile_kv_engine.h b/src/mongo/db/storage/mobile/mobile_kv_engine.h
index 3762ccf0878..0e0b3ab17e3 100644
--- a/src/mongo/db/storage/mobile/mobile_kv_engine.h
+++ b/src/mongo/db/storage/mobile/mobile_kv_engine.h
@@ -35,7 +35,7 @@
#include "mongo/db/storage/kv/kv_engine.h"
#include "mongo/db/storage/mobile/mobile_options.h"
#include "mongo/db/storage/mobile/mobile_session_pool.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
#include "mongo/util/string_map.h"
@@ -124,7 +124,7 @@ public:
std::vector<std::string> getAllIdents(OperationContext* opCtx) const override;
void setJournalListener(JournalListener* jl) override {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_journalListener = jl;
}
@@ -143,7 +143,7 @@ public:
private:
void maybeVacuum(Client* client, Date_t deadline);
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MobileKVEngine::_mutex");
void _initDBPath(const std::string& path);
std::int32_t _setSQLitePragma(const std::string& pragma, sqlite3* session);
diff --git a/src/mongo/db/storage/mobile/mobile_record_store.cpp b/src/mongo/db/storage/mobile/mobile_record_store.cpp
index 7543fcb1617..f60142d95fe 100644
--- a/src/mongo/db/storage/mobile/mobile_record_store.cpp
+++ b/src/mongo/db/storage/mobile/mobile_record_store.cpp
@@ -233,7 +233,7 @@ void MobileRecordStore::_initDataSizeIfNeeded_inlock(OperationContext* opCtx) co
}
long long MobileRecordStore::dataSize(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_dataSizeMutex);
+ stdx::lock_guard<Latch> lock(_dataSizeMutex);
_initDataSizeIfNeeded_inlock(opCtx);
return _dataSize;
}
@@ -255,7 +255,7 @@ void MobileRecordStore::_initNumRecsIfNeeded_inlock(OperationContext* opCtx) con
}
long long MobileRecordStore::numRecords(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_numRecsMutex);
+ stdx::lock_guard<Latch> lock(_numRecsMutex);
_initNumRecsIfNeeded_inlock(opCtx);
return _numRecs;
}
@@ -420,7 +420,7 @@ public:
void commit(boost::optional<Timestamp>) override {}
void rollback() override {
- stdx::lock_guard<stdx::mutex> lock(_rs->_numRecsMutex);
+ stdx::lock_guard<Latch> lock(_rs->_numRecsMutex);
_rs->_numRecs -= _diff;
}
@@ -430,7 +430,7 @@ private:
};
void MobileRecordStore::_changeNumRecs(OperationContext* opCtx, int64_t diff) {
- stdx::lock_guard<stdx::mutex> lock(_numRecsMutex);
+ stdx::lock_guard<Latch> lock(_numRecsMutex);
opCtx->recoveryUnit()->registerChange(std::make_unique<NumRecsChange>(this, diff));
_initNumRecsIfNeeded_inlock(opCtx);
_numRecs += diff;
@@ -441,7 +441,7 @@ bool MobileRecordStore::_resetNumRecsIfNeeded(OperationContext* opCtx, int64_t n
int64_t currNumRecs = numRecords(opCtx);
if (currNumRecs != newNumRecs) {
wasReset = true;
- stdx::lock_guard<stdx::mutex> lock(_numRecsMutex);
+ stdx::lock_guard<Latch> lock(_numRecsMutex);
_numRecs = newNumRecs;
}
return wasReset;
@@ -457,7 +457,7 @@ public:
void commit(boost::optional<Timestamp>) override {}
void rollback() override {
- stdx::lock_guard<stdx::mutex> lock(_rs->_dataSizeMutex);
+ stdx::lock_guard<Latch> lock(_rs->_dataSizeMutex);
_rs->_dataSize -= _diff;
}
@@ -467,7 +467,7 @@ private:
};
void MobileRecordStore::_changeDataSize(OperationContext* opCtx, int64_t diff) {
- stdx::lock_guard<stdx::mutex> lock(_dataSizeMutex);
+ stdx::lock_guard<Latch> lock(_dataSizeMutex);
opCtx->recoveryUnit()->registerChange(std::make_unique<DataSizeChange>(this, diff));
_initDataSizeIfNeeded_inlock(opCtx);
_dataSize += diff;
@@ -479,7 +479,7 @@ bool MobileRecordStore::_resetDataSizeIfNeeded(OperationContext* opCtx, int64_t
if (currDataSize != _dataSize) {
wasReset = true;
- stdx::lock_guard<stdx::mutex> lock(_dataSizeMutex);
+ stdx::lock_guard<Latch> lock(_dataSizeMutex);
_dataSize = newDataSize;
}
return wasReset;
diff --git a/src/mongo/db/storage/mobile/mobile_record_store.h b/src/mongo/db/storage/mobile/mobile_record_store.h
index b08c14c9e44..d9457edd985 100644
--- a/src/mongo/db/storage/mobile/mobile_record_store.h
+++ b/src/mongo/db/storage/mobile/mobile_record_store.h
@@ -167,7 +167,7 @@ private:
bool _resetNumRecsIfNeeded(OperationContext* opCtx, int64_t newNumRecs);
mutable int64_t _numRecs;
- mutable stdx::mutex _numRecsMutex;
+ mutable Mutex _numRecsMutex = MONGO_MAKE_LATCH("MobileRecordStore::_numRecsMutex");
mutable bool _isNumRecsInitialized = false;
/**
@@ -188,7 +188,7 @@ private:
bool _resetDataSizeIfNeeded(OperationContext* opCtx, int64_t newDataSize);
mutable int64_t _dataSize;
- mutable stdx::mutex _dataSizeMutex;
+ mutable Mutex _dataSizeMutex = MONGO_MAKE_LATCH("MobileRecordStore::_dataSizeMutex");
mutable bool _isDataSizeInitialized = false;
};
diff --git a/src/mongo/db/storage/mobile/mobile_session_pool.cpp b/src/mongo/db/storage/mobile/mobile_session_pool.cpp
index 179a30cbe5e..a8a211bcc6b 100644
--- a/src/mongo/db/storage/mobile/mobile_session_pool.cpp
+++ b/src/mongo/db/storage/mobile/mobile_session_pool.cpp
@@ -43,7 +43,7 @@
#include "mongo/db/storage/mobile/mobile_session_pool.h"
#include "mongo/db/storage/mobile/mobile_sqlite_statement.h"
#include "mongo/db/storage/mobile/mobile_util.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -105,7 +105,7 @@ MobileSessionPool::~MobileSessionPool() {
}
std::unique_ptr<MobileSession> MobileSessionPool::getSession(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// We should never be able to get here after _shuttingDown is set, because no new operations
// should be allowed to start.
@@ -141,13 +141,13 @@ void MobileSessionPool::releaseSession(MobileSession* session) {
if (!failedDropsQueue.isEmpty())
failedDropsQueue.execAndDequeueOp(session);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_sessions.push_back(session->getSession());
_releasedSessionNotifier.notify_one();
}
void MobileSessionPool::shutDown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shuttingDown = true;
// Retrieve the operation context from the thread's client if the client exists.
diff --git a/src/mongo/db/storage/mobile/mobile_session_pool.h b/src/mongo/db/storage/mobile/mobile_session_pool.h
index 08586e0ece8..031953cdfb3 100644
--- a/src/mongo/db/storage/mobile/mobile_session_pool.h
+++ b/src/mongo/db/storage/mobile/mobile_session_pool.h
@@ -37,7 +37,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mobile/mobile_options.h"
#include "mongo/db/storage/mobile/mobile_session.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
class MobileSession;
@@ -58,7 +58,7 @@ public:
private:
AtomicWord<bool> _isEmpty;
- stdx::mutex _queueMutex;
+ Mutex _queueMutex = MONGO_MAKE_LATCH("MobileDelayedOpQueue::_queueMutex");
std::queue<std::string> _opQueryQueue;
};
@@ -107,7 +107,7 @@ private:
sqlite3* _popSession_inlock();
// This is used to lock the _sessions vector.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MobileSessionPool::_mutex");
stdx::condition_variable _releasedSessionNotifier;
std::string _path;
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index 8854f359119..a43deca1687 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -900,7 +900,7 @@ StorageEngineImpl::TimestampMonitor::TimestampMonitor(KVEngine* engine, Periodic
StorageEngineImpl::TimestampMonitor::~TimestampMonitor() {
log() << "Timestamp monitor shutting down";
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
invariant(_listeners.empty());
}
@@ -912,7 +912,7 @@ void StorageEngineImpl::TimestampMonitor::startup() {
"TimestampMonitor",
[&](Client* client) {
{
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
if (_listeners.empty()) {
return;
}
@@ -979,7 +979,7 @@ void StorageEngineImpl::TimestampMonitor::startup() {
}
void StorageEngineImpl::TimestampMonitor::notifyAll(TimestampType type, Timestamp newTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
for (auto& listener : _listeners) {
if (listener->getType() == type) {
listener->notify(newTimestamp);
@@ -988,7 +988,7 @@ void StorageEngineImpl::TimestampMonitor::notifyAll(TimestampType type, Timestam
}
void StorageEngineImpl::TimestampMonitor::addListener(TimestampListener* listener) {
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
if (std::find(_listeners.begin(), _listeners.end(), listener) != _listeners.end()) {
bool listenerAlreadyRegistered = true;
invariant(!listenerAlreadyRegistered);
@@ -997,7 +997,7 @@ void StorageEngineImpl::TimestampMonitor::addListener(TimestampListener* listene
}
void StorageEngineImpl::TimestampMonitor::removeListener(TimestampListener* listener) {
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
if (std::find(_listeners.begin(), _listeners.end(), listener) == _listeners.end()) {
bool listenerNotRegistered = true;
invariant(!listenerNotRegistered);
diff --git a/src/mongo/db/storage/storage_engine_impl.h b/src/mongo/db/storage/storage_engine_impl.h
index 07f2cf6f42d..64f9774f6e8 100644
--- a/src/mongo/db/storage/storage_engine_impl.h
+++ b/src/mongo/db/storage/storage_engine_impl.h
@@ -46,7 +46,7 @@
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/storage/storage_engine_interface.h"
#include "mongo/db/storage/temporary_record_store.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
namespace mongo {
@@ -290,7 +290,7 @@ public:
PeriodicRunner* _periodicRunner;
// Protects access to _listeners below.
- stdx::mutex _monitorMutex;
+ Mutex _monitorMutex = MONGO_MAKE_LATCH("TimestampMonitor::_monitorMutex");
std::vector<TimestampListener*> _listeners;
// This should remain as the last member variable so that its destructor gets executed first
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 556a01a2efb..87a0fa73e0f 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -187,7 +187,7 @@ public:
while (!_shuttingDown.load()) {
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
MONGO_IDLE_THREAD_BLOCK;
// Check every 10 seconds, or every second in debug builds
_condvar.wait_for(lock, stdx::chrono::seconds(kDebugBuild ? 1 : 10));
@@ -202,7 +202,7 @@ public:
void shutdown() {
_shuttingDown.store(true);
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
// Wake up the session sweeper thread early; we do not want shutdown
// to wait on it for too long.
_condvar.notify_one();
@@ -214,7 +214,7 @@ private:
WiredTigerSessionCache* _sessionCache;
AtomicWord<bool> _shuttingDown{false};
- stdx::mutex _mutex; // protects _condvar
+ Mutex _mutex = MONGO_MAKE_LATCH("WiredTigerSessionSweeper::_mutex"); // protects _condvar
// The session sweeper thread idles on this condition variable for a particular time duration
// between cleaning up expired sessions. It can be triggered early to expedite shutdown.
stdx::condition_variable _condvar;
@@ -322,7 +322,7 @@ public:
auto opCtx = tc->makeOperationContext();
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
MONGO_IDLE_THREAD_BLOCK;
_condvar.wait_for(lock,
stdx::chrono::seconds(static_cast<std::int64_t>(
@@ -395,7 +395,7 @@ public:
if (oplogNeededForRollback.isOK()) {
// Now that the checkpoint is durable, publish the oplog needed to recover
// from it.
- stdx::lock_guard<stdx::mutex> lk(_oplogNeededForCrashRecoveryMutex);
+ stdx::lock_guard<Latch> lk(_oplogNeededForCrashRecoveryMutex);
_oplogNeededForCrashRecovery.store(
oplogNeededForRollback.getValue().asULL());
}
@@ -440,7 +440,7 @@ public:
_hasTriggeredFirstStableCheckpoint = true;
log() << "Triggering the first stable checkpoint. Initial Data: " << initialData
<< " PrevStable: " << prevStable << " CurrStable: " << currStable;
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condvar.notify_one();
}
}
@@ -454,14 +454,14 @@ public:
* _oplogNeededForCrashRecovery will not change during assignment.
*/
void assignOplogNeededForCrashRecoveryTo(boost::optional<Timestamp>* timestamp) {
- stdx::lock_guard<stdx::mutex> lk(_oplogNeededForCrashRecoveryMutex);
+ stdx::lock_guard<Latch> lk(_oplogNeededForCrashRecoveryMutex);
*timestamp = Timestamp(_oplogNeededForCrashRecovery.load());
}
void shutdown() {
_shuttingDown.store(true);
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
// Wake up the checkpoint thread early to take a final checkpoint before shutting
// down, if one has not coincidentally just been taken.
_condvar.notify_one();
@@ -473,7 +473,7 @@ private:
WiredTigerKVEngine* _wiredTigerKVEngine;
WiredTigerSessionCache* _sessionCache;
- stdx::mutex _mutex; // protects _condvar
+ Mutex _mutex = MONGO_MAKE_LATCH("WiredTigerCheckpointThread::_mutex");
+ ; // protects _condvar
// The checkpoint thread idles on this condition variable for a particular time duration between
// taking checkpoints. It can be triggered early to expedite immediate checkpointing.
stdx::condition_variable _condvar;
@@ -482,7 +483,8 @@ private:
bool _hasTriggeredFirstStableCheckpoint = false;
- stdx::mutex _oplogNeededForCrashRecoveryMutex;
+ Mutex _oplogNeededForCrashRecoveryMutex =
+ MONGO_MAKE_LATCH("WiredTigerCheckpointThread::_oplogNeededForCrashRecoveryMutex");
AtomicWord<std::uint64_t> _oplogNeededForCrashRecovery;
};
@@ -1064,7 +1066,7 @@ StatusWith<std::vector<std::string>> WiredTigerKVEngine::beginNonBlockingBackup(
uassert(51034, "Cannot open backup cursor with in-memory mode.", !isEphemeral());
// Oplog truncation thread won't remove oplog since the checkpoint pinned by the backup cursor.
- stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex);
+ stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex);
_checkpointThread->assignOplogNeededForCrashRecoveryTo(&_oplogPinnedByBackup);
auto pinOplogGuard = makeGuard([&] { _oplogPinnedByBackup = boost::none; });
@@ -1099,7 +1101,7 @@ StatusWith<std::vector<std::string>> WiredTigerKVEngine::beginNonBlockingBackup(
void WiredTigerKVEngine::endNonBlockingBackup(OperationContext* opCtx) {
_backupSession.reset();
// Oplog truncation thread can now remove the pinned oplog.
- stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex);
+ stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex);
_oplogPinnedByBackup = boost::none;
_backupCursor = nullptr;
}
@@ -1140,7 +1142,7 @@ void WiredTigerKVEngine::syncSizeInfo(bool sync) const {
void WiredTigerKVEngine::setOldestActiveTransactionTimestampCallback(
StorageEngine::OldestActiveTransactionTimestampCallback callback) {
- stdx::lock_guard<stdx::mutex> lk(_oldestActiveTransactionTimestampCallbackMutex);
+ stdx::lock_guard<Latch> lk(_oldestActiveTransactionTimestampCallbackMutex);
_oldestActiveTransactionTimestampCallback = std::move(callback);
};
@@ -1403,7 +1405,7 @@ Status WiredTigerKVEngine::dropIdent(OperationContext* opCtx, StringData ident)
if (ret == EBUSY) {
// this is expected, queue it up
{
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
_identToDrop.push_front(uri);
}
_sessionCache->closeCursorsForQueuedDrops();
@@ -1422,7 +1424,7 @@ std::list<WiredTigerCachedCursor> WiredTigerKVEngine::filterCursorsWithQueuedDro
std::list<WiredTigerCachedCursor>* cache) {
std::list<WiredTigerCachedCursor> toDrop;
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
if (_identToDrop.empty())
return toDrop;
@@ -1456,7 +1458,7 @@ bool WiredTigerKVEngine::haveDropsQueued() const {
_previousCheckedDropsQueued = now;
// Don't wait for the mutex: if we can't get it, report that no drops are queued.
- stdx::unique_lock<stdx::mutex> lk(_identToDropMutex, stdx::defer_lock);
+ stdx::unique_lock<Latch> lk(_identToDropMutex, stdx::defer_lock);
return lk.try_lock() && !_identToDrop.empty();
}
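The non-blocking check above is worth calling out: the lock is constructed deferred and then try_lock()ed, so a contended mutex reports "no drops queued" instead of stalling the caller. A sketch under the same assumptions (names illustrative):

bool hasQueuedDrops(Mutex& identToDropMutex, const std::list<std::string>& identToDrop) {
    stdx::unique_lock<Latch> lk(identToDropMutex, stdx::defer_lock);
    // try_lock() is evaluated first; the queue is only inspected if we got the lock.
    return lk.try_lock() && !identToDrop.empty();
}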
@@ -1466,7 +1468,7 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() {
WiredTigerSession session(_conn);
{
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
numInQueue = _identToDrop.size();
}
@@ -1479,7 +1481,7 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() {
for (int i = 0; i < numToDelete; i++) {
string uri;
{
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
if (_identToDrop.empty())
break;
uri = _identToDrop.front();
@@ -1490,7 +1492,7 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() {
LOG(1) << "WT queued drop of " << uri << " res " << ret;
if (ret == EBUSY) {
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
_identToDrop.push_back(uri);
} else {
invariantWTOK(ret);
@@ -1871,7 +1873,7 @@ StatusWith<Timestamp> WiredTigerKVEngine::getOplogNeededForRollback() const {
auto stableTimestamp = _stableTimestamp.load();
// Only one thread can set or execute this callback.
- stdx::lock_guard<stdx::mutex> lk(_oldestActiveTransactionTimestampCallbackMutex);
+ stdx::lock_guard<Latch> lk(_oldestActiveTransactionTimestampCallbackMutex);
boost::optional<Timestamp> oldestActiveTransactionTimestamp;
if (_oldestActiveTransactionTimestampCallback) {
auto status = _oldestActiveTransactionTimestampCallback(Timestamp(stableTimestamp));
@@ -1904,7 +1906,7 @@ boost::optional<Timestamp> WiredTigerKVEngine::getOplogNeededForCrashRecovery()
Timestamp WiredTigerKVEngine::getPinnedOplog() const {
{
- stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex);
+ stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex);
if (!storageGlobalParams.allowOplogTruncation) {
// If oplog truncation is not allowed, then return the min timestamp so that no history
// is
@@ -1956,14 +1958,14 @@ bool WiredTigerKVEngine::supportsOplogStones() const {
void WiredTigerKVEngine::startOplogManager(OperationContext* opCtx,
const std::string& uri,
WiredTigerRecordStore* oplogRecordStore) {
- stdx::lock_guard<stdx::mutex> lock(_oplogManagerMutex);
+ stdx::lock_guard<Latch> lock(_oplogManagerMutex);
if (_oplogManagerCount == 0)
_oplogManager->start(opCtx, uri, oplogRecordStore);
_oplogManagerCount++;
}
void WiredTigerKVEngine::haltOplogManager() {
- stdx::unique_lock<stdx::mutex> lock(_oplogManagerMutex);
+ stdx::unique_lock<Latch> lock(_oplogManagerMutex);
invariant(_oplogManagerCount > 0);
_oplogManagerCount--;
if (_oplogManagerCount == 0) {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index 7b98f9fd388..39a06e1f213 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -45,7 +45,7 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/elapsed_tracker.h"
namespace mongo {
@@ -409,7 +409,8 @@ private:
std::uint64_t _getCheckpointTimestamp() const;
- mutable stdx::mutex _oldestActiveTransactionTimestampCallbackMutex;
+ mutable Mutex _oldestActiveTransactionTimestampCallbackMutex =
+ MONGO_MAKE_LATCH("::_oldestActiveTransactionTimestampCallbackMutex");
StorageEngine::OldestActiveTransactionTimestampCallback
_oldestActiveTransactionTimestampCallback;
@@ -420,7 +421,7 @@ private:
ClockSource* const _clockSource;
// Mutex to protect use of _oplogManagerCount by this instance of KV engine.
- mutable stdx::mutex _oplogManagerMutex;
+ mutable Mutex _oplogManagerMutex = MONGO_MAKE_LATCH("WiredTigerKVEngine::_oplogManagerMutex");
std::size_t _oplogManagerCount = 0;
std::unique_ptr<WiredTigerOplogManager> _oplogManager;
@@ -451,15 +452,16 @@ private:
std::string _rsOptions;
std::string _indexOptions;
- mutable stdx::mutex _dropAllQueuesMutex;
- mutable stdx::mutex _identToDropMutex;
+ mutable Mutex _dropAllQueuesMutex = MONGO_MAKE_LATCH("WiredTigerKVEngine::_dropAllQueuesMutex");
+ mutable Mutex _identToDropMutex = MONGO_MAKE_LATCH("WiredTigerKVEngine::_identToDropMutex");
std::list<std::string> _identToDrop;
mutable Date_t _previousCheckedDropsQueued;
std::unique_ptr<WiredTigerSession> _backupSession;
WT_CURSOR* _backupCursor;
- mutable stdx::mutex _oplogPinnedByBackupMutex;
+ mutable Mutex _oplogPinnedByBackupMutex =
+ MONGO_MAKE_LATCH("WiredTigerKVEngine::_oplogPinnedByBackupMutex");
boost::optional<Timestamp> _oplogPinnedByBackup;
Timestamp _recoveryTimestamp;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
index fbf0b9450a3..647fe8de738 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/idle_thread_block.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -74,7 +74,7 @@ void WiredTigerOplogManager::start(OperationContext* opCtx,
// Need to obtain the mutex before starting the thread, as otherwise it may race ahead,
// see _shuttingDown as true, and quit prematurely.
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
_oplogJournalThread = stdx::thread(&WiredTigerOplogManager::_oplogJournalThreadLoop,
this,
WiredTigerRecoveryUnit::get(opCtx)->getSessionCache(),
@@ -86,7 +86,7 @@ void WiredTigerOplogManager::start(OperationContext* opCtx,
void WiredTigerOplogManager::halt() {
{
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
invariant(_isRunning);
_shuttingDown = true;
_isRunning = false;
@@ -120,7 +120,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible(
// Close transaction before we wait.
opCtx->recoveryUnit()->abandonSnapshot();
- stdx::unique_lock<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::unique_lock<Latch> lk(_oplogVisibilityStateMutex);
// Prevent any scheduled journal flushes from being delayed and blocking this wait excessively.
_opsWaitingForVisibility++;
@@ -148,7 +148,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible(
}
void WiredTigerOplogManager::triggerJournalFlush() {
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
if (!_opsWaitingForJournal) {
_opsWaitingForJournal = true;
_opsWaitingForJournalCV.notify_one();
@@ -174,7 +174,7 @@ void WiredTigerOplogManager::_oplogJournalThreadLoop(WiredTigerSessionCache* ses
// waitUntilDurable() call requiring an opCtx parameter.
opCtx->swapLockState(std::make_unique<LockerImpl>());
- stdx::unique_lock<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::unique_lock<Latch> lk(_oplogVisibilityStateMutex);
{
MONGO_IDLE_THREAD_BLOCK;
_opsWaitingForJournalCV.wait(lk,
@@ -251,7 +251,7 @@ std::uint64_t WiredTigerOplogManager::getOplogReadTimestamp() const {
}
void WiredTigerOplogManager::setOplogReadTimestamp(Timestamp ts) {
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
_setOplogReadTimestamp(lk, ts.asULL());
}
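The WiredTigerOplogManager waits above keep their stdx::condition_variable; only the lock type changes, since the condition variable accepts a stdx::unique_lock over the Latch type, as the unchanged wait call sites rely on. A sketch of that wait/notify shape (JournalFlushSignal is a hypothetical class, not part of this commit):

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"

    namespace mongo {

    class JournalFlushSignal {
    public:
        void trigger() {
            stdx::lock_guard<Latch> lk(_mutex);
            if (!_pending) {
                _pending = true;
                _cv.notify_one();  // mirrors triggerJournalFlush() above
            }
        }

        void awaitAndClear() {
            stdx::unique_lock<Latch> lk(_mutex);
            // Predicate wait, as in _oplogJournalThreadLoop() above.
            _cv.wait(lk, [&] { return _pending; });
            _pending = false;
        }

    private:
        mutable Mutex _mutex = MONGO_MAKE_LATCH("JournalFlushSignal::_mutex");
        stdx::condition_variable _cv;
        bool _pending = false;
    };

    }  // namespace mongo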
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h
index 9a82985fc28..09258c657f2 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h
@@ -30,8 +30,8 @@
#pragma once
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -60,7 +60,7 @@ public:
void halt();
bool isRunning() {
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
return _isRunning && !_shuttingDown;
}
@@ -89,7 +89,8 @@ private:
void _setOplogReadTimestamp(WithLock, uint64_t newTimestamp);
stdx::thread _oplogJournalThread;
- mutable stdx::mutex _oplogVisibilityStateMutex;
+ mutable Mutex _oplogVisibilityStateMutex =
+ MONGO_MAKE_LATCH("WiredTigerOplogManager::_oplogVisibilityStateMutex");
mutable stdx::condition_variable
_opsWaitingForJournalCV; // Signaled to trigger a journal flush.
mutable stdx::condition_variable
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index f0a12735423..0c4c1956e51 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -147,7 +147,7 @@ public:
_oplogStones->_currentRecords.store(0);
_oplogStones->_currentBytes.store(0);
- stdx::lock_guard<stdx::mutex> lk(_oplogStones->_mutex);
+ stdx::lock_guard<Latch> lk(_oplogStones->_mutex);
_oplogStones->_stones.clear();
}
@@ -159,7 +159,7 @@ private:
WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* opCtx, WiredTigerRecordStore* rs)
: _rs(rs) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(rs->isCapped());
invariant(rs->cappedMaxSize() > 0);
@@ -178,13 +178,13 @@ WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* opCtx, WiredTi
}
bool WiredTigerRecordStore::OplogStones::isDead() {
- stdx::lock_guard<stdx::mutex> lk(_oplogReclaimMutex);
+ stdx::lock_guard<Latch> lk(_oplogReclaimMutex);
return _isDead;
}
void WiredTigerRecordStore::OplogStones::kill() {
{
- stdx::lock_guard<stdx::mutex> lk(_oplogReclaimMutex);
+ stdx::lock_guard<Latch> lk(_oplogReclaimMutex);
_isDead = true;
}
_oplogReclaimCv.notify_one();
@@ -192,11 +192,11 @@ void WiredTigerRecordStore::OplogStones::kill() {
void WiredTigerRecordStore::OplogStones::awaitHasExcessStonesOrDead() {
// Wait until kill() is called or there are too many oplog stones.
- stdx::unique_lock<stdx::mutex> lock(_oplogReclaimMutex);
+ stdx::unique_lock<Latch> lock(_oplogReclaimMutex);
while (!_isDead) {
{
MONGO_IDLE_THREAD_BLOCK;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (hasExcessStones_inlock()) {
// There are now excess oplog stones. However, it may be necessary to keep
// additional oplog.
@@ -219,7 +219,7 @@ void WiredTigerRecordStore::OplogStones::awaitHasExcessStonesOrDead() {
boost::optional<WiredTigerRecordStore::OplogStones::Stone>
WiredTigerRecordStore::OplogStones::peekOldestStoneIfNeeded() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!hasExcessStones_inlock()) {
return {};
@@ -229,12 +229,12 @@ WiredTigerRecordStore::OplogStones::peekOldestStoneIfNeeded() const {
}
void WiredTigerRecordStore::OplogStones::popOldestStone() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stones.pop_front();
}
void WiredTigerRecordStore::OplogStones::createNewStoneIfNeeded(RecordId lastRecord) {
- stdx::unique_lock<stdx::mutex> lk(_mutex, stdx::try_to_lock);
+ stdx::unique_lock<Latch> lk(_mutex, stdx::try_to_lock);
if (!lk) {
// Someone else is either already creating a new stone or popping the oldest one. In the
// latter case, we let the next insert trigger the new stone's creation.
@@ -275,7 +275,7 @@ void WiredTigerRecordStore::OplogStones::clearStonesOnCommit(OperationContext* o
void WiredTigerRecordStore::OplogStones::updateStonesAfterCappedTruncateAfter(
int64_t recordsRemoved, int64_t bytesRemoved, RecordId firstRemovedId) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
int64_t numStonesToRemove = 0;
int64_t recordsInStonesToRemove = 0;
@@ -305,7 +305,7 @@ void WiredTigerRecordStore::OplogStones::updateStonesAfterCappedTruncateAfter(
void WiredTigerRecordStore::OplogStones::setMinBytesPerStone(int64_t size) {
invariant(size > 0);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Only allow changing the minimum bytes per stone if no data has been inserted.
invariant(_stones.size() == 0 && _currentRecords.load() == 0);
@@ -457,7 +457,7 @@ void WiredTigerRecordStore::OplogStones::_pokeReclaimThreadIfNeeded() {
}
void WiredTigerRecordStore::OplogStones::adjust(int64_t maxSize) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const unsigned long long kMinStonesToKeep = 10ULL;
const unsigned long long kMaxStonesToKeep = 100ULL;
@@ -699,7 +699,7 @@ WiredTigerRecordStore::WiredTigerRecordStore(WiredTigerKVEngine* kvEngine,
WiredTigerRecordStore::~WiredTigerRecordStore() {
{
- stdx::lock_guard<stdx::mutex> lk(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> lk(_cappedCallbackMutex);
_shuttingDown = true;
}
@@ -784,7 +784,7 @@ const char* WiredTigerRecordStore::name() const {
}
bool WiredTigerRecordStore::inShutdown() const {
- stdx::lock_guard<stdx::mutex> lk(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> lk(_cappedCallbackMutex);
return _shuttingDown;
}
@@ -1060,7 +1060,7 @@ int64_t WiredTigerRecordStore::_cappedDeleteAsNeeded_inlock(OperationContext* op
++docsRemoved;
sizeSaved += old_value.size;
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
if (_shuttingDown)
break;
@@ -1332,12 +1332,12 @@ bool WiredTigerRecordStore::isOpHidden_forTest(const RecordId& id) const {
}
bool WiredTigerRecordStore::haveCappedWaiters() {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
return _cappedCallback && _cappedCallback->haveCappedWaiters();
}
void WiredTigerRecordStore::notifyCappedWaitersIfNeeded() {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
// This wakes up cursors blocking in await_data.
if (_cappedCallback) {
_cappedCallback->notifyCappedWaitersIfNeeded();
@@ -1743,7 +1743,7 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* opCtx,
// Compute the number and associated sizes of the records to delete.
{
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
do {
if (_cappedCallback) {
uassertStatusOK(
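createNewStoneIfNeeded() above is deliberately non-blocking: record inserts must never stall behind stone bookkeeping, so it takes the mutex with stdx::try_to_lock and bails if another thread already holds it. The same shape against a Latch, as a sketch (StoneListSketch is illustrative):

    #include <cstdint>
    #include <deque>

    #include "mongo/platform/mutex.h"

    namespace mongo {

    class StoneListSketch {
    public:
        void createStoneIfIdle(int64_t lastRecord) {
            // Non-blocking acquisition: if another thread is already creating
            // or popping a stone, give up and let a later insert retry.
            stdx::unique_lock<Latch> lk(_mutex, stdx::try_to_lock);
            if (!lk) {
                return;
            }
            _stones.push_back(lastRecord);
        }

    private:
        mutable Mutex _mutex = MONGO_MAKE_LATCH("StoneListSketch::_mutex");
        std::deque<int64_t> _stones;  // front = oldest, back = newest
    };

    }  // namespace mongo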
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 693987af2a6..044d57339d7 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -43,8 +43,8 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/fail_point_service.h"
@@ -212,7 +212,7 @@ public:
Status updateCappedSize(OperationContext* opCtx, long long cappedSize) final;
void setCappedCallback(CappedCallback* cb) {
- stdx::lock_guard<stdx::mutex> lk(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> lk(_cappedCallbackMutex);
_cappedCallback = cb;
}
@@ -343,9 +343,12 @@ private:
RecordId _cappedFirstRecord;
AtomicWord<long long> _cappedSleep;
AtomicWord<long long> _cappedSleepMS;
+
+ // guards _cappedCallback and _shuttingDown
+ mutable Mutex _cappedCallbackMutex =
+ MONGO_MAKE_LATCH("WiredTigerRecordStore::_cappedCallbackMutex");
CappedCallback* _cappedCallback;
bool _shuttingDown;
- mutable stdx::mutex _cappedCallbackMutex; // guards _cappedCallback and _shuttingDown
// See comment in ::cappedDeleteAsNeeded
int _cappedDeleteCheckCount;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
index f6e9371c894..f88334ea85b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
@@ -33,8 +33,8 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -99,7 +99,7 @@ public:
//
size_t numStones() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _stones.size();
}
@@ -129,7 +129,7 @@ private:
WiredTigerRecordStore* _rs;
- stdx::mutex _oplogReclaimMutex;
+ Mutex _oplogReclaimMutex = MONGO_MAKE_LATCH("OplogStones::_oplogReclaimMutex");
stdx::condition_variable _oplogReclaimCv;
// True if '_rs' has been destroyed, e.g. due to repairDatabase being called on the "local"
@@ -143,7 +143,8 @@ private:
AtomicWord<long long> _currentRecords; // Number of records in the stone being filled.
AtomicWord<long long> _currentBytes; // Number of bytes in the stone being filled.
- mutable stdx::mutex _mutex; // Protects against concurrent access to the deque of oplog stones.
+ // Protects against concurrent access to the deque of oplog stones.
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("OplogStones::_mutex");
std::deque<OplogStones::Stone> _stones; // front = oldest, back = newest.
};
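awaitHasExcessStonesOrDead() above nests these two latches: the reclaim thread sleeps on _oplogReclaimMutex and briefly takes _mutex inside the loop to inspect the deque, always in that order, so the lock hierarchy stays consistent. A sketch of that nesting (ReclaimWaiterSketch and its work queue are hypothetical):

    #include <deque>

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"

    namespace mongo {

    class ReclaimWaiterSketch {
    public:
        void awaitWorkOrDead() {
            stdx::unique_lock<Latch> reclaimLock(_reclaimMutex);
            while (!_isDead) {
                {
                    // The inner latch is only ever taken while the outer one
                    // is held, giving a single consistent lock order.
                    stdx::lock_guard<Latch> lk(_mutex);
                    if (!_work.empty())
                        return;
                }
                _reclaimCv.wait(reclaimLock);
            }
        }

        void kill() {
            {
                stdx::lock_guard<Latch> lk(_reclaimMutex);
                _isDead = true;
            }
            _reclaimCv.notify_one();
        }

    private:
        Mutex _reclaimMutex = MONGO_MAKE_LATCH("ReclaimWaiterSketch::_reclaimMutex");
        stdx::condition_variable _reclaimCv;
        bool _isDead = false;

        mutable Mutex _mutex = MONGO_MAKE_LATCH("ReclaimWaiterSketch::_mutex");
        std::deque<int> _work;  // filled elsewhere; shown only for the lock shape
    };

    }  // namespace mongo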
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
index 104a5caa151..897b72eb762 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
@@ -266,7 +266,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx,
UniqueWiredTigerSession session = getSession();
WT_SESSION* s = session->getSession();
{
- stdx::unique_lock<stdx::mutex> lk(_journalListenerMutex);
+ stdx::unique_lock<Latch> lk(_journalListenerMutex);
JournalListener::Token token = _journalListener->getToken();
auto config = stableCheckpoint ? "use_timestamp=true" : "use_timestamp=false";
auto checkpointLock = _engine->getCheckpointLock(opCtx);
@@ -280,7 +280,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx,
uint32_t start = _lastSyncTime.load();
// Do the remainder in a critical section that ensures only a single thread at a time
// will attempt to synchronize.
- stdx::unique_lock<stdx::mutex> lk(_lastSyncMutex);
+ stdx::unique_lock<Latch> lk(_lastSyncMutex);
uint32_t current = _lastSyncTime.loadRelaxed(); // synchronized with writes through mutex
if (current != start) {
// Someone else synced already since we read lastSyncTime, so we're done!
@@ -292,7 +292,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx,
// This gets the token (OpTime) from the last write, before flushing (either the journal, or a
// checkpoint), and then reports that token (OpTime) as a durable write.
- stdx::unique_lock<stdx::mutex> jlk(_journalListenerMutex);
+ stdx::unique_lock<Latch> jlk(_journalListenerMutex);
JournalListener::Token token = _journalListener->getToken();
// Initialize on first use.
@@ -316,7 +316,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx,
void WiredTigerSessionCache::waitUntilPreparedUnitOfWorkCommitsOrAborts(OperationContext* opCtx,
std::uint64_t lastCount) {
invariant(opCtx);
- stdx::unique_lock<stdx::mutex> lk(_prepareCommittedOrAbortedMutex);
+ stdx::unique_lock<Latch> lk(_prepareCommittedOrAbortedMutex);
if (lastCount == _prepareCommitOrAbortCounter.loadRelaxed()) {
opCtx->waitForConditionOrInterrupt(_prepareCommittedOrAbortedCond, lk, [&] {
return _prepareCommitOrAbortCounter.loadRelaxed() > lastCount;
@@ -325,14 +325,14 @@ void WiredTigerSessionCache::waitUntilPreparedUnitOfWorkCommitsOrAborts(Operatio
}
void WiredTigerSessionCache::notifyPreparedUnitOfWorkHasCommittedOrAborted() {
- stdx::unique_lock<stdx::mutex> lk(_prepareCommittedOrAbortedMutex);
+ stdx::unique_lock<Latch> lk(_prepareCommittedOrAbortedMutex);
_prepareCommitOrAbortCounter.fetchAndAdd(1);
_prepareCommittedOrAbortedCond.notify_all();
}
void WiredTigerSessionCache::closeAllCursors(const std::string& uri) {
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
for (SessionCache::iterator i = _sessions.begin(); i != _sessions.end(); i++) {
(*i)->closeAllCursors(uri);
}
@@ -342,14 +342,14 @@ void WiredTigerSessionCache::closeCursorsForQueuedDrops() {
// Increment the cursor epoch so that all cursors from this epoch are closed.
_cursorEpoch.fetchAndAdd(1);
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
for (SessionCache::iterator i = _sessions.begin(); i != _sessions.end(); i++) {
(*i)->closeCursorsForQueuedDrops(_engine);
}
}
size_t WiredTigerSessionCache::getIdleSessionsCount() {
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
return _sessions.size();
}
@@ -361,7 +361,7 @@ void WiredTigerSessionCache::closeExpiredIdleSessions(int64_t idleTimeMillis) {
auto cutoffTime = _clockSource->now() - Milliseconds(idleTimeMillis);
{
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
// Discard all sessions that became idle before the cutoff time
for (auto it = _sessions.begin(); it != _sessions.end();) {
auto session = *it;
@@ -381,7 +381,7 @@ void WiredTigerSessionCache::closeAll() {
SessionCache swap;
{
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
_epoch.fetchAndAdd(1);
_sessions.swap(swap);
}
@@ -401,7 +401,7 @@ UniqueWiredTigerSession WiredTigerSessionCache::getSession() {
invariant(!(_shuttingDown.loadRelaxed() & kShuttingDownMask));
{
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
if (!_sessions.empty()) {
// Get the most recently used session so that if we discard sessions, we're
// discarding older ones
@@ -468,7 +468,7 @@ void WiredTigerSessionCache::releaseSession(WiredTigerSession* session) {
session->setIdleExpireTime(_clockSource->now());
if (session->_getEpoch() == currentEpoch) { // check outside of lock to reduce contention
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
if (session->_getEpoch() == _epoch.load()) { // recheck inside the lock for correctness
returnedToCache = true;
_sessions.push_back(session);
@@ -485,7 +485,7 @@ void WiredTigerSessionCache::releaseSession(WiredTigerSession* session) {
void WiredTigerSessionCache::setJournalListener(JournalListener* jl) {
- stdx::unique_lock<stdx::mutex> lk(_journalListenerMutex);
+ stdx::unique_lock<Latch> lk(_journalListenerMutex);
_journalListener = jl;
}
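waitUntilDurable() above is a double-checked critical section: a read of the AtomicWord generation counter outside _lastSyncMutex, then a recheck inside it, so concurrent callers piggyback on a sync another thread already performed; releaseSession() applies the same check-then-recheck idea to its epoch. Condensed into a sketch (SyncOnce is illustrative):

    #include "mongo/platform/atomic_word.h"
    #include "mongo/platform/mutex.h"

    namespace mongo {

    class SyncOnce {
    public:
        void sync() {
            const unsigned start = _lastSyncTime.load();
            // Critical section: only one thread at a time attempts the sync.
            stdx::unique_lock<Latch> lk(_lastSyncMutex);
            // Writes to _lastSyncTime happen only under the mutex, so a
            // relaxed load is sufficient here.
            const unsigned current = _lastSyncTime.loadRelaxed();
            if (current != start) {
                return;  // someone else synced since we read the counter
            }
            _lastSyncTime.store(current + 1);
            // ... perform the actual flush here ...
        }

    private:
        AtomicWord<unsigned> _lastSyncTime{0};
        Mutex _lastSyncMutex = MONGO_MAKE_LATCH("SyncOnce::_lastSyncMutex");
    };

    }  // namespace mongo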
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
index 72b55e311ed..9a94f175cdc 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
@@ -37,7 +37,7 @@
#include "mongo/db/storage/journal_listener.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/spin_lock.h"
namespace mongo {
@@ -323,7 +323,7 @@ private:
AtomicWord<unsigned> _shuttingDown;
static const uint32_t kShuttingDownMask = 1 << 31;
- stdx::mutex _cacheLock;
+ Mutex _cacheLock = MONGO_MAKE_LATCH("WiredTigerSessionCache::_cacheLock");
typedef std::vector<WiredTigerSession*> SessionCache;
SessionCache _sessions;
@@ -335,15 +335,16 @@ private:
// Counter and critical section mutex for waitUntilDurable
AtomicWord<unsigned> _lastSyncTime;
- stdx::mutex _lastSyncMutex;
+ Mutex _lastSyncMutex = MONGO_MAKE_LATCH("WiredTigerSessionCache::_lastSyncMutex");
// Mutex and cond var for waiting on prepare commit or abort.
- stdx::mutex _prepareCommittedOrAbortedMutex;
+ Mutex _prepareCommittedOrAbortedMutex =
+ MONGO_MAKE_LATCH("WiredTigerSessionCache::_prepareCommittedOrAbortedMutex");
stdx::condition_variable _prepareCommittedOrAbortedCond;
AtomicWord<std::uint64_t> _prepareCommitOrAbortCounter{0};
// Protects _journalListener.
- stdx::mutex _journalListenerMutex;
+ Mutex _journalListenerMutex = MONGO_MAKE_LATCH("WiredTigerSessionCache::_journalListenerMutex");
// Notified when we commit to the journal.
JournalListener* _journalListener = &NoOpJournalListener::instance;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
index 56c8161d134..76ddde766e0 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
@@ -64,7 +64,7 @@ WiredTigerSizeStorer::WiredTigerSizeStorer(WT_CONNECTION* conn,
}
WiredTigerSizeStorer::~WiredTigerSizeStorer() {
- stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
+ stdx::lock_guard<Latch> cursorLock(_cursorMutex);
_cursor->close(_cursor);
}
@@ -74,7 +74,7 @@ void WiredTigerSizeStorer::store(StringData uri, std::shared_ptr<SizeInfo> sizeI
return;
// Ordering is important: as the entry may be flushed concurrently, set the dirty flag last.
- stdx::lock_guard<stdx::mutex> lk(_bufferMutex);
+ stdx::lock_guard<Latch> lk(_bufferMutex);
auto& entry = _buffer[uri];
// During rollback it is possible to get a new SizeInfo. In that case clear the dirty flag,
// so the SizeInfo can be destructed without triggering the dirty check invariant.
@@ -90,13 +90,13 @@ void WiredTigerSizeStorer::store(StringData uri, std::shared_ptr<SizeInfo> sizeI
std::shared_ptr<WiredTigerSizeStorer::SizeInfo> WiredTigerSizeStorer::load(StringData uri) const {
{
// Check if we can satisfy the read from the buffer.
- stdx::lock_guard<stdx::mutex> bufferLock(_bufferMutex);
+ stdx::lock_guard<Latch> bufferLock(_bufferMutex);
Buffer::const_iterator it = _buffer.find(uri);
if (it != _buffer.end())
return it->second;
}
- stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
+ stdx::lock_guard<Latch> cursorLock(_cursorMutex);
// Intentionally ignoring return value.
ON_BLOCK_EXIT([&] { _cursor->reset(_cursor); });
@@ -125,7 +125,7 @@ std::shared_ptr<WiredTigerSizeStorer::SizeInfo> WiredTigerSizeStorer::load(Strin
void WiredTigerSizeStorer::flush(bool syncToDisk) {
Buffer buffer;
{
- stdx::lock_guard<stdx::mutex> bufferLock(_bufferMutex);
+ stdx::lock_guard<Latch> bufferLock(_bufferMutex);
_buffer.swap(buffer);
}
@@ -133,13 +133,13 @@ void WiredTigerSizeStorer::flush(bool syncToDisk) {
return; // Nothing to do.
Timer t;
- stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
+ stdx::lock_guard<Latch> cursorLock(_cursorMutex);
{
// On failure, place entries back into the map, unless a newer value already exists.
ON_BLOCK_EXIT([this, &buffer]() {
this->_cursor->reset(this->_cursor);
if (!buffer.empty()) {
- stdx::lock_guard<stdx::mutex> bufferLock(this->_bufferMutex);
+ stdx::lock_guard<Latch> bufferLock(this->_bufferMutex);
for (auto& it : buffer)
this->_buffer.try_emplace(it.first, it.second);
}
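flush() above keeps its critical sections small by swapping the whole buffer out under _bufferMutex and then doing the slow cursor writes under _cursorMutex alone; the two are held together only on the failure path, which respects the header's "acquire _cursorMutex before _bufferMutex" rule. A sketch of the swap-under-lock step (BufferedWriterSketch is hypothetical):

    #include "mongo/platform/mutex.h"
    #include "mongo/util/string_map.h"

    namespace mongo {

    class BufferedWriterSketch {
    public:
        void flush() {
            StringMap<long long> buffer;
            {
                // Steal the contents; writers can refill _buffer while the
                // slow write below proceeds without _bufferMutex held.
                stdx::lock_guard<Latch> lk(_bufferMutex);
                _buffer.swap(buffer);
            }
            if (buffer.empty())
                return;
            // Lock-ordering rule from the header: when both are needed,
            // _cursorMutex is acquired before _bufferMutex, never after.
            stdx::lock_guard<Latch> cursorLock(_cursorMutex);
            // ... write `buffer` out through the cursor ...
        }

    private:
        mutable Mutex _cursorMutex = MONGO_MAKE_LATCH("BufferedWriterSketch::_cursorMutex");
        mutable Mutex _bufferMutex = MONGO_MAKE_LATCH("BufferedWriterSketch::_bufferMutex");
        StringMap<long long> _buffer;
    };

    }  // namespace mongo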
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
index 5db2a4e72bc..79e5725ac81 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
@@ -36,7 +36,7 @@
#include "mongo/base/string_data.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/string_map.h"
namespace mongo {
@@ -95,12 +95,13 @@ private:
const WiredTigerSession _session;
const bool _readOnly;
// Guards _cursor. Acquire *before* _bufferMutex.
- mutable stdx::mutex _cursorMutex;
+ mutable Mutex _cursorMutex = MONGO_MAKE_LATCH("WiredTigerSizeStorer::_cursorMutex");
WT_CURSOR* _cursor; // pointer is const after constructor
using Buffer = StringMap<std::shared_ptr<SizeInfo>>;
- mutable stdx::mutex _bufferMutex; // Guards _buffer
+ mutable Mutex _bufferMutex =
+ MONGO_MAKE_LATCH("WiredTigerSizeStorer::_bufferMutex"); // Guards _buffer
Buffer _buffer;
};
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
index 7216bc1727b..dd7c6ce52b5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
@@ -42,14 +42,14 @@
namespace mongo {
void WiredTigerSnapshotManager::setCommittedSnapshot(const Timestamp& timestamp) {
- stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_committedSnapshotMutex);
invariant(!_committedSnapshot || *_committedSnapshot <= timestamp);
_committedSnapshot = timestamp;
}
void WiredTigerSnapshotManager::setLocalSnapshot(const Timestamp& timestamp) {
- stdx::lock_guard<stdx::mutex> lock(_localSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_localSnapshotMutex);
if (timestamp.isNull())
_localSnapshot = boost::none;
else
@@ -57,12 +57,12 @@ void WiredTigerSnapshotManager::setLocalSnapshot(const Timestamp& timestamp) {
}
boost::optional<Timestamp> WiredTigerSnapshotManager::getLocalSnapshot() {
- stdx::lock_guard<stdx::mutex> lock(_localSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_localSnapshotMutex);
return _localSnapshot;
}
void WiredTigerSnapshotManager::dropAllSnapshots() {
- stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_committedSnapshotMutex);
_committedSnapshot = boost::none;
}
@@ -71,7 +71,7 @@ boost::optional<Timestamp> WiredTigerSnapshotManager::getMinSnapshotForNextCommi
return boost::none;
}
- stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_committedSnapshotMutex);
return _committedSnapshot;
}
@@ -81,7 +81,7 @@ Timestamp WiredTigerSnapshotManager::beginTransactionOnCommittedSnapshot(
RoundUpPreparedTimestamps roundUpPreparedTimestamps) const {
WiredTigerBeginTxnBlock txnOpen(session, prepareConflictBehavior, roundUpPreparedTimestamps);
- stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_committedSnapshotMutex);
uassert(ErrorCodes::ReadConcernMajorityNotAvailableYet,
"Committed view disappeared while running operation",
_committedSnapshot);
@@ -99,7 +99,7 @@ Timestamp WiredTigerSnapshotManager::beginTransactionOnLocalSnapshot(
RoundUpPreparedTimestamps roundUpPreparedTimestamps) const {
WiredTigerBeginTxnBlock txnOpen(session, prepareConflictBehavior, roundUpPreparedTimestamps);
- stdx::lock_guard<stdx::mutex> lock(_localSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_localSnapshotMutex);
invariant(_localSnapshot);
LOG(3) << "begin_transaction on local snapshot " << _localSnapshot.get().toString();
auto status = txnOpen.setReadSnapshot(_localSnapshot.get());
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
index 75c9777a502..1726a7d4c2b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
@@ -35,7 +35,7 @@
#include "mongo/bson/timestamp.h"
#include "mongo/db/storage/snapshot_manager.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -91,11 +91,13 @@ public:
private:
// Snapshot to use for reads at a commit timestamp.
- mutable stdx::mutex _committedSnapshotMutex; // Guards _committedSnapshot.
+ mutable Mutex _committedSnapshotMutex = // Guards _committedSnapshot.
+ MONGO_MAKE_LATCH("WiredTigerSnapshotManager::_committedSnapshotMutex");
boost::optional<Timestamp> _committedSnapshot;
// Snapshot to use for reads at a local stable timestamp.
- mutable stdx::mutex _localSnapshotMutex; // Guards _localSnapshot.
+ mutable Mutex _localSnapshotMutex = // Guards _localSnapshot.
+ MONGO_MAKE_LATCH("WiredTigerSnapshotManager::_localSnapshotMutex");
boost::optional<Timestamp> _localSnapshot;
};
} // namespace mongo
diff --git a/src/mongo/db/time_proof_service.cpp b/src/mongo/db/time_proof_service.cpp
index 756d0397d5f..7e29f0b2254 100644
--- a/src/mongo/db/time_proof_service.cpp
+++ b/src/mongo/db/time_proof_service.cpp
@@ -57,7 +57,7 @@ TimeProofService::Key TimeProofService::generateRandomKey() {
}
TimeProofService::TimeProof TimeProofService::getProof(LogicalTime time, const Key& key) {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
auto timeCeil = LogicalTime(Timestamp(time.asTimestamp().asULL() | kRangeMask));
if (_cache && _cache->hasProof(timeCeil, key)) {
return _cache->_proof;
@@ -82,7 +82,7 @@ Status TimeProofService::checkProof(LogicalTime time, const TimeProof& proof, co
}
void TimeProofService::resetCache() {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
if (_cache) {
_cache = boost::none;
}
diff --git a/src/mongo/db/time_proof_service.h b/src/mongo/db/time_proof_service.h
index f7ca66ab3c5..43b6d97a681 100644
--- a/src/mongo/db/time_proof_service.h
+++ b/src/mongo/db/time_proof_service.h
@@ -32,7 +32,7 @@
#include "mongo/base/status.h"
#include "mongo/crypto/sha1_block.h"
#include "mongo/db/logical_time.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -90,7 +90,7 @@ private:
};
// protects _cache
- stdx::mutex _cacheMutex;
+ Mutex _cacheMutex = MONGO_MAKE_LATCH("TimeProofService::_cacheMutex");
// one-entry cache
boost::optional<CacheEntry> _cache;
diff --git a/src/mongo/db/traffic_recorder.cpp b/src/mongo/db/traffic_recorder.cpp
index 4252cc1cfb5..f13388e1892 100644
--- a/src/mongo/db/traffic_recorder.cpp
+++ b/src/mongo/db/traffic_recorder.cpp
@@ -133,7 +133,7 @@ public:
db.getCursor().write<LittleEndian<uint32_t>>(size);
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_written += size;
}
@@ -150,7 +150,7 @@ public:
} catch (...) {
auto status = exceptionToStatus();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_result = status;
}
});
@@ -173,7 +173,7 @@ public:
// If we couldn't push our packet, begin the process of failing the recording.
_pcqPipe.producer.close();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If the result was otherwise okay, mark it as failed due to the queue blocking. If
// it failed for another reason, don't overwrite that.
@@ -187,7 +187,7 @@ public:
}
Status shutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_inShutdown) {
_inShutdown = true;
@@ -203,7 +203,7 @@ public:
}
BSONObj getStats() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_trafficStats.setBufferedBytes(_pcqPipe.controller.getStats().queueDepth);
_trafficStats.setCurrentFileSize(_written);
return _trafficStats.toBSON();
@@ -251,7 +251,7 @@ private:
MultiProducerSingleConsumerQueue<TrafficRecordingPacket, CostFunction>::Pipe _pcqPipe;
stdx::thread _thread;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("Recording::_mutex");
bool _inShutdown = false;
TrafficRecorderStats _trafficStats;
size_t _written = 0;
@@ -282,7 +282,7 @@ void TrafficRecorder::start(const StartRecordingTraffic& options) {
!gTrafficRecordingDirectory.empty());
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
uassert(ErrorCodes::BadValue, "Traffic recording already active", !_recording);
@@ -299,7 +299,7 @@ void TrafficRecorder::stop() {
_shouldRecord.store(false);
auto recording = [&] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
uassert(ErrorCodes::BadValue, "Traffic recording not active", _recording);
@@ -314,7 +314,7 @@ void TrafficRecorder::observe(const transport::SessionHandle& ts,
const Message& message) {
if (shouldAlwaysRecordTraffic) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_recording) {
StartRecordingTraffic options;
@@ -347,7 +347,7 @@ void TrafficRecorder::observe(const transport::SessionHandle& ts,
}
// We couldn't queue
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If the recording isn't the one we have in hand, bail (it's been ended, or a new one
// has been created).
@@ -360,7 +360,7 @@ void TrafficRecorder::observe(const transport::SessionHandle& ts,
}
std::shared_ptr<TrafficRecorder::Recording> TrafficRecorder::_getCurrentRecording() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _recording;
}
diff --git a/src/mongo/db/traffic_recorder.h b/src/mongo/db/traffic_recorder.h
index 8bd261cbfb4..964b95fdf80 100644
--- a/src/mongo/db/traffic_recorder.h
+++ b/src/mongo/db/traffic_recorder.h
@@ -34,8 +34,8 @@
#include "mongo/db/service_context.h"
#include "mongo/db/traffic_recorder_gen.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/message.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/transport/session.h"
namespace mongo {
@@ -72,7 +72,7 @@ private:
AtomicWord<bool> _shouldRecord;
// The mutex only protects the last recording shared_ptr
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TrafficRecorder::_mutex");
std::shared_ptr<Recording> _recording;
};
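As the comment above notes, TrafficRecorder's _mutex protects only the _recording shared_ptr; accessors copy the pointer out under the lock and then use the Recording lock-free, which is why _getCurrentRecording() stays so small. A sketch of that access pattern (RecorderSketch and RecordingSketch are hypothetical):

    #include <memory>

    #include "mongo/platform/mutex.h"

    namespace mongo {

    struct RecordingSketch {};

    class RecorderSketch {
    public:
        std::shared_ptr<RecordingSketch> current() const {
            // Copy the shared_ptr under the lock; callers then operate on
            // the recording without holding _mutex at all.
            stdx::lock_guard<Latch> lk(_mutex);
            return _recording;
        }

    private:
        mutable Mutex _mutex = MONGO_MAKE_LATCH("RecorderSketch::_mutex");
        std::shared_ptr<RecordingSketch> _recording;
    };

    }  // namespace mongo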
diff --git a/src/mongo/db/ttl_collection_cache.cpp b/src/mongo/db/ttl_collection_cache.cpp
index bb272653d24..d4d9ffe7694 100644
--- a/src/mongo/db/ttl_collection_cache.cpp
+++ b/src/mongo/db/ttl_collection_cache.cpp
@@ -46,19 +46,19 @@ TTLCollectionCache& TTLCollectionCache::get(ServiceContext* ctx) {
}
void TTLCollectionCache::registerTTLInfo(std::pair<UUID, std::string>&& ttlInfo) {
- stdx::lock_guard<stdx::mutex> lock(_ttlInfosLock);
+ stdx::lock_guard<Latch> lock(_ttlInfosLock);
_ttlInfos.push_back(std::move(ttlInfo));
}
void TTLCollectionCache::deregisterTTLInfo(const std::pair<UUID, std::string>& ttlInfo) {
- stdx::lock_guard<stdx::mutex> lock(_ttlInfosLock);
+ stdx::lock_guard<Latch> lock(_ttlInfosLock);
auto collIter = std::find(_ttlInfos.begin(), _ttlInfos.end(), ttlInfo);
fassert(40220, collIter != _ttlInfos.end());
_ttlInfos.erase(collIter);
}
std::vector<std::pair<UUID, std::string>> TTLCollectionCache::getTTLInfos() {
- stdx::lock_guard<stdx::mutex> lock(_ttlInfosLock);
+ stdx::lock_guard<Latch> lock(_ttlInfosLock);
return _ttlInfos;
}
}; // namespace mongo
diff --git a/src/mongo/db/ttl_collection_cache.h b/src/mongo/db/ttl_collection_cache.h
index 761a7f93321..b4b428e005c 100644
--- a/src/mongo/db/ttl_collection_cache.h
+++ b/src/mongo/db/ttl_collection_cache.h
@@ -34,7 +34,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/uuid.h"
/**
@@ -52,7 +52,7 @@ public:
std::vector<std::pair<UUID, std::string>> getTTLInfos();
private:
- stdx::mutex _ttlInfosLock;
+ Mutex _ttlInfosLock = MONGO_MAKE_LATCH("TTLCollectionCache::_ttlInfosLock");
std::vector<std::pair<UUID, std::string>> _ttlInfos; // <CollectionUUID, IndexName>
};
} // namespace mongo
diff --git a/src/mongo/db/views/view_catalog.cpp b/src/mongo/db/views/view_catalog.cpp
index a2de077d063..ef49919f3e0 100644
--- a/src/mongo/db/views/view_catalog.cpp
+++ b/src/mongo/db/views/view_catalog.cpp
@@ -87,7 +87,7 @@ Status ViewCatalog::reload(OperationContext* opCtx, ViewCatalogLookupBehavior lo
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _reload(lk, opCtx, ViewCatalogLookupBehavior::kValidateDurableViews);
}
@@ -147,7 +147,7 @@ Status ViewCatalog::_reload(WithLock,
}
void ViewCatalog::clear() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_viewMap.clear();
_viewGraph.clear();
@@ -172,7 +172,7 @@ void ViewCatalog::iterate(OperationContext* opCtx, ViewIteratorCallback callback
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_requireValidCatalog(lk);
for (auto&& view : _viewMap) {
callback(*view.second);
@@ -389,7 +389,7 @@ Status ViewCatalog::createView(OperationContext* opCtx,
invariant(opCtx->lockState()->isCollectionLockedForMode(
NamespaceString(viewName.db(), NamespaceString::kSystemDotViewsCollectionName), MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (viewName.db() != viewOn.db())
return Status(ErrorCodes::BadValue,
@@ -422,7 +422,7 @@ Status ViewCatalog::modifyView(OperationContext* opCtx,
const BSONArray& pipeline) {
invariant(opCtx->lockState()->isDbLockedForMode(viewName.db(), MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (viewName.db() != viewOn.db())
return Status(ErrorCodes::BadValue,
@@ -461,7 +461,7 @@ Status ViewCatalog::dropView(OperationContext* opCtx, const NamespaceString& vie
invariant(opCtx->lockState()->isCollectionLockedForMode(
NamespaceString(viewName.db(), NamespaceString::kSystemDotViewsCollectionName), MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_requireValidCatalog(lk);
ON_BLOCK_EXIT([this] { _ignoreExternalChange = false; });
@@ -515,7 +515,7 @@ std::shared_ptr<ViewDefinition> ViewCatalog::lookup(OperationContext* opCtx, Str
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_valid && opCtx->getClient()->isFromUserConnection()) {
// We want to avoid lookups on invalid collection names.
if (!NamespaceString::validCollectionName(ns)) {
@@ -537,7 +537,7 @@ std::shared_ptr<ViewDefinition> ViewCatalog::lookupWithoutValidatingDurableViews
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lookup(lk, opCtx, ns, ViewCatalogLookupBehavior::kAllowInvalidDurableViews);
}
@@ -547,7 +547,7 @@ StatusWith<ResolvedView> ViewCatalog::resolveView(OperationContext* opCtx,
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_requireValidCatalog(lock);
diff --git a/src/mongo/db/views/view_catalog.h b/src/mongo/db/views/view_catalog.h
index a8fd9df9e3e..44103a0ed24 100644
--- a/src/mongo/db/views/view_catalog.h
+++ b/src/mongo/db/views/view_catalog.h
@@ -44,7 +44,7 @@
#include "mongo/db/views/resolved_view.h"
#include "mongo/db/views/view.h"
#include "mongo/db/views/view_graph.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/string_map.h"
@@ -199,7 +199,7 @@ private:
*/
void _requireValidCatalog(WithLock);
- stdx::mutex _mutex; // Protects all members.
+ Mutex _mutex = MONGO_MAKE_LATCH("ViewCatalog::_mutex"); // Protects all members.
ViewMap _viewMap;
ViewMap _viewMapBackup;
std::unique_ptr<DurableViewCatalog> _durable;
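ViewCatalog locks _mutex at its public entry points and hands the held lock down to private helpers such as _reload(lk, ...) and _requireValidCatalog(lk); the WithLock parameter, from "mongo/util/concurrency/with_lock.h" included above, turns "caller must hold the mutex" into a compile-time requirement instead of a comment. A sketch of the idiom (CatalogSketch is illustrative):

    #include <string>
    #include <vector>

    #include "mongo/platform/mutex.h"
    #include "mongo/util/concurrency/with_lock.h"

    namespace mongo {

    class CatalogSketch {
    public:
        void clear() {
            stdx::lock_guard<Latch> lk(_mutex);
            _clear(lk);  // the guard converts to WithLock implicitly
        }

    private:
        // The unnamed WithLock parameter documents, in the signature itself,
        // that the caller must already hold _mutex.
        void _clear(WithLock) {
            _entries.clear();
        }

        Mutex _mutex = MONGO_MAKE_LATCH("CatalogSketch::_mutex");
        std::vector<std::string> _entries;
    };

    }  // namespace mongo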