summaryrefslogtreecommitdiff
path: root/src/mongo
diff options
context:
space:
mode:
author: Ben Caimano <ben.caimano@mongodb.com> 2019-11-01 17:24:53 +0000
committer: evergreen <evergreen@mongodb.com> 2019-11-01 17:24:53 +0000
commit: bf5bef47a8e6937b4e0d2c9df3fde3470bdc72c9 (patch)
tree: 8f71a9f272082dd9ee0e471ef5fcb9f19519600d /src/mongo
parent: f210bc645453c05979067c556bf6f2bd43e64134 (diff)
download: mongo-bf5bef47a8e6937b4e0d2c9df3fde3470bdc72c9.tar.gz
SERVER-42165 Replace uses of stdx::mutex with mongo::Mutex
Diffstat (limited to 'src/mongo')
-rw-r--r--src/mongo/base/secure_allocator.cpp4
-rw-r--r--src/mongo/client/authenticate.cpp14
-rw-r--r--src/mongo/client/connection_pool.cpp10
-rw-r--r--src/mongo/client/connection_pool.h6
-rw-r--r--src/mongo/client/connection_string.h8
-rw-r--r--src/mongo/client/connection_string_connect.cpp6
-rw-r--r--src/mongo/client/connpool.cpp32
-rw-r--r--src/mongo/client/connpool.h4
-rw-r--r--src/mongo/client/connpool_integration_test.cpp22
-rw-r--r--src/mongo/client/dbclient_base.cpp2
-rw-r--r--src/mongo/client/dbclient_connection.cpp10
-rw-r--r--src/mongo/client/dbclient_connection.h4
-rw-r--r--src/mongo/client/fetcher.cpp20
-rw-r--r--src/mongo/client/fetcher.h6
-rw-r--r--src/mongo/client/mongo_uri.h2
-rw-r--r--src/mongo/client/remote_command_retry_scheduler.cpp16
-rw-r--r--src/mongo/client/remote_command_retry_scheduler.h4
-rw-r--r--src/mongo/client/remote_command_targeter_mock.cpp6
-rw-r--r--src/mongo/client/remote_command_targeter_mock.h2
-rw-r--r--src/mongo/client/replica_set_change_notifier.cpp8
-rw-r--r--src/mongo/client/replica_set_change_notifier.h4
-rw-r--r--src/mongo/client/replica_set_monitor.cpp32
-rw-r--r--src/mongo/client/replica_set_monitor_internal.h5
-rw-r--r--src/mongo/client/replica_set_monitor_manager.cpp20
-rw-r--r--src/mongo/client/replica_set_monitor_manager.h4
-rw-r--r--src/mongo/client/scram_client_cache.h8
-rw-r--r--src/mongo/db/auth/authorization_manager.cpp2
-rw-r--r--src/mongo/db/auth/authorization_manager.h2
-rw-r--r--src/mongo/db/auth/authorization_manager_impl.cpp20
-rw-r--r--src/mongo/db/auth/authorization_manager_impl.h6
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_local.cpp12
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_local.h4
-rw-r--r--src/mongo/db/auth/user_cache_invalidator_job.cpp12
-rw-r--r--src/mongo/db/background.cpp32
-rw-r--r--src/mongo/db/baton.cpp10
-rw-r--r--src/mongo/db/catalog/collection.cpp8
-rw-r--r--src/mongo/db/catalog/collection.h4
-rw-r--r--src/mongo/db/catalog/collection_catalog.cpp46
-rw-r--r--src/mongo/db/catalog/collection_catalog.h6
-rw-r--r--src/mongo/db/catalog/index_builds_manager.cpp10
-rw-r--r--src/mongo/db/catalog/index_builds_manager.h4
-rw-r--r--src/mongo/db/catalog/index_catalog_entry.h2
-rw-r--r--src/mongo/db/catalog/index_catalog_entry_impl.cpp8
-rw-r--r--src/mongo/db/catalog/index_catalog_entry_impl.h5
-rw-r--r--src/mongo/db/catalog/multi_index_block.cpp6
-rw-r--r--src/mongo/db/catalog/multi_index_block.h4
-rw-r--r--src/mongo/db/catalog/util/partitioned.h2
-rw-r--r--src/mongo/db/collection_index_builds_tracker.cpp3
-rw-r--r--src/mongo/db/collection_index_builds_tracker.h2
-rw-r--r--src/mongo/db/commands/dbhash.cpp2
-rw-r--r--src/mongo/db/commands/fsync.cpp28
-rw-r--r--src/mongo/db/commands/mr.cpp2
-rw-r--r--src/mongo/db/commands/parameters.cpp6
-rw-r--r--src/mongo/db/commands/user_management_commands.cpp6
-rw-r--r--src/mongo/db/commands/validate.cpp6
-rw-r--r--src/mongo/db/concurrency/d_concurrency.cpp8
-rw-r--r--src/mongo/db/concurrency/d_concurrency_bm.cpp6
-rw-r--r--src/mongo/db/concurrency/deferred_writer.cpp4
-rw-r--r--src/mongo/db/concurrency/deferred_writer.h4
-rw-r--r--src/mongo/db/concurrency/flow_control_ticketholder.cpp6
-rw-r--r--src/mongo/db/concurrency/flow_control_ticketholder.h4
-rw-r--r--src/mongo/db/concurrency/lock_manager.h2
-rw-r--r--src/mongo/db/concurrency/lock_state.cpp6
-rw-r--r--src/mongo/db/concurrency/lock_state.h2
-rw-r--r--src/mongo/db/database_index_builds_tracker.cpp2
-rw-r--r--src/mongo/db/database_index_builds_tracker.h2
-rw-r--r--src/mongo/db/default_baton.cpp8
-rw-r--r--src/mongo/db/default_baton.h4
-rw-r--r--src/mongo/db/free_mon/free_mon_controller.cpp18
-rw-r--r--src/mongo/db/free_mon/free_mon_controller.h2
-rw-r--r--src/mongo/db/free_mon/free_mon_controller_test.cpp22
-rw-r--r--src/mongo/db/free_mon/free_mon_message.h8
-rw-r--r--src/mongo/db/free_mon/free_mon_processor.h8
-rw-r--r--src/mongo/db/free_mon/free_mon_queue.cpp8
-rw-r--r--src/mongo/db/free_mon/free_mon_queue.h2
-rw-r--r--src/mongo/db/ftdc/controller.cpp32
-rw-r--r--src/mongo/db/ftdc/controller.h4
-rw-r--r--src/mongo/db/ftdc/controller_test.cpp4
-rw-r--r--src/mongo/db/index/index_build_interceptor.cpp4
-rw-r--r--src/mongo/db/index/index_build_interceptor.h3
-rw-r--r--src/mongo/db/index_builds_coordinator.cpp44
-rw-r--r--src/mongo/db/index_builds_coordinator.h4
-rw-r--r--src/mongo/db/index_builds_coordinator_mongod.cpp12
-rw-r--r--src/mongo/db/keys_collection_cache.cpp10
-rw-r--r--src/mongo/db/keys_collection_cache.h4
-rw-r--r--src/mongo/db/keys_collection_manager.cpp18
-rw-r--r--src/mongo/db/keys_collection_manager.h5
-rw-r--r--src/mongo/db/logical_clock.cpp12
-rw-r--r--src/mongo/db/logical_clock.h4
-rw-r--r--src/mongo/db/logical_session_cache_impl.cpp34
-rw-r--r--src/mongo/db/logical_session_cache_impl.h2
-rw-r--r--src/mongo/db/logical_time_validator.cpp22
-rw-r--r--src/mongo/db/logical_time_validator.h7
-rw-r--r--src/mongo/db/operation_context.cpp2
-rw-r--r--src/mongo/db/operation_context.h2
-rw-r--r--src/mongo/db/operation_context_group.cpp10
-rw-r--r--src/mongo/db/operation_context_group.h4
-rw-r--r--src/mongo/db/operation_context_test.cpp54
-rw-r--r--src/mongo/db/operation_time_tracker.cpp6
-rw-r--r--src/mongo/db/operation_time_tracker.h4
-rw-r--r--src/mongo/db/periodic_runner_job_abort_expired_transactions.h4
-rw-r--r--src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h5
-rw-r--r--src/mongo/db/pipeline/document_source_exchange.cpp10
-rw-r--r--src/mongo/db/pipeline/document_source_exchange.h6
-rw-r--r--src/mongo/db/pipeline/document_source_exchange_test.cpp8
-rw-r--r--src/mongo/db/query/plan_cache.cpp20
-rw-r--r--src/mongo/db/query/plan_cache.h4
-rw-r--r--src/mongo/db/query/query_planner_wildcard_index_test.cpp2
-rw-r--r--src/mongo/db/query/query_settings.cpp10
-rw-r--r--src/mongo/db/query/query_settings.h4
-rw-r--r--src/mongo/db/read_concern_mongod.cpp6
-rw-r--r--src/mongo/db/repl/abstract_async_component.cpp18
-rw-r--r--src/mongo/db/repl/abstract_async_component.h8
-rw-r--r--src/mongo/db/repl/abstract_async_component_test.cpp14
-rw-r--r--src/mongo/db/repl/abstract_oplog_fetcher.cpp20
-rw-r--r--src/mongo/db/repl/abstract_oplog_fetcher.h6
-rw-r--r--src/mongo/db/repl/base_cloner_test_fixture.cpp4
-rw-r--r--src/mongo/db/repl/base_cloner_test_fixture.h4
-rw-r--r--src/mongo/db/repl/bgsync.cpp34
-rw-r--r--src/mongo/db/repl/bgsync.h4
-rw-r--r--src/mongo/db/repl/callback_completion_guard.h10
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change_test.cpp6
-rw-r--r--src/mongo/db/repl/collection_cloner.cpp34
-rw-r--r--src/mongo/db/repl/collection_cloner.h6
-rw-r--r--src/mongo/db/repl/collection_cloner_test.cpp18
-rw-r--r--src/mongo/db/repl/database_cloner.cpp8
-rw-r--r--src/mongo/db/repl/database_cloner.h6
-rw-r--r--src/mongo/db/repl/databases_cloner.cpp4
-rw-r--r--src/mongo/db/repl/databases_cloner.h8
-rw-r--r--src/mongo/db/repl/databases_cloner_test.cpp10
-rw-r--r--src/mongo/db/repl/drop_pending_collection_reaper.cpp10
-rw-r--r--src/mongo/db/repl/drop_pending_collection_reaper.h6
-rw-r--r--src/mongo/db/repl/initial_syncer.cpp60
-rw-r--r--src/mongo/db/repl/initial_syncer.h10
-rw-r--r--src/mongo/db/repl/initial_syncer_test.cpp10
-rw-r--r--src/mongo/db/repl/local_oplog_info.cpp4
-rw-r--r--src/mongo/db/repl/local_oplog_info.h2
-rw-r--r--src/mongo/db/repl/multiapplier.cpp14
-rw-r--r--src/mongo/db/repl/multiapplier.h4
-rw-r--r--src/mongo/db/repl/noop_writer.cpp10
-rw-r--r--src/mongo/db/repl/noop_writer.h4
-rw-r--r--src/mongo/db/repl/oplog_applier.cpp4
-rw-r--r--src/mongo/db/repl/oplog_applier.h4
-rw-r--r--src/mongo/db/repl/oplog_buffer_collection.cpp30
-rw-r--r--src/mongo/db/repl/oplog_buffer_collection.h4
-rw-r--r--src/mongo/db/repl/oplog_buffer_proxy.cpp26
-rw-r--r--src/mongo/db/repl/oplog_buffer_proxy.h6
-rw-r--r--src/mongo/db/repl/oplog_test.cpp22
-rw-r--r--src/mongo/db/repl/replication_consistency_markers_mock.cpp26
-rw-r--r--src/mongo/db/repl/replication_consistency_markers_mock.h8
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.cpp16
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.h9
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_mock.cpp4
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_mock.h5
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp232
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.h12
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp10
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp26
-rw-r--r--src/mongo/db/repl/replication_metrics.cpp74
-rw-r--r--src/mongo/db/repl/replication_metrics.h4
-rw-r--r--src/mongo/db/repl/replication_process.cpp8
-rw-r--r--src/mongo/db/repl/replication_process.h4
-rw-r--r--src/mongo/db/repl/replication_recovery_test.cpp18
-rw-r--r--src/mongo/db/repl/reporter.cpp26
-rw-r--r--src/mongo/db/repl/reporter.h4
-rw-r--r--src/mongo/db/repl/rollback_checker.cpp7
-rw-r--r--src/mongo/db/repl/rollback_checker.h5
-rw-r--r--src/mongo/db/repl/rollback_checker_test.cpp6
-rw-r--r--src/mongo/db/repl/rollback_impl.cpp4
-rw-r--r--src/mongo/db/repl/rollback_impl.h2
-rw-r--r--src/mongo/db/repl/rollback_test_fixture.h18
-rw-r--r--src/mongo/db/repl/scatter_gather_runner.cpp2
-rw-r--r--src/mongo/db/repl/scatter_gather_runner.h4
-rw-r--r--src/mongo/db/repl/storage_interface_impl.cpp2
-rw-r--r--src/mongo/db/repl/storage_interface_mock.cpp14
-rw-r--r--src/mongo/db/repl/storage_interface_mock.h4
-rw-r--r--src/mongo/db/repl/sync_source_feedback.cpp12
-rw-r--r--src/mongo/db/repl/sync_source_feedback.h4
-rw-r--r--src/mongo/db/repl/sync_source_resolver.cpp16
-rw-r--r--src/mongo/db/repl/sync_source_resolver.h4
-rw-r--r--src/mongo/db/repl/sync_tail.cpp18
-rw-r--r--src/mongo/db/repl/sync_tail.h4
-rw-r--r--src/mongo/db/repl/sync_tail_test.cpp8
-rw-r--r--src/mongo/db/repl/task_runner.cpp14
-rw-r--r--src/mongo/db/repl/task_runner.h4
-rw-r--r--src/mongo/db/repl/task_runner_test.cpp54
-rw-r--r--src/mongo/db/repl/topology_coordinator_v1_test.cpp1
-rw-r--r--src/mongo/db/repl_index_build_state.h2
-rw-r--r--src/mongo/db/s/active_migrations_registry.cpp12
-rw-r--r--src/mongo/db/s/active_migrations_registry.h4
-rw-r--r--src/mongo/db/s/active_move_primaries_registry.cpp6
-rw-r--r--src/mongo/db/s/active_move_primaries_registry.h2
-rw-r--r--src/mongo/db/s/active_shard_collection_registry.cpp6
-rw-r--r--src/mongo/db/s/active_shard_collection_registry.h4
-rw-r--r--src/mongo/db/s/balancer/balancer.cpp30
-rw-r--r--src/mongo/db/s/balancer/balancer.h4
-rw-r--r--src/mongo/db/s/balancer/migration_manager.cpp22
-rw-r--r--src/mongo/db/s/balancer/migration_manager.h4
-rw-r--r--src/mongo/db/s/chunk_splitter.cpp6
-rw-r--r--src/mongo/db/s/chunk_splitter.h2
-rw-r--r--src/mongo/db/s/collection_range_deleter.cpp10
-rw-r--r--src/mongo/db/s/collection_sharding_state.cpp6
-rw-r--r--src/mongo/db/s/collection_sharding_state_factory_shard.cpp4
-rw-r--r--src/mongo/db/s/config/namespace_serializer.cpp4
-rw-r--r--src/mongo/db/s/config/namespace_serializer.h4
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager.cpp10
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager.h4
-rw-r--r--src/mongo/db/s/implicit_create_collection.cpp14
-rw-r--r--src/mongo/db/s/metadata_manager.cpp30
-rw-r--r--src/mongo/db/s/metadata_manager.h2
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp28
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy.h6
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp28
-rw-r--r--src/mongo/db/s/migration_destination_manager.h4
-rw-r--r--src/mongo/db/s/namespace_metadata_change_notifications.cpp8
-rw-r--r--src/mongo/db/s/namespace_metadata_change_notifications.h4
-rw-r--r--src/mongo/db/s/session_catalog_migration_destination.cpp18
-rw-r--r--src/mongo/db/s/session_catalog_migration_destination.h4
-rw-r--r--src/mongo/db/s/session_catalog_migration_source.cpp22
-rw-r--r--src/mongo/db/s/session_catalog_migration_source.h7
-rw-r--r--src/mongo/db/s/shard_server_catalog_cache_loader.cpp46
-rw-r--r--src/mongo/db/s/shard_server_catalog_cache_loader.h6
-rw-r--r--src/mongo/db/s/sharding_initialization_mongod.cpp2
-rw-r--r--src/mongo/db/s/sharding_initialization_mongod.h3
-rw-r--r--src/mongo/db/s/sharding_state.cpp10
-rw-r--r--src/mongo/db/s/sharding_state.h4
-rw-r--r--src/mongo/db/s/transaction_coordinator.cpp28
-rw-r--r--src/mongo/db/s/transaction_coordinator.h2
-rw-r--r--src/mongo/db/s/transaction_coordinator_catalog.cpp20
-rw-r--r--src/mongo/db/s/transaction_coordinator_catalog.h4
-rw-r--r--src/mongo/db/s/transaction_coordinator_futures_util.cpp16
-rw-r--r--src/mongo/db/s/transaction_coordinator_futures_util.h14
-rw-r--r--src/mongo/db/s/transaction_coordinator_service.cpp8
-rw-r--r--src/mongo/db/s/transaction_coordinator_service.h2
-rw-r--r--src/mongo/db/s/wait_for_majority_service.cpp2
-rw-r--r--src/mongo/db/s/wait_for_majority_service.h4
-rw-r--r--src/mongo/db/s/wait_for_majority_service_test.cpp10
-rw-r--r--src/mongo/db/server_recovery.cpp6
-rw-r--r--src/mongo/db/server_recovery.h4
-rw-r--r--src/mongo/db/service_context.cpp14
-rw-r--r--src/mongo/db/service_context.h6
-rw-r--r--src/mongo/db/service_context_test_fixture.cpp1
-rw-r--r--src/mongo/db/service_liaison_mock.cpp16
-rw-r--r--src/mongo/db/service_liaison_mock.h4
-rw-r--r--src/mongo/db/service_liaison_mongod.cpp2
-rw-r--r--src/mongo/db/service_liaison_mongod.h2
-rw-r--r--src/mongo/db/service_liaison_mongos.cpp2
-rw-r--r--src/mongo/db/service_liaison_mongos.h2
-rw-r--r--src/mongo/db/session_catalog.cpp18
-rw-r--r--src/mongo/db/session_catalog.h4
-rw-r--r--src/mongo/db/session_catalog_test.cpp4
-rw-r--r--src/mongo/db/session_killer.cpp8
-rw-r--r--src/mongo/db/session_killer.h6
-rw-r--r--src/mongo/db/sessions_collection_config_server.cpp2
-rw-r--r--src/mongo/db/sessions_collection_config_server.h4
-rw-r--r--src/mongo/db/sessions_collection_mock.cpp12
-rw-r--r--src/mongo/db/sessions_collection_mock.h4
-rw-r--r--src/mongo/db/sessions_collection_rs.h1
-rw-r--r--src/mongo/db/snapshot_window_util.cpp8
-rw-r--r--src/mongo/db/stats/server_write_concern_metrics.cpp8
-rw-r--r--src/mongo/db/stats/server_write_concern_metrics.h2
-rw-r--r--src/mongo/db/storage/biggie/biggie_kv_engine.cpp2
-rw-r--r--src/mongo/db/storage/biggie/biggie_kv_engine.h4
-rw-r--r--src/mongo/db/storage/biggie/biggie_record_store.cpp8
-rw-r--r--src/mongo/db/storage/biggie/biggie_record_store.h7
-rw-r--r--src/mongo/db/storage/biggie/biggie_visibility_manager.cpp12
-rw-r--r--src/mongo/db/storage/biggie/biggie_visibility_manager.h3
-rw-r--r--src/mongo/db/storage/durable_catalog_impl.cpp20
-rw-r--r--src/mongo/db/storage/durable_catalog_impl.h4
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp16
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h7
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h2
-rw-r--r--src/mongo/db/storage/flow_control.cpp8
-rw-r--r--src/mongo/db/storage/flow_control.h4
-rw-r--r--src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp12
-rw-r--r--src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h4
-rw-r--r--src/mongo/db/storage/kv/kv_prefix.cpp6
-rw-r--r--src/mongo/db/storage/kv/kv_prefix.h4
-rw-r--r--src/mongo/db/storage/kv/storage_engine_test.cpp12
-rw-r--r--src/mongo/db/storage/mobile/mobile_kv_engine.h6
-rw-r--r--src/mongo/db/storage/mobile/mobile_record_store.cpp16
-rw-r--r--src/mongo/db/storage/mobile/mobile_record_store.h4
-rw-r--r--src/mongo/db/storage/mobile/mobile_session_pool.cpp8
-rw-r--r--src/mongo/db/storage/mobile/mobile_session_pool.h6
-rw-r--r--src/mongo/db/storage/storage_engine_impl.cpp10
-rw-r--r--src/mongo/db/storage/storage_engine_impl.h4
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp48
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h14
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp14
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h7
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp38
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h11
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp4
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h9
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp26
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h11
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp14
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h7
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp14
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h8
-rw-r--r--src/mongo/db/time_proof_service.cpp4
-rw-r--r--src/mongo/db/time_proof_service.h4
-rw-r--r--src/mongo/db/traffic_recorder.cpp22
-rw-r--r--src/mongo/db/traffic_recorder.h4
-rw-r--r--src/mongo/db/ttl_collection_cache.cpp6
-rw-r--r--src/mongo/db/ttl_collection_cache.h6
-rw-r--r--src/mongo/db/views/view_catalog.cpp18
-rw-r--r--src/mongo/db/views/view_catalog.h4
-rw-r--r--src/mongo/dbtests/documentsourcetests.cpp6
-rw-r--r--src/mongo/dbtests/framework.cpp2
-rw-r--r--src/mongo/dbtests/mock/mock_conn_registry.cpp8
-rw-r--r--src/mongo/dbtests/mock/mock_conn_registry.h2
-rw-r--r--src/mongo/dbtests/threadedtests.cpp6
-rw-r--r--src/mongo/embedded/index_builds_coordinator_embedded.cpp2
-rw-r--r--src/mongo/embedded/periodic_runner_embedded.cpp24
-rw-r--r--src/mongo/embedded/periodic_runner_embedded.h6
-rw-r--r--src/mongo/executor/async_multicaster.cpp10
-rw-r--r--src/mongo/executor/async_multicaster.h2
-rw-r--r--src/mongo/executor/async_timer_mock.cpp12
-rw-r--r--src/mongo/executor/async_timer_mock.h4
-rw-r--r--src/mongo/executor/connection_pool.cpp2
-rw-r--r--src/mongo/executor/connection_pool.h4
-rw-r--r--src/mongo/executor/connection_pool_tl.cpp6
-rw-r--r--src/mongo/executor/connection_pool_tl.h2
-rw-r--r--src/mongo/executor/egress_tag_closer_manager.cpp10
-rw-r--r--src/mongo/executor/egress_tag_closer_manager.h4
-rw-r--r--src/mongo/executor/network_interface_integration_test.cpp8
-rw-r--r--src/mongo/executor/network_interface_mock.h4
-rw-r--r--src/mongo/executor/network_interface_perf_test.cpp6
-rw-r--r--src/mongo/executor/network_interface_thread_pool.cpp18
-rw-r--r--src/mongo/executor/network_interface_thread_pool.h8
-rw-r--r--src/mongo/executor/network_interface_tl.cpp28
-rw-r--r--src/mongo/executor/network_interface_tl.h4
-rw-r--r--src/mongo/executor/scoped_task_executor.cpp4
-rw-r--r--src/mongo/executor/scoped_task_executor.h2
-rw-r--r--src/mongo/executor/thread_pool_mock.cpp18
-rw-r--r--src/mongo/executor/thread_pool_mock.h10
-rw-r--r--src/mongo/executor/thread_pool_task_executor.cpp50
-rw-r--r--src/mongo/executor/thread_pool_task_executor.h14
-rw-r--r--src/mongo/idl/mutable_observer_registry.h4
-rw-r--r--src/mongo/logger/console.cpp2
-rw-r--r--src/mongo/logger/console.h2
-rw-r--r--src/mongo/logger/log_component_settings.cpp4
-rw-r--r--src/mongo/logger/log_component_settings.h4
-rw-r--r--src/mongo/logger/log_severity_limiter.h6
-rw-r--r--src/mongo/logger/ramlog.cpp4
-rw-r--r--src/mongo/logger/ramlog.h4
-rw-r--r--src/mongo/logger/rotatable_file_writer.h6
-rw-r--r--src/mongo/s/balancer_configuration.cpp12
-rw-r--r--src/mongo/s/balancer_configuration.h5
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_mock.cpp40
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_mock.h4
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager.cpp12
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager.h4
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager_test.cpp44
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.cpp4
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.h4
-rw-r--r--src/mongo/s/catalog_cache.cpp28
-rw-r--r--src/mongo/s/catalog_cache.h4
-rw-r--r--src/mongo/s/chunk_writes_tracker.cpp2
-rw-r--r--src/mongo/s/chunk_writes_tracker.h4
-rw-r--r--src/mongo/s/client/rs_local_client.cpp4
-rw-r--r--src/mongo/s/client/rs_local_client.h4
-rw-r--r--src/mongo/s/client/shard_connection.cpp8
-rw-r--r--src/mongo/s/client/shard_registry.cpp30
-rw-r--r--src/mongo/s/client/shard_registry.h6
-rw-r--r--src/mongo/s/client/shard_remote.cpp4
-rw-r--r--src/mongo/s/client/shard_remote.h5
-rw-r--r--src/mongo/s/client/version_manager.cpp10
-rw-r--r--src/mongo/s/cluster_identity_loader.cpp6
-rw-r--r--src/mongo/s/cluster_identity_loader.h4
-rw-r--r--src/mongo/s/cluster_last_error_info.cpp8
-rw-r--r--src/mongo/s/cluster_last_error_info.h6
-rw-r--r--src/mongo/s/grid.cpp8
-rw-r--r--src/mongo/s/grid.h4
-rw-r--r--src/mongo/s/query/async_results_merger.cpp26
-rw-r--r--src/mongo/s/query/async_results_merger.h4
-rw-r--r--src/mongo/s/query/blocking_results_merger_test.cpp6
-rw-r--r--src/mongo/s/query/cluster_cursor_manager.cpp30
-rw-r--r--src/mongo/s/query/cluster_cursor_manager.h8
-rw-r--r--src/mongo/s/query/establish_cursors.h2
-rw-r--r--src/mongo/s/router_transactions_metrics.cpp4
-rw-r--r--src/mongo/s/router_transactions_metrics.h2
-rw-r--r--src/mongo/s/sharding_task_executor.h2
-rw-r--r--src/mongo/s/sharding_task_executor_pool_controller.h4
-rw-r--r--src/mongo/scripting/deadline_monitor.h15
-rw-r--r--src/mongo/scripting/deadline_monitor_test.cpp6
-rw-r--r--src/mongo/scripting/engine.cpp8
-rw-r--r--src/mongo/scripting/mozjs/PosixNSPR.cpp14
-rw-r--r--src/mongo/scripting/mozjs/countdownlatch.cpp32
-rw-r--r--src/mongo/scripting/mozjs/engine.cpp8
-rw-r--r--src/mongo/scripting/mozjs/engine.h4
-rw-r--r--src/mongo/scripting/mozjs/implscope.cpp16
-rw-r--r--src/mongo/scripting/mozjs/implscope.h2
-rw-r--r--src/mongo/scripting/mozjs/jsthread.cpp8
-rw-r--r--src/mongo/scripting/mozjs/proxyscope.cpp6
-rw-r--r--src/mongo/scripting/mozjs/proxyscope.h4
-rw-r--r--src/mongo/shell/bench.cpp16
-rw-r--r--src/mongo/shell/bench.h6
-rw-r--r--src/mongo/shell/dbshell.cpp2
-rw-r--r--src/mongo/shell/shell_utils.cpp8
-rw-r--r--src/mongo/shell/shell_utils.h6
-rw-r--r--src/mongo/shell/shell_utils_launcher.cpp10
-rw-r--r--src/mongo/shell/shell_utils_launcher.h2
-rw-r--r--src/mongo/stdx/condition_variable.h2
-rw-r--r--src/mongo/stdx/condition_variable_bm.cpp4
-rw-r--r--src/mongo/tools/bridge.cpp6
-rw-r--r--src/mongo/tools/bridge_commands.cpp16
-rw-r--r--src/mongo/tools/bridge_commands.h6
-rw-r--r--src/mongo/transport/baton_asio_linux.h24
-rw-r--r--src/mongo/transport/service_entry_point_impl.h4
-rw-r--r--src/mongo/transport/service_executor_adaptive.cpp22
-rw-r--r--src/mongo/transport/service_executor_adaptive.h15
-rw-r--r--src/mongo/transport/service_executor_adaptive_test.cpp36
-rw-r--r--src/mongo/transport/service_executor_reserved.cpp10
-rw-r--r--src/mongo/transport/service_executor_reserved.h4
-rw-r--r--src/mongo/transport/service_executor_synchronous.cpp2
-rw-r--r--src/mongo/transport/service_executor_synchronous.h4
-rw-r--r--src/mongo/transport/service_executor_test.cpp6
-rw-r--r--src/mongo/transport/service_state_machine.h2
-rw-r--r--src/mongo/transport/service_state_machine_test.cpp6
-rw-r--r--src/mongo/transport/session_asio.h6
-rw-r--r--src/mongo/transport/transport_layer_asio.cpp12
-rw-r--r--src/mongo/transport/transport_layer_asio.h4
-rw-r--r--src/mongo/transport/transport_layer_asio_test.cpp22
-rw-r--r--src/mongo/transport/transport_layer_manager.cpp4
-rw-r--r--src/mongo/transport/transport_layer_manager.h6
-rw-r--r--src/mongo/unittest/barrier.h4
-rw-r--r--src/mongo/unittest/unittest.cpp4
-rw-r--r--src/mongo/util/alarm.cpp14
-rw-r--r--src/mongo/util/alarm.h6
-rw-r--r--src/mongo/util/alarm_runner_background_thread.cpp8
-rw-r--r--src/mongo/util/alarm_runner_background_thread.h2
-rw-r--r--src/mongo/util/background.cpp26
-rw-r--r--src/mongo/util/background_job_test.cpp6
-rw-r--r--src/mongo/util/background_thread_clock_source.h4
-rw-r--r--src/mongo/util/clock_source.h2
-rw-r--r--src/mongo/util/clock_source_mock.cpp1
-rw-r--r--src/mongo/util/clock_source_mock.h2
-rw-r--r--src/mongo/util/concurrency/notification.h14
-rw-r--r--src/mongo/util/concurrency/spin_lock.h4
-rw-r--r--src/mongo/util/concurrency/thread_pool.cpp22
-rw-r--r--src/mongo/util/concurrency/thread_pool.h8
-rw-r--r--src/mongo/util/concurrency/thread_pool_test.cpp18
-rw-r--r--src/mongo/util/concurrency/thread_pool_test_common.cpp8
-rw-r--r--src/mongo/util/concurrency/ticketholder.cpp12
-rw-r--r--src/mongo/util/concurrency/ticketholder.h6
-rw-r--r--src/mongo/util/concurrency/with_lock.h16
-rw-r--r--src/mongo/util/concurrency/with_lock_test.cpp14
-rw-r--r--src/mongo/util/exit.cpp16
-rw-r--r--src/mongo/util/fail_point.h4
-rw-r--r--src/mongo/util/fail_point_test.cpp13
-rw-r--r--src/mongo/util/future_impl.h10
-rw-r--r--src/mongo/util/heap_profiler.cpp6
-rw-r--r--src/mongo/util/interruptible.h8
-rw-r--r--src/mongo/util/invalidating_lru_cache.h12
-rw-r--r--src/mongo/util/net/http_client_curl.cpp8
-rw-r--r--src/mongo/util/net/ssl_manager_openssl.cpp11
-rw-r--r--src/mongo/util/options_parser/options_parser_test.cpp6
-rw-r--r--src/mongo/util/periodic_runner.h2
-rw-r--r--src/mongo/util/periodic_runner_impl.cpp12
-rw-r--r--src/mongo/util/periodic_runner_impl.h4
-rw-r--r--src/mongo/util/periodic_runner_impl_test.cpp68
-rw-r--r--src/mongo/util/processinfo.h2
-rw-r--r--src/mongo/util/producer_consumer_queue.h34
-rw-r--r--src/mongo/util/producer_consumer_queue_test.cpp22
-rw-r--r--src/mongo/util/queue.h34
-rw-r--r--src/mongo/util/signal_handlers_synchronous.cpp4
-rw-r--r--src/mongo/util/stacktrace_windows.cpp2
-rw-r--r--src/mongo/util/synchronized_value.h38
-rw-r--r--src/mongo/util/time_support.h2
-rw-r--r--src/mongo/util/uuid.cpp6
-rw-r--r--src/mongo/watchdog/watchdog.cpp16
-rw-r--r--src/mongo/watchdog/watchdog.h6
-rw-r--r--src/mongo/watchdog/watchdog_test.cpp22
475 files changed, 2484 insertions, 2465 deletions
diff --git a/src/mongo/base/secure_allocator.cpp b/src/mongo/base/secure_allocator.cpp
index 67aae3fbb1b..b076d3e8f63 100644
--- a/src/mongo/base/secure_allocator.cpp
+++ b/src/mongo/base/secure_allocator.cpp
@@ -44,8 +44,8 @@
#endif
#include "mongo/base/init.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -114,7 +114,7 @@ void EnablePrivilege(const wchar_t* name) {
* size, and then raising the working set. This is the same reason that "i++" has race conditions
* across multiple threads.
*/
-stdx::mutex workingSizeMutex;
+stdx::mutex workingSizeMutex; // NOLINT
/**
* There is a minimum gap between the minimum working set size and maximum working set size.
diff --git a/src/mongo/client/authenticate.cpp b/src/mongo/client/authenticate.cpp
index 2576ef86a42..3fb41a28b27 100644
--- a/src/mongo/client/authenticate.cpp
+++ b/src/mongo/client/authenticate.cpp
@@ -42,9 +42,9 @@
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/sasl_command_constants.h"
#include "mongo/db/server_options.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/op_msg_rpc_impls.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/net/ssl_manager.h"
#include "mongo/util/net/ssl_options.h"
@@ -186,13 +186,13 @@ Future<void> authenticateClient(const BSONObj& params,
AuthMongoCRHandler authMongoCR = authMongoCRImpl;
-static stdx::mutex internalAuthKeysMutex;
+static auto internalAuthKeysMutex = MONGO_MAKE_LATCH();
static bool internalAuthSet = false;
static std::vector<std::string> internalAuthKeys;
static BSONObj internalAuthParams;
void setInternalAuthKeys(const std::vector<std::string>& keys) {
- stdx::lock_guard<stdx::mutex> lk(internalAuthKeysMutex);
+ stdx::lock_guard<Latch> lk(internalAuthKeysMutex);
internalAuthKeys = keys;
fassert(50996, internalAuthKeys.size() > 0);
@@ -200,24 +200,24 @@ void setInternalAuthKeys(const std::vector<std::string>& keys) {
}
void setInternalUserAuthParams(BSONObj obj) {
- stdx::lock_guard<stdx::mutex> lk(internalAuthKeysMutex);
+ stdx::lock_guard<Latch> lk(internalAuthKeysMutex);
internalAuthParams = obj.getOwned();
internalAuthKeys.clear();
internalAuthSet = true;
}
bool hasMultipleInternalAuthKeys() {
- stdx::lock_guard<stdx::mutex> lk(internalAuthKeysMutex);
+ stdx::lock_guard<Latch> lk(internalAuthKeysMutex);
return internalAuthSet && internalAuthKeys.size() > 1;
}
bool isInternalAuthSet() {
- stdx::lock_guard<stdx::mutex> lk(internalAuthKeysMutex);
+ stdx::lock_guard<Latch> lk(internalAuthKeysMutex);
return internalAuthSet;
}
BSONObj getInternalAuthParams(size_t idx, const std::string& mechanism) {
- stdx::lock_guard<stdx::mutex> lk(internalAuthKeysMutex);
+ stdx::lock_guard<Latch> lk(internalAuthKeysMutex);
if (!internalAuthSet) {
return BSONObj();
}
diff --git a/src/mongo/client/connection_pool.cpp b/src/mongo/client/connection_pool.cpp
index 643817aa9c6..1abe1b46f92 100644
--- a/src/mongo/client/connection_pool.cpp
+++ b/src/mongo/client/connection_pool.cpp
@@ -68,7 +68,7 @@ ConnectionPool::~ConnectionPool() {
}
void ConnectionPool::cleanUpOlderThan(Date_t now) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
HostConnectionMap::iterator hostConns = _connections.begin();
while (hostConns != _connections.end()) {
@@ -102,7 +102,7 @@ bool ConnectionPool::_shouldKeepConnection(Date_t now, const ConnectionInfo& con
}
void ConnectionPool::closeAllInUseConnections() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (ConnectionList::iterator iter = _inUseConnections.begin(); iter != _inUseConnections.end();
++iter) {
iter->conn->shutdownAndDisallowReconnect();
@@ -127,7 +127,7 @@ void ConnectionPool::_cleanUpStaleHosts_inlock(Date_t now) {
ConnectionPool::ConnectionList::iterator ConnectionPool::acquireConnection(
const HostAndPort& target, Date_t now, Milliseconds timeout) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Clean up connections on stale/unused hosts
_cleanUpStaleHosts_inlock(now);
@@ -218,7 +218,7 @@ ConnectionPool::ConnectionList::iterator ConnectionPool::acquireConnection(
}
void ConnectionPool::releaseConnection(ConnectionList::iterator iter, const Date_t now) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_shouldKeepConnection(now, *iter)) {
_destroyConnection_inlock(&_inUseConnections, iter);
return;
@@ -232,7 +232,7 @@ void ConnectionPool::releaseConnection(ConnectionList::iterator iter, const Date
}
void ConnectionPool::destroyConnection(ConnectionList::iterator iter) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_destroyConnection_inlock(&_inUseConnections, iter);
}
diff --git a/src/mongo/client/connection_pool.h b/src/mongo/client/connection_pool.h
index 5d72f5c25e1..3c796fd1922 100644
--- a/src/mongo/client/connection_pool.h
+++ b/src/mongo/client/connection_pool.h
@@ -32,8 +32,8 @@
#include <map>
#include "mongo/client/dbclient_connection.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/list.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
@@ -67,7 +67,7 @@ public:
const Date_t creationDate;
};
- typedef stdx::list<ConnectionInfo> ConnectionList;
+ typedef std::list<ConnectionInfo> ConnectionList;
typedef stdx::unordered_map<HostAndPort, ConnectionList> HostConnectionMap;
typedef std::map<HostAndPort, Date_t> HostLastUsedMap;
@@ -194,7 +194,7 @@ private:
const int _messagingPortTags;
// Mutex guarding members of the connection pool
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ConnectionPool::_mutex");
// Map from HostAndPort to idle connections.
HostConnectionMap _connections;
diff --git a/src/mongo/client/connection_string.h b/src/mongo/client/connection_string.h
index 29b77ecf9ab..493c916528f 100644
--- a/src/mongo/client/connection_string.h
+++ b/src/mongo/client/connection_string.h
@@ -37,7 +37,7 @@
#include "mongo/base/status_with.h"
#include "mongo/base/string_data.h"
#include "mongo/bson/util/builder.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/net/hostandport.h"
@@ -153,12 +153,12 @@ public:
};
static void setConnectionHook(ConnectionHook* hook) {
- stdx::lock_guard<stdx::mutex> lk(_connectHookMutex);
+ stdx::lock_guard<Latch> lk(_connectHookMutex);
_connectHook = hook;
}
static ConnectionHook* getConnectionHook() {
- stdx::lock_guard<stdx::mutex> lk(_connectHookMutex);
+ stdx::lock_guard<Latch> lk(_connectHookMutex);
return _connectHook;
}
@@ -190,7 +190,7 @@ private:
std::string _string;
std::string _setName;
- static stdx::mutex _connectHookMutex;
+ static Mutex _connectHookMutex;
static ConnectionHook* _connectHook;
};
diff --git a/src/mongo/client/connection_string_connect.cpp b/src/mongo/client/connection_string_connect.cpp
index a5d2832c016..83ec9f6e145 100644
--- a/src/mongo/client/connection_string_connect.cpp
+++ b/src/mongo/client/connection_string_connect.cpp
@@ -44,8 +44,8 @@
namespace mongo {
-stdx::mutex ConnectionString::_connectHookMutex;
-ConnectionString::ConnectionHook* ConnectionString::_connectHook = NULL;
+Mutex ConnectionString::_connectHookMutex = MONGO_MAKE_LATCH();
+ConnectionString::ConnectionHook* ConnectionString::_connectHook = nullptr;
std::unique_ptr<DBClientBase> ConnectionString::connect(StringData applicationName,
std::string& errmsg,
@@ -85,7 +85,7 @@ std::unique_ptr<DBClientBase> ConnectionString::connect(StringData applicationNa
case CUSTOM: {
// Lock in case other things are modifying this at the same time
- stdx::lock_guard<stdx::mutex> lk(_connectHookMutex);
+ stdx::lock_guard<Latch> lk(_connectHookMutex);
// Allow the replacement of connections with other connections - useful for testing.
diff --git a/src/mongo/client/connpool.cpp b/src/mongo/client/connpool.cpp
index d4d58addcfc..bb6d0dfdd99 100644
--- a/src/mongo/client/connpool.cpp
+++ b/src/mongo/client/connpool.cpp
@@ -217,7 +217,7 @@ void PoolForHost::initializeHostName(const std::string& hostName) {
}
}
-void PoolForHost::waitForFreeConnection(int timeout, stdx::unique_lock<stdx::mutex>& lk) {
+void PoolForHost::waitForFreeConnection(int timeout, stdx::unique_lock<Latch>& lk) {
auto condition = [&] { return (numInUse() < _maxInUse || _inShutdown.load()); };
if (timeout > 0) {
@@ -263,7 +263,7 @@ public:
// there are too many connections in this pool to make a new one, block until a
// connection is released.
{
- stdx::unique_lock<stdx::mutex> lk(_this->_mutex);
+ stdx::unique_lock<Latch> lk(_this->_mutex);
PoolForHost& p = _this->_pools[PoolKey(host, timeout)];
if (p.openConnections() >= _this->_maxInUse) {
@@ -307,7 +307,7 @@ DBConnectionPool::DBConnectionPool()
void DBConnectionPool::shutdown() {
if (!_inShutdown.swap(true)) {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
for (auto i = _pools.begin(); i != _pools.end(); i++) {
PoolForHost& p = i->second;
p.shutdown();
@@ -319,7 +319,7 @@ DBClientBase* DBConnectionPool::_get(const string& ident, double socketTimeout)
uassert(ErrorCodes::ShutdownInProgress,
"Can't use connection pool during shutdown",
!globalInShutdownDeprecated());
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
PoolForHost& p = _pools[PoolKey(ident, socketTimeout)];
p.setMaxPoolSize(_maxPoolSize);
p.setSocketTimeout(socketTimeout);
@@ -328,7 +328,7 @@ DBClientBase* DBConnectionPool::_get(const string& ident, double socketTimeout)
}
int DBConnectionPool::openConnections(const string& ident, double socketTimeout) {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
PoolForHost& p = _pools[PoolKey(ident, socketTimeout)];
return p.openConnections();
}
@@ -337,7 +337,7 @@ DBClientBase* DBConnectionPool::_finishCreate(const string& ident,
double socketTimeout,
DBClientBase* conn) {
{
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
PoolForHost& p = _pools[PoolKey(ident, socketTimeout)];
p.setMaxPoolSize(_maxPoolSize);
p.initializeHostName(ident);
@@ -400,13 +400,13 @@ DBClientBase* DBConnectionPool::get(const MongoURI& uri, double socketTimeout) {
}
int DBConnectionPool::getNumAvailableConns(const string& host, double socketTimeout) const {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
auto it = _pools.find(PoolKey(host, socketTimeout));
return (it == _pools.end()) ? 0 : it->second.numAvailable();
}
int DBConnectionPool::getNumBadConns(const string& host, double socketTimeout) const {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
auto it = _pools.find(PoolKey(host, socketTimeout));
return (it == _pools.end()) ? 0 : it->second.getNumBadConns();
}
@@ -424,7 +424,7 @@ void DBConnectionPool::onRelease(DBClientBase* conn) {
void DBConnectionPool::release(const string& host, DBClientBase* c) {
onRelease(c);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
PoolForHost& p = _pools[PoolKey(host, c->getSoTimeout())];
p.done(this, c);
@@ -441,7 +441,7 @@ void DBConnectionPool::decrementEgress(const string& host, DBClientBase* c) {
DBConnectionPool::~DBConnectionPool() {
// Do not log in destruction, because global connection pools get
// destroyed after the logging framework.
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
for (PoolMap::iterator i = _pools.begin(); i != _pools.end(); i++) {
PoolForHost& p = i->second;
p._parentDestroyed = true;
@@ -453,7 +453,7 @@ DBConnectionPool::~DBConnectionPool() {
}
void DBConnectionPool::flush() {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
for (PoolMap::iterator i = _pools.begin(); i != _pools.end(); i++) {
PoolForHost& p = i->second;
p.flush();
@@ -461,7 +461,7 @@ void DBConnectionPool::flush() {
}
void DBConnectionPool::clear() {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
LOG(2) << "Removing connections on all pools owned by " << _name << endl;
for (PoolMap::iterator iter = _pools.begin(); iter != _pools.end(); ++iter) {
iter->second.clear();
@@ -469,7 +469,7 @@ void DBConnectionPool::clear() {
}
void DBConnectionPool::removeHost(const string& host) {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
LOG(2) << "Removing connections from all pools for host: " << host << endl;
for (PoolMap::iterator i = _pools.begin(); i != _pools.end(); ++i) {
const string& poolHost = i->first.ident;
@@ -513,7 +513,7 @@ void DBConnectionPool::onDestroy(DBClientBase* conn) {
void DBConnectionPool::appendConnectionStats(executor::ConnectionPoolStats* stats) const {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (PoolMap::const_iterator i = _pools.begin(); i != _pools.end(); ++i) {
if (i->second.numCreated() == 0)
continue;
@@ -581,7 +581,7 @@ bool DBConnectionPool::isConnectionGood(const string& hostName, DBClientBase* co
}
{
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
PoolForHost& pool = _pools[PoolKey(hostName, conn->getSoTimeout())];
if (pool.isBadSocketCreationTime(conn->getSockCreationMicroSec())) {
return false;
@@ -597,7 +597,7 @@ void DBConnectionPool::taskDoWork() {
{
// we need to get the connections inside the lock
// but we can actually delete them outside
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (PoolMap::iterator i = _pools.begin(); i != _pools.end(); ++i) {
i->second.getStaleConnections(idleThreshold, toDelete);
}
diff --git a/src/mongo/client/connpool.h b/src/mongo/client/connpool.h
index 24e2cd70dd4..87565ffb1de 100644
--- a/src/mongo/client/connpool.h
+++ b/src/mongo/client/connpool.h
@@ -179,7 +179,7 @@ public:
* throw if a free connection cannot be acquired within that amount of
* time. Timeout is in seconds.
*/
- void waitForFreeConnection(int timeout, stdx::unique_lock<stdx::mutex>& lk);
+ void waitForFreeConnection(int timeout, stdx::unique_lock<Latch>& lk);
/**
* Notifies any waiters that there are new connections available.
@@ -392,7 +392,7 @@ private:
typedef std::map<PoolKey, PoolForHost, poolKeyCompare> PoolMap; // servername -> pool
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("DBConnectionPool::_mutex");
std::string _name;
// The maximum number of connections we'll save in the pool per-host
diff --git a/src/mongo/client/connpool_integration_test.cpp b/src/mongo/client/connpool_integration_test.cpp
index 2c07b67107d..8daada412a3 100644
--- a/src/mongo/client/connpool_integration_test.cpp
+++ b/src/mongo/client/connpool_integration_test.cpp
@@ -31,8 +31,8 @@
#include "mongo/client/connpool.h"
#include "mongo/client/global_conn_pool.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/unittest/integration_test.h"
#include "mongo/unittest/unittest.h"
@@ -46,7 +46,7 @@ TEST(ConnectionPoolTest, ConnectionPoolMaxInUseConnectionsTest) {
auto host = fixture.getServers()[0].toString();
stdx::condition_variable cv;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
int counter = 0;
pool.setMaxInUse(2);
@@ -60,7 +60,7 @@ TEST(ConnectionPoolTest, ConnectionPoolMaxInUseConnectionsTest) {
// Try creating a new one, should block until we release one.
stdx::thread t([&] {
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
counter++;
}
@@ -69,7 +69,7 @@ TEST(ConnectionPoolTest, ConnectionPoolMaxInUseConnectionsTest) {
auto conn3 = pool.get(host);
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
counter++;
}
@@ -79,7 +79,7 @@ TEST(ConnectionPoolTest, ConnectionPoolMaxInUseConnectionsTest) {
// First thread should be blocked.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return counter == 1; });
}
@@ -87,7 +87,7 @@ TEST(ConnectionPoolTest, ConnectionPoolMaxInUseConnectionsTest) {
pool.release(host, conn2);
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return counter == 2; });
}
@@ -125,7 +125,7 @@ TEST(ConnectionPoolTest, ConnectionPoolShutdownLogicTest) {
auto host = fixture.getServers()[0].toString();
stdx::condition_variable cv;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
int counter = 0;
pool.setMaxInUse(2);
@@ -139,7 +139,7 @@ TEST(ConnectionPoolTest, ConnectionPoolShutdownLogicTest) {
// Attempt to open a new connection, should block.
stdx::thread t([&] {
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
counter++;
}
@@ -148,7 +148,7 @@ TEST(ConnectionPoolTest, ConnectionPoolShutdownLogicTest) {
ASSERT_THROWS(pool.get(host), AssertionException);
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
counter++;
}
@@ -157,14 +157,14 @@ TEST(ConnectionPoolTest, ConnectionPoolShutdownLogicTest) {
// Wait for new thread to block.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return counter == 1; });
}
// Shut down the pool, this should unblock our waiting connection.
pool.shutdown();
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return counter == 2; });
}
diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp
index 951b3fd58f5..ab36bec7810 100644
--- a/src/mongo/client/dbclient_base.cpp
+++ b/src/mongo/client/dbclient_base.cpp
@@ -56,13 +56,13 @@
#include "mongo/db/wire_version.h"
#include "mongo/executor/remote_command_request.h"
#include "mongo/executor/remote_command_response.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/factory.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata.h"
#include "mongo/rpc/metadata/client_metadata.h"
#include "mongo/rpc/reply_interface.h"
#include "mongo/s/stale_exception.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/mutex.h"
#include "mongo/util/debug_util.h"
diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp
index cbe3aadbd4f..400799735cb 100644
--- a/src/mongo/client/dbclient_connection.cpp
+++ b/src/mongo/client/dbclient_connection.cpp
@@ -60,12 +60,12 @@
#include "mongo/db/wire_version.h"
#include "mongo/executor/remote_command_request.h"
#include "mongo/executor/remote_command_response.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/client_metadata.h"
#include "mongo/s/stale_exception.h"
#include "mongo/stdx/functional.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/mutex.h"
#include "mongo/util/debug_util.h"
@@ -331,7 +331,7 @@ Status DBClientConnection::connectSocketOnly(const HostAndPort& serverAddress) {
}
{
- stdx::lock_guard<stdx::mutex> lk(_sessionMutex);
+ stdx::lock_guard<Latch> lk(_sessionMutex);
if (_stayFailed.load()) {
// This object is still in a failed state. The session we just created will be destroyed
// immediately since we aren't holding on to it.
@@ -400,7 +400,7 @@ void DBClientConnection::_markFailed(FailAction action) {
} else if (action == kReleaseSession) {
transport::SessionHandle destroyedOutsideMutex;
- stdx::lock_guard<stdx::mutex> lk(_sessionMutex);
+ stdx::lock_guard<Latch> lk(_sessionMutex);
_session.swap(destroyedOutsideMutex);
}
}
@@ -452,7 +452,7 @@ void DBClientConnection::setTags(transport::Session::TagMask tags) {
}
void DBClientConnection::shutdownAndDisallowReconnect() {
- stdx::lock_guard<stdx::mutex> lk(_sessionMutex);
+ stdx::lock_guard<Latch> lk(_sessionMutex);
_stayFailed.store(true);
_markFailed(kEndSession);
}
@@ -519,7 +519,7 @@ uint64_t DBClientConnection::getSockCreationMicroSec() const {
}
}
-unsigned long long DBClientConnection::query(stdx::function<void(DBClientCursorBatchIterator&)> f,
+unsigned long long DBClientConnection::query(std::function<void(DBClientCursorBatchIterator&)> f,
const NamespaceStringOrUUID& nsOrUuid,
Query query,
const BSONObj* fieldsToReturn,
diff --git a/src/mongo/client/dbclient_connection.h b/src/mongo/client/dbclient_connection.h
index 13788423545..21e0ae7711a 100644
--- a/src/mongo/client/dbclient_connection.h
+++ b/src/mongo/client/dbclient_connection.h
@@ -43,13 +43,13 @@
#include "mongo/db/write_concern_options.h"
#include "mongo/logger/log_severity.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/message.h"
#include "mongo/rpc/metadata.h"
#include "mongo/rpc/op_msg.h"
#include "mongo/rpc/protocol.h"
#include "mongo/rpc/unique_message.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/transport/message_compressor_manager.h"
#include "mongo/transport/session.h"
#include "mongo/transport/transport_layer.h"
@@ -293,7 +293,7 @@ protected:
// rebind the handle from the owning thread. The thread that owns this DBClientConnection is
// allowed to use the _session without locking the mutex. This mutex also guards writes to
// _stayFailed, although reads are allowed outside the mutex.
- stdx::mutex _sessionMutex;
+ Mutex _sessionMutex = MONGO_MAKE_LATCH("DBClientConnection::_sessionMutex");
transport::SessionHandle _session;
boost::optional<Milliseconds> _socketTimeout;
transport::Session::TagMask _tagMask = transport::Session::kEmptyTagMask;
diff --git a/src/mongo/client/fetcher.cpp b/src/mongo/client/fetcher.cpp
index 8843227ded5..df0bfb41077 100644
--- a/src/mongo/client/fetcher.cpp
+++ b/src/mongo/client/fetcher.cpp
@@ -195,7 +195,7 @@ std::string Fetcher::toString() const {
}
std::string Fetcher::getDiagnosticString() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
str::stream output;
output << "Fetcher";
output << " source: " << _source.toString();
@@ -218,7 +218,7 @@ std::string Fetcher::getDiagnosticString() const {
}
bool Fetcher::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isActive_inlock();
}
@@ -227,7 +227,7 @@ bool Fetcher::_isActive_inlock() const {
}
Status Fetcher::schedule() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
_state = State::kRunning;
@@ -250,7 +250,7 @@ Status Fetcher::schedule() {
}
void Fetcher::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -273,17 +273,17 @@ void Fetcher::shutdown() {
}
void Fetcher::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() { return !_isActive_inlock(); });
}
Fetcher::State Fetcher::getState_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
bool Fetcher::_isShuttingDown() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isShuttingDown_inlock();
}
@@ -292,7 +292,7 @@ bool Fetcher::_isShuttingDown_inlock() const {
}
Status Fetcher::_scheduleGetMore(const BSONObj& cmdObj) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_isShuttingDown_inlock()) {
return Status(ErrorCodes::CallbackCanceled,
"fetcher was shut down after previous batch was processed");
@@ -347,7 +347,7 @@ void Fetcher::_callback(const RemoteCommandCallbackArgs& rcbd, const char* batch
batchData.otherFields.metadata = std::move(rcbd.response.data);
batchData.elapsedMillis = rcbd.response.elapsedMillis.value_or(Milliseconds{0});
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
batchData.first = _first;
_first = false;
}
@@ -416,7 +416,7 @@ void Fetcher::_finishCallback() {
// 'tempWork' must be declared before lock guard 'lk' so that it is destroyed outside the lock.
Fetcher::CallbackFn tempWork;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(State::kComplete != _state);
_state = State::kComplete;
_first = false;
diff --git a/src/mongo/client/fetcher.h b/src/mongo/client/fetcher.h
index 3f877a0c9ec..1a56dc57715 100644
--- a/src/mongo/client/fetcher.h
+++ b/src/mongo/client/fetcher.h
@@ -41,9 +41,9 @@
#include "mongo/db/clientcursor.h"
#include "mongo/db/namespace_string.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
@@ -84,7 +84,7 @@ public:
/**
* Type of a fetcher callback function.
*/
- typedef stdx::function<void(const StatusWith<QueryResponse>&, NextAction*, BSONObjBuilder*)>
+ typedef std::function<void(const StatusWith<QueryResponse>&, NextAction*, BSONObjBuilder*)>
CallbackFn;
/**
@@ -239,7 +239,7 @@ private:
CallbackFn _work;
// Protects member data of this Fetcher.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("Fetcher::_mutex");
mutable stdx::condition_variable _condition;
diff --git a/src/mongo/client/mongo_uri.h b/src/mongo/client/mongo_uri.h
index 89de663d138..816ac10eda2 100644
--- a/src/mongo/client/mongo_uri.h
+++ b/src/mongo/client/mongo_uri.h
@@ -39,7 +39,7 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/util/builder.h"
#include "mongo/client/connection_string.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/transport/transport_layer.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/net/hostandport.h"
diff --git a/src/mongo/client/remote_command_retry_scheduler.cpp b/src/mongo/client/remote_command_retry_scheduler.cpp
index 9aefd9fc902..8f33730abab 100644
--- a/src/mongo/client/remote_command_retry_scheduler.cpp
+++ b/src/mongo/client/remote_command_retry_scheduler.cpp
@@ -78,7 +78,7 @@ RemoteCommandRetryScheduler::~RemoteCommandRetryScheduler() {
}
bool RemoteCommandRetryScheduler::isActive() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isActive_inlock();
}
@@ -87,7 +87,7 @@ bool RemoteCommandRetryScheduler::_isActive_inlock() const {
}
Status RemoteCommandRetryScheduler::startup() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
@@ -113,7 +113,7 @@ Status RemoteCommandRetryScheduler::startup() {
void RemoteCommandRetryScheduler::shutdown() {
executor::TaskExecutor::CallbackHandle remoteCommandCallbackHandle;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -136,12 +136,12 @@ void RemoteCommandRetryScheduler::shutdown() {
}
void RemoteCommandRetryScheduler::join() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condition.wait(lock, [this]() { return !_isActive_inlock(); });
}
std::string RemoteCommandRetryScheduler::toString() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
str::stream output;
output << "RemoteCommandRetryScheduler";
output << " request: " << _request.toString();
@@ -174,7 +174,7 @@ void RemoteCommandRetryScheduler::_remoteCommandCallback(
// Use a lambda to avoid unnecessary lock acquisition when checking conditions for termination.
auto getCurrentAttempt = [this]() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _currentAttempt;
};
@@ -188,7 +188,7 @@ void RemoteCommandRetryScheduler::_remoteCommandCallback(
// TODO(benety): Check cumulative elapsed time of failed responses received against retry
// policy. Requires SERVER-24067.
auto scheduleStatus = [this]() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (State::kShuttingDown == _state) {
return Status(ErrorCodes::CallbackCanceled,
"scheduler was shut down before retrying command");
@@ -213,7 +213,7 @@ void RemoteCommandRetryScheduler::_onComplete(
// RemoteCommandRetryScheduler, we release this function object outside the lock.
_callback = {};
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_isActive_inlock());
_state = State::kComplete;
_condition.notify_all();
diff --git a/src/mongo/client/remote_command_retry_scheduler.h b/src/mongo/client/remote_command_retry_scheduler.h
index b4cfe52ef88..d466a3b217b 100644
--- a/src/mongo/client/remote_command_retry_scheduler.h
+++ b/src/mongo/client/remote_command_retry_scheduler.h
@@ -37,8 +37,8 @@
#include "mongo/base/error_codes.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -150,7 +150,7 @@ private:
Milliseconds _currentUsedMillis{0};
// Protects member data of this scheduler declared after mutex.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RemoteCommandRetryScheduler::_mutex");
mutable stdx::condition_variable _condition;
diff --git a/src/mongo/client/remote_command_targeter_mock.cpp b/src/mongo/client/remote_command_targeter_mock.cpp
index 164b1e593b3..2120725e981 100644
--- a/src/mongo/client/remote_command_targeter_mock.cpp
+++ b/src/mongo/client/remote_command_targeter_mock.cpp
@@ -78,12 +78,12 @@ SemiFuture<std::vector<HostAndPort>> RemoteCommandTargeterMock::findHostsWithMax
}
void RemoteCommandTargeterMock::markHostNotMaster(const HostAndPort& host, const Status& status) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_hostsMarkedDown.insert(host);
}
void RemoteCommandTargeterMock::markHostUnreachable(const HostAndPort& host, const Status& status) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_hostsMarkedDown.insert(host);
}
@@ -105,7 +105,7 @@ void RemoteCommandTargeterMock::setFindHostsReturnValue(
}
std::set<HostAndPort> RemoteCommandTargeterMock::getAndClearMarkedDownHosts() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto hostsMarkedDown = _hostsMarkedDown;
_hostsMarkedDown.clear();
return hostsMarkedDown;
diff --git a/src/mongo/client/remote_command_targeter_mock.h b/src/mongo/client/remote_command_targeter_mock.h
index 3ea39b80d22..603e18eebcd 100644
--- a/src/mongo/client/remote_command_targeter_mock.h
+++ b/src/mongo/client/remote_command_targeter_mock.h
@@ -97,7 +97,7 @@ private:
StatusWith<std::vector<HostAndPort>> _findHostReturnValue;
// Protects _hostsMarkedDown.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RemoteCommandTargeterMock::_mutex");
// HostAndPorts marked not master or unreachable. Meant to verify a code path updates the
// RemoteCommandTargeterMock.
diff --git a/src/mongo/client/replica_set_change_notifier.cpp b/src/mongo/client/replica_set_change_notifier.cpp
index cf6c5b2d90b..d9333f54122 100644
--- a/src/mongo/client/replica_set_change_notifier.cpp
+++ b/src/mongo/client/replica_set_change_notifier.cpp
@@ -56,7 +56,7 @@ void ReplicaSetChangeNotifier::_removeListener(Listener* listener) {
void ReplicaSetChangeNotifier::onFoundSet(const std::string& name) {
LOG(2) << "Signaling found set " << name;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_replicaSetStates.emplace(name, State{});
@@ -73,7 +73,7 @@ void ReplicaSetChangeNotifier::onPossibleSet(ConnectionString connectionString)
const auto& name = connectionString.getSetName();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto state = [&] {
auto& state = _replicaSetStates[name];
@@ -99,7 +99,7 @@ void ReplicaSetChangeNotifier::onConfirmedSet(ConnectionString connectionString,
LOG(2) << "Signaling confirmed set " << connectionString << " with primary " << primary;
const auto& name = connectionString.getSetName();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto state = [&] {
auto& state = _replicaSetStates[name];
@@ -123,7 +123,7 @@ void ReplicaSetChangeNotifier::onConfirmedSet(ConnectionString connectionString,
void ReplicaSetChangeNotifier::onDroppedSet(const std::string& name) {
LOG(2) << "Signaling dropped set " << name;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// If we never singaled the initial possible set, we should not on dropped set
auto it = _replicaSetStates.find(name);
diff --git a/src/mongo/client/replica_set_change_notifier.h b/src/mongo/client/replica_set_change_notifier.h
index de61d5dc504..b79da076c4d 100644
--- a/src/mongo/client/replica_set_change_notifier.h
+++ b/src/mongo/client/replica_set_change_notifier.h
@@ -35,7 +35,7 @@
#include "mongo/client/connection_string.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/functional.h"
@@ -102,7 +102,7 @@ private:
void _addListener(Listener* listener);
void _removeListener(Listener* listener);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ReplicaSetChangeNotifier::_mutex");
std::vector<Listener*> _listeners;
stdx::unordered_map<Key, State> _replicaSetStates;
};
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index dfbf8e59a3f..73cea67539c 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -45,8 +45,8 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/bson_extract_optime.h"
#include "mongo/db/server_options.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/background.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/exit.h"
@@ -203,7 +203,7 @@ void ReplicaSetMonitor::init() {
_state->init();
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
_scheduleRefresh(_state->now(), lk);
}
@@ -245,7 +245,7 @@ void ReplicaSetMonitor::_scheduleRefresh(Date_t when, WithLock) {
}
void ReplicaSetMonitor::_doScheduledRefresh(const CallbackHandle& currentHandle) {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
if (currentHandle != _refresherHandle)
return; // We've been replaced!
@@ -294,7 +294,7 @@ Future<std::vector<HostAndPort>> ReplicaSetMonitor::_getHostsOrRefresh(
}
// Fast path, for the failure-free case
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
auto out = _state->getMatchingHosts(criteria);
if (!out.empty())
return {std::move(out)};
@@ -329,7 +329,7 @@ HostAndPort ReplicaSetMonitor::getMasterOrUassert() {
}
void ReplicaSetMonitor::failedHost(const HostAndPort& host, const Status& status) {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
Node* node = _state->findNode(host);
if (node)
node->markFailed(status);
@@ -337,19 +337,19 @@ void ReplicaSetMonitor::failedHost(const HostAndPort& host, const Status& status
}
bool ReplicaSetMonitor::isPrimary(const HostAndPort& host) const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
Node* node = _state->findNode(host);
return node ? node->isMaster : false;
}
bool ReplicaSetMonitor::isHostUp(const HostAndPort& host) const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
Node* node = _state->findNode(host);
return node ? node->isUp : false;
}
int ReplicaSetMonitor::getMinWireVersion() const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
int minVersion = 0;
for (const auto& host : _state->nodes) {
if (host.isUp) {
@@ -361,7 +361,7 @@ int ReplicaSetMonitor::getMinWireVersion() const {
}
int ReplicaSetMonitor::getMaxWireVersion() const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
int maxVersion = std::numeric_limits<int>::max();
for (const auto& host : _state->nodes) {
if (host.isUp) {
@@ -378,7 +378,7 @@ std::string ReplicaSetMonitor::getName() const {
}
std::string ReplicaSetMonitor::getServerAddress() const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
// We return our setUri until first confirmation
return _state->seedConnStr.isValid() ? _state->seedConnStr.toString()
: _state->setUri.connectionString().toString();
@@ -390,7 +390,7 @@ const MongoURI& ReplicaSetMonitor::getOriginalUri() const {
}
bool ReplicaSetMonitor::contains(const HostAndPort& host) const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
return _state->seedNodes.count(host);
}
@@ -422,7 +422,7 @@ ReplicaSetChangeNotifier& ReplicaSetMonitor::getNotifier() {
// TODO move to correct order with non-statics before pushing
void ReplicaSetMonitor::appendInfo(BSONObjBuilder& bsonObjBuilder, bool forFTDC) const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
BSONObjBuilder monitorInfo(bsonObjBuilder.subobjStart(getName()));
if (forFTDC) {
@@ -467,7 +467,7 @@ void ReplicaSetMonitor::disableRefreshRetries_forTest() {
}
bool ReplicaSetMonitor::isKnownToHaveGoodPrimary() const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
for (const auto& node : _state->nodes) {
if (node.isMaster) {
@@ -483,7 +483,7 @@ void ReplicaSetMonitor::markAsRemoved() {
}
void ReplicaSetMonitor::runScanForMockReplicaSet() {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
Refresher::ensureScanInProgress(_state, lk);
// This function should only be called from tests using MockReplicaSet and they should use the
@@ -551,7 +551,7 @@ void Refresher::scheduleIsMaster(const HostAndPort& host, WithLock withLock) {
std::move(request),
[copy = *this, host, timer = Timer()](
const executor::TaskExecutor::RemoteCommandCallbackArgs& result) mutable {
- stdx::lock_guard<stdx::mutex> lk(copy._set->mutex);
+ stdx::lock_guard<Latch> lk(copy._set->mutex);
// Ignore the reply and return if we are no longer the current scan. This might
// happen if it was decided that the host we were contacting isn't part of the
// set.
@@ -1299,7 +1299,7 @@ void SetState::init() {
void SetState::drop() {
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
currentScan.reset();
notify(/*finishedScan*/ true);
}
diff --git a/src/mongo/client/replica_set_monitor_internal.h b/src/mongo/client/replica_set_monitor_internal.h
index 67cc9ac73ab..4c6320da554 100644
--- a/src/mongo/client/replica_set_monitor_internal.h
+++ b/src/mongo/client/replica_set_monitor_internal.h
@@ -44,9 +44,9 @@
#include "mongo/client/read_preference.h"
#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/jsobj.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
@@ -216,7 +216,8 @@ public:
ReplicaSetChangeNotifier* const notifier;
executor::TaskExecutor* const executor;
- stdx::mutex mutex; // You must hold this to access any member below.
+ // You must hold this to access any member below.
+ mutable Mutex mutex = MONGO_MAKE_LATCH("SetState::mutex");
// For starting scans
std::set<HostAndPort> seedNodes; // updated whenever a master reports set membership changes
diff --git a/src/mongo/client/replica_set_monitor_manager.cpp b/src/mongo/client/replica_set_monitor_manager.cpp
index 2e5e9567fb6..9b9982c7e2d 100644
--- a/src/mongo/client/replica_set_monitor_manager.cpp
+++ b/src/mongo/client/replica_set_monitor_manager.cpp
@@ -43,9 +43,9 @@
#include "mongo/executor/task_executor.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/executor/thread_pool_task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/metadata/egress_metadata_hook_list.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/map_util.h"
@@ -69,7 +69,7 @@ ReplicaSetMonitorManager::~ReplicaSetMonitorManager() {
}
shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getMonitor(StringData setName) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (auto monitor = _monitors[setName].lock()) {
return monitor;
@@ -106,7 +106,7 @@ shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getOrCreateMonitor(
const ConnectionString& connStr) {
invariant(connStr.type() == ConnectionString::SET);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_setupTaskExecutorInLock(connStr.toString());
auto setName = connStr.getSetName();
auto monitor = _monitors[setName].lock();
@@ -126,7 +126,7 @@ shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getOrCreateMonitor(
shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getOrCreateMonitor(const MongoURI& uri) {
invariant(uri.type() == ConnectionString::SET);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_setupTaskExecutorInLock(uri.toString());
const auto& setName = uri.getSetName();
auto monitor = _monitors[setName].lock();
@@ -146,7 +146,7 @@ shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getOrCreateMonitor(const
vector<string> ReplicaSetMonitorManager::getAllSetNames() {
vector<string> allNames;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& entry : _monitors) {
allNames.push_back(entry.first);
@@ -156,7 +156,7 @@ vector<string> ReplicaSetMonitorManager::getAllSetNames() {
}
void ReplicaSetMonitorManager::removeMonitor(StringData setName) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ReplicaSetMonitorsMap::const_iterator it = _monitors.find(setName);
if (it != _monitors.end()) {
if (auto monitor = it->second.lock()) {
@@ -170,7 +170,7 @@ void ReplicaSetMonitorManager::removeMonitor(StringData setName) {
void ReplicaSetMonitorManager::shutdown() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_taskExecutor || _isShutdown) {
return;
}
@@ -184,7 +184,7 @@ void ReplicaSetMonitorManager::shutdown() {
void ReplicaSetMonitorManager::removeAllMonitors() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_monitors = ReplicaSetMonitorsMap();
if (!_taskExecutor || _isShutdown) {
return;
@@ -198,7 +198,7 @@ void ReplicaSetMonitorManager::removeAllMonitors() {
_taskExecutor.reset();
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_isShutdown = false;
}
}
@@ -232,7 +232,7 @@ ReplicaSetChangeNotifier& ReplicaSetMonitorManager::getNotifier() {
}
bool ReplicaSetMonitorManager::isShutdown() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isShutdown;
}
diff --git a/src/mongo/client/replica_set_monitor_manager.h b/src/mongo/client/replica_set_monitor_manager.h
index 4e037a70f91..1bf6cdee79e 100644
--- a/src/mongo/client/replica_set_monitor_manager.h
+++ b/src/mongo/client/replica_set_monitor_manager.h
@@ -34,7 +34,7 @@
#include "mongo/client/replica_set_change_notifier.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/string_map.h"
namespace mongo {
@@ -104,7 +104,7 @@ private:
using ReplicaSetMonitorsMap = StringMap<std::weak_ptr<ReplicaSetMonitor>>;
// Protects access to the replica set monitors
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ReplicaSetMonitorManager::_mutex");
// Executor for monitoring replica sets.
std::unique_ptr<executor::TaskExecutor> _taskExecutor;
diff --git a/src/mongo/client/scram_client_cache.h b/src/mongo/client/scram_client_cache.h
index f671cd88fe0..fb43d76b622 100644
--- a/src/mongo/client/scram_client_cache.h
+++ b/src/mongo/client/scram_client_cache.h
@@ -32,7 +32,7 @@
#include <string>
#include "mongo/crypto/mechanism_scram.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/net/hostandport.h"
@@ -76,7 +76,7 @@ public:
*/
scram::Secrets<HashBlock> getCachedSecrets(
const HostAndPort& target, const scram::Presecrets<HashBlock>& presecrets) const {
- const stdx::lock_guard<stdx::mutex> lock(_hostToSecretsMutex);
+ const stdx::lock_guard<Latch> lock(_hostToSecretsMutex);
// Search the cache for a record associated with the host we're trying to connect to.
auto foundSecret = _hostToSecrets.find(target);
@@ -102,7 +102,7 @@ public:
void setCachedSecrets(HostAndPort target,
scram::Presecrets<HashBlock> presecrets,
scram::Secrets<HashBlock> secrets) {
- const stdx::lock_guard<stdx::mutex> lock(_hostToSecretsMutex);
+ const stdx::lock_guard<Latch> lock(_hostToSecretsMutex);
typename HostToSecretsMap::iterator it;
bool insertionSuccessful;
@@ -117,7 +117,7 @@ public:
}
private:
- mutable stdx::mutex _hostToSecretsMutex;
+ mutable Mutex _hostToSecretsMutex = MONGO_MAKE_LATCH("SCRAMClientCache::_hostToSecretsMutex");
HostToSecretsMap _hostToSecrets;
};
diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp
index 0e4851d2b72..6db233eda01 100644
--- a/src/mongo/db/auth/authorization_manager.cpp
+++ b/src/mongo/db/auth/authorization_manager.cpp
@@ -57,8 +57,8 @@
#include "mongo/db/global_settings.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/compiler.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/auth/authorization_manager.h b/src/mongo/db/auth/authorization_manager.h
index 0646954b6ae..2f9b4057372 100644
--- a/src/mongo/db/auth/authorization_manager.h
+++ b/src/mongo/db/auth/authorization_manager.h
@@ -48,9 +48,9 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/server_options.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
diff --git a/src/mongo/db/auth/authorization_manager_impl.cpp b/src/mongo/db/auth/authorization_manager_impl.cpp
index 12efe81308e..91d0f8d22a9 100644
--- a/src/mongo/db/auth/authorization_manager_impl.cpp
+++ b/src/mongo/db/auth/authorization_manager_impl.cpp
@@ -62,8 +62,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/mongod_options.h"
#include "mongo/platform/compiler.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -116,7 +116,7 @@ class PinnedUserSetParameter {
public:
void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) const {
BSONArrayBuilder sub(b.subarrayStart(name));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& username : _pinnedUsersList) {
BSONObjBuilder nameObj(sub.subobjStart());
nameObj << AuthorizationManager::USER_NAME_FIELD_NAME << username.getUser()
@@ -139,7 +139,7 @@ public:
return status;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_pinnedUsersList = out;
auto authzManager = _authzManager;
if (!authzManager) {
@@ -172,7 +172,7 @@ public:
return status;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_pinnedUsersList = out;
auto authzManager = _authzManager;
if (!authzManager) {
@@ -184,7 +184,7 @@ public:
}
void setAuthzManager(AuthorizationManager* authzManager) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_authzManager = authzManager;
_authzManager->updatePinnedUsersList(std::move(_pinnedUsersList));
}
@@ -201,7 +201,7 @@ private:
}
AuthorizationManager* _authzManager = nullptr;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("PinnedUserSetParameter::_mutex");
std::vector<UserName> _pinnedUsersList;
} authorizationManagerPinnedUsers;
@@ -340,7 +340,7 @@ private:
bool _isThisGuardInFetchPhase;
AuthorizationManagerImpl* _authzManager;
- stdx::unique_lock<stdx::mutex> _cacheLock;
+ stdx::unique_lock<Latch> _cacheLock;
};
AuthorizationManagerImpl::AuthorizationManagerImpl()
@@ -397,7 +397,7 @@ Status AuthorizationManagerImpl::getAuthorizationVersion(OperationContext* opCtx
}
OID AuthorizationManagerImpl::getCacheGeneration() {
- stdx::lock_guard<stdx::mutex> lk(_cacheWriteMutex);
+ stdx::lock_guard<Latch> lk(_cacheWriteMutex);
return _fetchGeneration;
}
@@ -642,7 +642,7 @@ Status AuthorizationManagerImpl::_fetchUserV2(OperationContext* opCtx,
}
void AuthorizationManagerImpl::updatePinnedUsersList(std::vector<UserName> names) {
- stdx::unique_lock<stdx::mutex> lk(_pinnedUsersMutex);
+ stdx::unique_lock<Latch> lk(_pinnedUsersMutex);
_usersToPin = std::move(names);
bool noUsersToPin = _usersToPin->empty();
_pinnedUsersCond.notify_one();
@@ -665,7 +665,7 @@ void AuthorizationManagerImpl::_pinnedUsersThreadRoutine() noexcept try {
while (true) {
auto opCtx = cc().makeOperationContext();
- stdx::unique_lock<stdx::mutex> lk(_pinnedUsersMutex);
+ stdx::unique_lock<Latch> lk(_pinnedUsersMutex);
const Milliseconds timeout(authorizationManagerPinnedUsersRefreshIntervalMillis.load());
auto waitRes = opCtx->waitForConditionOrInterruptFor(
_pinnedUsersCond, lk, timeout, [&] { return _usersToPin.has_value(); });
diff --git a/src/mongo/db/auth/authorization_manager_impl.h b/src/mongo/db/auth/authorization_manager_impl.h
index 830e6094bce..b0a6903fbf1 100644
--- a/src/mongo/db/auth/authorization_manager_impl.h
+++ b/src/mongo/db/auth/authorization_manager_impl.h
@@ -49,9 +49,9 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/server_options.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/invalidating_lru_cache.h"
@@ -232,7 +232,7 @@ private:
InvalidatingLRUCache<UserName, User, UserCacheInvalidator> _userCache;
- stdx::mutex _pinnedUsersMutex;
+ Mutex _pinnedUsersMutex = MONGO_MAKE_LATCH("AuthorizationManagerImpl::_pinnedUsersMutex");
stdx::condition_variable _pinnedUsersCond;
std::once_flag _pinnedThreadTrackerStarted;
boost::optional<std::vector<UserName>> _usersToPin;
@@ -241,7 +241,7 @@ private:
* Protects _cacheGeneration, _version and _isFetchPhaseBusy. Manipulated
* via CacheGuard.
*/
- stdx::mutex _cacheWriteMutex;
+ Mutex _cacheWriteMutex = MONGO_MAKE_LATCH("AuthorizationManagerImpl::_cacheWriteMutex");
/**
* Current generation of cached data. Updated every time part of the cache gets
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index c5bf63894f5..bc027a48af9 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -204,7 +204,7 @@ void AuthzManagerExternalStateLocal::resolveUserRoles(mutablebson::Document* use
bool isRoleGraphConsistent = false;
{
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_roleGraphMutex);
isRoleGraphConsistent = _roleGraphState == roleGraphStateConsistent;
for (const auto& role : directRoles) {
indirectRoles.insert(role);
@@ -306,7 +306,7 @@ Status AuthzManagerExternalStateLocal::getRoleDescription(
*result = resultDoc.getObject();
return Status::OK();
}
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_roleGraphMutex);
return _getRoleDescription_inlock(roleName, showPrivileges, showRestrictions, result);
}
@@ -326,7 +326,7 @@ Status AuthzManagerExternalStateLocal::getRolesDescription(
return Status::OK();
}
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_roleGraphMutex);
BSONArrayBuilder resultBuilder;
for (const RoleName& role : roles) {
BSONObj roleDoc;
@@ -441,7 +441,7 @@ Status AuthzManagerExternalStateLocal::getRoleDescriptionsForDB(
"Cannot get user fragment for all roles in a database");
}
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_roleGraphMutex);
for (RoleNameIterator it = _roleGraph.getRolesForDatabase(dbname); it.more(); it.next()) {
if (!showBuiltinRoles && _roleGraph.isBuiltinRole(it.get())) {
continue;
@@ -476,7 +476,7 @@ void addRoleFromDocumentOrWarn(RoleGraph* roleGraph, const BSONObj& doc) {
} // namespace
Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lkInitialzeRoleGraph(_roleGraphMutex);
+ stdx::lock_guard<Latch> lkInitialzeRoleGraph(_roleGraphMutex);
_roleGraphState = roleGraphStateInitial;
_roleGraph = RoleGraph();
@@ -562,7 +562,7 @@ private:
void _refreshRoleGraph() {
- stdx::lock_guard<stdx::mutex> lk(_externalState->_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_externalState->_roleGraphMutex);
Status status = _externalState->_roleGraph.handleLogOp(
_opCtx, _op.c_str(), _nss, _o, _o2 ? &*_o2 : NULL);
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.h b/src/mongo/db/auth/authz_manager_external_state_local.h
index 5ddb737b4f5..7ed27ab6cca 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.h
+++ b/src/mongo/db/auth/authz_manager_external_state_local.h
@@ -36,8 +36,8 @@
#include "mongo/db/auth/role_graph.h"
#include "mongo/db/auth/role_name.h"
#include "mongo/db/auth/user_name.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -161,7 +161,7 @@ private:
/**
* Guards _roleGraphState and _roleGraph.
*/
- stdx::mutex _roleGraphMutex;
+ Mutex _roleGraphMutex = MONGO_MAKE_LATCH("AuthzManagerExternalStateLocal::_roleGraphMutex");
};
} // namespace mongo
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index 4132fcbcf1e..b483b8e4d46 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -43,9 +43,9 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/platform/compiler.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/grid.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/background.h"
#include "mongo/util/concurrency/idle_thread_block.h"
#include "mongo/util/duration.h"
@@ -62,7 +62,7 @@ public:
void setInterval(Seconds interval) {
{
- stdx::lock_guard<stdx::mutex> twiddle(_mutex);
+ stdx::lock_guard<Latch> twiddle(_mutex);
MONGO_LOG(5) << "setInterval: old=" << _interval << ", new=" << interval;
_interval = interval;
}
@@ -70,12 +70,12 @@ public:
}
void start() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_last = Date_t::now();
}
void abort() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_inShutdown = true;
_condition.notify_all();
}
@@ -86,7 +86,7 @@ public:
* interval has elapsed.
*/
bool wait() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (true) {
if (_inShutdown) {
return false;
@@ -110,7 +110,7 @@ public:
private:
Seconds _interval;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ThreadSleepInterval::_mutex");
stdx::condition_variable _condition;
bool _inShutdown = false;
Date_t _last;
diff --git a/src/mongo/db/background.cpp b/src/mongo/db/background.cpp
index 0f7b9cd7eea..14da56c02ec 100644
--- a/src/mongo/db/background.cpp
+++ b/src/mongo/db/background.cpp
@@ -35,8 +35,8 @@
#include <string>
#include "mongo/db/operation_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/map_util.h"
@@ -56,13 +56,13 @@ public:
void recordBegin();
int recordEnd();
- void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk);
+ void awaitNoBgOps(stdx::unique_lock<Latch>& lk);
int getOpsInProgCount() const {
return _opsInProgCount;
}
- void waitForAnOpRemoval(stdx::unique_lock<stdx::mutex>& lk, OperationContext* opCtx);
+ void waitForAnOpRemoval(stdx::unique_lock<Latch>& lk, OperationContext* opCtx);
private:
int _opsInProgCount;
@@ -75,7 +75,7 @@ typedef StringMap<std::shared_ptr<BgInfo>> BgInfoMap;
typedef BgInfoMap::const_iterator BgInfoMapIterator;
// Static data for this file is never destroyed.
-stdx::mutex& m = *(new stdx::mutex());
+Mutex& m = *(new Mutex());
BgInfoMap& dbsInProg = *(new BgInfoMap());
BgInfoMap& nsInProg = *(new BgInfoMap());
@@ -94,12 +94,12 @@ int BgInfo::recordEnd() {
return _opsInProgCount;
}
-void BgInfo::awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk) {
+void BgInfo::awaitNoBgOps(stdx::unique_lock<Latch>& lk) {
while (_opsInProgCount > 0)
_noOpsInProg.wait(lk);
}
-void BgInfo::waitForAnOpRemoval(stdx::unique_lock<stdx::mutex>& lk, OperationContext* opCtx) {
+void BgInfo::waitForAnOpRemoval(stdx::unique_lock<Latch>& lk, OperationContext* opCtx) {
int startOpRemovalsCount = _opRemovalsCount;
// Wait for an index build to finish.
@@ -122,7 +122,7 @@ void recordEndAndRemove(BgInfoMap& bgiMap, StringData key) {
}
}
-void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk, BgInfoMap* bgiMap, StringData key) {
+void awaitNoBgOps(stdx::unique_lock<Latch>& lk, BgInfoMap* bgiMap, StringData key) {
std::shared_ptr<BgInfo> bgInfo = mapFindWithDefault(*bgiMap, key, std::shared_ptr<BgInfo>());
if (!bgInfo)
return;
@@ -132,7 +132,7 @@ void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk, BgInfoMap* bgiMap, StringD
} // namespace
void BackgroundOperation::waitUntilAnIndexBuildFinishes(OperationContext* opCtx, StringData ns) {
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
std::shared_ptr<BgInfo> bgInfo = mapFindWithDefault(nsInProg, ns, std::shared_ptr<BgInfo>());
if (!bgInfo) {
// There are no index builds in progress on the collection, so no need to wait.
@@ -142,12 +142,12 @@ void BackgroundOperation::waitUntilAnIndexBuildFinishes(OperationContext* opCtx,
}
bool BackgroundOperation::inProgForDb(StringData db) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
return dbsInProg.find(db) != dbsInProg.end();
}
int BackgroundOperation::numInProgForDb(StringData db) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
std::shared_ptr<BgInfo> bgInfo = mapFindWithDefault(dbsInProg, db, std::shared_ptr<BgInfo>());
if (!bgInfo)
return 0;
@@ -155,7 +155,7 @@ int BackgroundOperation::numInProgForDb(StringData db) {
}
bool BackgroundOperation::inProgForNs(StringData ns) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
return nsInProg.find(ns) != nsInProg.end();
}
@@ -189,29 +189,29 @@ void BackgroundOperation::assertNoBgOpInProgForNs(StringData ns) {
}
void BackgroundOperation::awaitNoBgOpInProgForDb(StringData db) {
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
awaitNoBgOps(lk, &dbsInProg, db);
}
void BackgroundOperation::awaitNoBgOpInProgForNs(StringData ns) {
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
awaitNoBgOps(lk, &nsInProg, ns);
}
BackgroundOperation::BackgroundOperation(StringData ns) : _ns(ns) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
recordBeginAndInsert(dbsInProg, _ns.db());
recordBeginAndInsert(nsInProg, _ns.ns());
}
BackgroundOperation::~BackgroundOperation() {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
recordEndAndRemove(dbsInProg, _ns.db());
recordEndAndRemove(nsInProg, _ns.ns());
}
void BackgroundOperation::dump(std::ostream& ss) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
if (nsInProg.size()) {
ss << "\n<b>Background Jobs in Progress</b>\n";
for (BgInfoMapIterator i = nsInProg.begin(); i != nsInProg.end(); ++i)
diff --git a/src/mongo/db/baton.cpp b/src/mongo/db/baton.cpp
index f648c3e13ed..937a8a61664 100644
--- a/src/mongo/db/baton.cpp
+++ b/src/mongo/db/baton.cpp
@@ -36,7 +36,7 @@
#include "mongo/db/baton.h"
#include "mongo/base/status.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
namespace mongo {
@@ -81,7 +81,7 @@ public:
}
_baton->schedule([this, anchor = shared_from_this()](Status status) {
- _runJobs(stdx::unique_lock(_mutex), status);
+ _runJobs(stdx::unique_lock<Latch>(_mutex), status);
});
}
@@ -114,14 +114,14 @@ public:
}
void detachImpl() noexcept override {
- stdx::unique_lock lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_isDead = true;
_runJobs(std::move(lk), kDetached);
}
private:
- void _runJobs(stdx::unique_lock<stdx::mutex> lk, Status status) {
+ void _runJobs(stdx::unique_lock<Latch> lk, Status status) {
if (status.isOK() && _isDead) {
status = kDetached;
}
@@ -140,7 +140,7 @@ private:
BatonHandle _baton;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SubBaton::_mutex");
bool _isDead = false;
std::vector<Task> _scheduled;
};
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 685632a2f8d..2ed6ac61a8a 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -46,13 +46,13 @@ std::string CompactOptions::toString() const {
//
void CappedInsertNotifier::notifyAll() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
++_version;
_notifier.notify_all();
}
void CappedInsertNotifier::waitUntil(uint64_t prevVersion, Date_t deadline) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (!_dead && prevVersion == _version) {
if (stdx::cv_status::timeout == _notifier.wait_until(lk, deadline.toSystemTimePoint())) {
return;
@@ -61,13 +61,13 @@ void CappedInsertNotifier::waitUntil(uint64_t prevVersion, Date_t deadline) cons
}
void CappedInsertNotifier::kill() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dead = true;
_notifier.notify_all();
}
bool CappedInsertNotifier::isDead() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _dead;
}
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 579da31fb5a..861bb372aa3 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -51,9 +51,9 @@
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/snapshot.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
class CappedCallback;
@@ -136,7 +136,7 @@ private:
mutable stdx::condition_variable _notifier;
// Mutex used with '_notifier'. Protects access to '_version'.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("CappedInsertNotifier::_mutex");
// A counter, incremented on insertion of new data into the capped collection.
//
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index a080463f486..ba77219fe29 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -73,7 +73,7 @@ CollectionCatalog::iterator::iterator(StringData dbName,
: _dbName(dbName), _genNum(genNum), _catalog(&catalog) {
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
- stdx::lock_guard<stdx::mutex> lock(_catalog->_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
_mapIter = _catalog->_orderedCollections.lower_bound(std::make_pair(_dbName, minUuid));
if (_mapIter != _catalog->_orderedCollections.end() && _mapIter->first.first == _dbName) {
@@ -86,7 +86,7 @@ CollectionCatalog::iterator::iterator(
: _mapIter(mapIter) {}
const CollectionCatalog::iterator::value_type CollectionCatalog::iterator::operator*() {
- stdx::lock_guard<stdx::mutex> lock(_catalog->_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
_repositionIfNeeded();
if (_exhausted()) {
return _nullCollection;
@@ -100,7 +100,7 @@ boost::optional<CollectionUUID> CollectionCatalog::iterator::uuid() {
}
CollectionCatalog::iterator CollectionCatalog::iterator::operator++() {
- stdx::lock_guard<stdx::mutex> lock(_catalog->_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
if (!_repositionIfNeeded()) {
_mapIter++; // If the position was not updated, increment iterator to next element.
@@ -125,7 +125,7 @@ CollectionCatalog::iterator CollectionCatalog::iterator::operator++(int) {
}
bool CollectionCatalog::iterator::operator==(const iterator& other) {
- stdx::lock_guard<stdx::mutex> lock(_catalog->_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
if (other._mapIter == _catalog->_orderedCollections.end()) {
return _uuid == boost::none;
@@ -183,7 +183,7 @@ void CollectionCatalog::setCollectionNamespace(OperationContext* opCtx,
// manager locks) are held. The purpose of this function is ensure that we write to the
// Collection's namespace string under '_catalogLock'.
invariant(coll);
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
coll->setNs(toCollection);
@@ -197,7 +197,7 @@ void CollectionCatalog::setCollectionNamespace(OperationContext* opCtx,
addResource(newRid, toCollection.ns());
opCtx->recoveryUnit()->onRollback([this, coll, fromCollection, toCollection] {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
coll->setNs(std::move(fromCollection));
_collections[fromCollection] = _collections[toCollection];
@@ -219,7 +219,7 @@ void CollectionCatalog::onCloseDatabase(OperationContext* opCtx, std::string dbN
void CollectionCatalog::onCloseCatalog(OperationContext* opCtx) {
invariant(opCtx->lockState()->isW());
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
invariant(!_shadowCatalog);
_shadowCatalog.emplace();
for (auto& entry : _catalog)
@@ -228,13 +228,13 @@ void CollectionCatalog::onCloseCatalog(OperationContext* opCtx) {
void CollectionCatalog::onOpenCatalog(OperationContext* opCtx) {
invariant(opCtx->lockState()->isW());
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
invariant(_shadowCatalog);
_shadowCatalog.reset();
}
Collection* CollectionCatalog::lookupCollectionByUUID(CollectionUUID uuid) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
return _lookupCollectionByUUID(lock, uuid);
}
@@ -244,13 +244,13 @@ Collection* CollectionCatalog::_lookupCollectionByUUID(WithLock, CollectionUUID
}
Collection* CollectionCatalog::lookupCollectionByNamespace(const NamespaceString& nss) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto it = _collections.find(nss);
return it == _collections.end() ? nullptr : it->second;
}
boost::optional<NamespaceString> CollectionCatalog::lookupNSSByUUID(CollectionUUID uuid) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto foundIt = _catalog.find(uuid);
if (foundIt != _catalog.end()) {
NamespaceString ns = foundIt->second->ns();
@@ -271,7 +271,7 @@ boost::optional<NamespaceString> CollectionCatalog::lookupNSSByUUID(CollectionUU
boost::optional<CollectionUUID> CollectionCatalog::lookupUUIDByNSS(
const NamespaceString& nss) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
auto it = _orderedCollections.lower_bound(std::make_pair(nss.db().toString(), minUuid));
@@ -290,7 +290,7 @@ bool CollectionCatalog::checkIfCollectionSatisfiable(CollectionUUID uuid,
CollectionInfoFn predicate) const {
invariant(predicate);
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto collection = _lookupCollectionByUUID(lock, uuid);
if (!collection) {
@@ -302,7 +302,7 @@ bool CollectionCatalog::checkIfCollectionSatisfiable(CollectionUUID uuid,
std::vector<CollectionUUID> CollectionCatalog::getAllCollectionUUIDsFromDb(
StringData dbName) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
auto it = _orderedCollections.lower_bound(std::make_pair(dbName.toString(), minUuid));
@@ -318,7 +318,7 @@ std::vector<NamespaceString> CollectionCatalog::getAllCollectionNamesFromDb(
OperationContext* opCtx, StringData dbName) const {
invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_S));
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
std::vector<NamespaceString> ret;
@@ -332,7 +332,7 @@ std::vector<NamespaceString> CollectionCatalog::getAllCollectionNamesFromDb(
std::vector<std::string> CollectionCatalog::getAllDbNames() const {
std::vector<std::string> ret;
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto maxUuid = UUID::parse("FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF").getValue();
auto iter = _orderedCollections.upper_bound(std::make_pair("", maxUuid));
while (iter != _orderedCollections.end()) {
@@ -344,7 +344,7 @@ std::vector<std::string> CollectionCatalog::getAllDbNames() const {
}
void CollectionCatalog::registerCollection(CollectionUUID uuid, std::unique_ptr<Collection> coll) {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
LOG(1) << "Registering collection " << coll->ns() << " with UUID " << uuid;
@@ -369,7 +369,7 @@ void CollectionCatalog::registerCollection(CollectionUUID uuid, std::unique_ptr<
}
std::unique_ptr<Collection> CollectionCatalog::deregisterCollection(CollectionUUID uuid) {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
invariant(_catalog.find(uuid) != _catalog.end());
@@ -404,7 +404,7 @@ RecoveryUnit::Change* CollectionCatalog::makeFinishDropCollectionChange(
}
void CollectionCatalog::deregisterAllCollections() {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
LOG(0) << "Deregistering all the collections";
for (auto& entry : _catalog) {
@@ -422,7 +422,7 @@ void CollectionCatalog::deregisterAllCollections() {
_orderedCollections.clear();
_catalog.clear();
- stdx::lock_guard<stdx::mutex> resourceLock(_resourceLock);
+ stdx::lock_guard<Latch> resourceLock(_resourceLock);
_resourceInformation.clear();
_generationNumber++;
@@ -438,7 +438,7 @@ CollectionCatalog::iterator CollectionCatalog::end() const {
boost::optional<std::string> CollectionCatalog::lookupResourceName(const ResourceId& rid) {
invariant(rid.getType() == RESOURCE_DATABASE || rid.getType() == RESOURCE_COLLECTION);
- stdx::lock_guard<stdx::mutex> lock(_resourceLock);
+ stdx::lock_guard<Latch> lock(_resourceLock);
auto search = _resourceInformation.find(rid);
if (search == _resourceInformation.end()) {
@@ -458,7 +458,7 @@ boost::optional<std::string> CollectionCatalog::lookupResourceName(const Resourc
void CollectionCatalog::removeResource(const ResourceId& rid, const std::string& entry) {
invariant(rid.getType() == RESOURCE_DATABASE || rid.getType() == RESOURCE_COLLECTION);
- stdx::lock_guard<stdx::mutex> lock(_resourceLock);
+ stdx::lock_guard<Latch> lock(_resourceLock);
auto search = _resourceInformation.find(rid);
if (search == _resourceInformation.end()) {
@@ -476,7 +476,7 @@ void CollectionCatalog::removeResource(const ResourceId& rid, const std::string&
void CollectionCatalog::addResource(const ResourceId& rid, const std::string& entry) {
invariant(rid.getType() == RESOURCE_DATABASE || rid.getType() == RESOURCE_COLLECTION);
- stdx::lock_guard<stdx::mutex> lock(_resourceLock);
+ stdx::lock_guard<Latch> lock(_resourceLock);
auto search = _resourceInformation.find(rid);
if (search == _resourceInformation.end()) {
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index d42a94133e5..8ad0128e744 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -240,8 +240,8 @@ private:
Collection* _lookupCollectionByUUID(WithLock, CollectionUUID uuid) const;
const std::vector<CollectionUUID>& _getOrdering_inlock(const StringData& db,
- const stdx::lock_guard<stdx::mutex>&);
- mutable mongo::stdx::mutex _catalogLock;
+ const stdx::lock_guard<Latch>&);
+    mutable mongo::Mutex _catalogLock = MONGO_MAKE_LATCH("CollectionCatalog::_catalogLock");
/**
* When present, indicates that the catalog is in closed state, and contains a map from UUID
@@ -265,7 +265,7 @@ private:
uint64_t _generationNumber;
// Protects _resourceInformation.
- mutable stdx::mutex _resourceLock;
+ mutable Mutex _resourceLock = MONGO_MAKE_LATCH("CollectionCatalog::_resourceLock");
// Mapping from ResourceId to a set of strings that contains collection and database namespaces.
std::map<ResourceId, std::set<std::string>> _resourceInformation;
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index ab263abaa42..87c3b384a9d 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -255,7 +255,7 @@ Status IndexBuildsManager::commitIndexBuild(OperationContext* opCtx,
}
bool IndexBuildsManager::abortIndexBuild(const UUID& buildUUID, const std::string& reason) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto builderIt = _builders.find(buildUUID);
if (builderIt == _builders.end()) {
@@ -272,7 +272,7 @@ bool IndexBuildsManager::abortIndexBuild(const UUID& buildUUID, const std::strin
bool IndexBuildsManager::interruptIndexBuild(OperationContext* opCtx,
const UUID& buildUUID,
const std::string& reason) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto builderIt = _builders.find(buildUUID);
if (builderIt == _builders.end()) {
@@ -307,14 +307,14 @@ void IndexBuildsManager::verifyNoIndexBuilds_forTestOnly() {
}
void IndexBuildsManager::_registerIndexBuild(UUID buildUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
std::shared_ptr<MultiIndexBlock> mib = std::make_shared<MultiIndexBlock>();
invariant(_builders.insert(std::make_pair(buildUUID, mib)).second);
}
void IndexBuildsManager::_unregisterIndexBuild(const UUID& buildUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto builderIt = _builders.find(buildUUID);
invariant(builderIt != _builders.end());
@@ -322,7 +322,7 @@ void IndexBuildsManager::_unregisterIndexBuild(const UUID& buildUUID) {
}
std::shared_ptr<MultiIndexBlock> IndexBuildsManager::_getBuilder(const UUID& buildUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto builderIt = _builders.find(buildUUID);
invariant(builderIt != _builders.end());
return builderIt->second;
diff --git a/src/mongo/db/catalog/index_builds_manager.h b/src/mongo/db/catalog/index_builds_manager.h
index d72915f5d19..c599db20ce8 100644
--- a/src/mongo/db/catalog/index_builds_manager.h
+++ b/src/mongo/db/catalog/index_builds_manager.h
@@ -35,8 +35,8 @@
#include "mongo/db/catalog/multi_index_block.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -205,7 +205,7 @@ private:
std::shared_ptr<MultiIndexBlock> _getBuilder(const UUID& buildUUID);
// Protects the map data structures below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("IndexBuildsManager::_mutex");
// Map of index builders by build UUID. Allows access to the builders so that actions can be
// taken on and information passed to and from index builds.
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index 1b8541dd594..822957dcc61 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -39,8 +39,8 @@
#include "mongo/db/record_id.h"
#include "mongo/db/storage/kv/kv_prefix.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/debug_util.h"
namespace mongo {
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index 234240b0ecb..2c51dbae331 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -73,7 +73,7 @@ IndexCatalogEntryImpl::IndexCatalogEntryImpl(OperationContext* const opCtx,
_isReady = _catalogIsReady(opCtx);
{
- stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
+ stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
_isMultikey.store(_catalogIsMultikey(opCtx, &_indexMultikeyPaths));
_indexTracksPathLevelMultikeyInfo = !_indexMultikeyPaths.empty();
}
@@ -167,7 +167,7 @@ bool IndexCatalogEntryImpl::isMultikey(OperationContext* opCtx) const {
}
MultikeyPaths IndexCatalogEntryImpl::getMultikeyPaths(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
+ stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
auto txnParticipant = TransactionParticipant::get(opCtx);
if (!txnParticipant || !txnParticipant.transactionIsOpen()) {
@@ -207,7 +207,7 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
}
if (_indexTracksPathLevelMultikeyInfo) {
- stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
+ stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
invariant(multikeyPaths.size() == _indexMultikeyPaths.size());
bool newPathIsMultikey = false;
@@ -275,7 +275,7 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
_isMultikey.store(true);
if (_indexTracksPathLevelMultikeyInfo) {
- stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
+ stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
for (size_t i = 0; i < multikeyPaths.size(); ++i) {
_indexMultikeyPaths[i].insert(multikeyPaths[i].begin(), multikeyPaths[i].end());
}
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.h b/src/mongo/db/catalog/index_catalog_entry_impl.h
index cb697a3d177..9698975bc8b 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.h
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.h
@@ -41,7 +41,7 @@
#include "mongo/db/record_id.h"
#include "mongo/db/storage/kv/kv_prefix.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -222,7 +222,8 @@ private:
// Controls concurrent access to '_indexMultikeyPaths'. We acquire this mutex rather than the
// RESOURCE_METADATA lock as a performance optimization so that it is cheaper to detect whether
// there is actually any path-level multikey information to update or not.
- mutable stdx::mutex _indexMultikeyPathsMutex;
+ mutable Mutex _indexMultikeyPathsMutex =
+ MONGO_MAKE_LATCH("IndexCatalogEntryImpl::_indexMultikeyPathsMutex");
// Non-empty only if '_indexTracksPathLevelMultikeyInfo' is true.
//
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index d5b13448d31..7a40332c2a7 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -908,18 +908,18 @@ MultiIndexBlock::State MultiIndexBlock::getState_forTest() const {
}
MultiIndexBlock::State MultiIndexBlock::_getState() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _state;
}
void MultiIndexBlock::_setState(State newState) {
invariant(State::kAborted != newState);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_state = newState;
}
void MultiIndexBlock::_setStateToAbortedIfNotCommitted(StringData reason) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (State::kCommitted == _state) {
return;
}
diff --git a/src/mongo/db/catalog/multi_index_block.h b/src/mongo/db/catalog/multi_index_block.h
index 7d5279280cd..f4618c2e29e 100644
--- a/src/mongo/db/catalog/multi_index_block.h
+++ b/src/mongo/db/catalog/multi_index_block.h
@@ -44,8 +44,8 @@
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/record_id.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/fail_point_service.h"
namespace mongo {
@@ -343,7 +343,7 @@ private:
bool _constraintsChecked = false;
// Protects member variables of this class declared below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MultiIndexBlock::_mutex");
State _state = State::kUninitialized;
std::string _abortReason;
diff --git a/src/mongo/db/catalog/util/partitioned.h b/src/mongo/db/catalog/util/partitioned.h
index c449932f653..e6966e30ce3 100644
--- a/src/mongo/db/catalog/util/partitioned.h
+++ b/src/mongo/db/catalog/util/partitioned.h
@@ -39,7 +39,7 @@
#include <boost/align/aligned_allocator.hpp>
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/with_alignment.h"
diff --git a/src/mongo/db/collection_index_builds_tracker.cpp b/src/mongo/db/collection_index_builds_tracker.cpp
index 9ef10722691..4726e521a8d 100644
--- a/src/mongo/db/collection_index_builds_tracker.cpp
+++ b/src/mongo/db/collection_index_builds_tracker.cpp
@@ -104,8 +104,7 @@ int CollectionIndexBuildsTracker::getNumberOfIndexBuilds(WithLock) const {
return _buildStateByBuildUUID.size();
}
-void CollectionIndexBuildsTracker::waitUntilNoIndexBuildsRemain(
- stdx::unique_lock<stdx::mutex>& lk) {
+void CollectionIndexBuildsTracker::waitUntilNoIndexBuildsRemain(stdx::unique_lock<Latch>& lk) {
_noIndexBuildsRemainCondVar.wait(lk, [&] { return _buildStateByBuildUUID.empty(); });
}
diff --git a/src/mongo/db/collection_index_builds_tracker.h b/src/mongo/db/collection_index_builds_tracker.h
index 5a4ab1eb5f9..81c25fe73d3 100644
--- a/src/mongo/db/collection_index_builds_tracker.h
+++ b/src/mongo/db/collection_index_builds_tracker.h
@@ -96,7 +96,7 @@ public:
/**
* Returns when no index builds remain on this collection.
*/
- void waitUntilNoIndexBuildsRemain(stdx::unique_lock<stdx::mutex>& lk);
+ void waitUntilNoIndexBuildsRemain(stdx::unique_lock<Latch>& lk);
private:
// Maps of index build states on the collection, by build UUID and index name.
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 853ba96c51c..7e63a05f7b8 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -50,7 +50,7 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/transaction_participant.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/md5.hpp"
#include "mongo/util/net/socket_utils.h"
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 5d9bda46bae..9cf33bd6d1c 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -95,7 +95,7 @@ public:
virtual ~FSyncCommand() {
// The FSyncLockThread is owned by the FSyncCommand and accesses FsyncCommand state. It must
// be shut down prior to FSyncCommand destruction.
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
if (_lockCount > 0) {
_lockCount = 0;
releaseFsyncLockSyncCV.notify_one();
@@ -166,7 +166,7 @@ public:
Status status = Status::OK();
{
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
threadStatus = Status::OK();
threadStarted = false;
_lockThread = stdx::make_unique<FSyncLockThread>(allowFsyncFailure);
@@ -199,13 +199,13 @@ public:
// Returns whether we are currently fsyncLocked. For use by callers not holding lockStateMutex.
bool fsyncLocked() {
- stdx::unique_lock<stdx::mutex> lkFsyncLocked(_fsyncLockedMutex);
+ stdx::unique_lock<Latch> lkFsyncLocked(_fsyncLockedMutex);
return _fsyncLocked;
}
// For callers not already holding 'lockStateMutex'.
int64_t getLockCount() {
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
return getLockCount_inLock();
}
@@ -215,17 +215,17 @@ public:
}
void releaseLock() {
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
releaseLock_inLock(lk);
}
- void releaseLock_inLock(stdx::unique_lock<stdx::mutex>& lk) {
+ void releaseLock_inLock(stdx::unique_lock<Latch>& lk) {
invariant(_lockCount >= 1);
_lockCount--;
if (_lockCount == 0) {
{
- stdx::unique_lock<stdx::mutex> lkFsyncLocked(_fsyncLockedMutex);
+ stdx::unique_lock<Latch> lkFsyncLocked(_fsyncLockedMutex);
_fsyncLocked = false;
}
releaseFsyncLockSyncCV.notify_one();
@@ -237,7 +237,7 @@ public:
// Allows for control of lock state change between the fsyncLock and fsyncUnlock commands and
// the FSyncLockThread that maintains the global read lock.
- stdx::mutex lockStateMutex;
+ Mutex lockStateMutex = MONGO_MAKE_LATCH("FSyncCommand::lockStateMutex");
stdx::condition_variable acquireFsyncLockSyncCV;
stdx::condition_variable releaseFsyncLockSyncCV;
@@ -248,11 +248,11 @@ public:
private:
void acquireLock() {
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
_lockCount++;
if (_lockCount == 1) {
- stdx::unique_lock<stdx::mutex> lkFsyncLocked(_fsyncLockedMutex);
+ stdx::unique_lock<Latch> lkFsyncLocked(_fsyncLockedMutex);
_fsyncLocked = true;
}
}
@@ -263,7 +263,7 @@ private:
// number is decremented to 0. May only be accessed while 'lockStateMutex' is held.
int64_t _lockCount = 0;
- stdx::mutex _fsyncLockedMutex;
+ Mutex _fsyncLockedMutex = MONGO_MAKE_LATCH("FSyncCommand::_fsyncLockedMutex");
bool _fsyncLocked = false;
} fsyncCmd;
@@ -302,7 +302,7 @@ public:
Lock::ExclusiveLock lk(opCtx->lockState(), commandMutex);
- stdx::unique_lock<stdx::mutex> stateLock(fsyncCmd.lockStateMutex);
+ stdx::unique_lock<Latch> stateLock(fsyncCmd.lockStateMutex);
auto lockCount = fsyncCmd.getLockCount_inLock();
if (lockCount == 0) {
@@ -340,7 +340,7 @@ bool FSyncLockThread::_shutdownTaskRegistered = false;
void FSyncLockThread::run() {
ThreadClient tc("fsyncLockWorker", getGlobalServiceContext());
stdx::lock_guard<SimpleMutex> lkf(filesLockedFsync);
- stdx::unique_lock<stdx::mutex> lk(fsyncCmd.lockStateMutex);
+ stdx::unique_lock<Latch> lk(fsyncCmd.lockStateMutex);
invariant(fsyncCmd.getLockCount_inLock() == 1);
@@ -357,7 +357,7 @@ void FSyncLockThread::run() {
if (!_shutdownTaskRegistered) {
_shutdownTaskRegistered = true;
registerShutdownTask([&] {
- stdx::unique_lock<stdx::mutex> stateLock(fsyncCmd.lockStateMutex);
+ stdx::unique_lock<Latch> stateLock(fsyncCmd.lockStateMutex);
if (fsyncCmd.getLockCount_inLock() > 0) {
warning() << "Interrupting fsync because the server is shutting down.";
while (fsyncCmd.getLockCount_inLock()) {
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 9367b4180d6..31e848d80fd 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -65,6 +65,7 @@
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/durable_catalog.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/parallel.h"
#include "mongo/s/client/shard_connection.h"
@@ -73,7 +74,6 @@
#include "mongo/s/shard_key_pattern.h"
#include "mongo/s/stale_exception.h"
#include "mongo/scripting/engine.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index 6e1366815a3..8e0802b0c15 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -185,7 +185,7 @@ Status setLogComponentVerbosity(const BSONObj& bsonSettings) {
}
// for automationServiceDescription
-stdx::mutex autoServiceDescriptorMutex;
+Mutex autoServiceDescriptorMutex = MONGO_MAKE_LATCH("autoServiceDescriptorMutex");
std::string autoServiceDescriptorValue;
} // namespace
@@ -436,7 +436,7 @@ Status LogComponentVerbosityServerParameter::setFromString(const std::string& st
void AutomationServiceDescriptorServerParameter::append(OperationContext*,
BSONObjBuilder& builder,
const std::string& name) {
- const stdx::lock_guard<stdx::mutex> lock(autoServiceDescriptorMutex);
+ const stdx::lock_guard<Latch> lock(autoServiceDescriptorMutex);
if (!autoServiceDescriptorValue.empty()) {
builder.append(name, autoServiceDescriptorValue);
}
@@ -458,7 +458,7 @@ Status AutomationServiceDescriptorServerParameter::setFromString(const std::stri
<< " must be no more than " << kMaxSize << " bytes"};
{
- const stdx::lock_guard<stdx::mutex> lock(autoServiceDescriptorMutex);
+ const stdx::lock_guard<Latch> lock(autoServiceDescriptorMutex);
autoServiceDescriptorValue = str;
}
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index a155d443692..b217e20043f 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -66,10 +66,10 @@
#include "mongo/db/ops/write_ops.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/util/icu.h"
#include "mongo/util/log.h"
@@ -558,7 +558,7 @@ Status writeAuthSchemaVersionIfNeeded(OperationContext* opCtx,
return status;
}
-auto getUMCMutex = ServiceContext::declareDecoration<stdx::mutex>();
+auto getUMCMutex = ServiceContext::declareDecoration<Mutex>();
class AuthzLockGuard {
AuthzLockGuard(AuthzLockGuard&) = delete;
@@ -590,7 +590,7 @@ public:
private:
OperationContext* _opCtx;
AuthorizationManager* _authzManager;
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
InvalidationMode _mode;
OID _cacheGeneration;
};
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index fd0b37a8514..171163a8b2a 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -55,7 +55,7 @@ MONGO_FAIL_POINT_DEFINE(validateCmdCollectionNotValid);
namespace {
// Protects `_validationQueue`
-stdx::mutex _validationMutex;
+Mutex _validationMutex = MONGO_MAKE_LATCH("_validationMutex");
// Wakes up `_validationQueue`
stdx::condition_variable _validationNotifier;
@@ -152,7 +152,7 @@ public:
// Only one validation per collection can be in progress, the rest wait in order.
{
- stdx::unique_lock<stdx::mutex> lock(_validationMutex);
+ stdx::unique_lock<Latch> lock(_validationMutex);
try {
while (_validationsInProgress.find(nss.ns()) != _validationsInProgress.end()) {
opCtx->waitForConditionOrInterrupt(_validationNotifier, lock);
@@ -169,7 +169,7 @@ public:
}
ON_BLOCK_EXIT([&] {
- stdx::lock_guard<stdx::mutex> lock(_validationMutex);
+ stdx::lock_guard<Latch> lock(_validationMutex);
_validationsInProgress.erase(nss.ns());
_validationNotifier.notify_all();
});
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 37c841594b0..b1a4d41e555 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -39,8 +39,8 @@
#include "mongo/db/concurrency/flow_control_ticketholder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/stacktrace.h"
@@ -76,7 +76,7 @@ public:
}
static std::string nameForId(ResourceId resourceId) {
- stdx::lock_guard<stdx::mutex> lk(resourceIdFactory->labelsMutex);
+ stdx::lock_guard<Latch> lk(resourceIdFactory->labelsMutex);
return resourceIdFactory->labels.at(resourceId.getHashId());
}
@@ -92,7 +92,7 @@ public:
private:
ResourceId _newResourceIdForMutex(std::string resourceLabel) {
- stdx::lock_guard<stdx::mutex> lk(labelsMutex);
+ stdx::lock_guard<Latch> lk(labelsMutex);
invariant(nextId == labels.size());
labels.push_back(std::move(resourceLabel));
@@ -103,7 +103,7 @@ private:
std::uint64_t nextId = 0;
std::vector<std::string> labels;
- stdx::mutex labelsMutex;
+ Mutex labelsMutex = MONGO_MAKE_LATCH("ResourceIdFactory::labelsMutex");
};
ResourceIdFactory* ResourceIdFactory::resourceIdFactory;
diff --git a/src/mongo/db/concurrency/d_concurrency_bm.cpp b/src/mongo/db/concurrency/d_concurrency_bm.cpp
index 95c6771badf..a13df7a3ea4 100644
--- a/src/mongo/db/concurrency/d_concurrency_bm.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_bm.cpp
@@ -34,7 +34,7 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/lock_manager_test_help.h"
#include "mongo/db/storage/recovery_unit_noop.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -67,10 +67,10 @@ protected:
};
BENCHMARK_DEFINE_F(DConcurrencyTest, BM_StdMutex)(benchmark::State& state) {
- static stdx::mutex mtx;
+ static auto mtx = MONGO_MAKE_LATCH();
for (auto keepRunning : state) {
- stdx::unique_lock<stdx::mutex> lk(mtx);
+ stdx::unique_lock<Latch> lk(mtx);
}
}
diff --git a/src/mongo/db/concurrency/deferred_writer.cpp b/src/mongo/db/concurrency/deferred_writer.cpp
index 2dbda1013c4..e72c4d16025 100644
--- a/src/mongo/db/concurrency/deferred_writer.cpp
+++ b/src/mongo/db/concurrency/deferred_writer.cpp
@@ -118,7 +118,7 @@ void DeferredWriter::_worker(InsertStatement stmt) {
return Status::OK();
});
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_numBytes -= stmt.doc.objsize();
@@ -166,7 +166,7 @@ bool DeferredWriter::insertDocument(BSONObj obj) {
// We can't insert documents if we haven't been started up.
invariant(_pool);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Check if we're allowed to insert this object.
if (_numBytes + obj.objsize() >= _maxNumBytes) {
diff --git a/src/mongo/db/concurrency/deferred_writer.h b/src/mongo/db/concurrency/deferred_writer.h
index d573f497851..0ac8238fa8d 100644
--- a/src/mongo/db/concurrency/deferred_writer.h
+++ b/src/mongo/db/concurrency/deferred_writer.h
@@ -32,7 +32,7 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -158,7 +158,7 @@ private:
/**
* Guards all non-const, non-thread-safe members.
*/
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DeferredWriter::_mutex");
/**
* The number of bytes currently in the in-memory buffer.
diff --git a/src/mongo/db/concurrency/flow_control_ticketholder.cpp b/src/mongo/db/concurrency/flow_control_ticketholder.cpp
index 8055a7597bd..6bb95797502 100644
--- a/src/mongo/db/concurrency/flow_control_ticketholder.cpp
+++ b/src/mongo/db/concurrency/flow_control_ticketholder.cpp
@@ -80,7 +80,7 @@ void FlowControlTicketholder::set(ServiceContext* service,
void FlowControlTicketholder::refreshTo(int numTickets) {
invariant(numTickets >= 0);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
LOG(4) << "Refreshing tickets. Before: " << _tickets << " Now: " << numTickets;
_tickets = numTickets;
_cv.notify_all();
@@ -88,7 +88,7 @@ void FlowControlTicketholder::refreshTo(int numTickets) {
void FlowControlTicketholder::getTicket(OperationContext* opCtx,
FlowControlTicketholder::CurOp* stats) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown) {
return;
}
@@ -135,7 +135,7 @@ void FlowControlTicketholder::getTicket(OperationContext* opCtx,
// Should only be called once, during shutdown.
void FlowControlTicketholder::setInShutdown() {
LOG(0) << "Stopping further Flow Control ticket acquisitions.";
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
_cv.notify_all();
}
diff --git a/src/mongo/db/concurrency/flow_control_ticketholder.h b/src/mongo/db/concurrency/flow_control_ticketholder.h
index 599779ddd15..8247318a842 100644
--- a/src/mongo/db/concurrency/flow_control_ticketholder.h
+++ b/src/mongo/db/concurrency/flow_control_ticketholder.h
@@ -31,8 +31,8 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -93,7 +93,7 @@ private:
// Use an int64_t as this is serialized to bson which does not support unsigned 64-bit numbers.
AtomicWord<std::int64_t> _totalTimeAcquiringMicros;
- stdx::mutex _mutex;
+    Mutex _mutex = MONGO_MAKE_LATCH("FlowControlTicketholder::_mutex");
stdx::condition_variable _cv;
int _tickets;
diff --git a/src/mongo/db/concurrency/lock_manager.h b/src/mongo/db/concurrency/lock_manager.h
index 50b2116d953..bbeafb1329e 100644
--- a/src/mongo/db/concurrency/lock_manager.h
+++ b/src/mongo/db/concurrency/lock_manager.h
@@ -40,8 +40,8 @@
#include "mongo/db/concurrency/lock_request_list.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/platform/compiler.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/mutex.h"
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 279469eebaf..8252ee0781e 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -219,7 +219,7 @@ void CondVarLockGrantNotification::clear() {
}
LockResult CondVarLockGrantNotification::wait(Milliseconds timeout) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return _cond.wait_for(
lock, timeout.toSystemDuration(), [this] { return _result != LOCK_INVALID; })
? _result
@@ -228,7 +228,7 @@ LockResult CondVarLockGrantNotification::wait(Milliseconds timeout) {
LockResult CondVarLockGrantNotification::wait(OperationContext* opCtx, Milliseconds timeout) {
invariant(opCtx);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (opCtx->waitForConditionOrInterruptFor(
_cond, lock, timeout, [this] { return _result != LOCK_INVALID; })) {
// Because waitForConditionOrInterruptFor evaluates the predicate before checking for
@@ -242,7 +242,7 @@ LockResult CondVarLockGrantNotification::wait(OperationContext* opCtx, Milliseco
}
void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
invariant(_result == LOCK_INVALID);
_result = result;
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 7a592d1ac11..386ea3aa196 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -75,7 +75,7 @@ private:
virtual void notify(ResourceId resId, LockResult result);
// These two go together to implement the conditional variable pattern.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CondVarLockGrantNotification::_mutex");
stdx::condition_variable _cond;
// Result from the last call to notify
diff --git a/src/mongo/db/database_index_builds_tracker.cpp b/src/mongo/db/database_index_builds_tracker.cpp
index 6e31c8ffeff..25fa506ef72 100644
--- a/src/mongo/db/database_index_builds_tracker.cpp
+++ b/src/mongo/db/database_index_builds_tracker.cpp
@@ -71,7 +71,7 @@ int DatabaseIndexBuildsTracker::getNumberOfIndexBuilds(WithLock) const {
return _allIndexBuilds.size();
}
-void DatabaseIndexBuildsTracker::waitUntilNoIndexBuildsRemain(stdx::unique_lock<stdx::mutex>& lk) {
+void DatabaseIndexBuildsTracker::waitUntilNoIndexBuildsRemain(stdx::unique_lock<Latch>& lk) {
_noIndexBuildsRemainCondVar.wait(lk, [&] { return _allIndexBuilds.empty(); });
}
diff --git a/src/mongo/db/database_index_builds_tracker.h b/src/mongo/db/database_index_builds_tracker.h
index b91ab826527..8b2eb4ea474 100644
--- a/src/mongo/db/database_index_builds_tracker.h
+++ b/src/mongo/db/database_index_builds_tracker.h
@@ -87,7 +87,7 @@ public:
/**
* Returns when no index builds remain on this database.
*/
- void waitUntilNoIndexBuildsRemain(stdx::unique_lock<stdx::mutex>& lk);
+ void waitUntilNoIndexBuildsRemain(stdx::unique_lock<Latch>& lk);
private:
// Map of index build states on the database, by build UUID.
diff --git a/src/mongo/db/default_baton.cpp b/src/mongo/db/default_baton.cpp
index 8ae455226cf..cd9332cb92e 100644
--- a/src/mongo/db/default_baton.cpp
+++ b/src/mongo/db/default_baton.cpp
@@ -61,7 +61,7 @@ void DefaultBaton::detachImpl() noexcept {
decltype(_scheduled) scheduled;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_opCtx->getBaton().get() == this);
_opCtx->setBaton(nullptr);
@@ -79,7 +79,7 @@ void DefaultBaton::detachImpl() noexcept {
}
void DefaultBaton::schedule(Task func) noexcept {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_opCtx) {
lk.unlock();
@@ -97,14 +97,14 @@ void DefaultBaton::schedule(Task func) noexcept {
}
void DefaultBaton::notify() noexcept {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_notified = true;
_cv.notify_one();
}
Waitable::TimeoutState DefaultBaton::run_until(ClockSource* clkSource,
Date_t oldDeadline) noexcept {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// We'll fulfill promises and run jobs on the way out, ensuring we don't hold any locks
const auto guard = makeGuard([&] {
diff --git a/src/mongo/db/default_baton.h b/src/mongo/db/default_baton.h
index 063b12edd07..74fd724fae1 100644
--- a/src/mongo/db/default_baton.h
+++ b/src/mongo/db/default_baton.h
@@ -32,8 +32,8 @@
#include <vector>
#include "mongo/db/baton.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/functional.h"
namespace mongo {
@@ -62,7 +62,7 @@ public:
private:
void detachImpl() noexcept override;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DefaultBaton::_mutex");
stdx::condition_variable _cv;
bool _notified = false;
bool _sleeping = false;
diff --git a/src/mongo/db/free_mon/free_mon_controller.cpp b/src/mongo/db/free_mon/free_mon_controller.cpp
index 057ae5ecd7a..a9de7ca4c49 100644
--- a/src/mongo/db/free_mon/free_mon_controller.cpp
+++ b/src/mongo/db/free_mon/free_mon_controller.cpp
@@ -61,7 +61,7 @@ FreeMonNetworkInterface::~FreeMonNetworkInterface() = default;
void FreeMonController::addRegistrationCollector(
std::unique_ptr<FreeMonCollectorInterface> collector) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_registrationCollectors.add(std::move(collector));
@@ -70,7 +70,7 @@ void FreeMonController::addRegistrationCollector(
void FreeMonController::addMetricsCollector(std::unique_ptr<FreeMonCollectorInterface> collector) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_metricCollectors.add(std::move(collector));
@@ -128,7 +128,7 @@ void FreeMonController::notifyOnRollback() {
void FreeMonController::_enqueue(std::shared_ptr<FreeMonMessage> msg) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kStarted);
}
@@ -139,7 +139,7 @@ void FreeMonController::start(RegistrationType registrationType,
std::vector<std::string>& tags,
Seconds gatherMetricsInterval) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
}
@@ -154,7 +154,7 @@ void FreeMonController::start(RegistrationType registrationType,
_thread = stdx::thread([this] { _processor->run(); });
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_state = State::kStarted;
@@ -170,7 +170,7 @@ void FreeMonController::stop() {
log() << "Shutting down free monitoring";
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
bool started = (_state == State::kStarted);
@@ -194,7 +194,7 @@ void FreeMonController::stop() {
void FreeMonController::turnCrankForTest(size_t countMessagesToIgnore) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kStarted);
}
@@ -205,7 +205,7 @@ void FreeMonController::turnCrankForTest(size_t countMessagesToIgnore) {
void FreeMonController::getStatus(OperationContext* opCtx, BSONObjBuilder* status) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kStarted) {
status->append("state", "disabled");
@@ -218,7 +218,7 @@ void FreeMonController::getStatus(OperationContext* opCtx, BSONObjBuilder* statu
void FreeMonController::getServerStatus(OperationContext* opCtx, BSONObjBuilder* status) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kStarted) {
status->append("state", "disabled");
diff --git a/src/mongo/db/free_mon/free_mon_controller.h b/src/mongo/db/free_mon/free_mon_controller.h
index 9307ab7570c..5c74a8a4b5f 100644
--- a/src/mongo/db/free_mon/free_mon_controller.h
+++ b/src/mongo/db/free_mon/free_mon_controller.h
@@ -191,7 +191,7 @@ private:
State _state{State::kNotStarted};
// Mutext to protect internal state
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FreeMonController::_mutex");
// Set of registration collectors
FreeMonCollectorCollection _registrationCollectors;
diff --git a/src/mongo/db/free_mon/free_mon_controller_test.cpp b/src/mongo/db/free_mon/free_mon_controller_test.cpp
index 99b2e4c36de..3b671376158 100644
--- a/src/mongo/db/free_mon/free_mon_controller_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_controller_test.cpp
@@ -86,7 +86,7 @@ public:
builder.append("mock", "some data");
{
- stdx::lock_guard<stdx::mutex> lck(_mutex);
+ stdx::lock_guard<Latch> lck(_mutex);
++_counter;
@@ -105,12 +105,12 @@ public:
}
std::uint32_t count() {
- stdx::lock_guard<stdx::mutex> lck(_mutex);
+ stdx::lock_guard<Latch> lck(_mutex);
return _counter;
}
void wait() {
- stdx::unique_lock<stdx::mutex> lck(_mutex);
+ stdx::unique_lock<Latch> lck(_mutex);
while (_counter < _wait) {
_condvar.wait(lck);
}
@@ -130,7 +130,7 @@ private:
std::uint32_t _counter{0};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FreeMonMetricsCollectorMock::_mutex");
stdx::condition_variable _condvar;
std::uint32_t _wait{0};
};
@@ -158,7 +158,7 @@ public:
* Set the count of events to wait for.
*/
void reset(uint32_t count) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
ASSERT_EQ(_count, 0UL);
ASSERT_GT(count, 0UL);
@@ -170,7 +170,7 @@ public:
* Set the payload and signal waiter.
*/
void set(T payload) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_count > 0) {
--_count;
@@ -187,7 +187,7 @@ public:
* Returns boost::none on timeout.
*/
boost::optional<T> wait_for(Milliseconds duration) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (!_condvar.wait_for(
lock, duration.toSystemDuration(), [this]() { return _count == 0; })) {
@@ -202,7 +202,7 @@ private:
stdx::condition_variable _condvar;
// Lock for condition variable and to protect state
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CountdownLatchResult::_mutex");
// Count to wait fore
uint32_t _count;
@@ -309,7 +309,7 @@ public:
auto cdr = req.getMetrics();
{
- stdx::lock_guard<stdx::mutex> lock(_metricsLock);
+ stdx::lock_guard<Latch> lock(_metricsLock);
auto metrics = decompressMetrics(cdr);
_lastMetrics = metrics;
_countdownMetrics.set(metrics);
@@ -354,7 +354,7 @@ public:
}
BSONArray getLastMetrics() {
- stdx::lock_guard<stdx::mutex> lock(_metricsLock);
+ stdx::lock_guard<Latch> lock(_metricsLock);
return _lastMetrics;
}
@@ -365,7 +365,7 @@ private:
executor::ThreadPoolTaskExecutor* _threadPool;
- stdx::mutex _metricsLock;
+ Mutex _metricsLock = MONGO_MAKE_LATCH("FreeMonNetworkInterfaceMock::_metricsLock");
BSONArray _lastMetrics;
Options _options;
diff --git a/src/mongo/db/free_mon/free_mon_message.h b/src/mongo/db/free_mon/free_mon_message.h
index 71a34dd84b4..9b7fc83a05b 100644
--- a/src/mongo/db/free_mon/free_mon_message.h
+++ b/src/mongo/db/free_mon/free_mon_message.h
@@ -33,8 +33,8 @@
#include <vector>
#include "mongo/db/free_mon/free_mon_protocol_gen.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/duration.h"
#include "mongo/util/time_support.h"
@@ -292,7 +292,7 @@ public:
* Set Status and signal waiter.
*/
void set(Status status) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(!_set);
if (!_set) {
@@ -308,7 +308,7 @@ public:
* Returns boost::none on timeout.
*/
boost::optional<Status> wait_for(Milliseconds duration) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (!_condvar.wait_for(lock, duration.toSystemDuration(), [this]() { return _set; })) {
return {};
@@ -322,7 +322,7 @@ private:
stdx::condition_variable _condvar;
// Lock for condition variable and to protect state
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WaitableResult::_mutex");
// Indicates whether _status has been set
bool _set{false};
diff --git a/src/mongo/db/free_mon/free_mon_processor.h b/src/mongo/db/free_mon/free_mon_processor.h
index 11584595147..ab519bfb84d 100644
--- a/src/mongo/db/free_mon/free_mon_processor.h
+++ b/src/mongo/db/free_mon/free_mon_processor.h
@@ -235,7 +235,7 @@ public:
* Reset countdown latch wait for N events.
*/
void reset(uint32_t count) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
dassert(_count == 0);
dassert(count > 0);
_count = count;
@@ -245,7 +245,7 @@ public:
* Count down an event.
*/
void countDown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_count > 0) {
--_count;
@@ -259,13 +259,13 @@ public:
* Wait until the N events specified in reset have occured.
*/
void wait() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condvar.wait(lock, [&] { return _count == 0; });
}
private:
// mutex to break count and cond var
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FreeMonCountdownLatch::_mutex");
// cond var to signal and wait on
stdx::condition_variable _condvar;
diff --git a/src/mongo/db/free_mon/free_mon_queue.cpp b/src/mongo/db/free_mon/free_mon_queue.cpp
index d2cc1115ef5..56b01eade93 100644
--- a/src/mongo/db/free_mon/free_mon_queue.cpp
+++ b/src/mongo/db/free_mon/free_mon_queue.cpp
@@ -74,7 +74,7 @@ FreeMonMessage::~FreeMonMessage() {}
void FreeMonMessageQueue::enqueue(std::shared_ptr<FreeMonMessage> msg) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// If we were stopped, drop messages
if (_stop) {
@@ -98,7 +98,7 @@ void FreeMonMessageQueue::enqueue(std::shared_ptr<FreeMonMessage> msg) {
boost::optional<std::shared_ptr<FreeMonMessage>> FreeMonMessageQueue::dequeue(
ClockSource* clockSource) {
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_stop) {
return {};
}
@@ -188,7 +188,7 @@ boost::optional<std::shared_ptr<FreeMonMessage>> FreeMonMessageQueue::dequeue(
void FreeMonMessageQueue::stop() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// We can be stopped twice in some situations:
// 1. Stop on unexpected error
@@ -204,7 +204,7 @@ void FreeMonMessageQueue::turnCrankForTest(size_t countMessagesToIgnore) {
invariant(_useCrank);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_waitable = std::make_unique<WaitableResult>();
diff --git a/src/mongo/db/free_mon/free_mon_queue.h b/src/mongo/db/free_mon/free_mon_queue.h
index 18be1b7a330..6e7bb85dcbf 100644
--- a/src/mongo/db/free_mon/free_mon_queue.h
+++ b/src/mongo/db/free_mon/free_mon_queue.h
@@ -133,7 +133,7 @@ private:
stdx::condition_variable _condvar;
// Lock for condition variable and to protect state
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FreeMonMessageQueue::_mutex");
// Indicates whether queue has been stopped.
bool _stop{false};
diff --git a/src/mongo/db/ftdc/controller.cpp b/src/mongo/db/ftdc/controller.cpp
index 0bb2e44fa21..7aaf071bfa8 100644
--- a/src/mongo/db/ftdc/controller.cpp
+++ b/src/mongo/db/ftdc/controller.cpp
@@ -37,9 +37,9 @@
#include "mongo/db/ftdc/collector.h"
#include "mongo/db/ftdc/util.h"
#include "mongo/db/jsobj.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/idle_thread_block.h"
#include "mongo/util/exit.h"
@@ -49,7 +49,7 @@
namespace mongo {
Status FTDCController::setEnabled(bool enabled) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_path.empty()) {
return Status(ErrorCodes::FTDCPathNotSet,
@@ -64,37 +64,37 @@ Status FTDCController::setEnabled(bool enabled) {
}
void FTDCController::setPeriod(Milliseconds millis) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.period = millis;
_condvar.notify_one();
}
void FTDCController::setMaxDirectorySizeBytes(std::uint64_t size) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.maxDirectorySizeBytes = size;
_condvar.notify_one();
}
void FTDCController::setMaxFileSizeBytes(std::uint64_t size) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.maxFileSizeBytes = size;
_condvar.notify_one();
}
void FTDCController::setMaxSamplesPerArchiveMetricChunk(size_t size) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.maxSamplesPerArchiveMetricChunk = size;
_condvar.notify_one();
}
void FTDCController::setMaxSamplesPerInterimMetricChunk(size_t size) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.maxSamplesPerInterimMetricChunk = size;
_condvar.notify_one();
}
Status FTDCController::setDirectory(const boost::filesystem::path& path) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (!_path.empty()) {
return Status(ErrorCodes::FTDCPathAlreadySet,
@@ -112,7 +112,7 @@ Status FTDCController::setDirectory(const boost::filesystem::path& path) {
void FTDCController::addPeriodicCollector(std::unique_ptr<FTDCCollectorInterface> collector) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_periodicCollectors.add(std::move(collector));
@@ -121,7 +121,7 @@ void FTDCController::addPeriodicCollector(std::unique_ptr<FTDCCollectorInterface
void FTDCController::addOnRotateCollector(std::unique_ptr<FTDCCollectorInterface> collector) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_rotateCollectors.add(std::move(collector));
@@ -130,7 +130,7 @@ void FTDCController::addOnRotateCollector(std::unique_ptr<FTDCCollectorInterface
BSONObj FTDCController::getMostRecentPeriodicDocument() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _mostRecentPeriodicDocument.getOwned();
}
}
@@ -143,7 +143,7 @@ void FTDCController::start() {
_thread = stdx::thread([this] { doLoop(); });
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_state = State::kStarted;
@@ -154,7 +154,7 @@ void FTDCController::stop() {
log() << "Shutting down full-time diagnostic data capture";
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
bool started = (_state == State::kStarted);
@@ -188,7 +188,7 @@ void FTDCController::doLoop() {
try {
// Update config
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_config = _configTemp;
}
@@ -205,7 +205,7 @@ void FTDCController::doLoop() {
// Wait for the next run or signal to shutdown
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
MONGO_IDLE_THREAD_BLOCK;
// We ignore spurious wakeups by just doing an iteration of the loop
@@ -251,7 +251,7 @@ void FTDCController::doLoop() {
// Store a reference to the most recent document from the periodic collectors
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_mostRecentPeriodicDocument = std::get<0>(collectSample);
}
}
diff --git a/src/mongo/db/ftdc/controller.h b/src/mongo/db/ftdc/controller.h
index 5d1f2f5487a..50d6ae48886 100644
--- a/src/mongo/db/ftdc/controller.h
+++ b/src/mongo/db/ftdc/controller.h
@@ -37,8 +37,8 @@
#include "mongo/db/ftdc/config.h"
#include "mongo/db/ftdc/file_manager.h"
#include "mongo/db/jsobj.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
namespace mongo {
@@ -187,7 +187,7 @@ private:
boost::filesystem::path _path;
// Mutex to protect the condvar, configuration changes, and most recent periodic document.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FTDCController::_mutex");
stdx::condition_variable _condvar;
// Config settings that are used by controller, file manager, and all other classes.
diff --git a/src/mongo/db/ftdc/controller_test.cpp b/src/mongo/db/ftdc/controller_test.cpp
index 4f67923730c..7dfc7caefea 100644
--- a/src/mongo/db/ftdc/controller_test.cpp
+++ b/src/mongo/db/ftdc/controller_test.cpp
@@ -107,7 +107,7 @@ public:
}
void wait() {
- stdx::unique_lock<stdx::mutex> lck(_mutex);
+ stdx::unique_lock<Latch> lck(_mutex);
while (_counter < _wait) {
_condvar.wait(lck);
}
@@ -133,7 +133,7 @@ private:
std::vector<BSONObj> _docs;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FTDCMetricsCollectorMockTee::_mutex");
stdx::condition_variable _condvar;
std::uint32_t _wait{0};
};
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index ace415aa6dd..a965feb4c60 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -377,7 +377,7 @@ bool IndexBuildInterceptor::areAllWritesApplied(OperationContext* opCtx) const {
}
boost::optional<MultikeyPaths> IndexBuildInterceptor::getMultikeyPaths() const {
- stdx::unique_lock<stdx::mutex> lk(_multikeyPathMutex);
+ stdx::unique_lock<Latch> lk(_multikeyPathMutex);
return _multikeyPaths;
}
@@ -403,7 +403,7 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
// SERVER-39705: It's worth noting that a document may not generate any keys, but be
// described as being multikey. This step must be done to maintain parity with `validate`s
// expectations.
- stdx::unique_lock<stdx::mutex> lk(_multikeyPathMutex);
+ stdx::unique_lock<Latch> lk(_multikeyPathMutex);
if (_multikeyPaths) {
MultikeyPathTracker::mergeMultikeyPaths(&_multikeyPaths.get(), multikeyPaths);
} else {
diff --git a/src/mongo/db/index/index_build_interceptor.h b/src/mongo/db/index/index_build_interceptor.h
index d280d4e4234..8f8d54bd737 100644
--- a/src/mongo/db/index/index_build_interceptor.h
+++ b/src/mongo/db/index/index_build_interceptor.h
@@ -167,7 +167,8 @@ private:
// shared resource.
std::shared_ptr<AtomicWord<long long>> _sideWritesCounter;
- mutable stdx::mutex _multikeyPathMutex;
+ mutable Mutex _multikeyPathMutex =
+ MONGO_MAKE_LATCH("IndexBuildInterceptor::_multikeyPathMutex");
boost::optional<MultikeyPaths> _multikeyPaths;
};
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 1ea32ad3ff6..91e1b637950 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -221,7 +221,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::startIndexRe
/*commitQuorum=*/boost::none);
Status status = [&]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _registerIndexBuild(lk, replIndexBuildState);
}();
if (!status.isOK()) {
@@ -257,7 +257,7 @@ Future<void> IndexBuildsCoordinator::joinIndexBuilds(const NamespaceString& nss,
}
void IndexBuildsCoordinator::waitForAllIndexBuildsToStopForShutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// All index builds should have been signaled to stop via the ServiceContext.
@@ -272,7 +272,7 @@ void IndexBuildsCoordinator::waitForAllIndexBuildsToStopForShutdown() {
void IndexBuildsCoordinator::abortCollectionIndexBuilds(const UUID& collectionUUID,
const std::string& reason) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Ensure the caller correctly stopped any new index builds on the collection.
auto it = _disallowedCollections.find(collectionUUID);
@@ -292,7 +292,7 @@ void IndexBuildsCoordinator::abortCollectionIndexBuilds(const UUID& collectionUU
}
void IndexBuildsCoordinator::abortDatabaseIndexBuilds(StringData db, const std::string& reason) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Ensure the caller correctly stopped any new index builds on the database.
auto it = _disallowedDbs.find(db);
@@ -333,7 +333,7 @@ void IndexBuildsCoordinator::recoverIndexBuilds() {
}
int IndexBuildsCoordinator::numInProgForDb(StringData db) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto dbIndexBuildsIt = _databaseIndexBuilds.find(db);
if (dbIndexBuildsIt == _databaseIndexBuilds.end()) {
@@ -343,7 +343,7 @@ int IndexBuildsCoordinator::numInProgForDb(StringData db) const {
}
void IndexBuildsCoordinator::dump(std::ostream& ss) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_collectionIndexBuilds.size()) {
ss << "\n<b>Background Jobs in Progress</b>\n";
@@ -360,17 +360,17 @@ void IndexBuildsCoordinator::dump(std::ostream& ss) const {
}
bool IndexBuildsCoordinator::inProgForCollection(const UUID& collectionUUID) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _collectionIndexBuilds.find(collectionUUID) != _collectionIndexBuilds.end();
}
bool IndexBuildsCoordinator::inProgForDb(StringData db) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _databaseIndexBuilds.find(db) != _databaseIndexBuilds.end();
}
void IndexBuildsCoordinator::assertNoIndexBuildInProgress() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
uassert(ErrorCodes::BackgroundOperationInProgressForDatabase,
str::stream() << "cannot perform operation: there are currently "
<< _allIndexBuilds.size() << " index builds running.",
@@ -394,7 +394,7 @@ void IndexBuildsCoordinator::assertNoBgOpInProgForDb(StringData db) const {
void IndexBuildsCoordinator::awaitNoIndexBuildInProgressForCollection(
const UUID& collectionUUID) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto collIndexBuildsIt = _collectionIndexBuilds.find(collectionUUID);
if (collIndexBuildsIt == _collectionIndexBuilds.end()) {
@@ -408,7 +408,7 @@ void IndexBuildsCoordinator::awaitNoIndexBuildInProgressForCollection(
}
void IndexBuildsCoordinator::awaitNoBgOpInProgForDb(StringData db) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto dbIndexBuildsIt = _databaseIndexBuilds.find(db);
if (dbIndexBuildsIt == _databaseIndexBuilds.end()) {
@@ -426,7 +426,7 @@ void IndexBuildsCoordinator::onReplicaSetReconfig() {
}
void IndexBuildsCoordinator::sleepIndexBuilds_forTestOnly(bool sleep) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sleepForTest = sleep;
}
@@ -578,7 +578,7 @@ IndexBuildsCoordinator::_registerAndSetUpIndexBuild(
// Lock from when we ascertain what indexes to build through to when the build is registered
// on the Coordinator and persistedly set up in the catalog. This serializes setting up an
// index build so that no attempts are made to register the same build twice.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
std::vector<BSONObj> filteredSpecs;
try {
@@ -693,7 +693,7 @@ IndexBuildsCoordinator::_registerAndSetUpIndexBuild(
void IndexBuildsCoordinator::_runIndexBuild(OperationContext* opCtx,
const UUID& buildUUID) noexcept {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_sleepForTest) {
lk.unlock();
sleepmillis(100);
@@ -702,7 +702,7 @@ void IndexBuildsCoordinator::_runIndexBuild(OperationContext* opCtx,
}
auto replState = [&] {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _allIndexBuilds.find(buildUUID);
invariant(it != _allIndexBuilds.end());
return it->second;
@@ -720,7 +720,7 @@ void IndexBuildsCoordinator::_runIndexBuild(OperationContext* opCtx,
// Ensure the index build is unregistered from the Coordinator and the Promise is set with
// the build's result so that callers are notified of the outcome.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_unregisterIndexBuild(lk, replState);
@@ -961,7 +961,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
invariant(opCtx->lockState()->isW());
auto replState = [&] {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _allIndexBuilds.find(buildUUID);
invariant(it != _allIndexBuilds.end());
return it->second;
@@ -1023,7 +1023,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
invariant(indexCatalogStats.numIndexesBefore == indexCatalogStats.numIndexesAfter);
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_unregisterIndexBuild(lk, replState);
}
@@ -1034,7 +1034,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
}
void IndexBuildsCoordinator::_stopIndexBuildsOnDatabase(StringData dbName) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _disallowedDbs.find(dbName);
if (it != _disallowedDbs.end()) {
@@ -1045,7 +1045,7 @@ void IndexBuildsCoordinator::_stopIndexBuildsOnDatabase(StringData dbName) {
}
void IndexBuildsCoordinator::_stopIndexBuildsOnCollection(const UUID& collectionUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _disallowedCollections.find(collectionUUID);
if (it != _disallowedCollections.end()) {
@@ -1056,7 +1056,7 @@ void IndexBuildsCoordinator::_stopIndexBuildsOnCollection(const UUID& collection
}
void IndexBuildsCoordinator::_allowIndexBuildsOnDatabase(StringData dbName) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _disallowedDbs.find(dbName);
invariant(it != _disallowedDbs.end());
@@ -1067,7 +1067,7 @@ void IndexBuildsCoordinator::_allowIndexBuildsOnDatabase(StringData dbName) {
}
void IndexBuildsCoordinator::_allowIndexBuildsOnCollection(const UUID& collectionUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _disallowedCollections.find(collectionUUID);
invariant(it != _disallowedCollections.end());
diff --git a/src/mongo/db/index_builds_coordinator.h b/src/mongo/db/index_builds_coordinator.h
index c16aa0b4206..46adaa5b57e 100644
--- a/src/mongo/db/index_builds_coordinator.h
+++ b/src/mongo/db/index_builds_coordinator.h
@@ -43,8 +43,8 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl_index_build_state.h"
#include "mongo/db/storage/durable_catalog.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/future.h"
@@ -418,7 +418,7 @@ protected:
const UUID& buildUUID) noexcept;
// Protects the below state.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("IndexBuildsCoordinator::_mutex");
// New index builds are not allowed on a collection or database if the collection or database is
// in either of these maps. These are used when concurrent operations need to abort index builds
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index d774636ab9b..208b8b6aa17 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -107,7 +107,7 @@ IndexBuildsCoordinatorMongod::startIndexBuild(OperationContext* opCtx,
}
auto replState = [&]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _allIndexBuilds.find(buildUUID);
invariant(it != _allIndexBuilds.end());
return it->second;
@@ -167,7 +167,7 @@ IndexBuildsCoordinatorMongod::startIndexBuild(OperationContext* opCtx,
](auto status) noexcept {
// Clean up the index build if we failed to schedule it.
if (!status.isOK()) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Unregister the index build before setting the promises,
// so callers do not see the build again.
@@ -217,17 +217,17 @@ Status IndexBuildsCoordinatorMongod::commitIndexBuild(OperationContext* opCtx,
}
void IndexBuildsCoordinatorMongod::signalChangeToPrimaryMode() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_replMode = ReplState::Primary;
}
void IndexBuildsCoordinatorMongod::signalChangeToSecondaryMode() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_replMode = ReplState::Secondary;
}
void IndexBuildsCoordinatorMongod::signalChangeToInitialSyncMode() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_replMode = ReplState::InitialSync;
}
@@ -257,7 +257,7 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
UUID collectionUUID = *collection->uuid();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto collectionIt = _collectionIndexBuilds.find(collectionUUID);
if (collectionIt == _collectionIndexBuilds.end()) {
return Status(ErrorCodes::IndexNotFound,
diff --git a/src/mongo/db/keys_collection_cache.cpp b/src/mongo/db/keys_collection_cache.cpp
index c97697aea41..0e57d6b091a 100644
--- a/src/mongo/db/keys_collection_cache.cpp
+++ b/src/mongo/db/keys_collection_cache.cpp
@@ -47,7 +47,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::refresh(OperationContext
decltype(_cache)::size_type originalSize = 0;
{
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
auto iter = _cache.crbegin();
if (iter != _cache.crend()) {
newerThanThis = iter->second.getExpiresAt();
@@ -73,7 +73,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::refresh(OperationContext
auto& newKeys = refreshStatus.getValue();
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
if (originalSize > _cache.size()) {
// _cache cleared while we getting the new keys, just return the newest key without
// touching the _cache so the next refresh will populate it properly.
@@ -96,7 +96,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::refresh(OperationContext
StatusWith<KeysCollectionDocument> KeysCollectionCache::getKeyById(long long keyId,
const LogicalTime& forThisTime) {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
for (auto iter = _cache.lower_bound(forThisTime); iter != _cache.cend(); ++iter) {
if (iter->second.getKeyId() == keyId) {
@@ -111,7 +111,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::getKeyById(long long key
}
StatusWith<KeysCollectionDocument> KeysCollectionCache::getKey(const LogicalTime& forThisTime) {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
auto iter = _cache.upper_bound(forThisTime);
@@ -126,7 +126,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::getKey(const LogicalTime
void KeysCollectionCache::resetCache() {
// keys that read with non majority readConcern level can be rolled back.
if (!_client->supportsMajorityReads()) {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
_cache.clear();
}
}
diff --git a/src/mongo/db/keys_collection_cache.h b/src/mongo/db/keys_collection_cache.h
index 28d72892277..61989d6ae5b 100644
--- a/src/mongo/db/keys_collection_cache.h
+++ b/src/mongo/db/keys_collection_cache.h
@@ -34,7 +34,7 @@
#include "mongo/base/status_with.h"
#include "mongo/db/keys_collection_document.h"
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -68,7 +68,7 @@ private:
const std::string _purpose;
KeysCollectionClient* const _client;
- stdx::mutex _cacheMutex;
+ Mutex _cacheMutex = MONGO_MAKE_LATCH("KeysCollectionCache::_cacheMutex");
std::map<LogicalTime, KeysCollectionDocument> _cache; // expiresAt -> KeysDocument
};
diff --git a/src/mongo/db/keys_collection_manager.cpp b/src/mongo/db/keys_collection_manager.cpp
index df82ebe71a0..92834348382 100644
--- a/src/mongo/db/keys_collection_manager.cpp
+++ b/src/mongo/db/keys_collection_manager.cpp
@@ -192,7 +192,7 @@ void KeysCollectionManager::clearCache() {
void KeysCollectionManager::PeriodicRunner::refreshNow(OperationContext* opCtx) {
auto refreshRequest = [this]() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_inShutdown) {
uasserted(ErrorCodes::ShutdownInProgress,
@@ -226,7 +226,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
unsigned errorCount = 0;
std::shared_ptr<RefreshFunc> doRefresh;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_inShutdown) {
break;
@@ -249,7 +249,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
auto currentTime = LogicalClock::get(service)->getClusterTime();
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_hasSeenKeys = true;
}
@@ -272,7 +272,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
}
}
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_refreshRequest) {
if (!hasRefreshRequestInitially) {
@@ -300,7 +300,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
}
}
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_refreshRequest) {
_refreshRequest->set();
_refreshRequest.reset();
@@ -308,7 +308,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
}
void KeysCollectionManager::PeriodicRunner::setFunc(RefreshFunc newRefreshStrategy) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_doRefresh = std::make_shared<RefreshFunc>(std::move(newRefreshStrategy));
_refreshNeededCV.notify_all();
}
@@ -321,7 +321,7 @@ void KeysCollectionManager::PeriodicRunner::switchFunc(OperationContext* opCtx,
void KeysCollectionManager::PeriodicRunner::start(ServiceContext* service,
const std::string& threadName,
Milliseconds refreshInterval) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(!_backgroundThread.joinable());
invariant(!_inShutdown);
@@ -332,7 +332,7 @@ void KeysCollectionManager::PeriodicRunner::start(ServiceContext* service,
void KeysCollectionManager::PeriodicRunner::stop() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (!_backgroundThread.joinable()) {
return;
}
@@ -346,7 +346,7 @@ void KeysCollectionManager::PeriodicRunner::stop() {
}
bool KeysCollectionManager::PeriodicRunner::hasSeenKeys() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _hasSeenKeys;
}
diff --git a/src/mongo/db/keys_collection_manager.h b/src/mongo/db/keys_collection_manager.h
index 660d998f07f..0016cb6f1d9 100644
--- a/src/mongo/db/keys_collection_manager.h
+++ b/src/mongo/db/keys_collection_manager.h
@@ -36,8 +36,8 @@
#include "mongo/db/keys_collection_cache.h"
#include "mongo/db/keys_collection_document.h"
#include "mongo/db/keys_collection_manager_gen.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/duration.h"
@@ -169,7 +169,8 @@ private:
std::string threadName,
Milliseconds refreshInterval);
- stdx::mutex _mutex; // protects all the member variables below.
+ // protects all the member variables below.
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicRunner::_mutex");
std::shared_ptr<Notification<void>> _refreshRequest;
stdx::condition_variable _refreshNeededCV;
diff --git a/src/mongo/db/logical_clock.cpp b/src/mongo/db/logical_clock.cpp
index 415566094d2..5cc982465d9 100644
--- a/src/mongo/db/logical_clock.cpp
+++ b/src/mongo/db/logical_clock.cpp
@@ -76,12 +76,12 @@ void LogicalClock::set(ServiceContext* service, std::unique_ptr<LogicalClock> cl
LogicalClock::LogicalClock(ServiceContext* service) : _service(service) {}
LogicalTime LogicalClock::getClusterTime() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _clusterTime;
}
Status LogicalClock::advanceClusterTime(const LogicalTime newTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto rateLimitStatus = _passesRateLimiter_inlock(newTime);
if (!rateLimitStatus.isOK()) {
@@ -99,7 +99,7 @@ LogicalTime LogicalClock::reserveTicks(uint64_t nTicks) {
invariant(nTicks > 0 && nTicks <= kMaxSignedInt);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
LogicalTime clusterTime = _clusterTime;
@@ -142,7 +142,7 @@ LogicalTime LogicalClock::reserveTicks(uint64_t nTicks) {
}
void LogicalClock::setClusterTimeFromTrustedSource(LogicalTime newTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Rate limit checks are skipped here so a server with no activity for longer than
// maxAcceptableLogicalClockDriftSecs seconds can still have its cluster time initialized.
@@ -177,12 +177,12 @@ Status LogicalClock::_passesRateLimiter_inlock(LogicalTime newTime) {
}
bool LogicalClock::isEnabled() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isEnabled;
}
void LogicalClock::disable() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_isEnabled = false;
}
diff --git a/src/mongo/db/logical_clock.h b/src/mongo/db/logical_clock.h
index c6cebe983d8..28191be87f6 100644
--- a/src/mongo/db/logical_clock.h
+++ b/src/mongo/db/logical_clock.h
@@ -30,7 +30,7 @@
#pragma once
#include "mongo/db/logical_time.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
class ServiceContext;
@@ -107,7 +107,7 @@ private:
ServiceContext* const _service;
// The mutex protects _clusterTime and _isEnabled.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("LogicalClock::_mutex");
LogicalTime _clusterTime;
bool _isEnabled{true};
};
diff --git a/src/mongo/db/logical_session_cache_impl.cpp b/src/mongo/db/logical_session_cache_impl.cpp
index 8afd9f6889f..17b136f566a 100644
--- a/src/mongo/db/logical_session_cache_impl.cpp
+++ b/src/mongo/db/logical_session_cache_impl.cpp
@@ -116,7 +116,7 @@ Status LogicalSessionCacheImpl::reapNow(Client* client) {
}
size_t LogicalSessionCacheImpl::size() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _activeSessions.size();
}
@@ -140,7 +140,7 @@ void LogicalSessionCacheImpl::_periodicReap(Client* client) {
Status LogicalSessionCacheImpl::_reap(Client* client) {
// Take the lock to update some stats.
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Clear the last set of stats for our new run.
_stats.setLastTransactionReaperJobDurationMillis(0);
@@ -187,7 +187,7 @@ Status LogicalSessionCacheImpl::_reap(Client* client) {
Minutes(gTransactionRecordMinimumLifetimeMinutes));
} catch (const DBException& ex) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto millis = _service->now() - _stats.getLastTransactionReaperJobTimestamp();
_stats.setLastTransactionReaperJobDurationMillis(millis.count());
}
@@ -196,7 +196,7 @@ Status LogicalSessionCacheImpl::_reap(Client* client) {
}
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto millis = _service->now() - _stats.getLastTransactionReaperJobTimestamp();
_stats.setLastTransactionReaperJobDurationMillis(millis.count());
_stats.setLastTransactionReaperJobEntriesCleanedUp(numReaped);
@@ -208,7 +208,7 @@ Status LogicalSessionCacheImpl::_reap(Client* client) {
void LogicalSessionCacheImpl::_refresh(Client* client) {
// Stats for serverStatus:
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Clear the refresh-related stats with the beginning of our run.
_stats.setLastSessionsCollectionJobDurationMillis(0);
@@ -223,7 +223,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
// This will finish timing _refresh for our stats no matter when we return.
const auto timeRefreshJob = makeGuard([this] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto millis = _service->now() - _stats.getLastSessionsCollectionJobTimestamp();
_stats.setLastSessionsCollectionJobDurationMillis(millis.count());
});
@@ -255,7 +255,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
{
using std::swap;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
swap(explicitlyEndingSessions, _endingSessions);
swap(activeSessions, _activeSessions);
}
@@ -264,7 +264,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
// swapped out of LogicalSessionCache, and merges in any records that had been added since we
// swapped them out.
auto backSwap = [this](auto& member, auto& temp) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
using std::swap;
swap(member, temp);
for (const auto& it : temp) {
@@ -300,7 +300,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
uassertStatusOK(_sessionsColl->refreshSessions(opCtx, activeSessionRecords));
activeSessionsBackSwapper.dismiss();
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stats.setLastSessionsCollectionJobEntriesRefreshed(activeSessionRecords.size());
}
@@ -308,7 +308,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
uassertStatusOK(_sessionsColl->removeRecords(opCtx, explicitlyEndingSessions));
explicitlyEndingBackSwaper.dismiss();
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stats.setLastSessionsCollectionJobEntriesEnded(explicitlyEndingSessions.size());
}
@@ -321,7 +321,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
// Exclude sessions added to _activeSessions from the openCursorSession to avoid race between
// killing cursors on the removed sessions and creating sessions.
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& it : _activeSessions) {
auto newSessionIt = openCursorSessions.find(it.first);
@@ -351,18 +351,18 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
SessionKiller::Matcher matcher(std::move(patterns));
auto killRes = _service->killCursorsWithMatchingSessions(opCtx, std::move(matcher));
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stats.setLastSessionsCollectionJobCursorsClosed(killRes.second);
}
}
void LogicalSessionCacheImpl::endSessions(const LogicalSessionIdSet& sessions) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_endingSessions.insert(begin(sessions), end(sessions));
}
LogicalSessionCacheStats LogicalSessionCacheImpl::getStats() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stats.setActiveSessionsCount(_activeSessions.size());
return _stats;
}
@@ -380,7 +380,7 @@ Status LogicalSessionCacheImpl::_addToCache(WithLock, LogicalSessionRecord recor
}
std::vector<LogicalSessionId> LogicalSessionCacheImpl::listIds() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
std::vector<LogicalSessionId> ret;
ret.reserve(_activeSessions.size());
for (const auto& id : _activeSessions) {
@@ -391,7 +391,7 @@ std::vector<LogicalSessionId> LogicalSessionCacheImpl::listIds() const {
std::vector<LogicalSessionId> LogicalSessionCacheImpl::listIds(
const std::vector<SHA256Block>& userDigests) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
std::vector<LogicalSessionId> ret;
for (const auto& it : _activeSessions) {
if (std::find(userDigests.cbegin(), userDigests.cend(), it.first.getUid()) !=
@@ -404,7 +404,7 @@ std::vector<LogicalSessionId> LogicalSessionCacheImpl::listIds(
boost::optional<LogicalSessionRecord> LogicalSessionCacheImpl::peekCached(
const LogicalSessionId& id) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const auto it = _activeSessions.find(id);
if (it == _activeSessions.end()) {
return boost::none;
diff --git a/src/mongo/db/logical_session_cache_impl.h b/src/mongo/db/logical_session_cache_impl.h
index dcc827a98ef..c92e45fee4a 100644
--- a/src/mongo/db/logical_session_cache_impl.h
+++ b/src/mongo/db/logical_session_cache_impl.h
@@ -109,7 +109,7 @@ private:
const std::shared_ptr<SessionsCollection> _sessionsColl;
const ReapSessionsOlderThanFn _reapSessionsOlderThanFn;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("LogicalSessionCacheImpl::_mutex");
LogicalSessionIdMap<LogicalSessionRecord> _activeSessions;
diff --git a/src/mongo/db/logical_time_validator.cpp b/src/mongo/db/logical_time_validator.cpp
index 66136950343..df814663ff6 100644
--- a/src/mongo/db/logical_time_validator.cpp
+++ b/src/mongo/db/logical_time_validator.cpp
@@ -51,7 +51,7 @@ namespace {
const auto getLogicalClockValidator =
ServiceContext::declareDecoration<std::unique_ptr<LogicalTimeValidator>>();
-stdx::mutex validatorMutex; // protects access to decoration instance of LogicalTimeValidator.
+Mutex validatorMutex; // protects access to decoration instance of LogicalTimeValidator.
std::vector<Privilege> advanceClusterTimePrivilege;
@@ -67,7 +67,7 @@ Milliseconds kRefreshIntervalIfErrored(200);
} // unnamed namespace
LogicalTimeValidator* LogicalTimeValidator::get(ServiceContext* service) {
- stdx::lock_guard<stdx::mutex> lk(validatorMutex);
+ stdx::lock_guard<Latch> lk(validatorMutex);
return getLogicalClockValidator(service).get();
}
@@ -77,7 +77,7 @@ LogicalTimeValidator* LogicalTimeValidator::get(OperationContext* ctx) {
void LogicalTimeValidator::set(ServiceContext* service,
std::unique_ptr<LogicalTimeValidator> newValidator) {
- stdx::lock_guard<stdx::mutex> lk(validatorMutex);
+ stdx::lock_guard<Latch> lk(validatorMutex);
auto& validator = getLogicalClockValidator(service);
validator = std::move(newValidator);
}
@@ -91,7 +91,7 @@ SignedLogicalTime LogicalTimeValidator::_getProof(const KeysCollectionDocument&
// Compare and calculate HMAC inside mutex to prevent multiple threads computing HMAC for the
// same cluster time.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Note: _lastSeenValidTime will initially not have a proof set.
if (newTime == _lastSeenValidTime.getTime() && _lastSeenValidTime.getProof()) {
return _lastSeenValidTime;
@@ -143,7 +143,7 @@ SignedLogicalTime LogicalTimeValidator::signLogicalTime(OperationContext* opCtx,
Status LogicalTimeValidator::validate(OperationContext* opCtx, const SignedLogicalTime& newTime) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (newTime.getTime() <= _lastSeenValidTime.getTime()) {
return Status::OK();
}
@@ -173,7 +173,7 @@ void LogicalTimeValidator::init(ServiceContext* service) {
}
void LogicalTimeValidator::shutDown() {
- stdx::lock_guard<stdx::mutex> lk(_mutexKeyManager);
+ stdx::lock_guard<Latch> lk(_mutexKeyManager);
if (_keyManager) {
_keyManager->stopMonitoring();
}
@@ -198,23 +198,23 @@ bool LogicalTimeValidator::shouldGossipLogicalTime() {
void LogicalTimeValidator::resetKeyManagerCache() {
log() << "Resetting key manager cache";
{
- stdx::lock_guard<stdx::mutex> keyManagerLock(_mutexKeyManager);
+ stdx::lock_guard<Latch> keyManagerLock(_mutexKeyManager);
invariant(_keyManager);
_keyManager->clearCache();
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lastSeenValidTime = SignedLogicalTime();
_timeProofService.resetCache();
}
void LogicalTimeValidator::stopKeyManager() {
- stdx::lock_guard<stdx::mutex> keyManagerLock(_mutexKeyManager);
+ stdx::lock_guard<Latch> keyManagerLock(_mutexKeyManager);
if (_keyManager) {
log() << "Stopping key manager";
_keyManager->stopMonitoring();
_keyManager->clearCache();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lastSeenValidTime = SignedLogicalTime();
_timeProofService.resetCache();
} else {
@@ -223,7 +223,7 @@ void LogicalTimeValidator::stopKeyManager() {
}
std::shared_ptr<KeysCollectionManager> LogicalTimeValidator::_getKeyManagerCopy() {
- stdx::lock_guard<stdx::mutex> lk(_mutexKeyManager);
+ stdx::lock_guard<Latch> lk(_mutexKeyManager);
invariant(_keyManager);
return _keyManager;
}
diff --git a/src/mongo/db/logical_time_validator.h b/src/mongo/db/logical_time_validator.h
index b87ff47436e..e639b4435d8 100644
--- a/src/mongo/db/logical_time_validator.h
+++ b/src/mongo/db/logical_time_validator.h
@@ -33,7 +33,7 @@
#include "mongo/db/signed_logical_time.h"
#include "mongo/db/time_proof_service.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -122,8 +122,9 @@ private:
SignedLogicalTime _getProof(const KeysCollectionDocument& keyDoc, LogicalTime newTime);
- stdx::mutex _mutex; // protects _lastSeenValidTime
- stdx::mutex _mutexKeyManager; // protects _keyManager
+ Mutex _mutex = MONGO_MAKE_LATCH("LogicalTimeValidator::_mutex"); // protects _lastSeenValidTime
+ Mutex _mutexKeyManager =
+ MONGO_MAKE_LATCH("LogicalTimevalidator::_mutexKeyManager"); // protects _keyManager
SignedLogicalTime _lastSeenValidTime;
TimeProofService _timeProofService;
std::shared_ptr<KeysCollectionManager> _keyManager;
diff --git a/src/mongo/db/operation_context.cpp b/src/mongo/db/operation_context.cpp
index c7f50fe40bd..a88078acb5f 100644
--- a/src/mongo/db/operation_context.cpp
+++ b/src/mongo/db/operation_context.cpp
@@ -36,8 +36,8 @@
#include "mongo/bson/inline_decls.h"
#include "mongo/db/client.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/transport/baton.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/clock_source.h"
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index 025e8f7c1f3..20a367a2092 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -41,8 +41,8 @@
#include "mongo/db/storage/write_unit_of_work.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/transport/session.h"
#include "mongo/util/decorable.h"
#include "mongo/util/interruptible.h"
diff --git a/src/mongo/db/operation_context_group.cpp b/src/mongo/db/operation_context_group.cpp
index c3f46ea9f9f..bb215d21095 100644
--- a/src/mongo/db/operation_context_group.cpp
+++ b/src/mongo/db/operation_context_group.cpp
@@ -61,7 +61,7 @@ OperationContextGroup::Context::Context(OperationContext& ctx, OperationContextG
void OperationContextGroup::Context::discard() {
if (!_movedFrom) {
- stdx::lock_guard<stdx::mutex> lk(_ctxGroup._lock);
+ stdx::lock_guard<Latch> lk(_ctxGroup._lock);
auto it = find(_ctxGroup._contexts, &_opCtx);
_ctxGroup._contexts.erase(it);
_movedFrom = true;
@@ -77,7 +77,7 @@ auto OperationContextGroup::makeOperationContext(Client& client) -> Context {
auto OperationContextGroup::adopt(UniqueOperationContext opCtx) -> Context {
auto cp = opCtx.get();
invariant(cp);
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
_contexts.emplace_back(std::move(opCtx));
return Context(*cp, *this);
}
@@ -87,7 +87,7 @@ auto OperationContextGroup::take(Context ctx) -> Context {
return ctx;
}
{
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
auto it = find(ctx._ctxGroup._contexts, &ctx._opCtx);
_contexts.emplace_back(std::move(*it));
ctx._ctxGroup._contexts.erase(it);
@@ -98,14 +98,14 @@ auto OperationContextGroup::take(Context ctx) -> Context {
void OperationContextGroup::interrupt(ErrorCodes::Error code) {
invariant(code);
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
for (auto&& uniqueOperationContext : _contexts) {
interruptOne(uniqueOperationContext.get(), code);
}
}
bool OperationContextGroup::isEmpty() {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
return _contexts.empty();
}
diff --git a/src/mongo/db/operation_context_group.h b/src/mongo/db/operation_context_group.h
index 189069cdb17..0de0792e269 100644
--- a/src/mongo/db/operation_context_group.h
+++ b/src/mongo/db/operation_context_group.h
@@ -32,7 +32,7 @@
#include "mongo/db/client.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -96,7 +96,7 @@ public:
private:
friend class Context;
- stdx::mutex _lock;
+ Mutex _lock = MONGO_MAKE_LATCH("OperationContextGroup::_lock");
std::vector<UniqueOperationContext> _contexts;
}; // class OperationContextGroup
diff --git a/src/mongo/db/operation_context_test.cpp b/src/mongo/db/operation_context_test.cpp
index 289876d9df2..00fa8734534 100644
--- a/src/mongo/db/operation_context_test.cpp
+++ b/src/mongo/db/operation_context_test.cpp
@@ -254,9 +254,9 @@ public:
}
void checkForInterruptForTimeout(OperationContext* opCtx) {
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
opCtx->waitForConditionOrInterrupt(cv, lk);
}
@@ -334,18 +334,18 @@ TEST_F(OperationDeadlineTests, VeryLargeRelativeDeadlinesNanoseconds) {
TEST_F(OperationDeadlineTests, WaitForMaxTimeExpiredCV) {
auto opCtx = client->makeOperationContext();
opCtx->setDeadlineByDate(mockClock->now(), ErrorCodes::ExceededTimeLimit);
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT_EQ(ErrorCodes::ExceededTimeLimit, opCtx->waitForConditionOrInterruptNoAssert(cv, lk));
}
TEST_F(OperationDeadlineTests, WaitForMaxTimeExpiredCVWithWaitUntilSet) {
auto opCtx = client->makeOperationContext();
opCtx->setDeadlineByDate(mockClock->now(), ErrorCodes::ExceededTimeLimit);
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT_EQ(
ErrorCodes::ExceededTimeLimit,
opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now() + Seconds{10})
@@ -598,17 +598,17 @@ TEST_F(OperationDeadlineTests, DeadlineAfterRunWithoutInterruptDoesntSeeUnviolat
TEST_F(OperationDeadlineTests, WaitForKilledOpCV) {
auto opCtx = client->makeOperationContext();
opCtx->markKilled();
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT_EQ(ErrorCodes::Interrupted, opCtx->waitForConditionOrInterruptNoAssert(cv, lk));
}
TEST_F(OperationDeadlineTests, WaitForUntilExpiredCV) {
auto opCtx = client->makeOperationContext();
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT(stdx::cv_status::timeout ==
unittest::assertGet(
opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now())));
@@ -617,9 +617,9 @@ TEST_F(OperationDeadlineTests, WaitForUntilExpiredCV) {
TEST_F(OperationDeadlineTests, WaitForUntilExpiredCVWithMaxTimeSet) {
auto opCtx = client->makeOperationContext();
opCtx->setDeadlineByDate(mockClock->now() + Seconds{10}, ErrorCodes::ExceededTimeLimit);
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT(stdx::cv_status::timeout ==
unittest::assertGet(
opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now())));
@@ -627,9 +627,9 @@ TEST_F(OperationDeadlineTests, WaitForUntilExpiredCVWithMaxTimeSet) {
TEST_F(OperationDeadlineTests, WaitForDurationExpired) {
auto opCtx = client->makeOperationContext();
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT_FALSE(opCtx->waitForConditionOrInterruptFor(
cv, lk, Milliseconds(-1000), []() -> bool { return false; }));
}
@@ -637,9 +637,9 @@ TEST_F(OperationDeadlineTests, WaitForDurationExpired) {
TEST_F(OperationDeadlineTests, DuringWaitMaxTimeExpirationDominatesUntilExpiration) {
auto opCtx = client->makeOperationContext();
opCtx->setDeadlineByDate(mockClock->now(), ErrorCodes::ExceededTimeLimit);
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT(ErrorCodes::ExceededTimeLimit ==
opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now()));
}
@@ -648,17 +648,17 @@ class ThreadedOperationDeadlineTests : public OperationDeadlineTests {
public:
using CvPred = stdx::function<bool()>;
using WaitFn = stdx::function<bool(
- OperationContext*, stdx::condition_variable&, stdx::unique_lock<stdx::mutex>&, CvPred)>;
+ OperationContext*, stdx::condition_variable&, stdx::unique_lock<Latch>&, CvPred)>;
struct WaitTestState {
void signal() {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
invariant(!isSignaled);
isSignaled = true;
cv.notify_all();
}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("WaitTestState::mutex");
stdx::condition_variable cv;
bool isSignaled = false;
};
@@ -674,7 +674,7 @@ public:
opCtx->setDeadlineByDate(maxTime, ErrorCodes::ExceededTimeLimit);
}
auto predicate = [state] { return state->isSignaled; };
- stdx::unique_lock<stdx::mutex> lk(state->mutex);
+ stdx::unique_lock<Latch> lk(state->mutex);
barrier->countDownAndWait();
return waitFn(opCtx, state->cv, lk, predicate);
});
@@ -684,7 +684,7 @@ public:
// Now we know that the waiter task must own the mutex, because it does not signal the
// barrier until it does.
- stdx::lock_guard<stdx::mutex> lk(state->mutex);
+ stdx::lock_guard<Latch> lk(state->mutex);
// Assuming that opCtx has not already been interrupted and that maxTime and until are
// unexpired, we know that the waiter must be blocked in the condition variable, because it
@@ -699,7 +699,7 @@ public:
Date_t maxTime) {
const auto waitFn = [until](OperationContext* opCtx,
stdx::condition_variable& cv,
- stdx::unique_lock<stdx::mutex>& lk,
+ stdx::unique_lock<Latch>& lk,
CvPred predicate) {
if (until < Date_t::max()) {
return opCtx->waitForConditionOrInterruptUntil(cv, lk, until, predicate);
@@ -718,7 +718,7 @@ public:
Date_t maxTime) {
const auto waitFn = [duration](OperationContext* opCtx,
stdx::condition_variable& cv,
- stdx::unique_lock<stdx::mutex>& lk,
+ stdx::unique_lock<Latch>& lk,
CvPred predicate) {
return opCtx->waitForConditionOrInterruptFor(cv, lk, duration, predicate);
};
@@ -735,7 +735,7 @@ public:
Date_t maxTime) {
auto waitFn = [sleepUntil](OperationContext* opCtx,
stdx::condition_variable& cv,
- stdx::unique_lock<stdx::mutex>& lk,
+ stdx::unique_lock<Latch>& lk,
CvPred predicate) {
lk.unlock();
opCtx->sleepUntil(sleepUntil);
@@ -752,7 +752,7 @@ public:
Date_t maxTime) {
auto waitFn = [sleepFor](OperationContext* opCtx,
stdx::condition_variable& cv,
- stdx::unique_lock<stdx::mutex>& lk,
+ stdx::unique_lock<Latch>& lk,
CvPred predicate) {
lk.unlock();
opCtx->sleepFor(sleepFor);
@@ -956,9 +956,9 @@ TEST(OperationContextTest, TestWaitForConditionOrInterruptNoAssertUntilAPI) {
auto client = serviceCtx->makeClient("OperationContextTest");
auto opCtx = client->makeOperationContext();
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
// Case (2). Expect a Status::OK with a cv_status::timeout.
Date_t deadline = Date_t::now() + Milliseconds(500);
diff --git a/src/mongo/db/operation_time_tracker.cpp b/src/mongo/db/operation_time_tracker.cpp
index 27832209b69..2d45b49747c 100644
--- a/src/mongo/db/operation_time_tracker.cpp
+++ b/src/mongo/db/operation_time_tracker.cpp
@@ -30,7 +30,7 @@
#include "mongo/platform/basic.h"
#include "mongo/db/operation_time_tracker.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace {
@@ -51,12 +51,12 @@ std::shared_ptr<OperationTimeTracker> OperationTimeTracker::get(OperationContext
}
LogicalTime OperationTimeTracker::getMaxOperationTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _maxOperationTime;
}
void OperationTimeTracker::updateOperationTime(LogicalTime newTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (newTime > _maxOperationTime) {
_maxOperationTime = std::move(newTime);
}
diff --git a/src/mongo/db/operation_time_tracker.h b/src/mongo/db/operation_time_tracker.h
index 45b06ccac6e..a259ee22d37 100644
--- a/src/mongo/db/operation_time_tracker.h
+++ b/src/mongo/db/operation_time_tracker.h
@@ -31,7 +31,7 @@
#include "mongo/db/logical_time.h"
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -57,7 +57,7 @@ public:
private:
// protects _maxOperationTime
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("OperationTimeTracker::_mutex");
LogicalTime _maxOperationTime;
};
diff --git a/src/mongo/db/periodic_runner_job_abort_expired_transactions.h b/src/mongo/db/periodic_runner_job_abort_expired_transactions.h
index 88bf08d7ee5..f372db87226 100644
--- a/src/mongo/db/periodic_runner_job_abort_expired_transactions.h
+++ b/src/mongo/db/periodic_runner_job_abort_expired_transactions.h
@@ -32,7 +32,7 @@
#include <memory>
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
namespace mongo {
@@ -55,7 +55,7 @@ private:
inline static const auto _serviceDecoration =
ServiceContext::declareDecoration<PeriodicThreadToAbortExpiredTransactions>();
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("PeriodicThreadToAbortExpiredTransactions::_mutex");
std::shared_ptr<PeriodicJobAnchor> _anchor;
};
diff --git a/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h b/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h
index c6a2e830a86..3598ea0cdb9 100644
--- a/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h
+++ b/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h
@@ -32,7 +32,7 @@
#include <memory>
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
namespace mongo {
@@ -58,7 +58,8 @@ private:
inline static const auto _serviceDecoration =
ServiceContext::declareDecoration<PeriodicThreadToDecreaseSnapshotHistoryIfNotNeeded>();
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex =
+ MONGO_MAKE_LATCH("PeriodicThreadToDecreaseSnapshotHistoryCachePressure::_mutex");
std::shared_ptr<PeriodicJobAnchor> _anchor;
};
diff --git a/src/mongo/db/pipeline/document_source_exchange.cpp b/src/mongo/db/pipeline/document_source_exchange.cpp
index 93cb6771f35..97eac1fc07f 100644
--- a/src/mongo/db/pipeline/document_source_exchange.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange.cpp
@@ -48,13 +48,13 @@ MONGO_FAIL_POINT_DEFINE(exchangeFailLoadNextBatch);
class MutexAndResourceLock {
OperationContext* _opCtx;
ResourceYielder* _resourceYielder;
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
public:
// Must be constructed with the mutex held. 'yielder' may be null if there are no resources
// which need to be yielded while waiting.
MutexAndResourceLock(OperationContext* opCtx,
- stdx::unique_lock<stdx::mutex> m,
+ stdx::unique_lock<Latch> m,
ResourceYielder* yielder)
: _opCtx(opCtx), _resourceYielder(yielder), _lock(std::move(m)) {
invariant(_lock.owns_lock());
@@ -78,7 +78,7 @@ public:
* Releases ownership of the lock to the caller. May only be called when the mutex is held
* (after a call to unlock(), for example).
*/
- stdx::unique_lock<stdx::mutex> releaseLockOwnership() {
+ stdx::unique_lock<Latch> releaseLockOwnership() {
invariant(_lock.owns_lock());
return std::move(_lock);
}
@@ -280,7 +280,7 @@ DocumentSource::GetNextResult Exchange::getNext(OperationContext* opCtx,
size_t consumerId,
ResourceYielder* resourceYielder) {
// Grab a lock.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
for (;;) {
// Guard against some of the trickiness we do with moving the lock to/from the
@@ -434,7 +434,7 @@ size_t Exchange::getTargetConsumer(const Document& input) {
}
void Exchange::dispose(OperationContext* opCtx, size_t consumerId) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_disposeRunDown < getConsumers());
diff --git a/src/mongo/db/pipeline/document_source_exchange.h b/src/mongo/db/pipeline/document_source_exchange.h
index 68b3a037047..bf83e023044 100644
--- a/src/mongo/db/pipeline/document_source_exchange.h
+++ b/src/mongo/db/pipeline/document_source_exchange.h
@@ -36,8 +36,8 @@
#include "mongo/db/pipeline/document_source.h"
#include "mongo/db/pipeline/exchange_spec_gen.h"
#include "mongo/db/pipeline/field_path.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -170,8 +170,8 @@ private:
std::unique_ptr<Pipeline, PipelineDeleter> _pipeline;
// Synchronization.
- stdx::mutex _mutex;
- stdx::condition_variable_any _haveBufferSpace;
+ Mutex _mutex = MONGO_MAKE_LATCH("Exchange::_mutex");
+ stdx::condition_variable _haveBufferSpace;
// A thread that is currently loading the exchange buffers.
size_t _loadingThreadId{kInvalidThreadId};
diff --git a/src/mongo/db/pipeline/document_source_exchange_test.cpp b/src/mongo/db/pipeline/document_source_exchange_test.cpp
index ef4f626e7b6..0cf42afaf48 100644
--- a/src/mongo/db/pipeline/document_source_exchange_test.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange_test.cpp
@@ -492,7 +492,7 @@ TEST_F(DocumentSourceExchangeTest, RandomExchangeNConsumerResourceYielding) {
// thread holds this while it calls getNext(). This is to simulate the case where a thread may
// hold some "real" resources which need to be yielded while waiting, such as the Session, or
// the locks held in a transaction.
- stdx::mutex artificalGlobalMutex;
+ auto artificalGlobalMutex = MONGO_MAKE_LATCH();
boost::intrusive_ptr<Exchange> ex =
new Exchange(std::move(spec), unittest::assertGet(Pipeline::create({source}, getExpCtx())));
@@ -503,7 +503,7 @@ TEST_F(DocumentSourceExchangeTest, RandomExchangeNConsumerResourceYielding) {
*/
class MutexYielder : public ResourceYielder {
public:
- MutexYielder(stdx::mutex* mutex) : _lock(*mutex, stdx::defer_lock) {}
+ MutexYielder(Latch* mutex) : _lock(*mutex, stdx::defer_lock) {}
void yield(OperationContext* opCtx) override {
_lock.unlock();
@@ -513,12 +513,12 @@ TEST_F(DocumentSourceExchangeTest, RandomExchangeNConsumerResourceYielding) {
_lock.lock();
}
- stdx::unique_lock<stdx::mutex>& getLock() {
+ stdx::unique_lock<Latch>& getLock() {
return _lock;
}
private:
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
};
/**
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index ba451f112a6..1b506ecb4a2 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -553,7 +553,7 @@ Status PlanCache::set(const CanonicalQuery& query,
const auto key = computeKey(query);
const size_t newWorks = why->stats[0]->common.works;
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
bool isNewEntryActive = false;
uint32_t queryHash;
uint32_t planCacheKey;
@@ -608,7 +608,7 @@ void PlanCache::deactivate(const CanonicalQuery& query) {
}
PlanCacheKey key = computeKey(query);
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
PlanCacheEntry* entry = nullptr;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
@@ -625,7 +625,7 @@ PlanCache::GetResult PlanCache::get(const CanonicalQuery& query) const {
}
PlanCache::GetResult PlanCache::get(const PlanCacheKey& key) const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
PlanCacheEntry* entry = nullptr;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
@@ -642,7 +642,7 @@ PlanCache::GetResult PlanCache::get(const PlanCacheKey& key) const {
Status PlanCache::feedback(const CanonicalQuery& cq, double score) {
PlanCacheKey ck = computeKey(cq);
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
PlanCacheEntry* entry;
Status cacheStatus = _cache.get(ck, &entry);
if (!cacheStatus.isOK()) {
@@ -659,12 +659,12 @@ Status PlanCache::feedback(const CanonicalQuery& cq, double score) {
}
Status PlanCache::remove(const CanonicalQuery& canonicalQuery) {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
return _cache.remove(computeKey(canonicalQuery));
}
void PlanCache::clear() {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
_cache.clear();
}
@@ -679,7 +679,7 @@ PlanCacheKey PlanCache::computeKey(const CanonicalQuery& cq) const {
StatusWith<std::unique_ptr<PlanCacheEntry>> PlanCache::getEntry(const CanonicalQuery& query) const {
PlanCacheKey key = computeKey(query);
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
PlanCacheEntry* entry;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
@@ -691,7 +691,7 @@ StatusWith<std::unique_ptr<PlanCacheEntry>> PlanCache::getEntry(const CanonicalQ
}
std::vector<std::unique_ptr<PlanCacheEntry>> PlanCache::getAllEntries() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
std::vector<std::unique_ptr<PlanCacheEntry>> entries;
for (auto&& cacheEntry : _cache) {
@@ -703,7 +703,7 @@ std::vector<std::unique_ptr<PlanCacheEntry>> PlanCache::getAllEntries() const {
}
size_t PlanCache::size() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
return _cache.size();
}
@@ -715,7 +715,7 @@ std::vector<BSONObj> PlanCache::getMatchingStats(
const std::function<BSONObj(const PlanCacheEntry&)>& serializationFunc,
const std::function<bool(const BSONObj&)>& filterFunc) const {
std::vector<BSONObj> results;
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
for (auto&& cacheEntry : _cache) {
const auto entry = cacheEntry.second;
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 0b8d6216678..46dc7838a69 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -40,7 +40,7 @@
#include "mongo/db/query/plan_cache_indexability.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/container_size_helper.h"
namespace mongo {
@@ -612,7 +612,7 @@ private:
LRUKeyValue<PlanCacheKey, PlanCacheEntry, PlanCacheKeyHasher> _cache;
// Protects _cache.
- mutable stdx::mutex _cacheMutex;
+ mutable Mutex _cacheMutex = MONGO_MAKE_LATCH("PlanCache::_cacheMutex");
// Full namespace of collection.
std::string _ns;
diff --git a/src/mongo/db/query/query_planner_wildcard_index_test.cpp b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
index 9749d37eac3..f45a5bde0ea 100644
--- a/src/mongo/db/query/query_planner_wildcard_index_test.cpp
+++ b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
@@ -557,7 +557,6 @@ TEST_F(QueryPlannerWildcardTest, OrEqualityWithTwoPredicatesUsesTwoPaths) {
"bounds: {'$_path': [['a','a',true,true]], a: [[5,5,true,true]]}}}, "
"{ixscan: {filter: null, pattern: {'$_path': 1, b: 1},"
"bounds: {'$_path': [['b','b',true,true]], b: [[10,10,true,true]]}}}]}}}}");
- ;
}
TEST_F(QueryPlannerWildcardTest, OrWithOneRegularAndOneWildcardIndexPathUsesTwoIndexes) {
@@ -572,7 +571,6 @@ TEST_F(QueryPlannerWildcardTest, OrWithOneRegularAndOneWildcardIndexPathUsesTwoI
"bounds: {'$_path': [['a','a',true,true]], a: [[5,5,true,true]]}}}, "
"{ixscan: {filter: null, pattern: {b: 1},"
"bounds: {b: [[10,10,true,true]]}}}]}}}}");
- ;
}
TEST_F(QueryPlannerWildcardTest, BasicSkip) {
diff --git a/src/mongo/db/query/query_settings.cpp b/src/mongo/db/query/query_settings.cpp
index da477a862e1..5060d6d9ac8 100644
--- a/src/mongo/db/query/query_settings.cpp
+++ b/src/mongo/db/query/query_settings.cpp
@@ -78,7 +78,7 @@ AllowedIndexEntry::AllowedIndexEntry(const BSONObj& query,
boost::optional<AllowedIndicesFilter> QuerySettings::getAllowedIndicesFilter(
const CanonicalQuery::QueryShapeString& key) const {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
AllowedIndexEntryMap::const_iterator cacheIter = _allowedIndexEntryMap.find(key);
// Nothing to do if key does not exist in query settings.
@@ -90,7 +90,7 @@ boost::optional<AllowedIndicesFilter> QuerySettings::getAllowedIndicesFilter(
}
std::vector<AllowedIndexEntry> QuerySettings::getAllAllowedIndices() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
std::vector<AllowedIndexEntry> entries;
for (const auto& entryPair : _allowedIndexEntryMap) {
entries.push_back(entryPair.second);
@@ -109,7 +109,7 @@ void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery,
const BSONObj collation =
canonicalQuery.getCollator() ? canonicalQuery.getCollator()->getSpec().toBSON() : BSONObj();
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
_allowedIndexEntryMap.erase(key);
_allowedIndexEntryMap.emplace(
std::piecewise_construct,
@@ -118,7 +118,7 @@ void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery,
}
void QuerySettings::removeAllowedIndices(const CanonicalQuery::QueryShapeString& key) {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
// Nothing to do if key does not exist in query settings.
@@ -130,7 +130,7 @@ void QuerySettings::removeAllowedIndices(const CanonicalQuery::QueryShapeString&
}
void QuerySettings::clearAllowedIndices() {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
_allowedIndexEntryMap.clear();
}
diff --git a/src/mongo/db/query/query_settings.h b/src/mongo/db/query/query_settings.h
index 4fac8e39161..f317a2780c2 100644
--- a/src/mongo/db/query/query_settings.h
+++ b/src/mongo/db/query/query_settings.h
@@ -37,7 +37,7 @@
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/index_entry.h"
#include "mongo/db/query/plan_cache.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
@@ -152,7 +152,7 @@ private:
/**
* Protects data in query settings.
*/
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("QuerySettings::_mutex");
};
} // namespace mongo
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index b5ce3b2fdb9..b31b73b8918 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -73,7 +73,7 @@ public:
*/
std::tuple<bool, std::shared_ptr<Notification<Status>>> getOrCreateWriteRequest(
LogicalTime clusterTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto lastEl = _writeRequests.rbegin();
if (lastEl != _writeRequests.rend() && lastEl->first >= clusterTime.asTimestamp()) {
return std::make_tuple(false, lastEl->second);
@@ -88,7 +88,7 @@ public:
* Erases writeRequest that happened at clusterTime
*/
void deleteWriteRequest(LogicalTime clusterTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto el = _writeRequests.find(clusterTime.asTimestamp());
invariant(el != _writeRequests.end());
invariant(el->second);
@@ -97,7 +97,7 @@ public:
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WriteRequestSynchronizer::_mutex");
std::map<Timestamp, std::shared_ptr<Notification<Status>>> _writeRequests;
};
diff --git a/src/mongo/db/repl/abstract_async_component.cpp b/src/mongo/db/repl/abstract_async_component.cpp
index 1b99507fc5c..77b086af97e 100644
--- a/src/mongo/db/repl/abstract_async_component.cpp
+++ b/src/mongo/db/repl/abstract_async_component.cpp
@@ -52,7 +52,7 @@ std::string AbstractAsyncComponent::_getComponentName() const {
}
bool AbstractAsyncComponent::isActive() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
return _isActive_inlock();
}
@@ -61,7 +61,7 @@ bool AbstractAsyncComponent::_isActive_inlock() noexcept {
}
bool AbstractAsyncComponent::_isShuttingDown() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
return _isShuttingDown_inlock();
}
@@ -70,7 +70,7 @@ bool AbstractAsyncComponent::_isShuttingDown_inlock() noexcept {
}
Status AbstractAsyncComponent::startup() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
switch (_state) {
case State::kPreStart:
_state = State::kRunning;
@@ -97,7 +97,7 @@ Status AbstractAsyncComponent::startup() noexcept {
}
void AbstractAsyncComponent::shutdown() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -116,17 +116,17 @@ void AbstractAsyncComponent::shutdown() noexcept {
}
void AbstractAsyncComponent::join() noexcept {
- stdx::unique_lock<stdx::mutex> lk(*_getMutex());
+ stdx::unique_lock<Latch> lk(*_getMutex());
_stateCondition.wait(lk, [this]() { return !_isActive_inlock(); });
}
AbstractAsyncComponent::State AbstractAsyncComponent::getState_forTest() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
return _state;
}
void AbstractAsyncComponent::_transitionToComplete() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
_transitionToComplete_inlock();
}
@@ -138,13 +138,13 @@ void AbstractAsyncComponent::_transitionToComplete_inlock() noexcept {
Status AbstractAsyncComponent::_checkForShutdownAndConvertStatus(
const executor::TaskExecutor::CallbackArgs& callbackArgs, const std::string& message) {
- stdx::unique_lock<stdx::mutex> lk(*_getMutex());
+ stdx::unique_lock<Latch> lk(*_getMutex());
return _checkForShutdownAndConvertStatus_inlock(callbackArgs, message);
}
Status AbstractAsyncComponent::_checkForShutdownAndConvertStatus(const Status& status,
const std::string& message) {
- stdx::unique_lock<stdx::mutex> lk(*_getMutex());
+ stdx::unique_lock<Latch> lk(*_getMutex());
return _checkForShutdownAndConvertStatus_inlock(status, message);
}
diff --git a/src/mongo/db/repl/abstract_async_component.h b/src/mongo/db/repl/abstract_async_component.h
index 64d88ad41e8..5b0e6426900 100644
--- a/src/mongo/db/repl/abstract_async_component.h
+++ b/src/mongo/db/repl/abstract_async_component.h
@@ -37,8 +37,8 @@
#include "mongo/base/static_assert.h"
#include "mongo/base/status.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
namespace repl {
@@ -207,7 +207,7 @@ private:
/**
* Returns mutex to guard this component's state variable.
*/
- virtual stdx::mutex* _getMutex() noexcept = 0;
+ virtual Mutex* _getMutex() noexcept = 0;
private:
// All member variables are labeled with one of the following codes indicating the
@@ -259,7 +259,7 @@ Status AbstractAsyncComponent::_startupComponent_inlock(std::unique_ptr<T>& comp
template <typename T>
Status AbstractAsyncComponent::_startupComponent(std::unique_ptr<T>& component) {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
return _startupComponent_inlock(component);
}
@@ -275,7 +275,7 @@ void AbstractAsyncComponent::_shutdownComponent_inlock(const std::unique_ptr<T>&
template <typename T>
void AbstractAsyncComponent::_shutdownComponent(const std::unique_ptr<T>& component) {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
_shutdownComponent_inlock(component);
}
diff --git a/src/mongo/db/repl/abstract_async_component_test.cpp b/src/mongo/db/repl/abstract_async_component_test.cpp
index 4fdc4128cb7..ad74edf0f33 100644
--- a/src/mongo/db/repl/abstract_async_component_test.cpp
+++ b/src/mongo/db/repl/abstract_async_component_test.cpp
@@ -33,8 +33,8 @@
#include "mongo/db/repl/abstract_async_component.h"
#include "mongo/db/repl/task_executor_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/unittest/unittest.h"
@@ -94,10 +94,10 @@ public:
private:
Status _doStartup_inlock() noexcept override;
void _doShutdown_inlock() noexcept override;
- stdx::mutex* _getMutex() noexcept override;
+ Mutex* _getMutex() noexcept override;
// Used by AbstractAsyncComponent to guard start changes.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MockAsyncComponent::_mutex");
public:
// Returned by _doStartup_inlock(). Override for testing.
@@ -124,7 +124,7 @@ Status MockAsyncComponent::scheduleWorkAndSaveHandle_forTest(
executor::TaskExecutor::CallbackFn work,
executor::TaskExecutor::CallbackHandle* handle,
const std::string& name) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _scheduleWorkAndSaveHandle_inlock(std::move(work), handle, name);
}
@@ -133,12 +133,12 @@ Status MockAsyncComponent::scheduleWorkAtAndSaveHandle_forTest(
executor::TaskExecutor::CallbackFn work,
executor::TaskExecutor::CallbackHandle* handle,
const std::string& name) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _scheduleWorkAtAndSaveHandle_inlock(when, std::move(work), handle, name);
}
void MockAsyncComponent::cancelHandle_forTest(executor::TaskExecutor::CallbackHandle handle) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_cancelHandle_inlock(handle);
}
@@ -159,7 +159,7 @@ Status MockAsyncComponent::_doStartup_inlock() noexcept {
void MockAsyncComponent::_doShutdown_inlock() noexcept {}
-stdx::mutex* MockAsyncComponent::_getMutex() noexcept {
+Mutex* MockAsyncComponent::_getMutex() noexcept {
return &_mutex;
}
diff --git a/src/mongo/db/repl/abstract_oplog_fetcher.cpp b/src/mongo/db/repl/abstract_oplog_fetcher.cpp
index a3f27e65e94..820d2417e9d 100644
--- a/src/mongo/db/repl/abstract_oplog_fetcher.cpp
+++ b/src/mongo/db/repl/abstract_oplog_fetcher.cpp
@@ -38,8 +38,8 @@
#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/repl_server_parameters_gen.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -92,7 +92,7 @@ Milliseconds AbstractOplogFetcher::_getGetMoreMaxTime() const {
}
std::string AbstractOplogFetcher::toString() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
str::stream msg;
msg << _getComponentName() << " -"
<< " last optime fetched: " << _lastFetched.toString();
@@ -117,7 +117,7 @@ void AbstractOplogFetcher::_makeAndScheduleFetcherCallback(
Status scheduleStatus = Status::OK();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_fetcher = _makeFetcher(findCommandObj, metadataObj, _getInitialFindMaxTime());
scheduleStatus = _scheduleFetcher_inlock();
}
@@ -143,7 +143,7 @@ void AbstractOplogFetcher::_doShutdown_inlock() noexcept {
}
}
-stdx::mutex* AbstractOplogFetcher::_getMutex() noexcept {
+Mutex* AbstractOplogFetcher::_getMutex() noexcept {
return &_mutex;
}
@@ -157,12 +157,12 @@ OpTime AbstractOplogFetcher::getLastOpTimeFetched_forTest() const {
}
OpTime AbstractOplogFetcher::_getLastOpTimeFetched() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _lastFetched;
}
BSONObj AbstractOplogFetcher::getCommandObject_forTest() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _fetcher->getCommandObject();
}
@@ -197,7 +197,7 @@ void AbstractOplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
_makeFindCommandObject(_nss, _getLastOpTimeFetched(), _getRetriedFindMaxTime());
BSONObj metadataObj = _makeMetadataObject();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_fetcherRestarts == _maxFetcherRestarts) {
log() << "Error returned from oplog query (no more query restarts left): "
<< redact(responseStatus);
@@ -229,7 +229,7 @@ void AbstractOplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
// Reset fetcher restart counter on successful response.
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_isActive_inlock());
_fetcherRestarts = 0;
}
@@ -274,7 +274,7 @@ void AbstractOplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
LOG(3) << _getComponentName()
<< " setting last fetched optime ahead after batch: " << lastDoc;
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_lastFetched = lastDoc;
}
@@ -295,7 +295,7 @@ void AbstractOplogFetcher::_finishCallback(Status status) {
_onShutdownCallbackFn(status);
decltype(_onShutdownCallbackFn) onShutdownCallbackFn;
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_transitionToComplete_inlock();
// Release any resources that might be held by the '_onShutdownCallbackFn' function object.
diff --git a/src/mongo/db/repl/abstract_oplog_fetcher.h b/src/mongo/db/repl/abstract_oplog_fetcher.h
index 11d59fd82a8..45c5961b385 100644
--- a/src/mongo/db/repl/abstract_oplog_fetcher.h
+++ b/src/mongo/db/repl/abstract_oplog_fetcher.h
@@ -34,8 +34,8 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/abstract_async_component.h"
#include "mongo/db/repl/optime_with.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
namespace repl {
@@ -147,7 +147,7 @@ protected:
virtual void _doShutdown_inlock() noexcept override;
private:
- stdx::mutex* _getMutex() noexcept override;
+ Mutex* _getMutex() noexcept override;
/**
* This function must be overriden by subclass oplog fetchers to specify what `find` command
@@ -213,7 +213,7 @@ private:
const std::size_t _maxFetcherRestarts;
// Protects member data of this AbstractOplogFetcher.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("AbstractOplogFetcher::_mutex");
// Function to call when the oplog fetcher shuts down.
OnShutdownCallbackFn _onShutdownCallbackFn;
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.cpp b/src/mongo/db/repl/base_cloner_test_fixture.cpp
index 6d7918a7f5c..bf98dc7eec9 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.cpp
+++ b/src/mongo/db/repl/base_cloner_test_fixture.cpp
@@ -149,13 +149,13 @@ void BaseClonerTest::clear() {
}
void BaseClonerTest::setStatus(const Status& status) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_status = status;
_setStatusCondition.notify_all();
}
const Status& BaseClonerTest::getStatus() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _status;
}
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.h b/src/mongo/db/repl/base_cloner_test_fixture.h
index c4d56c00397..328bfdb27d2 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.h
+++ b/src/mongo/db/repl/base_cloner_test_fixture.h
@@ -41,8 +41,8 @@
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
@@ -135,7 +135,7 @@ protected:
private:
// Protects member data of this base cloner fixture.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("BaseCloner::_mutex");
stdx::condition_variable _setStatusCondition;
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 53d3e6a3612..d8cfe485c67 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -133,7 +133,7 @@ void BackgroundSync::startup(OperationContext* opCtx) {
}
void BackgroundSync::shutdown(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_state = ProducerState::Stopped;
@@ -157,7 +157,7 @@ void BackgroundSync::join(OperationContext* opCtx) {
}
bool BackgroundSync::inShutdown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _inShutdown_inlock();
}
@@ -236,7 +236,7 @@ void BackgroundSync::_produce() {
HostAndPort source;
SyncSourceResolverResponse syncSourceResp;
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_lastOpTimeFetched.isNull()) {
// then we're initial syncing and we're still waiting for this to be set
lock.unlock();
@@ -259,7 +259,7 @@ void BackgroundSync::_produce() {
auto opCtx = cc().makeOperationContext();
minValidSaved = _replicationProcess->getConsistencyMarkers()->getMinValid(opCtx.get());
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return;
}
@@ -289,7 +289,7 @@ void BackgroundSync::_produce() {
fassert(40349, status);
_syncSourceResolver->join();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_syncSourceResolver.reset();
}
@@ -338,7 +338,7 @@ void BackgroundSync::_produce() {
return;
} else if (syncSourceResp.isOK() && !syncSourceResp.getSyncSource().empty()) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_syncSourceHost = syncSourceResp.getSyncSource();
source = _syncSourceHost;
}
@@ -380,7 +380,7 @@ void BackgroundSync::_produce() {
}
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return;
}
@@ -428,7 +428,7 @@ void BackgroundSync::_produce() {
},
onOplogFetcherShutdownCallbackFn,
bgSyncOplogFetcherBatchSize);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return;
}
@@ -504,7 +504,7 @@ Status BackgroundSync::_enqueueDocuments(Fetcher::Documents::const_iterator begi
// are done to prevent going into shutdown. This avoids a race where shutdown() clears the
// buffer between the time we check _inShutdown and the point where we finish writing to the
// buffer.
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return Status::OK();
}
@@ -556,7 +556,7 @@ void BackgroundSync::_runRollback(OperationContext* opCtx,
OpTime lastOpTimeFetched;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
lastOpTimeFetched = _lastOpTimeFetched;
}
@@ -633,7 +633,7 @@ void BackgroundSync::_runRollbackViaRecoverToCheckpoint(
rollbackRemoteOplogQueryBatchSize.load());
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return;
}
@@ -670,18 +670,18 @@ void BackgroundSync::_fallBackOnRollbackViaRefetch(
}
HostAndPort BackgroundSync::getSyncTarget() const {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return _syncSourceHost;
}
void BackgroundSync::clearSyncTarget() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
log() << "Resetting sync source to empty, which was " << _syncSourceHost;
_syncSourceHost = HostAndPort();
}
void BackgroundSync::stop(bool resetLastFetchedOptime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_state = ProducerState::Stopped;
log() << "Stopping replication producer";
@@ -711,7 +711,7 @@ void BackgroundSync::start(OperationContext* opCtx) {
do {
lastAppliedOpTime = _readLastAppliedOpTime(opCtx);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Double check the state after acquiring the mutex.
if (_state != ProducerState::Starting) {
return;
@@ -781,12 +781,12 @@ bool BackgroundSync::shouldStopFetching() const {
}
BackgroundSync::ProducerState BackgroundSync::getState() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _state;
}
void BackgroundSync::startProducerIfStopped() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Let producer run if it's already running.
if (_state == ProducerState::Stopped) {
_state = ProducerState::Starting;
diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h
index 194bf202b8f..de99f5191af 100644
--- a/src/mongo/db/repl/bgsync.h
+++ b/src/mongo/db/repl/bgsync.h
@@ -41,9 +41,9 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/rollback_impl.h"
#include "mongo/db/repl/sync_source_resolver.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/net/hostandport.h"
@@ -230,7 +230,7 @@ private:
// Protects member data of BackgroundSync.
// Never hold the BackgroundSync mutex when trying to acquire the ReplicationCoordinator mutex.
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("BackgroundSync::_mutex"); // (S)
OpTime _lastOpTimeFetched; // (M)
diff --git a/src/mongo/db/repl/callback_completion_guard.h b/src/mongo/db/repl/callback_completion_guard.h
index 9eb4020db7d..a83e27af979 100644
--- a/src/mongo/db/repl/callback_completion_guard.h
+++ b/src/mongo/db/repl/callback_completion_guard.h
@@ -32,8 +32,8 @@
#include <boost/optional.hpp>
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
namespace mongo {
@@ -83,9 +83,9 @@ public:
* Requires either a unique_lock or lock_guard to be passed in to ensure that we call
* _cancelRemainingWork_inlock()) while we have a lock on the callers's mutex.
*/
- void setResultAndCancelRemainingWork_inlock(const stdx::lock_guard<stdx::mutex>& lock,
+ void setResultAndCancelRemainingWork_inlock(const stdx::lock_guard<Latch>& lock,
const Result& result);
- void setResultAndCancelRemainingWork_inlock(const stdx::unique_lock<stdx::mutex>& lock,
+ void setResultAndCancelRemainingWork_inlock(const stdx::unique_lock<Latch>& lock,
const Result& result);
private:
@@ -124,13 +124,13 @@ CallbackCompletionGuard<Result>::~CallbackCompletionGuard() {
template <typename Result>
void CallbackCompletionGuard<Result>::setResultAndCancelRemainingWork_inlock(
- const stdx::lock_guard<stdx::mutex>& lock, const Result& result) {
+ const stdx::lock_guard<Latch>& lock, const Result& result) {
_setResultAndCancelRemainingWork_inlock(result);
}
template <typename Result>
void CallbackCompletionGuard<Result>::setResultAndCancelRemainingWork_inlock(
- const stdx::unique_lock<stdx::mutex>& lock, const Result& result) {
+ const stdx::unique_lock<Latch>& lock, const Result& result) {
invariant(lock.owns_lock());
_setResultAndCancelRemainingWork_inlock(result);
}
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index a80a9160896..31e4f1c9c42 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -87,7 +87,7 @@ private:
std::unique_ptr<stdx::thread> _quorumCheckThread;
Status _quorumCheckStatus;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CheckQuorumTest::_mutex");
bool _isQuorumCheckDone;
};
@@ -108,13 +108,13 @@ Status CheckQuorumTest::waitForQuorumCheck() {
}
bool CheckQuorumTest::isQuorumCheckDone() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isQuorumCheckDone;
}
void CheckQuorumTest::_runQuorumCheck(const ReplSetConfig& config, int myIndex) {
_quorumCheckStatus = _runQuorumCheckImpl(config, myIndex);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_isQuorumCheckDone = true;
}
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index a974144673a..a283762f140 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -57,8 +57,8 @@ namespace mongo {
namespace repl {
namespace {
-using LockGuard = stdx::lock_guard<stdx::mutex>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
+using UniqueLock = stdx::unique_lock<Latch>;
using executor::RemoteCommandRequest;
constexpr auto kCountResponseDocumentCountFieldName = "n"_sd;
@@ -199,7 +199,7 @@ bool CollectionCloner::_isActive_inlock() const {
}
bool CollectionCloner::_isShuttingDown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return State::kShuttingDown == _state;
}
@@ -230,7 +230,7 @@ Status CollectionCloner::startup() noexcept {
}
void CollectionCloner::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -263,12 +263,12 @@ void CollectionCloner::_cancelRemainingWork_inlock() {
}
CollectionCloner::Stats CollectionCloner::getStats() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _stats;
}
void CollectionCloner::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() {
return (_queryState == QueryState::kNotStarted || _queryState == QueryState::kFinished) &&
!_isActive_inlock();
@@ -288,7 +288,7 @@ void CollectionCloner::setScheduleDbWorkFn_forTest(ScheduleDbWorkFn scheduleDbWo
}
void CollectionCloner::setCreateClientFn_forTest(const CreateClientFn& createClientFn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_createClientFn = createClientFn;
}
@@ -478,7 +478,7 @@ void CollectionCloner::_beginCollectionCallback(const executor::TaskExecutor::Ca
auto cancelRemainingWorkInLock = [this]() { _cancelRemainingWork_inlock(); };
auto finishCallbackFn = [this](const Status& status) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_queryState = QueryState::kFinished;
_clientConnection.reset();
}
@@ -498,13 +498,13 @@ void CollectionCloner::_beginCollectionCallback(const executor::TaskExecutor::Ca
void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& callbackData,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
if (!callbackData.status.isOK()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, callbackData.status);
return;
}
bool queryStateOK = false;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
queryStateOK = _queryState == QueryState::kNotStarted;
if (queryStateOK) {
_queryState = QueryState::kRunning;
@@ -529,12 +529,12 @@ void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& cal
Status clientConnectionStatus = _clientConnection->connect(_source, StringData());
if (!clientConnectionStatus.isOK()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, clientConnectionStatus);
return;
}
if (!replAuthenticate(_clientConnection.get())) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(
lock,
{ErrorCodes::AuthenticationFailed,
@@ -561,7 +561,7 @@ void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& cal
} catch (const DBException& e) {
auto queryStatus = e.toStatus().withContext(str::stream() << "Error querying collection '"
<< _sourceNss.ns());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (queryStatus.code() == ErrorCodes::OperationFailed ||
queryStatus.code() == ErrorCodes::CursorNotFound ||
queryStatus.code() == ErrorCodes::QueryPlanKilled) {
@@ -581,7 +581,7 @@ void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& cal
}
}
waitForDbWorker();
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, Status::OK());
}
@@ -589,7 +589,7 @@ void CollectionCloner::_handleNextBatch(std::shared_ptr<OnCompletionGuard> onCom
DBClientCursorBatchIterator& iter) {
_stats.receivedBatches++;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
uassert(ErrorCodes::CallbackCanceled,
"Collection cloning cancelled.",
_queryState != QueryState::kCanceling);
@@ -628,7 +628,7 @@ void CollectionCloner::_handleNextBatch(std::shared_ptr<OnCompletionGuard> onCom
}
void CollectionCloner::_verifyCollectionWasDropped(
- const stdx::unique_lock<stdx::mutex>& lk,
+ const stdx::unique_lock<Latch>& lk,
Status batchStatus,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
// If we already have a _verifyCollectionDroppedScheduler, just return; the existing
@@ -691,7 +691,7 @@ void CollectionCloner::_insertDocumentsCallback(
const executor::TaskExecutor::CallbackArgs& cbd,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
if (!cbd.status.isOK()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, cbd.status);
return;
}
diff --git a/src/mongo/db/repl/collection_cloner.h b/src/mongo/db/repl/collection_cloner.h
index 33ca6ef6e71..1eb92679c9e 100644
--- a/src/mongo/db/repl/collection_cloner.h
+++ b/src/mongo/db/repl/collection_cloner.h
@@ -47,9 +47,9 @@
#include "mongo/db/repl/storage_interface.h"
#include "mongo/db/repl/task_runner.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/progress_meter.h"
@@ -239,7 +239,7 @@ private:
* Verifies that an error from the query was the result of a collection drop. If
* so, cloning is stopped with no error. Otherwise it is stopped with the given error.
*/
- void _verifyCollectionWasDropped(const stdx::unique_lock<stdx::mutex>& lk,
+ void _verifyCollectionWasDropped(const stdx::unique_lock<Latch>& lk,
Status batchStatus,
std::shared_ptr<OnCompletionGuard> onCompletionGuard);
@@ -259,7 +259,7 @@ private:
// (S) Self-synchronizing; access in any way from any context.
// (RT) Read-only in concurrent operation; synchronized externally by tests
//
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("CollectionCloner::_mutex");
mutable stdx::condition_variable _condition; // (M)
executor::TaskExecutor* _executor; // (R) Not owned by us.
ThreadPool* _dbWorkThreadPool; // (R) Not owned by us.
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index 09e61df9080..84cd3a8004b 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -66,7 +66,7 @@ public:
: MockDBClientConnection(remote), _net(net) {}
virtual ~FailableMockDBClientConnection() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_paused = false;
_cond.notify_all();
_cond.wait(lk, [this] { return !_resuming; });
@@ -87,13 +87,13 @@ public:
int batchSize) override {
ON_BLOCK_EXIT([this]() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_queryCount++;
}
_cond.notify_all();
});
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_waiting = _paused;
_cond.notify_all();
while (_paused) {
@@ -119,14 +119,14 @@ public:
void pause() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_paused = true;
}
_cond.notify_all();
}
void resume() {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_resuming = true;
_resume(&lk);
_resuming = false;
@@ -136,13 +136,13 @@ public:
// Waits for the next query after pause() is called to start.
void waitForPausedQuery() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cond.wait(lk, [this] { return _waiting; });
}
// Resumes, then waits for the next query to run after resume() is called to complete.
void resumeAndWaitForResumedQuery() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_resuming = true;
_resume(&lk);
_cond.notify_all(); // This is to wake up the paused thread.
@@ -153,7 +153,7 @@ public:
private:
executor::NetworkInterfaceMock* _net;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FailableMockDBClientConnection::_mutex");
stdx::condition_variable _cond;
bool _paused = false;
bool _waiting = false;
@@ -163,7 +163,7 @@ private:
Status _failureForConnect = Status::OK();
Status _failureForQuery = Status::OK();
- void _resume(stdx::unique_lock<stdx::mutex>* lk) {
+ void _resume(stdx::unique_lock<Latch>* lk) {
invariant(lk->owns_lock());
_paused = false;
_resumedQueryCount = _queryCount;
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index 1466b7b2dc3..a8a61969809 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -57,8 +57,8 @@ MONGO_FAIL_POINT_DEFINE(initialSyncHangBeforeListCollections);
namespace {
-using LockGuard = stdx::lock_guard<stdx::mutex>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
+using UniqueLock = stdx::unique_lock<Latch>;
using executor::RemoteCommandRequest;
const char* kNameFieldName = "name";
@@ -206,7 +206,7 @@ Status DatabaseCloner::startup() noexcept {
}
void DatabaseCloner::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -254,7 +254,7 @@ void DatabaseCloner::setStartCollectionClonerFn(
}
DatabaseCloner::State DatabaseCloner::getState_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
diff --git a/src/mongo/db/repl/database_cloner.h b/src/mongo/db/repl/database_cloner.h
index 94b559d8278..051c0ba35a3 100644
--- a/src/mongo/db/repl/database_cloner.h
+++ b/src/mongo/db/repl/database_cloner.h
@@ -41,8 +41,8 @@
#include "mongo/db/repl/base_cloner.h"
#include "mongo/db/repl/collection_cloner.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
@@ -201,7 +201,7 @@ private:
/**
* Calls the above method after unlocking.
*/
- void _finishCallback_inlock(stdx::unique_lock<stdx::mutex>& lk, const Status& status);
+ void _finishCallback_inlock(stdx::unique_lock<Latch>& lk, const Status& status);
//
// All member variables are labeled with one of the following codes indicating the
@@ -212,7 +212,7 @@ private:
// (S) Self-synchronizing; access in any way from any context.
// (RT) Read-only in concurrent operation; synchronized externally by tests
//
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("DatabaseCloner::_mutex");
mutable stdx::condition_variable _condition; // (M)
executor::TaskExecutor* _executor; // (R)
ThreadPool* _dbWorkThreadPool; // (R)
diff --git a/src/mongo/db/repl/databases_cloner.cpp b/src/mongo/db/repl/databases_cloner.cpp
index 54c1b3fdfc5..1a0857ce6b7 100644
--- a/src/mongo/db/repl/databases_cloner.cpp
+++ b/src/mongo/db/repl/databases_cloner.cpp
@@ -56,8 +56,8 @@ namespace {
using Request = executor::RemoteCommandRequest;
using Response = executor::RemoteCommandResponse;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
+using UniqueLock = stdx::unique_lock<Latch>;
} // namespace
diff --git a/src/mongo/db/repl/databases_cloner.h b/src/mongo/db/repl/databases_cloner.h
index 8d94afe26fc..db6f1129edf 100644
--- a/src/mongo/db/repl/databases_cloner.h
+++ b/src/mongo/db/repl/databases_cloner.h
@@ -42,8 +42,8 @@
#include "mongo/db/repl/collection_cloner.h"
#include "mongo/db/repl/database_cloner.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
@@ -138,10 +138,10 @@ private:
void _setStatus_inlock(Status s);
/** Will fail the cloner, call the completion function, and become inactive. */
- void _fail_inlock(stdx::unique_lock<stdx::mutex>* lk, Status s);
+ void _fail_inlock(stdx::unique_lock<Latch>* lk, Status s);
/** Will call the completion function, and become inactive. */
- void _succeed_inlock(stdx::unique_lock<stdx::mutex>* lk);
+ void _succeed_inlock(stdx::unique_lock<Latch>* lk);
/** Called each time a database clone is finished */
void _onEachDBCloneFinish(const Status& status, const std::string& name);
@@ -175,7 +175,7 @@ private:
// (M) Reads and writes guarded by _mutex
// (S) Self-synchronizing; access in any way from any context.
//
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("DatabasesCloner::_mutex"); // (S)
Status _status{ErrorCodes::NotYetInitialized, ""}; // (M) If it is not OK, we stop everything.
executor::TaskExecutor* _exec; // (R) executor to schedule things with
ThreadPool* _dbWorkThreadPool; // (R) db worker thread pool for collection cloning.
diff --git a/src/mongo/db/repl/databases_cloner_test.cpp b/src/mongo/db/repl/databases_cloner_test.cpp
index a631fff5dbc..b09146240b3 100644
--- a/src/mongo/db/repl/databases_cloner_test.cpp
+++ b/src/mongo/db/repl/databases_cloner_test.cpp
@@ -43,7 +43,7 @@
#include "mongo/dbtests/mock/mock_dbclient_connection.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/task_executor_proxy.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/concurrency/thread_name.h"
@@ -57,9 +57,9 @@ using namespace mongo::repl;
using executor::NetworkInterfaceMock;
using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
-using mutex = stdx::mutex;
+using LockGuard = stdx::lock_guard<Latch>;
+using UniqueLock = stdx::unique_lock<Latch>;
+using mutex = Mutex;
using NetworkGuard = executor::NetworkInterfaceMock::InNetworkGuard;
using namespace unittest;
using Responses = std::vector<std::pair<std::string, BSONObj>>;
@@ -288,7 +288,7 @@ protected:
void runCompleteClone(Responses responses) {
Status result{Status::OK()};
bool done = false;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cvDone;
DatabasesCloner cloner{&getStorage(),
&getExecutor(),
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper.cpp b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
index 31993c6acc2..2484bf1d892 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
@@ -78,7 +78,7 @@ DropPendingCollectionReaper::DropPendingCollectionReaper(StorageInterface* stora
void DropPendingCollectionReaper::addDropPendingNamespace(
const OpTime& dropOpTime, const NamespaceString& dropPendingNamespace) {
invariant(dropPendingNamespace.isDropPendingNamespace());
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
const auto equalRange = _dropPendingNamespaces.equal_range(dropOpTime);
const auto& lowerBound = equalRange.first;
const auto& upperBound = equalRange.second;
@@ -95,7 +95,7 @@ void DropPendingCollectionReaper::addDropPendingNamespace(
}
boost::optional<OpTime> DropPendingCollectionReaper::getEarliestDropOpTime() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto it = _dropPendingNamespaces.cbegin();
if (it == _dropPendingNamespaces.cend()) {
return boost::none;
@@ -110,7 +110,7 @@ bool DropPendingCollectionReaper::rollBackDropPendingCollection(
const auto pendingNss = collectionNamespace.makeDropPendingNamespace(opTime);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
const auto equalRange = _dropPendingNamespaces.equal_range(opTime);
const auto& lowerBound = equalRange.first;
const auto& upperBound = equalRange.second;
@@ -135,7 +135,7 @@ void DropPendingCollectionReaper::dropCollectionsOlderThan(OperationContext* opC
const OpTime& opTime) {
DropPendingNamespaces toDrop;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (auto it = _dropPendingNamespaces.cbegin();
it != _dropPendingNamespaces.cend() && it->first <= opTime;
++it) {
@@ -175,7 +175,7 @@ void DropPendingCollectionReaper::dropCollectionsOlderThan(OperationContext* opC
{
// Entries must be removed AFTER drops are completed, so that getEarliestDropOpTime()
// returns appropriate results.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto it = _dropPendingNamespaces.cbegin();
while (it != _dropPendingNamespaces.cend() && it->first <= opTime) {
if (toDrop.find(it->first) != toDrop.cend()) {
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper.h b/src/mongo/db/repl/drop_pending_collection_reaper.h
index be8dd9a77d8..133c693fa0b 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper.h
+++ b/src/mongo/db/repl/drop_pending_collection_reaper.h
@@ -36,7 +36,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/optime.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -100,7 +100,7 @@ public:
void dropCollectionsOlderThan(OperationContext* opCtx, const OpTime& opTime);
void clearDropPendingState() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_dropPendingNamespaces.clear();
}
@@ -126,7 +126,7 @@ private:
// (M) Reads and writes guarded by _mutex.
// Guards access to member variables.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DropPendingCollectionReaper::_mutex");
// Used to access the storage layer.
StorageInterface* const _storageInterface; // (R)
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index b489e842b4c..a5e8860ce30 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -120,8 +120,8 @@ using Event = executor::TaskExecutor::EventHandle;
using Handle = executor::TaskExecutor::CallbackHandle;
using Operations = MultiApplier::Operations;
using QueryResponseStatus = StatusWith<Fetcher::QueryResponse>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
+using LockGuard = stdx::lock_guard<Latch>;
// Used to reset the oldest timestamp during initial sync to a non-null timestamp.
const Timestamp kTimestampOne(0, 1);
@@ -243,7 +243,7 @@ InitialSyncer::~InitialSyncer() {
}
bool InitialSyncer::isActive() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isActive_inlock();
}
@@ -256,7 +256,7 @@ Status InitialSyncer::startup(OperationContext* opCtx,
invariant(opCtx);
invariant(initialSyncMaxAttempts >= 1U);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
_state = State::kRunning;
@@ -289,7 +289,7 @@ Status InitialSyncer::startup(OperationContext* opCtx,
}
Status InitialSyncer::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -327,22 +327,22 @@ void InitialSyncer::_cancelRemainingWork_inlock() {
}
void InitialSyncer::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_stateCondition.wait(lk, [this]() { return !_isActive_inlock(); });
}
InitialSyncer::State InitialSyncer::getState_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
Date_t InitialSyncer::getWallClockTime_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastApplied.wallTime;
}
bool InitialSyncer::_isShuttingDown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isShuttingDown_inlock();
}
@@ -515,7 +515,7 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
// Lock guard must be declared after completion guard because completion guard destructor
// has to run outside lock.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_oplogApplier = {};
@@ -569,7 +569,7 @@ void InitialSyncer::_chooseSyncSourceCallback(
std::uint32_t chooseSyncSourceAttempt,
std::uint32_t chooseSyncSourceMaxAttempts,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
// Cancellation should be treated the same as other errors. In this case, the most likely cause
// of a failed _chooseSyncSourceCallback() task is a cancellation triggered by
// InitialSyncer::shutdown() or the task executor shutting down.
@@ -724,7 +724,7 @@ Status InitialSyncer::_scheduleGetBeginFetchingOpTime_inlock(
void InitialSyncer::_rollbackCheckerResetCallback(
const RollbackChecker::Result& result, std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(result.getStatus(),
"error while getting base rollback ID");
if (!status.isOK()) {
@@ -742,7 +742,7 @@ void InitialSyncer::_rollbackCheckerResetCallback(
void InitialSyncer::_getBeginFetchingOpTimeCallback(
const StatusWith<Fetcher::QueryResponse>& result,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(),
"error while getting oldest active transaction timestamp for begin fetching timestamp");
@@ -792,7 +792,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForBeginApplyingTimestamp(
const StatusWith<Fetcher::QueryResponse>& result,
std::shared_ptr<OnCompletionGuard> onCompletionGuard,
OpTime& beginFetchingOpTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(), "error while getting last oplog entry for begin timestamp");
if (!status.isOK()) {
@@ -849,7 +849,7 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
std::shared_ptr<OnCompletionGuard> onCompletionGuard,
const OpTime& lastOpTime,
OpTime& beginFetchingOpTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(), "error while getting the remote feature compatibility version");
if (!status.isOK()) {
@@ -1026,7 +1026,7 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
void InitialSyncer::_oplogFetcherCallback(const Status& oplogFetcherFinishStatus,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
log() << "Finished fetching oplog during initial sync: " << redact(oplogFetcherFinishStatus)
<< ". Last fetched optime: " << _lastFetched.toString();
@@ -1073,7 +1073,7 @@ void InitialSyncer::_databasesClonerCallback(const Status& databaseClonerFinishS
}
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(databaseClonerFinishStatus,
"error cloning databases");
if (!status.isOK()) {
@@ -1098,7 +1098,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
OpTimeAndWallTime resultOpTimeAndWallTime = {OpTime(), Date_t()};
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(), "error fetching last oplog entry for stop timestamp");
if (!status.isOK()) {
@@ -1145,7 +1145,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
TimestampedBSONObj{oplogSeedDoc, resultOpTimeAndWallTime.opTime.getTimestamp()},
resultOpTimeAndWallTime.opTime.getTerm());
if (!status.isOK()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, status);
return;
}
@@ -1154,7 +1154,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
opCtx.get(), resultOpTimeAndWallTime.opTime.getTimestamp(), orderedCommit);
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_lastApplied = resultOpTimeAndWallTime;
log() << "No need to apply operations. (currently at "
<< _initialSyncState->stopTimestamp.toBSON() << ")";
@@ -1166,7 +1166,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
void InitialSyncer::_getNextApplierBatchCallback(
const executor::TaskExecutor::CallbackArgs& callbackArgs,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status =
_checkForShutdownAndConvertStatus_inlock(callbackArgs, "error getting next applier batch");
if (!status.isOK()) {
@@ -1267,7 +1267,7 @@ void InitialSyncer::_multiApplierCallback(const Status& multiApplierStatus,
OpTimeAndWallTime lastApplied,
std::uint32_t numApplied,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status =
_checkForShutdownAndConvertStatus_inlock(multiApplierStatus, "error applying batch");
@@ -1324,7 +1324,7 @@ void InitialSyncer::_multiApplierCallback(const Status& multiApplierStatus,
void InitialSyncer::_lastOplogEntryFetcherCallbackAfterFetchingMissingDocuments(
const StatusWith<Fetcher::QueryResponse>& result,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(), "error getting last oplog entry after fetching missing documents");
if (!status.isOK()) {
@@ -1354,7 +1354,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackAfterFetchingMissingDocuments(
void InitialSyncer::_rollbackCheckerCheckForRollbackCallback(
const RollbackChecker::Result& result, std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(result.getStatus(),
"error while getting last rollback ID");
if (!status.isOK()) {
@@ -1419,7 +1419,7 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
log() << "Initial sync attempt finishing up.";
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
log() << "Initial Sync Attempt Statistics: " << redact(_getInitialSyncProgress_inlock());
auto runTime = _initialSyncState ? _initialSyncState->timer.millis() : 0;
@@ -1492,7 +1492,7 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
// before we transition the state to Complete.
decltype(_onCompletion) onCompletion;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto opCtx = makeOpCtx();
_tearDown_inlock(opCtx.get(), lastApplied);
@@ -1522,7 +1522,7 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
// before InitialSyncer::join() returns.
onCompletion = {};
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state != State::kComplete);
_state = State::kComplete;
_stateCondition.notify_all();
@@ -1558,8 +1558,7 @@ Status InitialSyncer::_scheduleLastOplogEntryFetcher_inlock(Fetcher::CallbackFn
}
void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
- const stdx::lock_guard<stdx::mutex>& lock,
- std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
+ const stdx::lock_guard<Latch>& lock, std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
// We should check our current state because shutdown() could have been called before
// we re-acquired the lock.
if (_isShuttingDown_inlock()) {
@@ -1614,8 +1613,7 @@ void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
}
void InitialSyncer::_scheduleRollbackCheckerCheckForRollback_inlock(
- const stdx::lock_guard<stdx::mutex>& lock,
- std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
+ const stdx::lock_guard<Latch>& lock, std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
// We should check our current state because shutdown() could have been called before
// we re-acquired the lock.
if (_isShuttingDown_inlock()) {
diff --git a/src/mongo/db/repl/initial_syncer.h b/src/mongo/db/repl/initial_syncer.h
index 4b994f9ea88..6103099a435 100644
--- a/src/mongo/db/repl/initial_syncer.h
+++ b/src/mongo/db/repl/initial_syncer.h
@@ -51,9 +51,9 @@
#include "mongo/db/repl/rollback_checker.h"
#include "mongo/db/repl/sync_source_selector.h"
#include "mongo/dbtests/mock/mock_dbclient_connection.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/net/hostandport.h"
@@ -535,8 +535,7 @@ private:
* Passes 'lock' through to completion guard.
*/
void _checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
- const stdx::lock_guard<stdx::mutex>& lock,
- std::shared_ptr<OnCompletionGuard> onCompletionGuard);
+ const stdx::lock_guard<Latch>& lock, std::shared_ptr<OnCompletionGuard> onCompletionGuard);
/**
* Schedules a rollback checker to get the rollback ID after data cloning or applying. This
@@ -546,8 +545,7 @@ private:
* Passes 'lock' through to completion guard.
*/
void _scheduleRollbackCheckerCheckForRollback_inlock(
- const stdx::lock_guard<stdx::mutex>& lock,
- std::shared_ptr<OnCompletionGuard> onCompletionGuard);
+ const stdx::lock_guard<Latch>& lock, std::shared_ptr<OnCompletionGuard> onCompletionGuard);
/**
* Checks the given status (or embedded status inside the callback args) and current data
@@ -607,7 +605,7 @@ private:
// (MX) Must hold _mutex and be in a callback in _exec to write; must either hold
// _mutex or be in a callback in _exec to read.
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("InitialSyncer::_mutex"); // (S)
const InitialSyncerOptions _opts; // (R)
std::unique_ptr<DataReplicatorExternalState> _dataReplicatorExternalState; // (R)
executor::TaskExecutor* _exec; // (R)
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index 003ec073d1d..8065edda292 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -60,7 +60,7 @@
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_name.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/fail_point_service.h"
@@ -104,9 +104,9 @@ using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
using unittest::log;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
using NetworkGuard = executor::NetworkInterfaceMock::InNetworkGuard;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
struct CollectionCloneInfo {
std::shared_ptr<CollectionMockStats> stats = std::make_shared<CollectionMockStats>();
@@ -246,7 +246,9 @@ protected:
bool upgradeNonReplicatedUniqueIndexesShouldFail = false;
};
- stdx::mutex _storageInterfaceWorkDoneMutex; // protects _storageInterfaceWorkDone.
+ // protects _storageInterfaceWorkDone.
+ Mutex _storageInterfaceWorkDoneMutex =
+ MONGO_MAKE_LATCH("InitialSyncerTest::_storageInterfaceWorkDoneMutex");
StorageInterfaceResults _storageInterfaceWorkDone;
void setUp() override {
diff --git a/src/mongo/db/repl/local_oplog_info.cpp b/src/mongo/db/repl/local_oplog_info.cpp
index 069c199def1..b17da6d88c5 100644
--- a/src/mongo/db/repl/local_oplog_info.cpp
+++ b/src/mongo/db/repl/local_oplog_info.cpp
@@ -95,7 +95,7 @@ void LocalOplogInfo::resetCollection() {
}
void LocalOplogInfo::setNewTimestamp(ServiceContext* service, const Timestamp& newTime) {
- stdx::lock_guard<stdx::mutex> lk(_newOpMutex);
+ stdx::lock_guard<Latch> lk(_newOpMutex);
LogicalClock::get(service)->setClusterTimeFromTrustedSource(LogicalTime(newTime));
}
@@ -120,7 +120,7 @@ std::vector<OplogSlot> LocalOplogInfo::getNextOpTimes(OperationContext* opCtx, s
// Allow the storage engine to start the transaction outside the critical section.
opCtx->recoveryUnit()->preallocateSnapshot();
- stdx::lock_guard<stdx::mutex> lk(_newOpMutex);
+ stdx::lock_guard<Latch> lk(_newOpMutex);
ts = LogicalClock::get(opCtx)->reserveTicks(count).asTimestamp();
const bool orderedCommit = false;
diff --git a/src/mongo/db/repl/local_oplog_info.h b/src/mongo/db/repl/local_oplog_info.h
index 67ab7e0560d..96cdb259f36 100644
--- a/src/mongo/db/repl/local_oplog_info.h
+++ b/src/mongo/db/repl/local_oplog_info.h
@@ -92,7 +92,7 @@ private:
// Synchronizes the section where a new Timestamp is generated and when it is registered in the
// storage engine.
- mutable stdx::mutex _newOpMutex;
+ mutable Mutex _newOpMutex = MONGO_MAKE_LATCH("LocalOplogInfo::_newOpMutex");
};
} // namespace repl
diff --git a/src/mongo/db/repl/multiapplier.cpp b/src/mongo/db/repl/multiapplier.cpp
index 99f09fa2484..02c993a0e67 100644
--- a/src/mongo/db/repl/multiapplier.cpp
+++ b/src/mongo/db/repl/multiapplier.cpp
@@ -60,7 +60,7 @@ MultiApplier::~MultiApplier() {
}
bool MultiApplier::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isActive_inlock();
}
@@ -69,7 +69,7 @@ bool MultiApplier::_isActive_inlock() const {
}
Status MultiApplier::startup() noexcept {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (_state) {
case State::kPreStart:
@@ -96,7 +96,7 @@ Status MultiApplier::startup() noexcept {
}
void MultiApplier::shutdown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -117,12 +117,12 @@ void MultiApplier::shutdown() {
}
void MultiApplier::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() { return !_isActive_inlock(); });
}
MultiApplier::State MultiApplier::getState_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
@@ -153,14 +153,14 @@ void MultiApplier::_finishCallback(const Status& result) {
// destroyed outside the lock.
decltype(_onCompletion) onCompletion;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_onCompletion);
std::swap(_onCompletion, onCompletion);
}
onCompletion(result);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(State::kComplete != _state);
_state = State::kComplete;
_condition.notify_all();
diff --git a/src/mongo/db/repl/multiapplier.h b/src/mongo/db/repl/multiapplier.h
index 87d34964c2f..e550316ec8e 100644
--- a/src/mongo/db/repl/multiapplier.h
+++ b/src/mongo/db/repl/multiapplier.h
@@ -42,9 +42,9 @@
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/service_context.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
namespace repl {
@@ -149,7 +149,7 @@ private:
CallbackFn _onCompletion;
// Protects member data of this MultiApplier.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MultiApplier::_mutex");
stdx::condition_variable _condition;
diff --git a/src/mongo/db/repl/noop_writer.cpp b/src/mongo/db/repl/noop_writer.cpp
index 8c0fbfaa6b9..ac0c19bcceb 100644
--- a/src/mongo/db/repl/noop_writer.cpp
+++ b/src/mongo/db/repl/noop_writer.cpp
@@ -70,7 +70,7 @@ public:
: _thread([this, noopWrite, waitTime] { run(waitTime, std::move(noopWrite)); }) {}
~PeriodicNoopRunner() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_inShutdown = true;
_cv.notify_all();
lk.unlock();
@@ -84,7 +84,7 @@ private:
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
MONGO_IDLE_THREAD_BLOCK;
_cv.wait_for(lk, waitTime.toSystemDuration(), [&] { return _inShutdown; });
@@ -103,7 +103,7 @@ private:
/**
* Mutex for the CV
*/
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicNoopRunner::_mutex");
/**
* CV to wait for.
@@ -126,7 +126,7 @@ NoopWriter::~NoopWriter() {
}
Status NoopWriter::startWritingPeriodicNoops(OpTime lastKnownOpTime) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lastKnownOpTime = lastKnownOpTime;
invariant(!_noopRunner);
@@ -139,7 +139,7 @@ Status NoopWriter::startWritingPeriodicNoops(OpTime lastKnownOpTime) {
}
void NoopWriter::stopWritingPeriodicNoops() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_noopRunner.reset();
}
diff --git a/src/mongo/db/repl/noop_writer.h b/src/mongo/db/repl/noop_writer.h
index 07f664668d0..4d01a8bd18c 100644
--- a/src/mongo/db/repl/noop_writer.h
+++ b/src/mongo/db/repl/noop_writer.h
@@ -30,8 +30,8 @@
#pragma once
#include "mongo/db/repl/optime.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -74,7 +74,7 @@ private:
* Protects member data of this class during start and stop. There is no need to synchronize
* access once its running because its run by a one thread only.
*/
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("NoopWriter::_mutex");
std::unique_ptr<PeriodicNoopRunner> _noopRunner;
};
diff --git a/src/mongo/db/repl/oplog_applier.cpp b/src/mongo/db/repl/oplog_applier.cpp
index 2a1c71e9d77..e752aaa3d9e 100644
--- a/src/mongo/db/repl/oplog_applier.cpp
+++ b/src/mongo/db/repl/oplog_applier.cpp
@@ -107,12 +107,12 @@ Future<void> OplogApplier::startup() {
void OplogApplier::shutdown() {
_shutdown();
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_inShutdown = true;
}
bool OplogApplier::inShutdown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _inShutdown;
}
diff --git a/src/mongo/db/repl/oplog_applier.h b/src/mongo/db/repl/oplog_applier.h
index 7752554cfbd..9a3a346a9d0 100644
--- a/src/mongo/db/repl/oplog_applier.h
+++ b/src/mongo/db/repl/oplog_applier.h
@@ -40,7 +40,7 @@
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/functional.h"
#include "mongo/util/future.h"
@@ -244,7 +244,7 @@ private:
Observer* const _observer;
// Protects member data of OplogApplier.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("OplogApplier::_mutex");
// Set to true if shutdown() has been called.
bool _inShutdown = false;
diff --git a/src/mongo/db/repl/oplog_buffer_collection.cpp b/src/mongo/db/repl/oplog_buffer_collection.cpp
index cfea973d17d..2e0736c82d1 100644
--- a/src/mongo/db/repl/oplog_buffer_collection.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection.cpp
@@ -106,7 +106,7 @@ void OplogBufferCollection::startup(OperationContext* opCtx) {
return;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If we are starting from an existing collection, we must populate the in memory state of the
// buffer.
auto sizeResult = _storageInterface->getCollectionSize(opCtx, _nss);
@@ -148,7 +148,7 @@ void OplogBufferCollection::startup(OperationContext* opCtx) {
void OplogBufferCollection::shutdown(OperationContext* opCtx) {
if (_options.dropCollectionAtShutdown) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dropCollection(opCtx);
_size = 0;
_count = 0;
@@ -176,7 +176,7 @@ void OplogBufferCollection::pushAllNonBlocking(OperationContext* opCtx,
}
size_t numDocs = std::distance(begin, end);
std::vector<InsertStatement> docsToInsert(numDocs);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto ts = _lastPushedTimestamp;
auto sentinelCount = _sentinelCount;
std::transform(begin, end, docsToInsert.begin(), [&sentinelCount, &ts](const Value& value) {
@@ -202,7 +202,7 @@ void OplogBufferCollection::pushAllNonBlocking(OperationContext* opCtx,
void OplogBufferCollection::waitForSpace(OperationContext* opCtx, std::size_t size) {}
bool OplogBufferCollection::isEmpty() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _count == 0;
}
@@ -211,17 +211,17 @@ std::size_t OplogBufferCollection::getMaxSize() const {
}
std::size_t OplogBufferCollection::getSize() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _size;
}
std::size_t OplogBufferCollection::getCount() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _count;
}
void OplogBufferCollection::clear(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dropCollection(opCtx);
_createCollection(opCtx);
_size = 0;
@@ -233,7 +233,7 @@ void OplogBufferCollection::clear(OperationContext* opCtx) {
}
bool OplogBufferCollection::tryPop(OperationContext* opCtx, Value* value) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_count == 0) {
return false;
}
@@ -241,7 +241,7 @@ bool OplogBufferCollection::tryPop(OperationContext* opCtx, Value* value) {
}
bool OplogBufferCollection::waitForData(Seconds waitDuration) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_cvNoLongerEmpty.wait_for(
lk, waitDuration.toSystemDuration(), [&]() { return _count != 0; })) {
return false;
@@ -250,7 +250,7 @@ bool OplogBufferCollection::waitForData(Seconds waitDuration) {
}
bool OplogBufferCollection::peek(OperationContext* opCtx, Value* value) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_count == 0) {
return false;
}
@@ -260,7 +260,7 @@ bool OplogBufferCollection::peek(OperationContext* opCtx, Value* value) {
boost::optional<OplogBuffer::Value> OplogBufferCollection::lastObjectPushed(
OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto lastDocumentPushed = _lastDocumentPushed_inlock(opCtx);
if (lastDocumentPushed) {
BSONObj entryObj = extractEmbeddedOplogDocument(*lastDocumentPushed);
@@ -365,23 +365,23 @@ void OplogBufferCollection::_dropCollection(OperationContext* opCtx) {
}
std::size_t OplogBufferCollection::getSentinelCount_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _sentinelCount;
}
Timestamp OplogBufferCollection::getLastPushedTimestamp_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastPushedTimestamp;
}
Timestamp OplogBufferCollection::getLastPoppedTimestamp_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastPoppedKey.isEmpty() ? Timestamp()
: _lastPoppedKey[""].Obj()[kTimestampFieldName].timestamp();
}
std::queue<BSONObj> OplogBufferCollection::getPeekCache_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _peekCache;
}
diff --git a/src/mongo/db/repl/oplog_buffer_collection.h b/src/mongo/db/repl/oplog_buffer_collection.h
index b6ef88eb734..cc6af3a96cc 100644
--- a/src/mongo/db/repl/oplog_buffer_collection.h
+++ b/src/mongo/db/repl/oplog_buffer_collection.h
@@ -34,7 +34,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/oplog_buffer.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/queue.h"
namespace mongo {
@@ -183,7 +183,7 @@ private:
stdx::condition_variable _cvNoLongerEmpty;
// Protects member data below and synchronizes it with the underlying collection.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("OplogBufferCollection::_mutex");
// Number of documents in buffer.
std::size_t _count = 0;
diff --git a/src/mongo/db/repl/oplog_buffer_proxy.cpp b/src/mongo/db/repl/oplog_buffer_proxy.cpp
index 1a339644c91..30b2f9e785a 100644
--- a/src/mongo/db/repl/oplog_buffer_proxy.cpp
+++ b/src/mongo/db/repl/oplog_buffer_proxy.cpp
@@ -51,8 +51,8 @@ void OplogBufferProxy::startup(OperationContext* opCtx) {
void OplogBufferProxy::shutdown(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex);
- stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex);
+ stdx::lock_guard<Latch> backLock(_lastPushedMutex);
+ stdx::lock_guard<Latch> frontLock(_lastPeekedMutex);
_lastPushed.reset();
_lastPeeked.reset();
}
@@ -60,13 +60,13 @@ void OplogBufferProxy::shutdown(OperationContext* opCtx) {
}
void OplogBufferProxy::pushEvenIfFull(OperationContext* opCtx, const Value& value) {
- stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex);
+ stdx::lock_guard<Latch> lk(_lastPushedMutex);
_lastPushed = value;
_target->pushEvenIfFull(opCtx, value);
}
void OplogBufferProxy::push(OperationContext* opCtx, const Value& value) {
- stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex);
+ stdx::lock_guard<Latch> lk(_lastPushedMutex);
_lastPushed = value;
_target->push(opCtx, value);
}
@@ -77,7 +77,7 @@ void OplogBufferProxy::pushAllNonBlocking(OperationContext* opCtx,
if (begin == end) {
return;
}
- stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex);
+ stdx::lock_guard<Latch> lk(_lastPushedMutex);
_lastPushed = *(end - 1);
_target->pushAllNonBlocking(opCtx, begin, end);
}
@@ -103,16 +103,16 @@ std::size_t OplogBufferProxy::getCount() const {
}
void OplogBufferProxy::clear(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex);
- stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex);
+ stdx::lock_guard<Latch> backLock(_lastPushedMutex);
+ stdx::lock_guard<Latch> frontLock(_lastPeekedMutex);
_lastPushed.reset();
_lastPeeked.reset();
_target->clear(opCtx);
}
bool OplogBufferProxy::tryPop(OperationContext* opCtx, Value* value) {
- stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex);
- stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex);
+ stdx::lock_guard<Latch> backLock(_lastPushedMutex);
+ stdx::lock_guard<Latch> frontLock(_lastPeekedMutex);
if (!_target->tryPop(opCtx, value)) {
return false;
}
@@ -126,7 +126,7 @@ bool OplogBufferProxy::tryPop(OperationContext* opCtx, Value* value) {
bool OplogBufferProxy::waitForData(Seconds waitDuration) {
{
- stdx::unique_lock<stdx::mutex> lk(_lastPushedMutex);
+ stdx::unique_lock<Latch> lk(_lastPushedMutex);
if (_lastPushed) {
return true;
}
@@ -135,7 +135,7 @@ bool OplogBufferProxy::waitForData(Seconds waitDuration) {
}
bool OplogBufferProxy::peek(OperationContext* opCtx, Value* value) {
- stdx::lock_guard<stdx::mutex> lk(_lastPeekedMutex);
+ stdx::lock_guard<Latch> lk(_lastPeekedMutex);
if (_lastPeeked) {
*value = *_lastPeeked;
return true;
@@ -149,7 +149,7 @@ bool OplogBufferProxy::peek(OperationContext* opCtx, Value* value) {
boost::optional<OplogBuffer::Value> OplogBufferProxy::lastObjectPushed(
OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex);
+ stdx::lock_guard<Latch> lk(_lastPushedMutex);
if (!_lastPushed) {
return boost::none;
}
@@ -157,7 +157,7 @@ boost::optional<OplogBuffer::Value> OplogBufferProxy::lastObjectPushed(
}
boost::optional<OplogBuffer::Value> OplogBufferProxy::getLastPeeked_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_lastPeekedMutex);
+ stdx::lock_guard<Latch> lk(_lastPeekedMutex);
return _lastPeeked;
}
diff --git a/src/mongo/db/repl/oplog_buffer_proxy.h b/src/mongo/db/repl/oplog_buffer_proxy.h
index 544b5b6739f..7ef7537225b 100644
--- a/src/mongo/db/repl/oplog_buffer_proxy.h
+++ b/src/mongo/db/repl/oplog_buffer_proxy.h
@@ -33,7 +33,7 @@
#include <memory>
#include "mongo/db/repl/oplog_buffer.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -82,10 +82,10 @@ private:
std::unique_ptr<OplogBuffer> _target;
// If both mutexes have to be acquired, acquire _lastPushedMutex first.
- mutable stdx::mutex _lastPushedMutex;
+ mutable Mutex _lastPushedMutex = MONGO_MAKE_LATCH("OplogBufferProxy::_lastPushedMutex");
boost::optional<Value> _lastPushed;
- mutable stdx::mutex _lastPeekedMutex;
+ mutable Mutex _lastPeekedMutex = MONGO_MAKE_LATCH("OplogBufferProxy::_lastPeekedMutex");
boost::optional<Value> _lastPeeked;
};
diff --git a/src/mongo/db/repl/oplog_test.cpp b/src/mongo/db/repl/oplog_test.cpp
index 00f76f96c4d..0715f665f8c 100644
--- a/src/mongo/db/repl/oplog_test.cpp
+++ b/src/mongo/db/repl/oplog_test.cpp
@@ -44,8 +44,8 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/service_context_d_test_fixture.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/unittest/barrier.h"
#include "mongo/util/concurrency/thread_pool.h"
@@ -171,7 +171,7 @@ void _testConcurrentLogOp(const F& makeTaskFunction,
// Run 2 concurrent logOp() requests using the thread pool.
// Use a barrier with a thread count of 3 to ensure both logOp() tasks are complete before this
// test thread can proceed with shutting the thread pool down.
- stdx::mutex mtx;
+ auto mtx = MONGO_MAKE_LATCH();
unittest::Barrier barrier(3U);
const NamespaceString nss1("test1.coll");
const NamespaceString nss2("test2.coll");
@@ -206,7 +206,7 @@ void _testConcurrentLogOp(const F& makeTaskFunction,
std::reverse(oplogEntries->begin(), oplogEntries->end());
// Look up namespaces and their respective optimes (returned by logOp()) in the map.
- stdx::lock_guard<stdx::mutex> lock(mtx);
+ stdx::lock_guard<Latch> lock(mtx);
ASSERT_EQUALS(2U, opTimeNssMap->size());
}
@@ -216,10 +216,10 @@ void _testConcurrentLogOp(const F& makeTaskFunction,
* Returns optime of generated oplog entry.
*/
OpTime _logOpNoopWithMsg(OperationContext* opCtx,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lock(*mtx);
+ stdx::lock_guard<Latch> lock(*mtx);
// logOp() must be called while holding lock because ephemeralForTest storage engine does not
// support concurrent updates to its internal state.
@@ -252,7 +252,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithoutDocLockingSupport) {
_testConcurrentLogOp(
[](const NamespaceString& nss,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
unittest::Barrier* barrier) {
return [=] {
@@ -285,7 +285,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupport) {
ForceSupportsDocLocking support(true);
_testConcurrentLogOp(
[](const NamespaceString& nss,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
unittest::Barrier* barrier) {
return [=] {
@@ -317,7 +317,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertFirstOplogEntry) {
ForceSupportsDocLocking support(true);
_testConcurrentLogOp(
[](const NamespaceString& nss,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
unittest::Barrier* barrier) {
return [=] {
@@ -335,7 +335,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertFirstOplogEntry) {
// Revert the first logOp() call and confirm that there are no holes in the
// oplog after committing the oplog entry with the more recent optime.
{
- stdx::lock_guard<stdx::mutex> lock(*mtx);
+ stdx::lock_guard<Latch> lock(*mtx);
auto firstOpTimeAndNss = *(opTimeNssMap->cbegin());
if (opTime == firstOpTimeAndNss.first) {
ASSERT_EQUALS(nss, firstOpTimeAndNss.second)
@@ -364,7 +364,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertLastOplogEntry) {
ForceSupportsDocLocking support(true);
_testConcurrentLogOp(
[](const NamespaceString& nss,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
unittest::Barrier* barrier) {
return [=] {
@@ -382,7 +382,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertLastOplogEntry) {
// Revert the last logOp() call and confirm that there are no holes in the
// oplog after committing the oplog entry with the earlier optime.
{
- stdx::lock_guard<stdx::mutex> lock(*mtx);
+ stdx::lock_guard<Latch> lock(*mtx);
auto lastOpTimeAndNss = *(opTimeNssMap->crbegin());
if (opTime == lastOpTimeAndNss.first) {
ASSERT_EQUALS(nss, lastOpTimeAndNss.second)
diff --git a/src/mongo/db/repl/replication_consistency_markers_mock.cpp b/src/mongo/db/repl/replication_consistency_markers_mock.cpp
index 61f46bf0bef..5c698190445 100644
--- a/src/mongo/db/repl/replication_consistency_markers_mock.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_mock.cpp
@@ -36,12 +36,12 @@ namespace repl {
void ReplicationConsistencyMarkersMock::initializeMinValidDocument(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
+ stdx::lock_guard<Latch> lock(_initialSyncFlagMutex);
_initialSyncFlag = false;
}
{
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_minValid = {};
_oplogTruncateAfterPoint = {};
_appliedThrough = {};
@@ -49,64 +49,64 @@ void ReplicationConsistencyMarkersMock::initializeMinValidDocument(OperationCont
}
bool ReplicationConsistencyMarkersMock::getInitialSyncFlag(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
+ stdx::lock_guard<Latch> lock(_initialSyncFlagMutex);
return _initialSyncFlag;
}
void ReplicationConsistencyMarkersMock::setInitialSyncFlag(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
+ stdx::lock_guard<Latch> lock(_initialSyncFlagMutex);
_initialSyncFlag = true;
}
void ReplicationConsistencyMarkersMock::clearInitialSyncFlag(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
+ stdx::lock_guard<Latch> lock(_initialSyncFlagMutex);
_initialSyncFlag = false;
}
OpTime ReplicationConsistencyMarkersMock::getMinValid(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
return _minValid;
}
void ReplicationConsistencyMarkersMock::setMinValid(OperationContext* opCtx,
const OpTime& minValid) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_minValid = minValid;
}
void ReplicationConsistencyMarkersMock::setMinValidToAtLeast(OperationContext* opCtx,
const OpTime& minValid) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_minValid = std::max(_minValid, minValid);
}
void ReplicationConsistencyMarkersMock::setOplogTruncateAfterPoint(OperationContext* opCtx,
const Timestamp& timestamp) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_oplogTruncateAfterPoint = timestamp;
}
Timestamp ReplicationConsistencyMarkersMock::getOplogTruncateAfterPoint(
OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
return _oplogTruncateAfterPoint;
}
void ReplicationConsistencyMarkersMock::setAppliedThrough(OperationContext* opCtx,
const OpTime& optime,
bool setTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_appliedThrough = optime;
}
void ReplicationConsistencyMarkersMock::clearAppliedThrough(OperationContext* opCtx,
const Timestamp& writeTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_appliedThrough = {};
}
OpTime ReplicationConsistencyMarkersMock::getAppliedThrough(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
return _appliedThrough;
}
diff --git a/src/mongo/db/repl/replication_consistency_markers_mock.h b/src/mongo/db/repl/replication_consistency_markers_mock.h
index 3215264110f..3fe3c2670f5 100644
--- a/src/mongo/db/repl/replication_consistency_markers_mock.h
+++ b/src/mongo/db/repl/replication_consistency_markers_mock.h
@@ -31,7 +31,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_consistency_markers.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -73,10 +73,12 @@ public:
Status createInternalCollections(OperationContext* opCtx) override;
private:
- mutable stdx::mutex _initialSyncFlagMutex;
+ mutable Mutex _initialSyncFlagMutex =
+ MONGO_MAKE_LATCH("ReplicationConsistencyMarkersMock::_initialSyncFlagMutex");
bool _initialSyncFlag = false;
- mutable stdx::mutex _minValidBoundariesMutex;
+ mutable Mutex _minValidBoundariesMutex =
+ MONGO_MAKE_LATCH("ReplicationConsistencyMarkersMock::_minValidBoundariesMutex");
OpTime _appliedThrough;
OpTime _minValid;
Timestamp _oplogTruncateAfterPoint;
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 54cf29caf20..6abca36f5d6 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -205,7 +205,7 @@ bool ReplicationCoordinatorExternalStateImpl::isInitialSyncFlagSet(OperationCont
void ReplicationCoordinatorExternalStateImpl::startSteadyStateReplication(
OperationContext* opCtx, ReplicationCoordinator* replCoord) {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
// We've shut down the external state, don't start again.
if (_inShutdown)
@@ -256,12 +256,12 @@ void ReplicationCoordinatorExternalStateImpl::startSteadyStateReplication(
}
void ReplicationCoordinatorExternalStateImpl::stopDataReplication(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_threadMutex);
+ stdx::unique_lock<Latch> lk(_threadMutex);
_stopDataReplication_inlock(opCtx, lk);
}
void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(
- OperationContext* opCtx, stdx::unique_lock<stdx::mutex>& lock) {
+ OperationContext* opCtx, stdx::unique_lock<Latch>& lock) {
// Make sue no other _stopDataReplication calls are in progress.
_dataReplicationStopped.wait(lock, [this]() { return !_stoppingDataReplication; });
_stoppingDataReplication = true;
@@ -316,7 +316,7 @@ void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(
void ReplicationCoordinatorExternalStateImpl::startThreads(const ReplSettings& settings) {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_startedThreads) {
return;
}
@@ -339,7 +339,7 @@ void ReplicationCoordinatorExternalStateImpl::startThreads(const ReplSettings& s
}
void ReplicationCoordinatorExternalStateImpl::shutdown(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_threadMutex);
+ stdx::unique_lock<Latch> lk(_threadMutex);
_inShutdown = true;
if (!_startedThreads) {
return;
@@ -826,21 +826,21 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
}
void ReplicationCoordinatorExternalStateImpl::signalApplierToChooseNewSyncSource() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_bgSync) {
_bgSync->clearSyncTarget();
}
}
void ReplicationCoordinatorExternalStateImpl::stopProducer() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_bgSync) {
_bgSync->stop(false);
}
}
void ReplicationCoordinatorExternalStateImpl::startProducerIfStopped() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_bgSync) {
_bgSync->startProducerIfStopped();
}
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index caacf96c068..8b984b454e5 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -39,7 +39,7 @@
#include "mongo/db/repl/task_runner.h"
#include "mongo/db/storage/journal_listener.h"
#include "mongo/db/storage/snapshot_manager.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
namespace mongo {
@@ -120,7 +120,7 @@ private:
/**
* Stops data replication and returns with 'lock' locked.
*/
- void _stopDataReplication_inlock(OperationContext* opCtx, stdx::unique_lock<stdx::mutex>& lock);
+ void _stopDataReplication_inlock(OperationContext* opCtx, stdx::unique_lock<Latch>& lock);
/**
* Called when the instance transitions to primary in order to notify a potentially sharded host
@@ -141,7 +141,7 @@ private:
ServiceContext* _service;
// Guards starting threads and setting _startedThreads
- stdx::mutex _threadMutex;
+ Mutex _threadMutex = MONGO_MAKE_LATCH("ReplicationCoordinatorExternalStateImpl::_threadMutex");
// Flag for guarding against concurrent data replication stopping.
bool _stoppingDataReplication = false;
@@ -187,7 +187,8 @@ private:
Future<void> _oplogApplierShutdownFuture;
// Mutex guarding the _nextThreadId value to prevent concurrent incrementing.
- stdx::mutex _nextThreadIdMutex;
+ Mutex _nextThreadIdMutex =
+ MONGO_MAKE_LATCH("ReplicationCoordinatorExternalStateImpl::_nextThreadIdMutex");
// Number used to uniquely name threads.
long long _nextThreadId = 0;
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
index d7cb605e61a..c742ac33db2 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
@@ -145,7 +145,7 @@ StatusWith<LastVote> ReplicationCoordinatorExternalStateMock::loadLocalLastVoteD
Status ReplicationCoordinatorExternalStateMock::storeLocalLastVoteDocument(
OperationContext* opCtx, const LastVote& lastVote) {
{
- stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
+ stdx::unique_lock<Latch> lock(_shouldHangLastVoteMutex);
while (_storeLocalLastVoteDocumentShouldHang) {
_shouldHangLastVoteCondVar.wait(lock);
}
@@ -210,7 +210,7 @@ void ReplicationCoordinatorExternalStateMock::setStoreLocalLastVoteDocumentStatu
}
void ReplicationCoordinatorExternalStateMock::setStoreLocalLastVoteDocumentToHang(bool hang) {
- stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
+ stdx::unique_lock<Latch> lock(_shouldHangLastVoteMutex);
_storeLocalLastVoteDocumentShouldHang = hang;
if (!hang) {
_shouldHangLastVoteCondVar.notify_all();
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.h b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
index 772cab29b66..8676aaa8c14 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
@@ -37,8 +37,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/last_vote.h"
#include "mongo/db/repl/replication_coordinator_external_state.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/net/hostandport.h"
@@ -196,7 +196,8 @@ private:
Status _storeLocalConfigDocumentStatus;
Status _storeLocalLastVoteDocumentStatus;
// mutex and cond var for controlling stroeLocalLastVoteDocument()'s hanging
- stdx::mutex _shouldHangLastVoteMutex;
+ Mutex _shouldHangLastVoteMutex =
+ MONGO_MAKE_LATCH("ReplicationCoordinatorExternalStateMock::_shouldHangLastVoteMutex");
stdx::condition_variable _shouldHangLastVoteCondVar;
bool _storeLocalLastVoteDocumentShouldHang;
bool _connectionsClosed;
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 7354e5e5ede..060e425238b 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -81,10 +81,10 @@
#include "mongo/db/write_concern_options.h"
#include "mongo/executor/connection_pool_stats.h"
#include "mongo/executor/network_interface.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/metadata/oplog_query_metadata.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
@@ -149,7 +149,7 @@ private:
const bool _initialState;
};
-void lockAndCall(stdx::unique_lock<stdx::mutex>* lk, const stdx::function<void()>& fn) {
+void lockAndCall(stdx::unique_lock<Latch>* lk, const stdx::function<void()>& fn) {
if (!lk->owns_lock()) {
lk->lock();
}
@@ -228,7 +228,7 @@ public:
* _list is guarded by ReplicationCoordinatorImpl::_mutex, thus it is illegal to construct one
* of these without holding _mutex
*/
- WaiterGuard(const stdx::unique_lock<stdx::mutex>& lock, WaiterList* list, Waiter* waiter)
+ WaiterGuard(const stdx::unique_lock<Latch>& lock, WaiterList* list, Waiter* waiter)
: _lock(lock), _list(list), _waiter(waiter) {
invariant(_lock.owns_lock());
list->add_inlock(_waiter);
@@ -240,7 +240,7 @@ public:
}
private:
- const stdx::unique_lock<stdx::mutex>& _lock;
+ const stdx::unique_lock<Latch>& _lock;
WaiterList* _list;
Waiter* _waiter;
};
@@ -370,7 +370,7 @@ void ReplicationCoordinatorImpl::waitForStartUpComplete_forTest() {
void ReplicationCoordinatorImpl::_waitForStartUpComplete() {
CallbackHandle handle;
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
_rsConfigStateChange.wait(lk);
}
@@ -382,12 +382,12 @@ void ReplicationCoordinatorImpl::_waitForStartUpComplete() {
}
ReplSetConfig ReplicationCoordinatorImpl::getReplicaSetConfig_forTest() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _rsConfig;
}
Date_t ReplicationCoordinatorImpl::getElectionTimeout_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_handleElectionTimeoutCbh.isValid()) {
return Date_t();
}
@@ -395,12 +395,12 @@ Date_t ReplicationCoordinatorImpl::getElectionTimeout_forTest() const {
}
Milliseconds ReplicationCoordinatorImpl::getRandomizedElectionOffset_forTest() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _getRandomizedElectionOffset_inlock();
}
boost::optional<Date_t> ReplicationCoordinatorImpl::getPriorityTakeover_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_priorityTakeoverCbh.isValid()) {
return boost::none;
}
@@ -408,7 +408,7 @@ boost::optional<Date_t> ReplicationCoordinatorImpl::getPriorityTakeover_forTest(
}
boost::optional<Date_t> ReplicationCoordinatorImpl::getCatchupTakeover_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_catchupTakeoverCbh.isValid()) {
return boost::none;
}
@@ -421,12 +421,12 @@ executor::TaskExecutor::CallbackHandle ReplicationCoordinatorImpl::getCatchupTak
}
OpTime ReplicationCoordinatorImpl::getCurrentCommittedSnapshotOpTime() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _getCurrentCommittedSnapshotOpTime_inlock();
}
OpTimeAndWallTime ReplicationCoordinatorImpl::getCurrentCommittedSnapshotOpTimeAndWallTime() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _getCurrentCommittedSnapshotOpTimeAndWallTime_inlock();
}
@@ -477,7 +477,7 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
log() << "Did not find local initialized voted for document at startup.";
}
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_topCoord->loadLastVote(lastVote.getValue());
}
@@ -538,7 +538,7 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
handle = CallbackHandle{};
}
fassert(40446, handle);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_finishLoadLocalConfigCbh = std::move(handle.getValue());
return false;
@@ -638,7 +638,7 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
// applied optime is never greater than the latest cluster time in the logical clock.
_externalState->setGlobalTimestamp(getServiceContext(), lastOpTime.getTimestamp());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
invariant(_rsConfigState == kConfigStartingUp);
const PostMemberStateUpdateAction action =
_setCurrentRSConfig(lock, opCtx.get(), localConfig, myIndex.getValue());
@@ -655,7 +655,7 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
}
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Step down is impossible, so we don't need to wait for the returned event.
_updateTerm_inlock(term);
}
@@ -671,7 +671,7 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
void ReplicationCoordinatorImpl::_stopDataReplication(OperationContext* opCtx) {
std::shared_ptr<InitialSyncer> initialSyncerCopy;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_initialSyncer.swap(initialSyncerCopy);
}
if (initialSyncerCopy) {
@@ -713,7 +713,7 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx,
auto onCompletion = [this, startCompleted](const StatusWith<OpTimeAndWallTime>& opTimeStatus) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (opTimeStatus == ErrorCodes::CallbackCanceled) {
log() << "Initial Sync has been cancelled: " << opTimeStatus.getStatus();
return;
@@ -754,7 +754,7 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx,
try {
{
// Must take the lock to set _initialSyncer, but not call it.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_inShutdown) {
log() << "Initial Sync not starting because replication is shutting down.";
return;
@@ -811,7 +811,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* opCtx) {
storageGlobalParams.readOnly = true;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_setConfigState_inlock(kConfigReplicationDisabled);
return;
}
@@ -819,7 +819,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* opCtx) {
invariant(!ReplSettings::shouldRecoverFromOplogAsStandalone());
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
fassert(18822, !_inShutdown);
_setConfigState_inlock(kConfigStartingUp);
_topCoord->setStorageEngineSupportsReadCommitted(
@@ -835,7 +835,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* opCtx) {
if (doneLoadingConfig) {
// If we're not done loading the config, then the config state will be set by
// _finishLoadLocalConfig.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_rsConfig.isInitialized());
_setConfigState_inlock(kConfigUninitialized);
}
@@ -861,7 +861,7 @@ void ReplicationCoordinatorImpl::shutdown(OperationContext* opCtx) {
// Used to shut down outside of the lock.
std::shared_ptr<InitialSyncer> initialSyncerCopy;
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
fassert(28533, !_inShutdown);
_inShutdown = true;
if (_rsConfigState == kConfigPreStart) {
@@ -909,12 +909,12 @@ ReplicationCoordinator::Mode ReplicationCoordinatorImpl::getReplicationMode() co
}
MemberState ReplicationCoordinatorImpl::getMemberState() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _getMemberState_inlock();
}
std::vector<MemberData> ReplicationCoordinatorImpl::getMemberData() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _topCoord->getMemberData();
}
@@ -928,7 +928,7 @@ Status ReplicationCoordinatorImpl::waitForMemberState(MemberState expectedState,
return Status(ErrorCodes::BadValue, "Timeout duration cannot be negative");
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto pred = [this, expectedState]() { return _memberState == expectedState; };
if (!_memberStateChange.wait_for(lk, timeout.toSystemDuration(), pred)) {
return Status(ErrorCodes::ExceededTimeLimit,
@@ -940,7 +940,7 @@ Status ReplicationCoordinatorImpl::waitForMemberState(MemberState expectedState,
}
Seconds ReplicationCoordinatorImpl::getSlaveDelaySecs() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_rsConfig.isInitialized());
if (_selfIndex == -1) {
// We aren't currently in the set. Return 0 seconds so we can clear out the applier's
@@ -951,7 +951,7 @@ Seconds ReplicationCoordinatorImpl::getSlaveDelaySecs() const {
}
void ReplicationCoordinatorImpl::clearSyncSourceBlacklist() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_topCoord->clearSyncSourceBlacklist();
}
@@ -968,7 +968,7 @@ Status ReplicationCoordinatorImpl::setFollowerMode(const MemberState& newState)
Status ReplicationCoordinatorImpl::_setFollowerMode(OperationContext* opCtx,
const MemberState& newState) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (newState == _topCoord->getMemberState()) {
return Status::OK();
}
@@ -999,7 +999,7 @@ Status ReplicationCoordinatorImpl::_setFollowerMode(OperationContext* opCtx,
}
ReplicationCoordinator::ApplierState ReplicationCoordinatorImpl::getApplierState() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _applierState;
}
@@ -1031,7 +1031,7 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx,
// When we go to drop all temp collections, we must replicate the drops.
invariant(opCtx->writesAreReplicated());
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_applierState != ApplierState::Draining) {
return;
}
@@ -1092,7 +1092,7 @@ Status ReplicationCoordinatorImpl::waitForDrainFinish(Milliseconds timeout) {
return Status(ErrorCodes::BadValue, "Timeout duration cannot be negative");
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto pred = [this]() { return _applierState != ApplierState::Draining; };
if (!_drainFinishedCond.wait_for(lk, timeout.toSystemDuration(), pred)) {
return Status(ErrorCodes::ExceededTimeLimit,
@@ -1107,7 +1107,7 @@ void ReplicationCoordinatorImpl::signalUpstreamUpdater() {
}
void ReplicationCoordinatorImpl::setMyHeartbeatMessage(const std::string& msg) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_topCoord->setMyHeartbeatMessage(_replExecutor->now(), msg);
}
@@ -1118,7 +1118,7 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTimeForward(
const auto opTime = opTimeAndWallTime.opTime;
_externalState->setGlobalTimestamp(getServiceContext(), opTime.getTimestamp());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto myLastAppliedOpTime = _getMyLastAppliedOpTime_inlock();
if (opTime > myLastAppliedOpTime) {
_setMyLastAppliedOpTimeAndWallTime(lock, opTimeAndWallTime, false, consistency);
@@ -1144,7 +1144,7 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTimeForward(
void ReplicationCoordinatorImpl::setMyLastDurableOpTimeAndWallTimeForward(
const OpTimeAndWallTime& opTimeAndWallTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (opTimeAndWallTime.opTime > _getMyLastDurableOpTime_inlock()) {
_setMyLastDurableOpTimeAndWallTime(lock, opTimeAndWallTime, false);
_reportUpstream_inlock(std::move(lock));
@@ -1158,7 +1158,7 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTime(
// applied optime is never greater than the latest cluster time in the logical clock.
_externalState->setGlobalTimestamp(getServiceContext(), opTime.getTimestamp());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
// The optime passed to this function is required to represent a consistent database state.
_setMyLastAppliedOpTimeAndWallTime(lock, opTimeAndWallTime, false, DataConsistency::Consistent);
_reportUpstream_inlock(std::move(lock));
@@ -1166,13 +1166,13 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTime(
void ReplicationCoordinatorImpl::setMyLastDurableOpTimeAndWallTime(
const OpTimeAndWallTime& opTimeAndWallTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_setMyLastDurableOpTimeAndWallTime(lock, opTimeAndWallTime, false);
_reportUpstream_inlock(std::move(lock));
}
void ReplicationCoordinatorImpl::resetMyLastOpTimes() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_resetMyLastOpTimes(lock);
_reportUpstream_inlock(std::move(lock));
}
@@ -1187,7 +1187,7 @@ void ReplicationCoordinatorImpl::_resetMyLastOpTimes(WithLock lk) {
_stableOpTimeCandidates.clear();
}
-void ReplicationCoordinatorImpl::_reportUpstream_inlock(stdx::unique_lock<stdx::mutex> lock) {
+void ReplicationCoordinatorImpl::_reportUpstream_inlock(stdx::unique_lock<Latch> lock) {
invariant(lock.owns_lock());
if (getReplicationMode() != modeReplSet) {
@@ -1274,22 +1274,22 @@ void ReplicationCoordinatorImpl::_setMyLastDurableOpTimeAndWallTime(
}
OpTime ReplicationCoordinatorImpl::getMyLastAppliedOpTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyLastAppliedOpTime_inlock();
}
OpTimeAndWallTime ReplicationCoordinatorImpl::getMyLastAppliedOpTimeAndWallTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyLastAppliedOpTimeAndWallTime_inlock();
}
OpTimeAndWallTime ReplicationCoordinatorImpl::getMyLastDurableOpTimeAndWallTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyLastDurableOpTimeAndWallTime_inlock();
}
OpTime ReplicationCoordinatorImpl::getMyLastDurableOpTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyLastDurableOpTime_inlock();
}
@@ -1396,7 +1396,7 @@ Status ReplicationCoordinatorImpl::_waitUntilOpTime(OperationContext* opCtx,
}
}
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (isMajorityCommittedRead && !_externalState->snapshotsEnabled()) {
return {ErrorCodes::CommandNotSupported,
@@ -1563,7 +1563,7 @@ Status ReplicationCoordinatorImpl::setLastDurableOptime_forTest(long long cfgVer
long long memberId,
const OpTime& opTime,
Date_t wallTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(getReplicationMode() == modeReplSet);
if (wallTime == Date_t()) {
@@ -1582,7 +1582,7 @@ Status ReplicationCoordinatorImpl::setLastAppliedOptime_forTest(long long cfgVer
long long memberId,
const OpTime& opTime,
Date_t wallTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(getReplicationMode() == modeReplSet);
if (wallTime == Date_t()) {
@@ -1682,7 +1682,7 @@ ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitRepli
OperationContext* opCtx, const OpTime& opTime, const WriteConcernOptions& writeConcern) {
Timer timer;
WriteConcernOptions fixedWriteConcern = populateUnsetWriteConcernOptionsSyncMode(writeConcern);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto status = _awaitReplication_inlock(&lock, opCtx, opTime, fixedWriteConcern);
return {std::move(status), duration_cast<Milliseconds>(timer.elapsed())};
}
@@ -1705,7 +1705,7 @@ BSONObj ReplicationCoordinatorImpl::_getReplicationProgress(WithLock wl) const {
return progress.obj();
}
Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
- stdx::unique_lock<stdx::mutex>* lock,
+ stdx::unique_lock<Latch>* lock,
OperationContext* opCtx,
const OpTime& opTime,
const WriteConcernOptions& writeConcern) {
@@ -1825,7 +1825,7 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
void ReplicationCoordinatorImpl::waitForStepDownAttempt_forTest() {
auto isSteppingDown = [&]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// If true, we know that a stepdown is underway.
return (_topCoord->isSteppingDown());
};
@@ -1924,7 +1924,7 @@ void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::_killOpThreadFn()
// X mode for the first time. This ensures that no writing operations will continue
// after the node's term change.
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_stopKillingOps.wait_for(
lock, Milliseconds(10).toSystemDuration(), [this] { return _killSignaled; })) {
log() << "Stopped killing user operations";
@@ -1940,7 +1940,7 @@ void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::_stopAndWaitForKi
return;
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_killSignaled = true;
_stopKillingOps.notify_all();
}
@@ -2000,7 +2000,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
auto deadline = force ? stepDownUntil : waitUntil;
AutoGetRstlForStepUpStepDown arsd(this, opCtx, deadline);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
opCtx->checkForInterrupt();
@@ -2033,7 +2033,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
while (MONGO_FAIL_POINT(stepdownHangBeforePerformingPostMemberStateUpdateActions)) {
mongo::sleepsecs(1);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_inShutdown) {
break;
}
@@ -2139,7 +2139,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
}
void ReplicationCoordinatorImpl::_performElectionHandoff() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto candidateIndex = _topCoord->chooseElectionHandoffCandidate();
if (candidateIndex < 0) {
@@ -2187,7 +2187,7 @@ bool ReplicationCoordinatorImpl::isMasterForReportingPurposes() {
return true;
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(getReplicationMode() == modeReplSet);
return _getMemberState_inlock().primary();
}
@@ -2216,7 +2216,7 @@ bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationCont
}
bool ReplicationCoordinatorImpl::canAcceptNonLocalWrites() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _readWriteAbility->canAcceptNonLocalWrites(lk);
}
@@ -2248,7 +2248,7 @@ bool ReplicationCoordinatorImpl::canAcceptWritesFor_UNSAFE(OperationContext* opC
return true;
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_memberState.rollback()) {
return false;
}
@@ -2276,7 +2276,7 @@ Status ReplicationCoordinatorImpl::checkCanServeReadsFor_UNSAFE(OperationContext
// Oplog reads are not allowed during STARTUP state, but we make an exception for internal
// reads. Internal reads are required for cleaning up unfinished apply batches.
if (!isPrimaryOrSecondary && getReplicationMode() == modeReplSet && ns.isOplog()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if ((_memberState.startup() && client->isFromUserConnection()) || _memberState.startup2() ||
_memberState.rollback()) {
return Status{ErrorCodes::NotMasterOrSecondary,
@@ -2320,17 +2320,17 @@ bool ReplicationCoordinatorImpl::shouldRelaxIndexConstraints(OperationContext* o
}
OID ReplicationCoordinatorImpl::getElectionId() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _electionId;
}
int ReplicationCoordinatorImpl::getMyId() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyId_inlock();
}
HostAndPort ReplicationCoordinatorImpl::getMyHostAndPort() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _rsConfig.getMemberAt(_selfIndex).getHostAndPort();
}
@@ -2347,7 +2347,7 @@ Status ReplicationCoordinatorImpl::resyncData(OperationContext* opCtx, bool wait
f = [&finishedEvent, this]() { _replExecutor->signalEvent(finishedEvent); };
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_resetMyLastOpTimes(lk);
}
// unlock before calling _startDataReplication().
@@ -2359,7 +2359,7 @@ Status ReplicationCoordinatorImpl::resyncData(OperationContext* opCtx, bool wait
}
StatusWith<BSONObj> ReplicationCoordinatorImpl::prepareReplSetUpdatePositionCommand() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _topCoord->prepareReplSetUpdatePositionCommand(
_getCurrentCommittedSnapshotOpTime_inlock());
}
@@ -2371,7 +2371,7 @@ Status ReplicationCoordinatorImpl::processReplSetGetStatus(
if (responseStyle == ReplSetGetStatusResponseStyle::kInitialSync) {
std::shared_ptr<InitialSyncer> initialSyncerCopy;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
initialSyncerCopy = _initialSyncer;
}
@@ -2388,7 +2388,7 @@ Status ReplicationCoordinatorImpl::processReplSetGetStatus(
BSONObj electionParticipantMetrics =
ReplicationMetrics::get(getServiceContext()).getElectionParticipantMetricsBSON();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
Status result(ErrorCodes::InternalError, "didn't set status in prepareStatusResponse");
_topCoord->prepareStatusResponse(
TopologyCoordinator::ReplSetStatusArgs{
@@ -2409,7 +2409,7 @@ void ReplicationCoordinatorImpl::fillIsMasterForReplSet(
IsMasterResponse* response, const SplitHorizon::Parameters& horizonParams) {
invariant(getSettings().usingReplSets());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_topCoord->fillIsMasterForReplSet(response, horizonParams);
OpTime lastOpTime = _getMyLastAppliedOpTime_inlock();
@@ -2432,17 +2432,17 @@ void ReplicationCoordinatorImpl::fillIsMasterForReplSet(
}
void ReplicationCoordinatorImpl::appendSlaveInfoData(BSONObjBuilder* result) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_topCoord->fillMemberData(result);
}
ReplSetConfig ReplicationCoordinatorImpl::getConfig() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _rsConfig;
}
void ReplicationCoordinatorImpl::processReplSetGetConfig(BSONObjBuilder* result) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
result->append("config", _rsConfig.toBSON());
}
@@ -2450,7 +2450,7 @@ void ReplicationCoordinatorImpl::processReplSetMetadata(const rpc::ReplSetMetada
EventHandle evh;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
evh = _processReplSetMetadata_inlock(replMetadata);
}
@@ -2460,7 +2460,7 @@ void ReplicationCoordinatorImpl::processReplSetMetadata(const rpc::ReplSetMetada
}
void ReplicationCoordinatorImpl::cancelAndRescheduleElectionTimeout() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_cancelAndRescheduleElectionTimeout_inlock();
}
@@ -2473,7 +2473,7 @@ EventHandle ReplicationCoordinatorImpl::_processReplSetMetadata_inlock(
}
bool ReplicationCoordinatorImpl::getMaintenanceMode() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _topCoord->getMaintenanceCount() > 0;
}
@@ -2483,7 +2483,7 @@ Status ReplicationCoordinatorImpl::setMaintenanceMode(bool activate) {
"can only set maintenance mode on replica set members");
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_topCoord->getRole() == TopologyCoordinator::Role::kCandidate) {
return Status(ErrorCodes::NotSecondary, "currently running for election");
}
@@ -2522,7 +2522,7 @@ Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* opCt
Status result(ErrorCodes::InternalError, "didn't set status in prepareSyncFromResponse");
auto doResync = false;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_topCoord->prepareSyncFromResponse(target, resultObj, &result);
// If we are in the middle of an initial sync, do a resync.
doResync = result.isOK() && _initialSyncer && _initialSyncer->isActive();
@@ -2537,7 +2537,7 @@ Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* opCt
Status ReplicationCoordinatorImpl::processReplSetFreeze(int secs, BSONObjBuilder* resultObj) {
auto result = [=]() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _topCoord->prepareFreezeResponse(_replExecutor->now(), secs, resultObj);
}();
if (!result.isOK()) {
@@ -2560,7 +2560,7 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCt
log() << "replSetReconfig admin command received from client; new config: "
<< args.newConfigObj;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
_rsConfigStateChange.wait(lk);
@@ -2665,7 +2665,7 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(OperationContext* opCtx,
// Do not conduct an election during a reconfig, as the node may not be electable post-reconfig.
executor::TaskExecutor::EventHandle electionFinishedEvent;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
electionFinishedEvent = _cancelElectionIfNeeded_inlock();
}
@@ -2680,7 +2680,7 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(OperationContext* opCtx,
}
boost::optional<AutoGetRstlForStepUpStepDown> arsd;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (isForceReconfig && _shouldStepDownOnReconfig(lk, newConfig, myIndex)) {
_topCoord->prepareForUnconditionalStepDown();
lk.unlock();
@@ -2739,7 +2739,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
log() << "replSetInitiate admin command received from client";
const auto replEnabled = _settings.usingReplSets();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!replEnabled) {
return Status(ErrorCodes::NoReplicationEnabled, "server is not running with --replSet");
}
@@ -2828,7 +2828,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
void ReplicationCoordinatorImpl::_finishReplSetInitiate(OperationContext* opCtx,
const ReplSetConfig& newConfig,
int myIndex) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_rsConfigState == kConfigInitiating);
invariant(!_rsConfig.isInitialized());
auto action = _setCurrentRSConfig(lk, opCtx, newConfig, myIndex);
@@ -3059,7 +3059,7 @@ void ReplicationCoordinatorImpl::CatchupState::start_inlock() {
if (!cbData.status.isOK()) {
return;
}
- stdx::lock_guard<stdx::mutex> lk(*mutex);
+ stdx::lock_guard<Latch> lk(*mutex);
// Check whether the callback has been cancelled while holding mutex.
if (cbData.myHandle.isCanceled()) {
return;
@@ -3169,7 +3169,7 @@ void ReplicationCoordinatorImpl::CatchupState::incrementNumCatchUpOps_inlock(lon
}
Status ReplicationCoordinatorImpl::abortCatchupIfNeeded(PrimaryCatchUpConclusionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_catchupState) {
_catchupState->abort_inlock(reason);
return Status::OK();
@@ -3178,14 +3178,14 @@ Status ReplicationCoordinatorImpl::abortCatchupIfNeeded(PrimaryCatchUpConclusion
}
void ReplicationCoordinatorImpl::incrementNumCatchUpOpsIfCatchingUp(long numOps) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_catchupState) {
_catchupState->incrementNumCatchUpOps_inlock(numOps);
}
}
void ReplicationCoordinatorImpl::signalDropPendingCollectionsRemovedFromStorage() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_wakeReadyWaiters(lock);
}
@@ -3302,7 +3302,7 @@ void ReplicationCoordinatorImpl::_wakeReadyWaiters(WithLock lk) {
Status ReplicationCoordinatorImpl::processReplSetUpdatePosition(const UpdatePositionArgs& updates,
long long* configVersion) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
Status status = Status::OK();
bool somethingChanged = false;
for (UpdatePositionArgs::UpdateIterator update = updates.updatesBegin();
@@ -3324,7 +3324,7 @@ Status ReplicationCoordinatorImpl::processReplSetUpdatePosition(const UpdatePosi
}
bool ReplicationCoordinatorImpl::buildsIndexes() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_selfIndex == -1) {
return true;
}
@@ -3334,12 +3334,12 @@ bool ReplicationCoordinatorImpl::buildsIndexes() {
std::vector<HostAndPort> ReplicationCoordinatorImpl::getHostsWrittenTo(const OpTime& op,
bool durablyWritten) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _topCoord->getHostsWrittenTo(op, durablyWritten);
}
std::vector<HostAndPort> ReplicationCoordinatorImpl::getOtherNodesInReplSet() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_settings.usingReplSets());
std::vector<HostAndPort> nodes;
@@ -3358,7 +3358,7 @@ std::vector<HostAndPort> ReplicationCoordinatorImpl::getOtherNodesInReplSet() co
Status ReplicationCoordinatorImpl::checkIfWriteConcernCanBeSatisfied(
const WriteConcernOptions& writeConcern) const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
}
@@ -3375,7 +3375,7 @@ Status ReplicationCoordinatorImpl::_checkIfWriteConcernCanBeSatisfied_inlock(
Status ReplicationCoordinatorImpl::checkIfCommitQuorumCanBeSatisfied(
const CommitQuorumOptions& commitQuorum) const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _checkIfCommitQuorumCanBeSatisfied(lock, commitQuorum);
}
@@ -3408,7 +3408,7 @@ StatusWith<bool> ReplicationCoordinatorImpl::checkIfCommitQuorumIsSatisfied(
// If the 'commitQuorum' cannot be satisfied with all the members of this replica set, we
// need to inform the caller to avoid hanging while waiting for satisfiability of the
// 'commitQuorum' with 'commitReadyMembers' due to replica set reconfigurations.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
Status status = _checkIfCommitQuorumCanBeSatisfied(lock, commitQuorum);
if (!status.isOK()) {
return status;
@@ -3419,7 +3419,7 @@ StatusWith<bool> ReplicationCoordinatorImpl::checkIfCommitQuorumIsSatisfied(
}
WriteConcernOptions ReplicationCoordinatorImpl::getGetLastErrorDefault() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_rsConfig.isInitialized()) {
return _rsConfig.getDefaultWriteConcern();
}
@@ -3447,7 +3447,7 @@ bool ReplicationCoordinatorImpl::isReplEnabled() const {
}
HostAndPort ReplicationCoordinatorImpl::chooseNewSyncSource(const OpTime& lastOpTimeFetched) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
HostAndPort oldSyncSource = _topCoord->getSyncSourceAddress();
// Always allow chaining while in catchup and drain mode.
@@ -3472,12 +3472,12 @@ void ReplicationCoordinatorImpl::_unblacklistSyncSource(
if (cbData.status == ErrorCodes::CallbackCanceled)
return;
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_topCoord->unblacklistSyncSource(host, _replExecutor->now());
}
void ReplicationCoordinatorImpl::blacklistSyncSource(const HostAndPort& host, Date_t until) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_topCoord->blacklistSyncSource(host, until);
_scheduleWorkAt(until, [=](const executor::TaskExecutor::CallbackArgs& cbData) {
_unblacklistSyncSource(cbData, host);
@@ -3501,7 +3501,7 @@ void ReplicationCoordinatorImpl::resetLastOpTimesFromOplog(OperationContext* opC
_externalState->setGlobalTimestamp(opCtx->getServiceContext(),
lastOpTimeAndWallTime.opTime.getTimestamp());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
bool isRollbackAllowed = true;
_setMyLastAppliedOpTimeAndWallTime(lock, lastOpTimeAndWallTime, isRollbackAllowed, consistency);
_setMyLastDurableOpTimeAndWallTime(lock, lastOpTimeAndWallTime, isRollbackAllowed);
@@ -3512,7 +3512,7 @@ bool ReplicationCoordinatorImpl::shouldChangeSyncSource(
const HostAndPort& currentSource,
const rpc::ReplSetMetadata& replMetadata,
boost::optional<rpc::OplogQueryMetadata> oqMetadata) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _topCoord->shouldChangeSyncSource(
currentSource, replMetadata, oqMetadata, _replExecutor->now());
}
@@ -3608,7 +3608,7 @@ void ReplicationCoordinatorImpl::_cleanupStableOpTimeCandidates(
boost::optional<OpTimeAndWallTime>
ReplicationCoordinatorImpl::chooseStableOpTimeFromCandidates_forTest(
const std::set<OpTimeAndWallTime>& candidates, const OpTimeAndWallTime& maximumStableOpTime) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _chooseStableOpTimeFromCandidates(lk, candidates, maximumStableOpTime);
}
void ReplicationCoordinatorImpl::cleanupStableOpTimeCandidates_forTest(
@@ -3617,12 +3617,12 @@ void ReplicationCoordinatorImpl::cleanupStableOpTimeCandidates_forTest(
}
std::set<OpTimeAndWallTime> ReplicationCoordinatorImpl::getStableOpTimeCandidates_forTest() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _stableOpTimeCandidates;
}
void ReplicationCoordinatorImpl::attemptToAdvanceStableTimestamp() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_setStableTimestampForStorage(lk);
}
@@ -3704,7 +3704,7 @@ void ReplicationCoordinatorImpl::_setStableTimestampForStorage(WithLock lk) {
void ReplicationCoordinatorImpl::advanceCommitPoint(
const OpTimeAndWallTime& committedOpTimeAndWallTime, bool fromSyncSource) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_advanceCommitPoint(lk, committedOpTimeAndWallTime, fromSyncSource);
}
@@ -3726,12 +3726,12 @@ void ReplicationCoordinatorImpl::_advanceCommitPoint(
}
OpTime ReplicationCoordinatorImpl::getLastCommittedOpTime() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _topCoord->getLastCommittedOpTime();
}
OpTimeAndWallTime ReplicationCoordinatorImpl::getLastCommittedOpTimeAndWallTime() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _topCoord->getLastCommittedOpTimeAndWallTime();
}
@@ -3745,7 +3745,7 @@ Status ReplicationCoordinatorImpl::processReplSetRequestVotes(
return termStatus;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// We should only enter terminal shutdown from global terminal exit. In that case, rather
// than voting in a term we don't plan to stay alive in, refuse to vote.
@@ -3812,7 +3812,7 @@ void ReplicationCoordinatorImpl::prepareReplMetadata(const BSONObj& metadataRequ
invariant(-1 != rbid);
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (hasReplSetMetadata) {
_prepareReplSetMetadata_inlock(lastOpTimeFromClient, builder);
@@ -3847,7 +3847,7 @@ bool ReplicationCoordinatorImpl::getWriteConcernMajorityShouldJournal_inlock() c
Status ReplicationCoordinatorImpl::processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
ReplSetHeartbeatResponse* response) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
return Status(ErrorCodes::NotYetInitialized,
"Received heartbeat while still initializing replication system");
@@ -3855,7 +3855,7 @@ Status ReplicationCoordinatorImpl::processHeartbeatV1(const ReplSetHeartbeatArgs
}
Status result(ErrorCodes::InternalError, "didn't set status in prepareHeartbeatResponse");
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto senderHost(args.getSenderHost());
const Date_t now = _replExecutor->now();
@@ -3888,7 +3888,7 @@ long long ReplicationCoordinatorImpl::getTerm() {
EventHandle ReplicationCoordinatorImpl::updateTerm_forTest(
long long term, TopologyCoordinator::UpdateTermResult* updateResult) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
EventHandle finishEvh;
finishEvh = _updateTerm_inlock(term, updateResult);
@@ -3907,7 +3907,7 @@ Status ReplicationCoordinatorImpl::updateTerm(OperationContext* opCtx, long long
EventHandle finishEvh;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
finishEvh = _updateTerm_inlock(term, &updateTermResult);
}
@@ -3960,7 +3960,7 @@ EventHandle ReplicationCoordinatorImpl::_updateTerm_inlock(
void ReplicationCoordinatorImpl::waitUntilSnapshotCommitted(OperationContext* opCtx,
const Timestamp& untilSnapshot) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
uassert(ErrorCodes::NotYetInitialized,
"Cannot use snapshots until replica set is finished initializing.",
@@ -3976,7 +3976,7 @@ size_t ReplicationCoordinatorImpl::getNumUncommittedSnapshots() {
}
void ReplicationCoordinatorImpl::createWMajorityWriteAvailabilityDateWaiter(OpTime opTime) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto opTimeCB = [this, opTime]() {
ReplicationMetrics::get(getServiceContext())
.setWMajorityWriteAvailabilityDate(_replExecutor->now());
@@ -4022,7 +4022,7 @@ bool ReplicationCoordinatorImpl::_updateCommittedSnapshot(
}
void ReplicationCoordinatorImpl::dropAllSnapshots() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_dropAllSnapshots_inlock();
}
@@ -4068,7 +4068,7 @@ EventHandle ReplicationCoordinatorImpl::_makeEvent() {
WriteConcernOptions ReplicationCoordinatorImpl::populateUnsetWriteConcernOptionsSyncMode(
WriteConcernOptions wc) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _populateUnsetWriteConcernOptionsSyncMode(lock, wc);
}
@@ -4104,7 +4104,7 @@ Status ReplicationCoordinatorImpl::stepUpIfEligible(bool skipDryRun) {
EventHandle finishEvent;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
finishEvent = _electionFinishedEvent;
}
if (finishEvent.isValid()) {
@@ -4114,7 +4114,7 @@ Status ReplicationCoordinatorImpl::stepUpIfEligible(bool skipDryRun) {
// Step up is considered successful only if we are currently a primary and we are not in the
// process of stepping down. If we know we are going to step down, we should fail the
// replSetStepUp command so caller can retry if necessary.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_getMemberState_inlock().primary())
return Status(ErrorCodes::CommandFailed, "Election failed.");
else if (_topCoord->isSteppingDown())
@@ -4137,7 +4137,7 @@ int64_t ReplicationCoordinatorImpl::_nextRandomInt64_inlock(int64_t limit) {
}
bool ReplicationCoordinatorImpl::setContainsArbiter() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _rsConfig.containsArbiter();
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 503186a1e8e..07bdf226a80 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -569,7 +569,7 @@ private:
// Tracks number of operations left running on step down.
size_t _userOpsRunning = 0;
// Protects killSignaled and stopKillingOps cond. variable.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AutoGetRstlForStepUpStepDown::_mutex");
// Signals thread about the change of killSignaled value.
stdx::condition_variable _stopKillingOps;
// Once this is set to true, the killOpThreadFn method will terminate.
@@ -800,7 +800,7 @@ private:
* Helper method for _awaitReplication that takes an already locked unique_lock, but leaves
* operation timing to the caller.
*/
- Status _awaitReplication_inlock(stdx::unique_lock<stdx::mutex>* lock,
+ Status _awaitReplication_inlock(stdx::unique_lock<Latch>* lock,
OperationContext* opCtx,
const OpTime& opTime,
const WriteConcernOptions& writeConcern);
@@ -852,7 +852,7 @@ private:
*
* Lock will be released after this method finishes.
*/
- void _reportUpstream_inlock(stdx::unique_lock<stdx::mutex> lock);
+ void _reportUpstream_inlock(stdx::unique_lock<Latch> lock);
/**
* Helpers to set the last applied and durable OpTime.
@@ -1135,10 +1135,10 @@ private:
*
* Requires "lock" to own _mutex, and returns the same unique_lock.
*/
- stdx::unique_lock<stdx::mutex> _handleHeartbeatResponseAction_inlock(
+ stdx::unique_lock<Latch> _handleHeartbeatResponseAction_inlock(
const HeartbeatResponseAction& action,
const StatusWith<ReplSetHeartbeatResponse>& responseStatus,
- stdx::unique_lock<stdx::mutex> lock);
+ stdx::unique_lock<Latch> lock);
/**
* Updates the last committed OpTime to be 'committedOpTime' if it is more recent than the
@@ -1360,7 +1360,7 @@ private:
// (I) Independently synchronized, see member variable comment.
// Protects member data of this ReplicationCoordinator.
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ReplicationCoordinatorImpl::_mutex"); // (S)
// Handles to actively queued heartbeats.
HeartbeatHandles _heartbeatHandles; // (M)
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index 04f191ad81c..cebb98aef11 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/repl/replication_metrics.h"
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/repl/vote_requester.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -93,7 +93,7 @@ public:
};
void ReplicationCoordinatorImpl::_startElectSelfV1(StartElectionReasonEnum reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_startElectSelfV1_inlock(reason);
}
@@ -181,7 +181,7 @@ void ReplicationCoordinatorImpl::_startElectSelfV1_inlock(StartElectionReasonEnu
void ReplicationCoordinatorImpl::_processDryRunResult(long long originalTerm,
StartElectionReasonEnum reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
LoseElectionDryRunGuardV1 lossGuard(this);
invariant(_voteRequester);
@@ -285,7 +285,7 @@ void ReplicationCoordinatorImpl::_writeLastVoteForMyElection(
return _externalState->storeLocalLastVoteDocument(opCtx.get(), lastVote);
}();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
LoseElectionDryRunGuardV1 lossGuard(this);
if (status == ErrorCodes::CallbackCanceled) {
return;
@@ -331,7 +331,7 @@ MONGO_FAIL_POINT_DEFINE(electionHangsBeforeUpdateMemberState);
void ReplicationCoordinatorImpl::_onVoteRequestComplete(long long newTerm,
StartElectionReasonEnum reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
LoseElectionGuardV1 lossGuard(this);
invariant(_voteRequester);
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index faec4c34d41..5b2a7730e04 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -53,10 +53,10 @@
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/repl/vote_requester.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
@@ -91,7 +91,7 @@ Milliseconds ReplicationCoordinatorImpl::_getRandomizedElectionOffset_inlock() {
void ReplicationCoordinatorImpl::_doMemberHeartbeat(executor::TaskExecutor::CallbackArgs cbData,
const HostAndPort& target,
int targetIndex) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_untrackHeartbeatHandle_inlock(cbData.myHandle);
if (cbData.status == ErrorCodes::CallbackCanceled) {
@@ -131,7 +131,7 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatToTarget_inlock(const HostAnd
void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData, int targetIndex) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// remove handle from queued heartbeats
_untrackHeartbeatHandle_inlock(cbData.myHandle);
@@ -263,10 +263,10 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
_handleHeartbeatResponseAction_inlock(action, hbStatusResponse, std::move(lk));
}
-stdx::unique_lock<stdx::mutex> ReplicationCoordinatorImpl::_handleHeartbeatResponseAction_inlock(
+stdx::unique_lock<Latch> ReplicationCoordinatorImpl::_handleHeartbeatResponseAction_inlock(
const HeartbeatResponseAction& action,
const StatusWith<ReplSetHeartbeatResponse>& responseStatus,
- stdx::unique_lock<stdx::mutex> lock) {
+ stdx::unique_lock<Latch> lock) {
invariant(lock.owns_lock());
switch (action.getAction()) {
case HeartbeatResponseAction::NoAction:
@@ -391,7 +391,7 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
"Blocking until fail point is disabled.";
auto inShutdown = [&] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _inShutdown;
};
@@ -406,7 +406,7 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
// have taken global lock in S mode and operations blocked on prepare conflict will be killed to
// avoid 3-way deadlock between read, prepared transaction and step down thread.
AutoGetRstlForStepUpStepDown arsd(this, opCtx.get());
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// This node has already stepped down due to reconfig. So, signal anyone who is waiting on the
// step down event.
@@ -512,7 +512,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
_externalState.get(), newConfig, getGlobalServiceContext());
if (myIndex.getStatus() == ErrorCodes::NodeNotFound) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If this node absent in newConfig, and this node was not previously initialized,
// return to kConfigUninitialized immediately, rather than storing the config and
// transitioning into the RS_REMOVED state. See SERVER-15740.
@@ -538,7 +538,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
auto status = _externalState->storeLocalConfigDocument(opCtx.get(), newConfig.toBSON());
bool isFirstConfig;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
isFirstConfig = !_rsConfig.isInitialized();
if (!status.isOK()) {
error() << "Ignoring new configuration in heartbeat response because we failed to"
@@ -609,7 +609,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
// we have already set our ReplicationCoordinatorImpl::_rsConfigState state to
// "kConfigReconfiguring" which prevents new elections from happening.
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (auto electionFinishedEvent = _cancelElectionIfNeeded_inlock()) {
LOG_FOR_HEARTBEATS(0)
<< "Waiting for election to complete before finishing reconfig to version "
@@ -628,7 +628,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
auto opCtx = cc().makeOperationContext();
boost::optional<AutoGetRstlForStepUpStepDown> arsd;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_shouldStepDownOnReconfig(lk, newConfig, myIndex)) {
_topCoord->prepareForUnconditionalStepDown();
lk.unlock();
@@ -755,7 +755,7 @@ void ReplicationCoordinatorImpl::_startHeartbeats_inlock() {
void ReplicationCoordinatorImpl::_handleLivenessTimeout(
const executor::TaskExecutor::CallbackArgs& cbData) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Only reset the callback handle if it matches, otherwise more will be coming through
if (cbData.myHandle == _handleLivenessTimeoutCbh) {
_handleLivenessTimeoutCbh = CallbackHandle();
@@ -878,7 +878,7 @@ void ReplicationCoordinatorImpl::_cancelAndRescheduleElectionTimeout_inlock() {
}
void ReplicationCoordinatorImpl::_startElectSelfIfEligibleV1(StartElectionReasonEnum reason) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// If it is not a single node replica set, no need to start an election after stepdown timeout.
if (reason == StartElectionReasonEnum::kSingleNodePromptElection &&
_rsConfig.getNumMembers() != 1) {
diff --git a/src/mongo/db/repl/replication_metrics.cpp b/src/mongo/db/repl/replication_metrics.cpp
index e7d55c50660..addb4f7027d 100644
--- a/src/mongo/db/repl/replication_metrics.cpp
+++ b/src/mongo/db/repl/replication_metrics.cpp
@@ -57,7 +57,7 @@ ReplicationMetrics::ReplicationMetrics()
ReplicationMetrics::~ReplicationMetrics() {}
void ReplicationMetrics::incrementNumElectionsCalledForReason(StartElectionReasonEnum reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (reason) {
case StartElectionReasonEnum::kStepUpRequest:
case StartElectionReasonEnum::kStepUpRequestSkipDryRun: {
@@ -89,7 +89,7 @@ void ReplicationMetrics::incrementNumElectionsCalledForReason(StartElectionReaso
}
void ReplicationMetrics::incrementNumElectionsSuccessfulForReason(StartElectionReasonEnum reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (reason) {
case StartElectionReasonEnum::kStepUpRequest:
case StartElectionReasonEnum::kStepUpRequestSkipDryRun: {
@@ -121,20 +121,20 @@ void ReplicationMetrics::incrementNumElectionsSuccessfulForReason(StartElectionR
}
void ReplicationMetrics::incrementNumStepDownsCausedByHigherTerm() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionMetrics.setNumStepDownsCausedByHigherTerm(
_electionMetrics.getNumStepDownsCausedByHigherTerm() + 1);
}
void ReplicationMetrics::incrementNumCatchUps() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionMetrics.setNumCatchUps(_electionMetrics.getNumCatchUps() + 1);
_updateAverageCatchUpOps(lk);
}
void ReplicationMetrics::incrementNumCatchUpsConcludedForReason(
ReplicationCoordinator::PrimaryCatchUpConclusionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (reason) {
case ReplicationCoordinator::PrimaryCatchUpConclusionReason::kSucceeded:
_electionMetrics.setNumCatchUpsSucceeded(_electionMetrics.getNumCatchUpsSucceeded() +
@@ -167,97 +167,97 @@ void ReplicationMetrics::incrementNumCatchUpsConcludedForReason(
}
long ReplicationMetrics::getNumStepUpCmdsCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getStepUpCmd().getCalled();
}
long ReplicationMetrics::getNumPriorityTakeoversCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getPriorityTakeover().getCalled();
}
long ReplicationMetrics::getNumCatchUpTakeoversCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getCatchUpTakeover().getCalled();
}
long ReplicationMetrics::getNumElectionTimeoutsCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getElectionTimeout().getCalled();
}
long ReplicationMetrics::getNumFreezeTimeoutsCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getFreezeTimeout().getCalled();
}
long ReplicationMetrics::getNumStepUpCmdsSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getStepUpCmd().getSuccessful();
}
long ReplicationMetrics::getNumPriorityTakeoversSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getPriorityTakeover().getSuccessful();
}
long ReplicationMetrics::getNumCatchUpTakeoversSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getCatchUpTakeover().getSuccessful();
}
long ReplicationMetrics::getNumElectionTimeoutsSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getElectionTimeout().getSuccessful();
}
long ReplicationMetrics::getNumFreezeTimeoutsSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getFreezeTimeout().getSuccessful();
}
long ReplicationMetrics::getNumStepDownsCausedByHigherTerm_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumStepDownsCausedByHigherTerm();
}
long ReplicationMetrics::getNumCatchUps_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUps();
}
long ReplicationMetrics::getNumCatchUpsSucceeded_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsSucceeded();
}
long ReplicationMetrics::getNumCatchUpsAlreadyCaughtUp_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsAlreadyCaughtUp();
}
long ReplicationMetrics::getNumCatchUpsSkipped_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsSkipped();
}
long ReplicationMetrics::getNumCatchUpsTimedOut_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsTimedOut();
}
long ReplicationMetrics::getNumCatchUpsFailedWithError_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsFailedWithError();
}
long ReplicationMetrics::getNumCatchUpsFailedWithNewTerm_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsFailedWithNewTerm();
}
long ReplicationMetrics::getNumCatchUpsFailedWithReplSetAbortPrimaryCatchUpCmd_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsFailedWithReplSetAbortPrimaryCatchUpCmd();
}
@@ -272,7 +272,7 @@ void ReplicationMetrics::setElectionCandidateMetrics(
const Milliseconds electionTimeout,
const boost::optional<int> priorPrimaryMemberId) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_nodeIsCandidateOrPrimary = true;
_electionCandidateMetrics.setLastElectionReason(reason);
@@ -288,12 +288,12 @@ void ReplicationMetrics::setElectionCandidateMetrics(
}
void ReplicationMetrics::setTargetCatchupOpTime(OpTime opTime) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setTargetCatchupOpTime(opTime);
}
void ReplicationMetrics::setNumCatchUpOps(long numCatchUpOps) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(numCatchUpOps >= 0);
_electionCandidateMetrics.setNumCatchUpOps(numCatchUpOps);
_totalNumCatchUpOps += numCatchUpOps;
@@ -301,27 +301,27 @@ void ReplicationMetrics::setNumCatchUpOps(long numCatchUpOps) {
}
void ReplicationMetrics::setCandidateNewTermStartDate(Date_t newTermStartDate) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setNewTermStartDate(newTermStartDate);
}
void ReplicationMetrics::setWMajorityWriteAvailabilityDate(Date_t wMajorityWriteAvailabilityDate) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setWMajorityWriteAvailabilityDate(wMajorityWriteAvailabilityDate);
}
boost::optional<OpTime> ReplicationMetrics::getTargetCatchupOpTime_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionCandidateMetrics.getTargetCatchupOpTime();
}
BSONObj ReplicationMetrics::getElectionMetricsBSON() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.toBSON();
}
BSONObj ReplicationMetrics::getElectionCandidateMetricsBSON() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_nodeIsCandidateOrPrimary) {
return _electionCandidateMetrics.toBSON();
}
@@ -329,7 +329,7 @@ BSONObj ReplicationMetrics::getElectionCandidateMetricsBSON() {
}
void ReplicationMetrics::clearElectionCandidateMetrics() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setTargetCatchupOpTime(boost::none);
_electionCandidateMetrics.setNumCatchUpOps(boost::none);
_electionCandidateMetrics.setNewTermStartDate(boost::none);
@@ -345,7 +345,7 @@ void ReplicationMetrics::setElectionParticipantMetrics(const bool votedForCandid
const OpTime lastAppliedOpTime,
const OpTime maxAppliedOpTimeInSet,
const double priorityAtElection) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_nodeHasVotedInElection = true;
_electionParticipantMetrics.setVotedForCandidate(votedForCandidate);
@@ -359,7 +359,7 @@ void ReplicationMetrics::setElectionParticipantMetrics(const bool votedForCandid
}
BSONObj ReplicationMetrics::getElectionParticipantMetricsBSON() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_nodeHasVotedInElection) {
return _electionParticipantMetrics.toBSON();
}
@@ -368,13 +368,13 @@ BSONObj ReplicationMetrics::getElectionParticipantMetricsBSON() {
void ReplicationMetrics::setParticipantNewTermDates(Date_t newTermStartDate,
Date_t newTermAppliedDate) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionParticipantMetrics.setNewTermStartDate(newTermStartDate);
_electionParticipantMetrics.setNewTermAppliedDate(newTermAppliedDate);
}
void ReplicationMetrics::clearParticipantNewTermDates() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionParticipantMetrics.setNewTermStartDate(boost::none);
_electionParticipantMetrics.setNewTermAppliedDate(boost::none);
}
diff --git a/src/mongo/db/repl/replication_metrics.h b/src/mongo/db/repl/replication_metrics.h
index 59d27ace445..108510bbcd8 100644
--- a/src/mongo/db/repl/replication_metrics.h
+++ b/src/mongo/db/repl/replication_metrics.h
@@ -32,7 +32,7 @@
#include "mongo/db/repl/replication_metrics_gen.h"
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -125,7 +125,7 @@ private:
void _updateAverageCatchUpOps(WithLock lk);
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ReplicationMetrics::_mutex");
ElectionMetrics _electionMetrics;
ElectionCandidateMetrics _electionCandidateMetrics;
ElectionParticipantMetrics _electionParticipantMetrics;
diff --git a/src/mongo/db/repl/replication_process.cpp b/src/mongo/db/repl/replication_process.cpp
index d3e77314cd3..117972289af 100644
--- a/src/mongo/db/repl/replication_process.cpp
+++ b/src/mongo/db/repl/replication_process.cpp
@@ -84,7 +84,7 @@ ReplicationProcess::ReplicationProcess(
_rbid(kUninitializedRollbackId) {}
Status ReplicationProcess::refreshRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto rbidResult = _storageInterface->getRollbackID(opCtx);
if (!rbidResult.isOK()) {
@@ -102,7 +102,7 @@ Status ReplicationProcess::refreshRollbackID(OperationContext* opCtx) {
}
int ReplicationProcess::getRollbackID() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (kUninitializedRollbackId == _rbid) {
// This may happen when serverStatus is called by an internal client before we have a chance
// to read the rollback ID from storage.
@@ -112,7 +112,7 @@ int ReplicationProcess::getRollbackID() const {
}
Status ReplicationProcess::initializeRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(kUninitializedRollbackId == _rbid);
@@ -132,7 +132,7 @@ Status ReplicationProcess::initializeRollbackID(OperationContext* opCtx) {
}
Status ReplicationProcess::incrementRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _storageInterface->incrementRollbackID(opCtx);
diff --git a/src/mongo/db/repl/replication_process.h b/src/mongo/db/repl/replication_process.h
index 849ac7df8c4..82c298d363d 100644
--- a/src/mongo/db/repl/replication_process.h
+++ b/src/mongo/db/repl/replication_process.h
@@ -38,7 +38,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_consistency_markers.h"
#include "mongo/db/repl/replication_recovery.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -103,7 +103,7 @@ private:
// (M) Reads and writes guarded by _mutex.
// Guards access to member variables.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ReplicationProcess::_mutex");
// Used to access the storage layer.
StorageInterface* const _storageInterface; // (R)
diff --git a/src/mongo/db/repl/replication_recovery_test.cpp b/src/mongo/db/repl/replication_recovery_test.cpp
index c97746080e5..bf440816b12 100644
--- a/src/mongo/db/repl/replication_recovery_test.cpp
+++ b/src/mongo/db/repl/replication_recovery_test.cpp
@@ -63,47 +63,47 @@ const NamespaceString testNs("a.a");
class StorageInterfaceRecovery : public StorageInterfaceImpl {
public:
boost::optional<Timestamp> getRecoveryTimestamp(ServiceContext* serviceCtx) const override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _recoveryTimestamp;
}
void setRecoveryTimestamp(Timestamp recoveryTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_recoveryTimestamp = recoveryTimestamp;
}
bool supportsRecoverToStableTimestamp(ServiceContext* serviceCtx) const override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _supportsRecoverToStableTimestamp;
}
void setSupportsRecoverToStableTimestamp(bool supports) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_supportsRecoverToStableTimestamp = supports;
}
bool supportsRecoveryTimestamp(ServiceContext* serviceCtx) const override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _supportsRecoveryTimestamp;
}
void setSupportsRecoveryTimestamp(bool supports) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_supportsRecoveryTimestamp = supports;
}
void setPointInTimeReadTimestamp(Timestamp pointInTimeReadTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_pointInTimeReadTimestamp = pointInTimeReadTimestamp;
}
Timestamp getPointInTimeReadTimestamp(OperationContext* opCtx) const override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _pointInTimeReadTimestamp;
}
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("StorageInterfaceRecovery::_mutex");
Timestamp _initialDataTimestamp = Timestamp::min();
boost::optional<Timestamp> _recoveryTimestamp = boost::none;
Timestamp _pointInTimeReadTimestamp = {};
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index 6ad2390d3c0..451c00615f9 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -118,17 +118,17 @@ std::string Reporter::toString() const {
}
HostAndPort Reporter::getTarget() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _target;
}
Milliseconds Reporter::getKeepAliveInterval() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _keepAliveInterval;
}
void Reporter::shutdown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_status = Status(ErrorCodes::CallbackCanceled, "Reporter no longer valid");
@@ -152,13 +152,13 @@ void Reporter::shutdown() {
}
Status Reporter::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() { return !_isActive_inlock(); });
return _status;
}
Status Reporter::trigger() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If these was a previous error then the reporter is dead and return that error.
if (!_status.isOK()) {
@@ -196,7 +196,7 @@ Status Reporter::trigger() {
StatusWith<BSONObj> Reporter::_prepareCommand() {
auto prepareResult = _prepareReplSetUpdatePositionCommandFn();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Reporter could have been canceled while preparing the command.
if (!_status.isOK()) {
@@ -239,7 +239,7 @@ void Reporter::_sendCommand_inlock(BSONObj commandRequest, Milliseconds netTimeo
void Reporter::_processResponseCallback(
const executor::TaskExecutor::RemoteCommandCallbackArgs& rcbd) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If the reporter was shut down before this callback is invoked,
// return the canceled "_status".
@@ -299,7 +299,7 @@ void Reporter::_processResponseCallback(
// Must call without holding the lock.
auto prepareResult = _prepareCommand();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_status.isOK()) {
_onShutdown_inlock();
return;
@@ -318,7 +318,7 @@ void Reporter::_processResponseCallback(
void Reporter::_prepareAndSendCommandCallback(const executor::TaskExecutor::CallbackArgs& args,
bool fromTrigger) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_status.isOK()) {
_onShutdown_inlock();
return;
@@ -341,7 +341,7 @@ void Reporter::_prepareAndSendCommandCallback(const executor::TaskExecutor::Call
// Must call without holding the lock.
auto prepareResult = _prepareCommand();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_status.isOK()) {
_onShutdown_inlock();
return;
@@ -367,7 +367,7 @@ void Reporter::_onShutdown_inlock() {
}
bool Reporter::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isActive_inlock();
}
@@ -376,12 +376,12 @@ bool Reporter::_isActive_inlock() const {
}
bool Reporter::isWaitingToSendReport() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isWaitingToSendReporter;
}
Date_t Reporter::getKeepAliveTimeoutWhen_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _keepAliveTimeoutWhen;
}
diff --git a/src/mongo/db/repl/reporter.h b/src/mongo/db/repl/reporter.h
index f332401ea2e..6e41083635a 100644
--- a/src/mongo/db/repl/reporter.h
+++ b/src/mongo/db/repl/reporter.h
@@ -34,9 +34,9 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -187,7 +187,7 @@ private:
const Milliseconds _updatePositionTimeout;
// Protects member data of this Reporter declared below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("Reporter::_mutex");
mutable stdx::condition_variable _condition;
diff --git a/src/mongo/db/repl/rollback_checker.cpp b/src/mongo/db/repl/rollback_checker.cpp
index cb5e57f6ae9..9089163aae5 100644
--- a/src/mongo/db/repl/rollback_checker.cpp
+++ b/src/mongo/db/repl/rollback_checker.cpp
@@ -33,14 +33,13 @@
#include "mongo/db/repl/rollback_checker.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
namespace mongo {
namespace repl {
using RemoteCommandCallbackArgs = executor::TaskExecutor::RemoteCommandCallbackArgs;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
RollbackChecker::RollbackChecker(executor::TaskExecutor* executor, HostAndPort syncSource)
: _executor(executor), _syncSource(syncSource), _baseRBID(-1), _lastRBID(-1) {
@@ -121,12 +120,12 @@ Status RollbackChecker::reset_sync() {
}
int RollbackChecker::getBaseRBID() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _baseRBID;
}
int RollbackChecker::getLastRBID_forTest() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastRBID;
}
diff --git a/src/mongo/db/repl/rollback_checker.h b/src/mongo/db/repl/rollback_checker.h
index ed589e57c7c..d1397cccae4 100644
--- a/src/mongo/db/repl/rollback_checker.h
+++ b/src/mongo/db/repl/rollback_checker.h
@@ -31,12 +31,11 @@
#include "mongo/base/status_with.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
-class Mutex;
-
/**
* The RollbackChecker maintains a sync source and its baseline rollback ID (rbid). It
* contains methods to check if a rollback occurred by checking if the rbid has changed since
@@ -119,7 +118,7 @@ private:
executor::TaskExecutor* const _executor;
// Protects member data of this RollbackChecker.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RollbackChecker::_mutex");
// The sync source to check for rollbacks against.
HostAndPort _syncSource;
diff --git a/src/mongo/db/repl/rollback_checker_test.cpp b/src/mongo/db/repl/rollback_checker_test.cpp
index 21ff4dbd149..1dd737557d1 100644
--- a/src/mongo/db/repl/rollback_checker_test.cpp
+++ b/src/mongo/db/repl/rollback_checker_test.cpp
@@ -45,7 +45,7 @@ using namespace mongo::repl;
using executor::NetworkInterfaceMock;
using executor::RemoteCommandResponse;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
class RollbackCheckerTest : public executor::ThreadPoolExecutorTest {
public:
@@ -57,7 +57,7 @@ protected:
std::unique_ptr<RollbackChecker> _rollbackChecker;
RollbackChecker::Result _hasRolledBackResult = {ErrorCodes::NotYetInitialized, ""};
bool _hasCalledCallback;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RollbackCheckerTest::_mutex");
};
void RollbackCheckerTest::setUp() {
@@ -65,7 +65,7 @@ void RollbackCheckerTest::setUp() {
launchExecutorThread();
getNet()->enterNetwork();
_rollbackChecker = stdx::make_unique<RollbackChecker>(&getExecutor(), HostAndPort());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_hasRolledBackResult = {ErrorCodes::NotYetInitialized, ""};
_hasCalledCallback = false;
}
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 73c484ec452..ae022c6e09e 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -264,12 +264,12 @@ Status RollbackImpl::runRollback(OperationContext* opCtx) {
}
void RollbackImpl::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_inShutdown = true;
}
bool RollbackImpl::_isInShutdown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _inShutdown;
}
diff --git a/src/mongo/db/repl/rollback_impl.h b/src/mongo/db/repl/rollback_impl.h
index 660231c4dbc..424b394fa95 100644
--- a/src/mongo/db/repl/rollback_impl.h
+++ b/src/mongo/db/repl/rollback_impl.h
@@ -448,7 +448,7 @@ private:
void _resetDropPendingState(OperationContext* opCtx);
// Guards access to member variables.
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RollbackImpl::_mutex"); // (S)
// Set to true when RollbackImpl should shut down.
bool _inShutdown = false; // (M)
diff --git a/src/mongo/db/repl/rollback_test_fixture.h b/src/mongo/db/repl/rollback_test_fixture.h
index 8f03742bfd4..a8a1e1fd690 100644
--- a/src/mongo/db/repl/rollback_test_fixture.h
+++ b/src/mongo/db/repl/rollback_test_fixture.h
@@ -119,7 +119,7 @@ protected:
class RollbackTest::StorageInterfaceRollback : public StorageInterfaceImpl {
public:
void setStableTimestamp(ServiceContext* serviceCtx, Timestamp snapshotName) override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_stableTimestamp = snapshotName;
}
@@ -129,7 +129,7 @@ public:
* of '_currTimestamp'.
*/
StatusWith<Timestamp> recoverToStableTimestamp(OperationContext* opCtx) override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_recoverToTimestampStatus) {
return _recoverToTimestampStatus.get();
} else {
@@ -152,17 +152,17 @@ public:
}
void setRecoverToTimestampStatus(Status status) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_recoverToTimestampStatus = status;
}
void setCurrentTimestamp(Timestamp ts) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_currTimestamp = ts;
}
Timestamp getCurrentTimestamp() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _currTimestamp;
}
@@ -172,7 +172,7 @@ public:
Status setCollectionCount(OperationContext* opCtx,
const NamespaceStringOrUUID& nsOrUUID,
long long newCount) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_setCollectionCountStatus && _setCollectionCountStatusUUID &&
nsOrUUID.uuid() == _setCollectionCountStatusUUID) {
return *_setCollectionCountStatus;
@@ -182,18 +182,18 @@ public:
}
void setSetCollectionCountStatus(UUID uuid, Status status) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_setCollectionCountStatus = status;
_setCollectionCountStatusUUID = uuid;
}
long long getFinalCollectionCount(const UUID& uuid) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _newCounts[uuid];
}
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("StorageInterfaceRollback::_mutex");
Timestamp _stableTimestamp;
diff --git a/src/mongo/db/repl/scatter_gather_runner.cpp b/src/mongo/db/repl/scatter_gather_runner.cpp
index 77c87778308..aca7d774b50 100644
--- a/src/mongo/db/repl/scatter_gather_runner.cpp
+++ b/src/mongo/db/repl/scatter_gather_runner.cpp
@@ -46,7 +46,7 @@ namespace mongo {
namespace repl {
using executor::RemoteCommandRequest;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
using CallbackHandle = executor::TaskExecutor::CallbackHandle;
using EventHandle = executor::TaskExecutor::EventHandle;
using RemoteCommandCallbackArgs = executor::TaskExecutor::RemoteCommandCallbackArgs;
diff --git a/src/mongo/db/repl/scatter_gather_runner.h b/src/mongo/db/repl/scatter_gather_runner.h
index 4d65e417efb..831e58b44d5 100644
--- a/src/mongo/db/repl/scatter_gather_runner.h
+++ b/src/mongo/db/repl/scatter_gather_runner.h
@@ -32,8 +32,8 @@
#include <vector>
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -134,7 +134,7 @@ private:
executor::TaskExecutor::EventHandle _sufficientResponsesReceived;
std::vector<executor::TaskExecutor::CallbackHandle> _callbacks;
bool _started = false;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("RunnerImpl::_mutex");
};
executor::TaskExecutor* _executor; // Not owned here.
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 19e7c8840fa..5c70c521a7b 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -87,7 +87,7 @@ const char StorageInterfaceImpl::kRollbackIdFieldName[] = "rollbackId";
const char StorageInterfaceImpl::kRollbackIdDocumentId[] = "rollbackId";
namespace {
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
const auto kIdIndexName = "_id_"_sd;
diff --git a/src/mongo/db/repl/storage_interface_mock.cpp b/src/mongo/db/repl/storage_interface_mock.cpp
index 77936b4453d..e9fa17504be 100644
--- a/src/mongo/db/repl/storage_interface_mock.cpp
+++ b/src/mongo/db/repl/storage_interface_mock.cpp
@@ -41,7 +41,7 @@ namespace mongo {
namespace repl {
StatusWith<int> StorageInterfaceMock::getRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (!_rbidInitialized) {
return Status(ErrorCodes::NamespaceNotFound, "Rollback ID not initialized");
}
@@ -49,7 +49,7 @@ StatusWith<int> StorageInterfaceMock::getRollbackID(OperationContext* opCtx) {
}
StatusWith<int> StorageInterfaceMock::initializeRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_rbidInitialized) {
return Status(ErrorCodes::NamespaceExists, "Rollback ID already initialized");
}
@@ -61,7 +61,7 @@ StatusWith<int> StorageInterfaceMock::initializeRollbackID(OperationContext* opC
}
StatusWith<int> StorageInterfaceMock::incrementRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (!_rbidInitialized) {
return Status(ErrorCodes::NamespaceNotFound, "Rollback ID not initialized");
}
@@ -70,23 +70,23 @@ StatusWith<int> StorageInterfaceMock::incrementRollbackID(OperationContext* opCt
}
void StorageInterfaceMock::setStableTimestamp(ServiceContext* serviceCtx, Timestamp snapshotName) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_stableTimestamp = snapshotName;
}
void StorageInterfaceMock::setInitialDataTimestamp(ServiceContext* serviceCtx,
Timestamp snapshotName) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_initialDataTimestamp = snapshotName;
}
Timestamp StorageInterfaceMock::getStableTimestamp() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _stableTimestamp;
}
Timestamp StorageInterfaceMock::getInitialDataTimestamp() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _initialDataTimestamp;
}
diff --git a/src/mongo/db/repl/storage_interface_mock.h b/src/mongo/db/repl/storage_interface_mock.h
index cc031904cb8..68811f01bab 100644
--- a/src/mongo/db/repl/storage_interface_mock.h
+++ b/src/mongo/db/repl/storage_interface_mock.h
@@ -43,7 +43,7 @@
#include "mongo/bson/timestamp.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/storage_interface.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -420,7 +420,7 @@ public:
Timestamp oldestOpenReadTimestamp = Timestamp::min();
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("StorageInterfaceMock::_mutex");
int _rbid;
bool _rbidInitialized = false;
Timestamp _stableTimestamp = Timestamp::min();
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 012bad86797..03b5af98376 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -98,7 +98,7 @@ Reporter::PrepareReplSetUpdatePositionCommandFn makePrepareReplSetUpdatePosition
void SyncSourceFeedback::forwardSlaveProgress() {
{
- stdx::unique_lock<stdx::mutex> lock(_mtx);
+ stdx::unique_lock<Latch> lock(_mtx);
_positionChanged = true;
_cond.notify_all();
if (_reporter) {
@@ -133,7 +133,7 @@ Status SyncSourceFeedback::_updateUpstream(Reporter* reporter) {
}
void SyncSourceFeedback::shutdown() {
- stdx::unique_lock<stdx::mutex> lock(_mtx);
+ stdx::unique_lock<Latch> lock(_mtx);
if (_reporter) {
_reporter->shutdown();
}
@@ -161,7 +161,7 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor,
// Take SyncSourceFeedback lock before calling into ReplicationCoordinator
// to avoid deadlock because ReplicationCoordinator could conceivably calling back into
// this class.
- stdx::unique_lock<stdx::mutex> lock(_mtx);
+ stdx::unique_lock<Latch> lock(_mtx);
while (!_positionChanged && !_shutdownSignaled) {
{
MONGO_IDLE_THREAD_BLOCK;
@@ -184,7 +184,7 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor,
}
{
- stdx::lock_guard<stdx::mutex> lock(_mtx);
+ stdx::lock_guard<Latch> lock(_mtx);
MemberState state = replCoord->getMemberState();
if (state.primary() || state.startup()) {
continue;
@@ -220,14 +220,14 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor,
keepAliveInterval,
syncSourceFeedbackNetworkTimeoutSecs);
{
- stdx::lock_guard<stdx::mutex> lock(_mtx);
+ stdx::lock_guard<Latch> lock(_mtx);
if (_shutdownSignaled) {
break;
}
_reporter = &reporter;
}
ON_BLOCK_EXIT([this]() {
- stdx::lock_guard<stdx::mutex> lock(_mtx);
+ stdx::lock_guard<Latch> lock(_mtx);
_reporter = nullptr;
});
diff --git a/src/mongo/db/repl/sync_source_feedback.h b/src/mongo/db/repl/sync_source_feedback.h
index a75cb23ad64..fdec94bff72 100644
--- a/src/mongo/db/repl/sync_source_feedback.h
+++ b/src/mongo/db/repl/sync_source_feedback.h
@@ -32,8 +32,8 @@
#include "mongo/base/status.h"
#include "mongo/db/repl/replication_coordinator.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
struct HostAndPort;
@@ -79,7 +79,7 @@ private:
Status _updateUpstream(Reporter* reporter);
// protects cond, _shutdownSignaled, _keepAliveInterval, and _positionChanged.
- stdx::mutex _mtx;
+ Mutex _mtx = MONGO_MAKE_LATCH("SyncSourceFeedback::_mtx");
// used to alert our thread of changes which need to be passed up the chain
stdx::condition_variable _cond;
// used to indicate a position change which has not yet been pushed along
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index af82a940d35..2baa83477e1 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -84,7 +84,7 @@ SyncSourceResolver::~SyncSourceResolver() {
}
bool SyncSourceResolver::isActive() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isActive_inlock();
}
@@ -94,7 +94,7 @@ bool SyncSourceResolver::_isActive_inlock() const {
Status SyncSourceResolver::startup() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
_state = State::kRunning;
@@ -112,7 +112,7 @@ Status SyncSourceResolver::startup() {
}
void SyncSourceResolver::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Transition directly from PreStart to Complete if not started yet.
if (State::kPreStart == _state) {
_state = State::kComplete;
@@ -136,12 +136,12 @@ void SyncSourceResolver::shutdown() {
}
void SyncSourceResolver::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() { return !_isActive_inlock(); });
}
bool SyncSourceResolver::_isShuttingDown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return State::kShuttingDown == _state;
}
@@ -205,7 +205,7 @@ std::unique_ptr<Fetcher> SyncSourceResolver::_makeRequiredOpTimeFetcher(HostAndP
}
Status SyncSourceResolver::_scheduleFetcher(std::unique_ptr<Fetcher> fetcher) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// TODO SERVER-27499 need to check if _state is kShuttingDown inside the mutex.
// Must schedule fetcher inside lock in case fetcher's callback gets invoked immediately by task
// executor.
@@ -340,7 +340,7 @@ Status SyncSourceResolver::_scheduleRBIDRequest(HostAndPort candidate, OpTime ea
// Once a work is scheduled, nothing prevents it finishing. We need the mutex to protect the
// access of member variables after scheduling, because otherwise the scheduled callback could
// finish and allow the destructor to fire before we access the member variables.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state == State::kShuttingDown) {
return Status(
ErrorCodes::CallbackCanceled,
@@ -529,7 +529,7 @@ Status SyncSourceResolver::_finishCallback(const SyncSourceResolverResponse& res
<< exceptionToStatus();
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state != State::kComplete);
_state = State::kComplete;
_condition.notify_all();
diff --git a/src/mongo/db/repl/sync_source_resolver.h b/src/mongo/db/repl/sync_source_resolver.h
index bf38628ac32..6f13242e5e0 100644
--- a/src/mongo/db/repl/sync_source_resolver.h
+++ b/src/mongo/db/repl/sync_source_resolver.h
@@ -37,9 +37,9 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/optime.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
@@ -234,7 +234,7 @@ private:
const OnCompletionFn _onCompletion;
// Protects members of this sync source resolver defined below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("SyncSourceResolver::_mutex");
mutable stdx::condition_variable _condition;
// State transitions:
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 1fced6c80f0..c3b20fbf009 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -158,7 +158,7 @@ private:
void _run();
// Protects _cond, _shutdownSignaled, and _latestOpTime.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ApplyBatchFinalizerForJournal::_mutex");
// Used to alert our thread of a new OpTime.
stdx::condition_variable _cond;
// The next OpTime to set as the ReplicationCoordinator's lastOpTime after flushing.
@@ -170,7 +170,7 @@ private:
};
ApplyBatchFinalizerForJournal::~ApplyBatchFinalizerForJournal() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_shutdownSignaled = true;
_cond.notify_all();
lock.unlock();
@@ -182,7 +182,7 @@ void ApplyBatchFinalizerForJournal::record(const OpTimeAndWallTime& newOpTimeAnd
ReplicationCoordinator::DataConsistency consistency) {
_recordApplied(newOpTimeAndWallTime, consistency);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_latestOpTimeAndWallTime = newOpTimeAndWallTime;
_cond.notify_all();
}
@@ -194,7 +194,7 @@ void ApplyBatchFinalizerForJournal::_run() {
OpTimeAndWallTime latestOpTimeAndWallTime = {OpTime(), Date_t()};
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (_latestOpTimeAndWallTime.opTime.isNull() && !_shutdownSignaled) {
_cond.wait(lock);
}
@@ -601,7 +601,7 @@ public:
}
OpQueue getNextBatch(Seconds maxWaitTime) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// _ops can indicate the following cases:
// 1. A new batch is ready to consume.
// 2. Shutdown.
@@ -713,7 +713,7 @@ private:
}
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Block until the previous batch has been taken.
_cv.wait(lk, [&] { return _ops.empty() && !_ops.termWhenExhausted(); });
_ops = std::move(ops);
@@ -730,7 +730,7 @@ private:
OplogBuffer* const _oplogBuffer;
OplogApplier::GetNextApplierBatchFn const _getNextApplierBatchFn;
- stdx::mutex _mutex; // Guards _ops.
+ Mutex _mutex = MONGO_MAKE_LATCH("OpQueueBatcher::_mutex"); // Guards _ops.
stdx::condition_variable _cv;
OpQueue _ops;
@@ -881,12 +881,12 @@ void SyncTail::shutdown() {
fassertFailedNoTrace(40304);
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_inShutdown = true;
}
bool SyncTail::inShutdown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _inShutdown;
}
diff --git a/src/mongo/db/repl/sync_tail.h b/src/mongo/db/repl/sync_tail.h
index c4923544c29..1bff8061adb 100644
--- a/src/mongo/db/repl/sync_tail.h
+++ b/src/mongo/db/repl/sync_tail.h
@@ -42,8 +42,8 @@
#include "mongo/db/repl/replication_consistency_markers.h"
#include "mongo/db/repl/session_update_tracker.h"
#include "mongo/db/repl/storage_interface.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
namespace mongo {
@@ -277,7 +277,7 @@ private:
const OplogApplier::Options _options;
// Protects member data of SyncTail.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("SyncTail::_mutex");
// Set to true if shutdown() has been called.
bool _inShutdown = false;
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index 188e00875a9..22024e7d35c 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -64,7 +64,7 @@
#include "mongo/db/session_txn_record_gen.h"
#include "mongo/db/stats/counters.h"
#include "mongo/db/transaction_participant_gen.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
@@ -498,7 +498,7 @@ protected:
_insertOp2->getOpTime());
_opObserver->onInsertsFn =
[&](OperationContext*, const NamespaceString& nss, const std::vector<BSONObj>& docs) {
- stdx::lock_guard<stdx::mutex> lock(_insertMutex);
+ stdx::lock_guard<Latch> lock(_insertMutex);
if (nss.isOplog() || nss == _nss1 || nss == _nss2 ||
nss == NamespaceString::kSessionTransactionsTableNamespace) {
_insertedDocs[nss].insert(_insertedDocs[nss].end(), docs.begin(), docs.end());
@@ -545,7 +545,7 @@ protected:
std::unique_ptr<ThreadPool> _writerPool;
private:
- stdx::mutex _insertMutex;
+ Mutex _insertMutex = MONGO_MAKE_LATCH("MultiOplogEntrySyncTailTest::_insertMutex");
};
TEST_F(MultiOplogEntrySyncTailTest, MultiApplyUnpreparedTransactionSeparate) {
@@ -881,7 +881,7 @@ protected:
_abortSinglePrepareApplyOp;
private:
- stdx::mutex _insertMutex;
+ Mutex _insertMutex = MONGO_MAKE_LATCH("MultiOplogEntryPreparedTransactionTest::_insertMutex");
};
TEST_F(MultiOplogEntryPreparedTransactionTest, MultiApplyPreparedTransactionSteadyState) {
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index 4c53b558aa1..86edc6da9c5 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -50,8 +50,8 @@ namespace mongo {
namespace repl {
namespace {
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
+using LockGuard = stdx::lock_guard<Latch>;
/**
@@ -87,7 +87,7 @@ TaskRunner::~TaskRunner() {
}
std::string TaskRunner::getDiagnosticString() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
str::stream output;
output << "TaskRunner";
output << " scheduled tasks: " << _tasks.size();
@@ -97,14 +97,14 @@ std::string TaskRunner::getDiagnosticString() const {
}
bool TaskRunner::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _active;
}
void TaskRunner::schedule(Task task) {
invariant(task);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_tasks.push_back(std::move(task));
_condition.notify_all();
@@ -123,7 +123,7 @@ void TaskRunner::schedule(Task task) {
}
void TaskRunner::cancel() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_cancelRequested = true;
_condition.notify_all();
}
@@ -159,7 +159,7 @@ void TaskRunner::_runTasks() {
// Release thread back to pool after disposing if no scheduled tasks in queue.
if (nextAction == NextAction::kDisposeOperationContext ||
nextAction == NextAction::kInvalid) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_tasks.empty()) {
_finishRunTasks_inlock();
return;
diff --git a/src/mongo/db/repl/task_runner.h b/src/mongo/db/repl/task_runner.h
index a63a428177f..202b64d6286 100644
--- a/src/mongo/db/repl/task_runner.h
+++ b/src/mongo/db/repl/task_runner.h
@@ -32,9 +32,9 @@
#include <list>
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/functional.h"
@@ -151,7 +151,7 @@ private:
ThreadPool* _threadPool;
// Protects member data of this TaskRunner.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TaskRunner::_mutex");
stdx::condition_variable _condition;
diff --git a/src/mongo/db/repl/task_runner_test.cpp b/src/mongo/db/repl/task_runner_test.cpp
index 6953f4900ec..d71dc3c42e8 100644
--- a/src/mongo/db/repl/task_runner_test.cpp
+++ b/src/mongo/db/repl/task_runner_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/repl/task_runner.h"
#include "mongo/db/repl/task_runner_test_fixture.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/unittest/barrier.h"
#include "mongo/util/concurrency/thread_pool.h"
@@ -57,12 +57,12 @@ TEST_F(TaskRunnerTest, GetDiagnosticString) {
}
TEST_F(TaskRunnerTest, CallbackValues) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
bool called = false;
OperationContext* opCtx = nullptr;
Status status = getDetectableErrorStatus();
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
called = true;
opCtx = theTxn;
status = theStatus;
@@ -72,7 +72,7 @@ TEST_F(TaskRunnerTest, CallbackValues) {
getThreadPool().waitForIdle();
ASSERT_FALSE(getTaskRunner().isActive());
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_TRUE(called);
ASSERT(opCtx);
ASSERT_OK(status);
@@ -84,11 +84,11 @@ OpIdVector _testRunTaskTwice(TaskRunnerTest& test,
TaskRunner::NextAction nextAction,
unique_function<void(Task task)> schedule) {
unittest::Barrier barrier(2U);
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
std::vector<OperationContext*> txns;
OpIdVector txnIds;
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
if (txns.size() >= 2U) {
return TaskRunner::NextAction::kInvalid;
}
@@ -111,7 +111,7 @@ OpIdVector _testRunTaskTwice(TaskRunnerTest& test,
test.getThreadPool().waitForIdle();
ASSERT_FALSE(test.getTaskRunner().isActive());
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQUALS(2U, txns.size());
ASSERT(txns[0]);
ASSERT(txns[1]);
@@ -148,14 +148,14 @@ TEST_F(TaskRunnerTest, RunTaskTwiceKeepOperationContext) {
}
TEST_F(TaskRunnerTest, SkipSecondTask) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
int i = 0;
OperationContext* opCtx[2] = {nullptr, nullptr};
Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()};
stdx::condition_variable condition;
bool schedulingDone = false;
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
int j = i++;
if (j >= 2) {
return TaskRunner::NextAction::kCancel;
@@ -174,14 +174,14 @@ TEST_F(TaskRunnerTest, SkipSecondTask) {
ASSERT_TRUE(getTaskRunner().isActive());
getTaskRunner().schedule(task);
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
schedulingDone = true;
condition.notify_all();
}
getThreadPool().waitForIdle();
ASSERT_FALSE(getTaskRunner().isActive());
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQUALS(2, i);
ASSERT(opCtx[0]);
ASSERT_OK(status[0]);
@@ -190,14 +190,14 @@ TEST_F(TaskRunnerTest, SkipSecondTask) {
}
TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
int i = 0;
OperationContext* opCtx[2] = {nullptr, nullptr};
Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()};
stdx::condition_variable condition;
bool schedulingDone = false;
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
int j = i++;
if (j >= 2) {
return TaskRunner::NextAction::kCancel;
@@ -223,14 +223,14 @@ TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
ASSERT_TRUE(getTaskRunner().isActive());
getTaskRunner().schedule(task);
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
schedulingDone = true;
condition.notify_all();
}
getThreadPool().waitForIdle();
ASSERT_FALSE(getTaskRunner().isActive());
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQUALS(2, i);
ASSERT(opCtx[0]);
ASSERT_OK(status[0]);
@@ -239,7 +239,7 @@ TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
}
TEST_F(TaskRunnerTest, Cancel) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable condition;
Status status = getDetectableErrorStatus();
bool taskRunning = false;
@@ -247,7 +247,7 @@ TEST_F(TaskRunnerTest, Cancel) {
// Running this task causes the task runner to wait for another task that
// is never scheduled.
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
status = theStatus;
taskRunning = true;
condition.notify_all();
@@ -261,7 +261,7 @@ TEST_F(TaskRunnerTest, Cancel) {
getTaskRunner().schedule(task);
ASSERT_TRUE(getTaskRunner().isActive());
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
while (!taskRunning) {
condition.wait(lk);
}
@@ -276,13 +276,13 @@ TEST_F(TaskRunnerTest, Cancel) {
// This status will not be OK if canceling the task runner
// before scheduling the task results in the task being canceled.
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_OK(status);
}
TEST_F(TaskRunnerTest, JoinShouldWaitForTasksToComplete) {
unittest::Barrier barrier(2U);
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
Status status1 = getDetectableErrorStatus();
Status status2 = getDetectableErrorStatus();
@@ -290,7 +290,7 @@ TEST_F(TaskRunnerTest, JoinShouldWaitForTasksToComplete) {
// Upon completion, "task1" requests the task runner to retain the operation context. This has
// effect of keeping the task runner active.
auto task1 = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
barrier.countDownAndWait();
status1 = theStatus;
return TaskRunner::NextAction::kKeepOperationContext;
@@ -300,7 +300,7 @@ TEST_F(TaskRunnerTest, JoinShouldWaitForTasksToComplete) {
// Upon completion, "task2" requests the task runner to dispose the operation context. After the
// operation context is destroyed, the task runner will go into an inactive state.
auto task2 = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
status2 = theStatus;
return TaskRunner::NextAction::kDisposeOperationContext;
};
@@ -314,13 +314,13 @@ TEST_F(TaskRunnerTest, JoinShouldWaitForTasksToComplete) {
// This status should be OK because we ensured that the task
// was scheduled and invoked before we called cancel().
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_OK(status1);
ASSERT_OK(status2);
}
TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable condition;
Status status = getDetectableErrorStatus();
bool taskRunning = false;
@@ -328,7 +328,7 @@ TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
// Running this task causes the task runner to wait for another task that
// is never scheduled.
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
status = theStatus;
taskRunning = true;
condition.notify_all();
@@ -338,7 +338,7 @@ TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
getTaskRunner().schedule(task);
ASSERT_TRUE(getTaskRunner().isActive());
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
while (!taskRunning) {
condition.wait(lk);
}
@@ -350,7 +350,7 @@ TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
// This status will not be OK if canceling the task runner
// before scheduling the task results in the task being canceled.
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_OK(status);
}
diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
index 170bfc1587b..f6483cc3197 100644
--- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
@@ -1529,7 +1529,6 @@ TEST_F(TopoCoordTest, ReplSetGetStatus) {
Date_t appliedWallTime = Date_t() + Seconds(oplogProgress.getSecs());
OpTime oplogDurable(Timestamp(1, 1), 19);
Date_t durableWallTime = Date_t() + Seconds(oplogDurable.getSecs());
- ;
OpTime lastCommittedOpTime(Timestamp(5, 1), 20);
Date_t lastCommittedWallTime = Date_t() + Seconds(lastCommittedOpTime.getSecs());
OpTime readConcernMajorityOpTime(Timestamp(4, 1), 20);
diff --git a/src/mongo/db/repl_index_build_state.h b/src/mongo/db/repl_index_build_state.h
index 363eba6eb94..9798b913c78 100644
--- a/src/mongo/db/repl_index_build_state.h
+++ b/src/mongo/db/repl_index_build_state.h
@@ -106,7 +106,7 @@ struct ReplIndexBuildState {
IndexBuildProtocol protocol;
// Protects the state below.
- mutable stdx::mutex mutex;
+ mutable Mutex mutex = MONGO_MAKE_LATCH("ReplIndexBuildState::mutex");
// Secondaries do not set this information, so it is only set on primaries or on
// transition to primary.
diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp
index a3854cb9038..def2a02bac2 100644
--- a/src/mongo/db/s/active_migrations_registry.cpp
+++ b/src/mongo/db/s/active_migrations_registry.cpp
@@ -60,7 +60,7 @@ ActiveMigrationsRegistry& ActiveMigrationsRegistry::get(OperationContext* opCtx)
StatusWith<ScopedDonateChunk> ActiveMigrationsRegistry::registerDonateChunk(
const MoveChunkRequest& args) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeReceiveChunkState) {
return _activeReceiveChunkState->constructErrorStatus();
}
@@ -80,7 +80,7 @@ StatusWith<ScopedDonateChunk> ActiveMigrationsRegistry::registerDonateChunk(
StatusWith<ScopedReceiveChunk> ActiveMigrationsRegistry::registerReceiveChunk(
const NamespaceString& nss, const ChunkRange& chunkRange, const ShardId& fromShardId) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeReceiveChunkState) {
return _activeReceiveChunkState->constructErrorStatus();
}
@@ -95,7 +95,7 @@ StatusWith<ScopedReceiveChunk> ActiveMigrationsRegistry::registerReceiveChunk(
}
boost::optional<NamespaceString> ActiveMigrationsRegistry::getActiveDonateChunkNss() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeMoveChunkState) {
return _activeMoveChunkState->args.getNss();
}
@@ -106,7 +106,7 @@ boost::optional<NamespaceString> ActiveMigrationsRegistry::getActiveDonateChunkN
BSONObj ActiveMigrationsRegistry::getActiveMigrationStatusReport(OperationContext* opCtx) {
boost::optional<NamespaceString> nss;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeMoveChunkState) {
nss = _activeMoveChunkState->args.getNss();
@@ -132,13 +132,13 @@ BSONObj ActiveMigrationsRegistry::getActiveMigrationStatusReport(OperationContex
}
void ActiveMigrationsRegistry::_clearDonateChunk() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_activeMoveChunkState);
_activeMoveChunkState.reset();
}
void ActiveMigrationsRegistry::_clearReceiveChunk() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_activeReceiveChunkState);
_activeReceiveChunkState.reset();
}
diff --git a/src/mongo/db/s/active_migrations_registry.h b/src/mongo/db/s/active_migrations_registry.h
index d2e3f4b2ad0..205060d0ab2 100644
--- a/src/mongo/db/s/active_migrations_registry.h
+++ b/src/mongo/db/s/active_migrations_registry.h
@@ -32,9 +32,9 @@
#include <boost/optional.hpp>
#include "mongo/db/s/migration_session_id.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/move_chunk_request.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/notification.h"
namespace mongo {
@@ -152,7 +152,7 @@ private:
void _clearReceiveChunk();
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ActiveMigrationsRegistry::_mutex");
// If there is an active moveChunk operation, this field contains the original request
boost::optional<ActiveMoveChunkState> _activeMoveChunkState;
diff --git a/src/mongo/db/s/active_move_primaries_registry.cpp b/src/mongo/db/s/active_move_primaries_registry.cpp
index f71f7a63d80..827b7912506 100644
--- a/src/mongo/db/s/active_move_primaries_registry.cpp
+++ b/src/mongo/db/s/active_move_primaries_registry.cpp
@@ -56,7 +56,7 @@ ActiveMovePrimariesRegistry& ActiveMovePrimariesRegistry::get(OperationContext*
StatusWith<ScopedMovePrimary> ActiveMovePrimariesRegistry::registerMovePrimary(
const ShardMovePrimary& requestArgs) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeMovePrimaryState) {
if (_activeMovePrimaryState->requestArgs == requestArgs) {
return {ScopedMovePrimary(nullptr, false, _activeMovePrimaryState->notification)};
@@ -71,7 +71,7 @@ StatusWith<ScopedMovePrimary> ActiveMovePrimariesRegistry::registerMovePrimary(
}
boost::optional<NamespaceString> ActiveMovePrimariesRegistry::getActiveMovePrimaryNss() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeMovePrimaryState) {
return _activeMovePrimaryState->requestArgs.get_movePrimary();
}
@@ -80,7 +80,7 @@ boost::optional<NamespaceString> ActiveMovePrimariesRegistry::getActiveMovePrima
}
void ActiveMovePrimariesRegistry::_clearMovePrimary() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_activeMovePrimaryState);
_activeMovePrimaryState.reset();
}
diff --git a/src/mongo/db/s/active_move_primaries_registry.h b/src/mongo/db/s/active_move_primaries_registry.h
index 38b19a6c94f..94f55657cba 100644
--- a/src/mongo/db/s/active_move_primaries_registry.h
+++ b/src/mongo/db/s/active_move_primaries_registry.h
@@ -99,7 +99,7 @@ private:
void _clearMovePrimary();
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ActiveMovePrimariesRegistry::_mutex");
// If there is an active movePrimary operation going on, this field contains the request that
// initiated it.
diff --git a/src/mongo/db/s/active_shard_collection_registry.cpp b/src/mongo/db/s/active_shard_collection_registry.cpp
index 6a01fdd90ee..d2bda7ece20 100644
--- a/src/mongo/db/s/active_shard_collection_registry.cpp
+++ b/src/mongo/db/s/active_shard_collection_registry.cpp
@@ -91,7 +91,7 @@ ActiveShardCollectionRegistry& ActiveShardCollectionRegistry::get(OperationConte
StatusWith<ScopedShardCollection> ActiveShardCollectionRegistry::registerShardCollection(
const ShardsvrShardCollection& request) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
std::string nss = request.get_shardsvrShardCollection().get().ns();
auto iter = _activeShardCollectionMap.find(nss);
@@ -114,7 +114,7 @@ StatusWith<ScopedShardCollection> ActiveShardCollectionRegistry::registerShardCo
}
void ActiveShardCollectionRegistry::_clearShardCollection(std::string nss) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto iter = _activeShardCollectionMap.find(nss);
invariant(iter != _activeShardCollectionMap.end());
_activeShardCollectionMap.erase(nss);
@@ -122,7 +122,7 @@ void ActiveShardCollectionRegistry::_clearShardCollection(std::string nss) {
void ActiveShardCollectionRegistry::_setUUIDOrError(std::string nss,
StatusWith<boost::optional<UUID>> swUUID) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto iter = _activeShardCollectionMap.find(nss);
invariant(iter != _activeShardCollectionMap.end());
auto activeShardCollectionState = iter->second;
diff --git a/src/mongo/db/s/active_shard_collection_registry.h b/src/mongo/db/s/active_shard_collection_registry.h
index cab710b5ee5..0a8d05d52e2 100644
--- a/src/mongo/db/s/active_shard_collection_registry.h
+++ b/src/mongo/db/s/active_shard_collection_registry.h
@@ -31,9 +31,9 @@
#include <boost/optional.hpp>
+#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/shard_collection_gen.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/notification.h"
namespace mongo {
@@ -107,7 +107,7 @@ private:
void _setUUIDOrError(std::string nss, StatusWith<boost::optional<UUID>> swUUID);
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ActiveShardCollectionRegistry::_mutex");
// Map containing any collections currently being sharded
StringMap<std::shared_ptr<ActiveShardCollectionState>> _activeShardCollectionMap;
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 43f924132aa..bcb2fb1d389 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -161,7 +161,7 @@ Balancer::Balancer(ServiceContext* serviceContext)
Balancer::~Balancer() {
// The balancer thread must have been stopped
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
invariant(_state == kStopped);
}
@@ -179,7 +179,7 @@ Balancer* Balancer::get(OperationContext* operationContext) {
}
void Balancer::initiateBalancer(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
invariant(_state == kStopped);
_state = kRunning;
@@ -191,7 +191,7 @@ void Balancer::initiateBalancer(OperationContext* opCtx) {
}
void Balancer::interruptBalancer() {
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
if (_state != kRunning)
return;
@@ -215,7 +215,7 @@ void Balancer::interruptBalancer() {
void Balancer::waitForBalancerToStop() {
{
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
if (_state == kStopped)
return;
@@ -225,7 +225,7 @@ void Balancer::waitForBalancerToStop() {
_thread.join();
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
_state = kStopped;
_thread = {};
@@ -233,7 +233,7 @@ void Balancer::waitForBalancerToStop() {
}
void Balancer::joinCurrentRound(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> scopedLock(_mutex);
+ stdx::unique_lock<Latch> scopedLock(_mutex);
const auto numRoundsAtStart = _numBalancerRounds;
opCtx->waitForConditionOrInterrupt(_condVar, scopedLock, [&] {
return !_inBalancerRound || _numBalancerRounds != numRoundsAtStart;
@@ -286,7 +286,7 @@ void Balancer::report(OperationContext* opCtx, BSONObjBuilder* builder) {
const auto mode = balancerConfig->getBalancerMode();
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
builder->append("mode", BalancerSettingsType::kBalancerModes[mode]);
builder->append("inBalancerRound", _inBalancerRound);
builder->append("numBalancerRounds", _numBalancerRounds);
@@ -300,7 +300,7 @@ void Balancer::_mainThread() {
log() << "CSRS balancer is starting";
{
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
_threadOperationContext = opCtx.get();
}
@@ -413,7 +413,7 @@ void Balancer::_mainThread() {
}
{
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
invariant(_state == kStopping);
invariant(_migrationManagerInterruptThread.joinable());
}
@@ -422,7 +422,7 @@ void Balancer::_mainThread() {
_migrationManager.drainActiveMigrations();
{
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
_migrationManagerInterruptThread = {};
_threadOperationContext = nullptr;
}
@@ -431,19 +431,19 @@ void Balancer::_mainThread() {
}
bool Balancer::_stopRequested() {
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
return (_state != kRunning);
}
void Balancer::_beginRound(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_inBalancerRound = true;
_condVar.notify_all();
}
void Balancer::_endRound(OperationContext* opCtx, Seconds waitTimeout) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_inBalancerRound = false;
_numBalancerRounds++;
_condVar.notify_all();
@@ -454,7 +454,7 @@ void Balancer::_endRound(OperationContext* opCtx, Seconds waitTimeout) {
}
void Balancer::_sleepFor(OperationContext* opCtx, Seconds waitTimeout) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condVar.wait_for(lock, waitTimeout.toSystemDuration(), [&] { return _state != kRunning; });
}
@@ -663,7 +663,7 @@ void Balancer::_splitOrMarkJumbo(OperationContext* opCtx,
}
void Balancer::notifyPersistedBalancerSettingsChanged() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condVar.notify_all();
}
diff --git a/src/mongo/db/s/balancer/balancer.h b/src/mongo/db/s/balancer/balancer.h
index 2b6738def19..7f1b6cd5b75 100644
--- a/src/mongo/db/s/balancer/balancer.h
+++ b/src/mongo/db/s/balancer/balancer.h
@@ -32,8 +32,8 @@
#include "mongo/db/s/balancer/balancer_chunk_selection_policy.h"
#include "mongo/db/s/balancer/balancer_random.h"
#include "mongo/db/s/balancer/migration_manager.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
namespace mongo {
@@ -208,7 +208,7 @@ private:
const BSONObj& minKey);
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("Balancer::_mutex");
// Indicates the current state of the balancer
State _state{kStopped};
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 0a988cf1b13..4af124368e4 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -210,7 +210,7 @@ Status MigrationManager::executeManualMigration(
void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kStopped);
invariant(_migrationRecoveryMap.empty());
_state = State::kRecovering;
@@ -285,7 +285,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state == State::kStopping) {
_migrationRecoveryMap.clear();
return;
@@ -367,7 +367,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
scopedGuard.dismiss();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state == State::kRecovering) {
_state = State::kEnabled;
_condVar.notify_all();
@@ -383,7 +383,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
void MigrationManager::interruptAndDisableMigrations() {
auto executor = Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor();
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kEnabled || _state == State::kRecovering);
_state = State::kStopping;
@@ -402,7 +402,7 @@ void MigrationManager::interruptAndDisableMigrations() {
}
void MigrationManager::drainActiveMigrations() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_state == State::kStopped)
return;
@@ -421,7 +421,7 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
// Ensure we are not stopped in order to avoid doing the extra work
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kEnabled && _state != State::kRecovering) {
return std::make_shared<Notification<RemoteCommandResponse>>(
Status(ErrorCodes::BalancerInterrupted,
@@ -457,7 +457,7 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
secondaryThrottle,
waitForDelete);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kEnabled && _state != State::kRecovering) {
return std::make_shared<Notification<RemoteCommandResponse>>(
@@ -522,7 +522,7 @@ void MigrationManager::_schedule(WithLock lock,
ThreadClient tc(getThreadName(), service);
auto opCtx = cc().makeOperationContext();
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_complete(lock, opCtx.get(), itMigration, args.response);
});
@@ -573,12 +573,12 @@ void MigrationManager::_checkDrained(WithLock) {
}
void MigrationManager::_waitForRecovery() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condVar.wait(lock, [this] { return _state != State::kRecovering; });
}
void MigrationManager::_abandonActiveMigrationsAndEnableManager(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_state == State::kStopping) {
// The balancer was interrupted. Let the next balancer recover the state.
return;
@@ -605,7 +605,7 @@ Status MigrationManager::_processRemoteCommandResponse(
const RemoteCommandResponse& remoteCommandResponse,
ScopedMigrationRequest* scopedMigrationRequest) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
Status commandStatus(ErrorCodes::InternalError, "Uninitialized value.");
// Check for local errors sending the remote command caused by stepdown.
diff --git a/src/mongo/db/s/balancer/migration_manager.h b/src/mongo/db/s/balancer/migration_manager.h
index 4f6c1288571..b321b361e79 100644
--- a/src/mongo/db/s/balancer/migration_manager.h
+++ b/src/mongo/db/s/balancer/migration_manager.h
@@ -38,10 +38,10 @@
#include "mongo/db/s/balancer/balancer_policy.h"
#include "mongo/db/s/balancer/type_migration.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -260,7 +260,7 @@ private:
stdx::unordered_map<NamespaceString, std::list<MigrationType>> _migrationRecoveryMap;
// Protects the class state below.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MigrationManager::_mutex");
// Always start the migration manager in a stopped state.
State _state{State::kStopped};
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index 049ab0ae261..c7dd1e22250 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -234,12 +234,12 @@ ChunkSplitter& ChunkSplitter::get(ServiceContext* serviceContext) {
}
void ChunkSplitter::onShardingInitialization(bool isPrimary) {
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
_isPrimary = isPrimary;
}
void ChunkSplitter::onStepUp() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (_isPrimary) {
return;
}
@@ -249,7 +249,7 @@ void ChunkSplitter::onStepUp() {
}
void ChunkSplitter::onStepDown() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_isPrimary) {
return;
}
diff --git a/src/mongo/db/s/chunk_splitter.h b/src/mongo/db/s/chunk_splitter.h
index ef774dc017c..a05683fc6e7 100644
--- a/src/mongo/db/s/chunk_splitter.h
+++ b/src/mongo/db/s/chunk_splitter.h
@@ -107,7 +107,7 @@ private:
long dataWritten);
// Protects the state below.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ChunkSplitter::_mutex");
// The ChunkSplitter is only active on a primary node.
bool _isPrimary{false};
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index d5affc26cc0..92e8b65a4fa 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -134,7 +134,7 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
bool writeOpLog = false;
{
- stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
+ stdx::lock_guard<Latch> scopedLock(csr->_metadataManager->_managerLock);
if (self->isEmpty()) {
LOG(1) << "No further range deletions scheduled on " << nss.ns();
return boost::none;
@@ -181,7 +181,7 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
<< "ns" << nss.ns() << "epoch" << epoch << "min"
<< range->getMin() << "max" << range->getMax()));
} catch (const DBException& e) {
- stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
+ stdx::lock_guard<Latch> scopedLock(csr->_metadataManager->_managerLock);
csr->_metadataManager->_clearAllCleanups(
scopedLock,
e.toStatus("cannot push startRangeDeletion record to Op Log,"
@@ -254,7 +254,7 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
auto* const self = forTestOnly ? forTestOnly : &metadataManager->_rangesToClean;
- stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
+ stdx::lock_guard<Latch> scopedLock(csr->_metadataManager->_managerLock);
if (!replicationStatus.isOK()) {
LOG(0) << "Error when waiting for write concern after removing " << nss << " range "
@@ -304,7 +304,7 @@ bool CollectionRangeDeleter::_checkCollectionMetadataStillValid(
if (!scopedCollectionMetadata) {
LOG(0) << "Abandoning any range deletions because the metadata for " << nss.ns()
<< " was reset";
- stdx::lock_guard<stdx::mutex> lk(metadataManager->_managerLock);
+ stdx::lock_guard<Latch> lk(metadataManager->_managerLock);
metadataManager->_clearAllCleanups(lk);
return false;
}
@@ -319,7 +319,7 @@ bool CollectionRangeDeleter::_checkCollectionMetadataStillValid(
<< nss.ns();
}
- stdx::lock_guard<stdx::mutex> lk(metadataManager->_managerLock);
+ stdx::lock_guard<Latch> lk(metadataManager->_managerLock);
metadataManager->_clearAllCleanups(lk);
return false;
}
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index feb519090e3..540b87c49ef 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -54,7 +54,7 @@ public:
: _factory(std::move(factory)) {}
CollectionShardingState& getOrCreate(const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto it = _collections.find(nss.ns());
if (it == _collections.end()) {
@@ -70,7 +70,7 @@ public:
BSONObjBuilder versionB(builder->subobjStart("versions"));
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
for (auto& coll : _collections) {
const auto optMetadata = coll.second->getCurrentMetadataIfKnown();
@@ -89,7 +89,7 @@ private:
std::unique_ptr<CollectionShardingStateFactory> _factory;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CollectionShardingStateMap::_mutex");
CollectionsMap _collections;
};
diff --git a/src/mongo/db/s/collection_sharding_state_factory_shard.cpp b/src/mongo/db/s/collection_sharding_state_factory_shard.cpp
index a7f0f5f8dc5..336ca1f1761 100644
--- a/src/mongo/db/s/collection_sharding_state_factory_shard.cpp
+++ b/src/mongo/db/s/collection_sharding_state_factory_shard.cpp
@@ -58,7 +58,7 @@ public:
private:
executor::TaskExecutor* _getExecutor() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_taskExecutor) {
const std::string kExecName("CollectionRangeDeleter-TaskExecutor");
@@ -75,7 +75,7 @@ private:
}
// Serializes the instantiation of the task executor
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CollectionShardingStateFactoryShard::_mutex");
std::unique_ptr<executor::TaskExecutor> _taskExecutor{nullptr};
};
diff --git a/src/mongo/db/s/config/namespace_serializer.cpp b/src/mongo/db/s/config/namespace_serializer.cpp
index c132fe177b2..6c69eaa668d 100644
--- a/src/mongo/db/s/config/namespace_serializer.cpp
+++ b/src/mongo/db/s/config/namespace_serializer.cpp
@@ -49,7 +49,7 @@ NamespaceSerializer::ScopedLock::ScopedLock(StringData ns, NamespaceSerializer&
: _ns(ns.toString()), _nsSerializer(nsSerializer) {}
NamespaceSerializer::ScopedLock::~ScopedLock() {
- stdx::unique_lock<stdx::mutex> lock(_nsSerializer._mutex);
+ stdx::unique_lock<Latch> lock(_nsSerializer._mutex);
auto iter = _nsSerializer._inProgressMap.find(_ns);
iter->second->numWaiting--;
@@ -62,7 +62,7 @@ NamespaceSerializer::ScopedLock::~ScopedLock() {
}
NamespaceSerializer::ScopedLock NamespaceSerializer::lock(OperationContext* opCtx, StringData nss) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto iter = _inProgressMap.find(nss);
if (iter == _inProgressMap.end()) {
diff --git a/src/mongo/db/s/config/namespace_serializer.h b/src/mongo/db/s/config/namespace_serializer.h
index 7b7832ebbe7..aa50552e3f8 100644
--- a/src/mongo/db/s/config/namespace_serializer.h
+++ b/src/mongo/db/s/config/namespace_serializer.h
@@ -36,8 +36,8 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -71,7 +71,7 @@ private:
bool isInProgress = true;
};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("NamespaceSerializer::_mutex");
StringMap<std::shared_ptr<NSLock>> _inProgressMap;
};
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 424db73a9d0..557529099ff 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -100,7 +100,7 @@ ShardingCatalogManager::~ShardingCatalogManager() {
}
void ShardingCatalogManager::startup() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_started) {
return;
}
@@ -114,7 +114,7 @@ void ShardingCatalogManager::startup() {
void ShardingCatalogManager::shutDown() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
@@ -126,7 +126,7 @@ void ShardingCatalogManager::shutDown() {
Status ShardingCatalogManager::initializeConfigDatabaseIfNeeded(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_configInitialized) {
return {ErrorCodes::AlreadyInitialized,
"Config database was previously loaded into memory"};
@@ -146,14 +146,14 @@ Status ShardingCatalogManager::initializeConfigDatabaseIfNeeded(OperationContext
return status;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_configInitialized = true;
return Status::OK();
}
void ShardingCatalogManager::discardCachedConfigDatabaseInitializationState() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_configInitialized = false;
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index ef6aa80be6a..26a43e966b8 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -34,13 +34,13 @@
#include "mongo/db/repl/optime_with.h"
#include "mongo/db/s/config/namespace_serializer.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -506,7 +506,7 @@ private:
// (S) Self-synchronizing; access in any way from any context.
//
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardingCatalogManager::_mutex");
// True if shutDown() has been called. False, otherwise.
bool _inShutdown{false}; // (M)
diff --git a/src/mongo/db/s/implicit_create_collection.cpp b/src/mongo/db/s/implicit_create_collection.cpp
index 7ea8c1e1345..a0a3d6068f9 100644
--- a/src/mongo/db/s/implicit_create_collection.cpp
+++ b/src/mongo/db/s/implicit_create_collection.cpp
@@ -46,8 +46,8 @@
#include "mongo/s/grid.h"
#include "mongo/s/request_types/create_collection_gen.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
@@ -73,7 +73,7 @@ public:
invariant(!opCtx->lockState()->isLocked());
{
- stdx::unique_lock<stdx::mutex> lg(_mutex);
+ stdx::unique_lock<Latch> lg(_mutex);
while (_isInProgress) {
auto status = opCtx->waitForConditionOrInterruptNoAssert(_cvIsInProgress, lg);
if (!status.isOK()) {
@@ -85,7 +85,7 @@ public:
}
ON_BLOCK_EXIT([&] {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_isInProgress = false;
_cvIsInProgress.notify_one();
});
@@ -128,7 +128,7 @@ public:
private:
const NamespaceString _ns;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CreateCollectionSerializer::_mutex");
stdx::condition_variable _cvIsInProgress;
bool _isInProgress = false;
};
@@ -136,7 +136,7 @@ private:
class CreateCollectionSerializerMap {
public:
std::shared_ptr<CreateCollectionSerializer> getForNs(const NamespaceString& ns) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto iter = _inProgressMap.find(ns.ns());
if (iter == _inProgressMap.end()) {
std::tie(iter, std::ignore) =
@@ -147,12 +147,12 @@ public:
}
void cleanupNs(const NamespaceString& ns) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_inProgressMap.erase(ns.ns());
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CreateCollectionSerializerMap::_mutex");
std::map<std::string, std::shared_ptr<CreateCollectionSerializer>> _inProgressMap;
};
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 4926fe86508..8364929ad36 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -186,7 +186,7 @@ public:
}
~RangePreserver() {
- stdx::lock_guard<stdx::mutex> managerLock(_metadataManager->_managerLock);
+ stdx::lock_guard<Latch> managerLock(_metadataManager->_managerLock);
invariant(_metadataTracker->usageCounter != 0);
if (--_metadataTracker->usageCounter == 0) {
@@ -245,7 +245,7 @@ void MetadataManager::_clearAllCleanups(WithLock, Status status) {
boost::optional<ScopedCollectionMetadata> MetadataManager::getActiveMetadata(
std::shared_ptr<MetadataManager> self, const boost::optional<LogicalTime>& atClusterTime) {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
if (_metadata.empty()) {
return boost::none;
@@ -282,7 +282,7 @@ boost::optional<ScopedCollectionMetadata> MetadataManager::getActiveMetadata(
}
size_t MetadataManager::numberOfMetadataSnapshots() const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
if (_metadata.empty())
return 0;
@@ -290,7 +290,7 @@ size_t MetadataManager::numberOfMetadataSnapshots() const {
}
int MetadataManager::numberOfEmptyMetadataSnapshots() const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
int emptyMetadataSnapshots = 0;
for (const auto& collMetadataTracker : _metadata) {
@@ -302,7 +302,7 @@ int MetadataManager::numberOfEmptyMetadataSnapshots() const {
}
void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
// Collection is becoming sharded
if (_metadata.empty()) {
@@ -365,7 +365,7 @@ void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
}
void MetadataManager::clearFilteringMetadata() {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
_receivingChunks.clear();
_clearAllCleanups(lg);
_metadata.clear();
@@ -407,7 +407,7 @@ void MetadataManager::_retireExpiredMetadata(WithLock lock) {
}
void MetadataManager::toBSONPending(BSONArrayBuilder& bb) const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
for (auto it = _receivingChunks.begin(); it != _receivingChunks.end(); ++it) {
BSONArrayBuilder pendingBB(bb.subarrayStart());
@@ -418,7 +418,7 @@ void MetadataManager::toBSONPending(BSONArrayBuilder& bb) const {
}
void MetadataManager::append(BSONObjBuilder* builder) const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
_rangesToClean.append(builder);
@@ -463,7 +463,7 @@ void MetadataManager::_pushListToClean(WithLock, std::list<Deletion> ranges) {
}
auto MetadataManager::beginReceive(ChunkRange const& range) -> CleanupNotification {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
if (_overlapsInUseChunk(lg, range)) {
@@ -480,7 +480,7 @@ auto MetadataManager::beginReceive(ChunkRange const& range) -> CleanupNotificati
}
void MetadataManager::forgetReceive(ChunkRange const& range) {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
// This is potentially a partially received chunk, which needs to be cleaned up. We know none
@@ -499,7 +499,7 @@ void MetadataManager::forgetReceive(ChunkRange const& range) {
auto MetadataManager::cleanUpRange(ChunkRange const& range, Date_t whenToDelete)
-> CleanupNotification {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
auto* const activeMetadata = _metadata.back().get();
@@ -536,7 +536,7 @@ auto MetadataManager::cleanUpRange(ChunkRange const& range, Date_t whenToDelete)
}
size_t MetadataManager::numberOfRangesToCleanStillInUse() const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
size_t count = 0;
for (auto& tracker : _metadata) {
count += tracker->orphans.size();
@@ -545,13 +545,13 @@ size_t MetadataManager::numberOfRangesToCleanStillInUse() const {
}
size_t MetadataManager::numberOfRangesToClean() const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
return _rangesToClean.size();
}
auto MetadataManager::trackOrphanedDataCleanup(ChunkRange const& range) const
-> boost::optional<CleanupNotification> {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
auto overlaps = _overlapsInUseCleanups(lg, range);
if (overlaps) {
return overlaps;
@@ -604,7 +604,7 @@ auto MetadataManager::_overlapsInUseCleanups(WithLock, ChunkRange const& range)
}
boost::optional<ChunkRange> MetadataManager::getNextOrphanRange(BSONObj const& from) const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
return _metadata.back()->metadata->getNextOrphanRange(_receivingChunks, from);
}
diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h
index 26cb452ac28..ecea706ce41 100644
--- a/src/mongo/db/s/metadata_manager.h
+++ b/src/mongo/db/s/metadata_manager.h
@@ -240,7 +240,7 @@ private:
executor::TaskExecutor* const _executor;
// Mutex to protect the state below
- mutable stdx::mutex _managerLock;
+ mutable Mutex _managerLock = MONGO_MAKE_LATCH("MetadataManager::_managerLock");
// Contains a list of collection metadata for the same collection epoch, ordered in
// chronological order based on the refreshes that occurred. The entry at _metadata.back() is
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 3d5ee943804..1d9eca8dbb4 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -292,7 +292,7 @@ Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* opCtx) {
// between cancellations for different migration sessions. It is thus possible that a second
// migration from different donor, but the same recipient would certainly abort an already
// running migration.
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_state = kCloning;
return Status::OK();
@@ -321,7 +321,7 @@ Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
}
iteration++;
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
const std::size_t cloneLocsRemaining = _cloneLocs.size();
@@ -551,14 +551,14 @@ void MigrationChunkClonerSourceLegacy::_addToTransferModsQueue(
const repl::OpTime& prePostImageOpTime) {
switch (op) {
case 'd': {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_deleted.push_back(idObj);
_memoryUsed += idObj.firstElement().size() + 5;
} break;
case 'i':
case 'u': {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_reload.push_back(idObj);
_memoryUsed += idObj.firstElement().size() + 5;
} break;
@@ -574,7 +574,7 @@ void MigrationChunkClonerSourceLegacy::_addToTransferModsQueue(
}
bool MigrationChunkClonerSourceLegacy::_addedOperationToOutstandingOperationTrackRequests() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_acceptingNewOperationTrackRequests) {
return false;
}
@@ -584,7 +584,7 @@ bool MigrationChunkClonerSourceLegacy::_addedOperationToOutstandingOperationTrac
}
void MigrationChunkClonerSourceLegacy::_drainAllOutstandingOperationTrackRequests(
- stdx::unique_lock<stdx::mutex>& lk) {
+ stdx::unique_lock<Latch>& lk) {
invariant(_state == kDone);
_acceptingNewOperationTrackRequests = false;
_allOutstandingOperationTrackRequestsDrained.wait(
@@ -598,7 +598,7 @@ void MigrationChunkClonerSourceLegacy::_incrementOutstandingOperationTrackReques
}
void MigrationChunkClonerSourceLegacy::_decrementOutstandingOperationTrackRequests() {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
--_outstandingOperationTrackRequests;
if (_outstandingOperationTrackRequests == 0) {
_allOutstandingOperationTrackRequestsDrained.notify_all();
@@ -606,7 +606,7 @@ void MigrationChunkClonerSourceLegacy::_decrementOutstandingOperationTrackReques
}
uint64_t MigrationChunkClonerSourceLegacy::getCloneBatchBufferAllocationSize() {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
return std::min(static_cast<uint64_t>(BSONObjMaxUserSize),
_averageObjectSizeForCloneLocs * _cloneLocs.size());
@@ -621,7 +621,7 @@ Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* opCtx,
internalQueryExecYieldIterations.load(),
Milliseconds(internalQueryExecYieldPeriodMS.load()));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto iter = _cloneLocs.begin();
for (; iter != _cloneLocs.end(); ++iter) {
@@ -666,7 +666,7 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
{
// All clone data must have been drained before starting to fetch the incremental changes.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_cloneLocs.empty());
// The "snapshot" for delete and update list must be taken under a single lock. This is to
@@ -685,7 +685,7 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
builder->append("size", totalDocSize);
// Put back remaining ids we didn't consume
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_deleted.splice(_deleted.cbegin(), deleteList);
_reload.splice(_reload.cbegin(), updateList);
@@ -693,7 +693,7 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
}
void MigrationChunkClonerSourceLegacy::_cleanup(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_state = kDone;
_drainAllOutstandingOperationTrackRequests(lk);
@@ -800,7 +800,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
}
if (!isLargeChunk) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_cloneLocs.insert(recordId);
}
@@ -829,7 +829,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
<< _args.getMaxKey()};
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_averageObjectSizeForCloneLocs = collectionAverageObjectSize + 12;
return Status::OK();
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index 2818b8f538b..e5263466c11 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -39,11 +39,11 @@
#include "mongo/db/s/migration_chunk_cloner_source.h"
#include "mongo/db/s/migration_session_id.h"
#include "mongo/db/s/session_catalog_migration_source.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/move_chunk_request.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
@@ -285,7 +285,7 @@ private:
* function. Should only be used in the cleanup for this class. Should use a lock wrapped
* around this class's mutex.
*/
- void _drainAllOutstandingOperationTrackRequests(stdx::unique_lock<stdx::mutex>& lk);
+ void _drainAllOutstandingOperationTrackRequests(stdx::unique_lock<Latch>& lk);
/**
* Appends to the builder the list of _id of documents that were deleted during migration.
@@ -325,7 +325,7 @@ private:
std::unique_ptr<SessionCatalogMigrationSource> _sessionCatalogSource;
// Protects the entries below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MigrationChunkClonerSourceLegacy::_mutex");
// The current state of the cloner
State _state{kNew};
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index b526b23816b..44f7ca273bc 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -223,12 +223,12 @@ MigrationDestinationManager* MigrationDestinationManager::get(OperationContext*
}
MigrationDestinationManager::State MigrationDestinationManager::getState() const {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
return _state;
}
void MigrationDestinationManager::setState(State newState) {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_state = newState;
_stateChangedCV.notify_all();
}
@@ -236,7 +236,7 @@ void MigrationDestinationManager::setState(State newState) {
void MigrationDestinationManager::_setStateFail(StringData msg) {
log() << msg;
{
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_errmsg = msg.toString();
_state = FAIL;
_stateChangedCV.notify_all();
@@ -248,7 +248,7 @@ void MigrationDestinationManager::_setStateFail(StringData msg) {
void MigrationDestinationManager::_setStateFailWarn(StringData msg) {
warning() << msg;
{
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_errmsg = msg.toString();
_state = FAIL;
_stateChangedCV.notify_all();
@@ -258,7 +258,7 @@ void MigrationDestinationManager::_setStateFailWarn(StringData msg) {
}
bool MigrationDestinationManager::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isActive(lk);
}
@@ -270,7 +270,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
OperationContext* opCtx,
bool waitForSteadyOrDone) {
if (waitForSteadyOrDone) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
try {
opCtx->waitForConditionOrInterruptFor(_stateChangedCV, lock, Seconds(1), [&]() -> bool {
return _state != READY && _state != CLONE && _state != CATCHUP;
@@ -281,7 +281,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
}
b.append("waited", true);
}
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
b.appendBool("active", _sessionId.is_initialized());
@@ -312,7 +312,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
}
BSONObj MigrationDestinationManager::getMigrationStatusReport() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_isActive(lk)) {
return migrationutil::makeMigrationStatusDocument(
_nss, _fromShard, _toShard, false, _min, _max);
@@ -327,7 +327,7 @@ Status MigrationDestinationManager::start(OperationContext* opCtx,
const StartChunkCloneRequest cloneRequest,
const OID& epoch,
const WriteConcernOptions& writeConcern) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_sessionId);
invariant(!_scopedReceiveChunk);
@@ -435,7 +435,7 @@ repl::OpTime MigrationDestinationManager::cloneDocumentsFromDonor(
}
Status MigrationDestinationManager::abort(const MigrationSessionId& sessionId) {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
if (!_sessionId) {
return Status::OK();
@@ -456,7 +456,7 @@ Status MigrationDestinationManager::abort(const MigrationSessionId& sessionId) {
}
void MigrationDestinationManager::abortWithoutSessionIdCheck() {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_state = ABORT;
_stateChangedCV.notify_all();
_errmsg = "aborted without session id check";
@@ -464,7 +464,7 @@ void MigrationDestinationManager::abortWithoutSessionIdCheck() {
Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessionId) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_state != STEADY) {
return {ErrorCodes::CommandFailed,
@@ -710,7 +710,7 @@ void MigrationDestinationManager::_migrateThread() {
_forgetPending(opCtx.get(), ChunkRange(_min, _max));
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_sessionId.reset();
_scopedReceiveChunk.reset();
_isActiveCV.notify_all();
@@ -822,7 +822,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
}
{
- stdx::lock_guard<stdx::mutex> statsLock(_mutex);
+ stdx::lock_guard<Latch> statsLock(_mutex);
_numCloned += batchNumCloned;
ShardingStatistics::get(opCtx).countDocsClonedOnRecipient.addAndFetch(
batchNumCloned);
diff --git a/src/mongo/db/s/migration_destination_manager.h b/src/mongo/db/s/migration_destination_manager.h
index afdc5c2f125..1833a024dcf 100644
--- a/src/mongo/db/s/migration_destination_manager.h
+++ b/src/mongo/db/s/migration_destination_manager.h
@@ -41,9 +41,9 @@
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/migration_session_id.h"
#include "mongo/db/s/session_catalog_migration_destination.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/shard_id.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/timer.h"
@@ -178,7 +178,7 @@ private:
bool _isActive(WithLock) const;
// Mutex to guard all fields
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MigrationDestinationManager::_mutex");
// Migration session ID uniquely identifies the migration and indicates whether the prepare
// method has been called.
diff --git a/src/mongo/db/s/namespace_metadata_change_notifications.cpp b/src/mongo/db/s/namespace_metadata_change_notifications.cpp
index 6a288834ce7..ecf63039105 100644
--- a/src/mongo/db/s/namespace_metadata_change_notifications.cpp
+++ b/src/mongo/db/s/namespace_metadata_change_notifications.cpp
@@ -36,7 +36,7 @@ namespace mongo {
NamespaceMetadataChangeNotifications::NamespaceMetadataChangeNotifications() = default;
NamespaceMetadataChangeNotifications::~NamespaceMetadataChangeNotifications() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_notificationsList.empty());
}
@@ -44,7 +44,7 @@ NamespaceMetadataChangeNotifications::ScopedNotification
NamespaceMetadataChangeNotifications::createNotification(const NamespaceString& nss) {
auto notifToken = std::make_shared<NotificationToken>(nss);
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto& notifList = _notificationsList[nss];
notifToken->itToErase = notifList.insert(notifList.end(), notifToken);
@@ -53,7 +53,7 @@ NamespaceMetadataChangeNotifications::createNotification(const NamespaceString&
}
void NamespaceMetadataChangeNotifications::notifyChange(const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto mapIt = _notificationsList.find(nss);
if (mapIt == _notificationsList.end()) {
@@ -70,7 +70,7 @@ void NamespaceMetadataChangeNotifications::notifyChange(const NamespaceString& n
void NamespaceMetadataChangeNotifications::_unregisterNotificationToken(
std::shared_ptr<NotificationToken> token) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!token->itToErase) {
return;
diff --git a/src/mongo/db/s/namespace_metadata_change_notifications.h b/src/mongo/db/s/namespace_metadata_change_notifications.h
index ba7c51e86a0..12df62bfb95 100644
--- a/src/mongo/db/s/namespace_metadata_change_notifications.h
+++ b/src/mongo/db/s/namespace_metadata_change_notifications.h
@@ -33,7 +33,7 @@
#include <map>
#include "mongo/db/namespace_string.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/notification.h"
namespace mongo {
@@ -114,7 +114,7 @@ private:
void _unregisterNotificationToken(std::shared_ptr<NotificationToken> token);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("NamespaceMetadataChangeNotifications::_mutex");
std::map<NamespaceString, NotificationsList> _notificationsList;
};
diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp
index 9c5576c21be..7c4ab7d50a9 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination.cpp
@@ -334,7 +334,7 @@ SessionCatalogMigrationDestination::~SessionCatalogMigrationDestination() {
void SessionCatalogMigrationDestination::start(ServiceContext* service) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_state == State::NotStarted);
_state = State::Migrating;
_isStateChanged.notify_all();
@@ -358,7 +358,7 @@ void SessionCatalogMigrationDestination::start(ServiceContext* service) {
}
void SessionCatalogMigrationDestination::finish() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state != State::ErrorOccurred) {
_state = State::Committing;
_isStateChanged.notify_all();
@@ -393,7 +393,7 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service
while (true) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state == State::ErrorOccurred) {
return;
}
@@ -411,7 +411,7 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service
if (oplogArray.isEmpty()) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state == State::Committing) {
// The migration is considered done only when it gets an empty result from
// the source shard while this is in state committing. This is to make sure
@@ -432,7 +432,7 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service
// We depleted the buffer at least once, transition to ready for commit.
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Note: only transition to "ready to commit" if state is not error/force stop.
if (_state == State::Migrating) {
_state = State::ReadyToCommit;
@@ -473,19 +473,19 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service
waitForWriteConcern(uniqueOpCtx.get(), lastResult.oplogTime, kMajorityWC, &unusedWCResult));
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_state = State::Done;
_isStateChanged.notify_all();
}
}
std::string SessionCatalogMigrationDestination::getErrMsg() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _errMsg;
}
void SessionCatalogMigrationDestination::_errorOccurred(StringData errMsg) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_state = State::ErrorOccurred;
_errMsg = errMsg.toString();
@@ -493,7 +493,7 @@ void SessionCatalogMigrationDestination::_errorOccurred(StringData errMsg) {
}
SessionCatalogMigrationDestination::State SessionCatalogMigrationDestination::getState() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
diff --git a/src/mongo/db/s/session_catalog_migration_destination.h b/src/mongo/db/s/session_catalog_migration_destination.h
index 89c43be2e62..b5a85fd6998 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.h
+++ b/src/mongo/db/s/session_catalog_migration_destination.h
@@ -36,9 +36,9 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/s/migration_session_id.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/shard_id.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -116,7 +116,7 @@ private:
stdx::thread _thread;
// Protects _state and _errMsg.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SessionCatalogMigrationDestination::_mutex");
stdx::condition_variable _isStateChanged;
State _state = State::NotStarted;
std::string _errMsg; // valid only if _state == ErrorOccurred.
diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp
index 86f8a8a6cf6..30bf462209c 100644
--- a/src/mongo/db/s/session_catalog_migration_source.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source.cpp
@@ -180,12 +180,12 @@ bool SessionCatalogMigrationSource::hasMoreOplog() {
return true;
}
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
return _hasNewWrites(lk);
}
void SessionCatalogMigrationSource::onCommitCloneStarted() {
- stdx::lock_guard<stdx::mutex> _lk(_newOplogMutex);
+ stdx::lock_guard<Latch> _lk(_newOplogMutex);
_state = State::kCommitStarted;
if (_newOplogNotification) {
@@ -195,7 +195,7 @@ void SessionCatalogMigrationSource::onCommitCloneStarted() {
}
void SessionCatalogMigrationSource::onCloneCleanup() {
- stdx::lock_guard<stdx::mutex> _lk(_newOplogMutex);
+ stdx::lock_guard<Latch> _lk(_newOplogMutex);
_state = State::kCleanup;
if (_newOplogNotification) {
@@ -206,14 +206,14 @@ void SessionCatalogMigrationSource::onCloneCleanup() {
SessionCatalogMigrationSource::OplogResult SessionCatalogMigrationSource::getLastFetchedOplog() {
{
- stdx::lock_guard<stdx::mutex> _lk(_sessionCloneMutex);
+ stdx::lock_guard<Latch> _lk(_sessionCloneMutex);
if (_lastFetchedOplog) {
return OplogResult(_lastFetchedOplog, false);
}
}
{
- stdx::lock_guard<stdx::mutex> _lk(_newOplogMutex);
+ stdx::lock_guard<Latch> _lk(_newOplogMutex);
return OplogResult(_lastFetchedNewWriteOplog, true);
}
}
@@ -229,7 +229,7 @@ bool SessionCatalogMigrationSource::fetchNextOplog(OperationContext* opCtx) {
std::shared_ptr<Notification<bool>> SessionCatalogMigrationSource::getNotificationForNewOplog() {
invariant(!_hasMoreOplogFromSessionCatalog());
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
if (_newOplogNotification) {
return _newOplogNotification;
@@ -292,13 +292,13 @@ bool SessionCatalogMigrationSource::_handleWriteHistory(WithLock, OperationConte
}
bool SessionCatalogMigrationSource::_hasMoreOplogFromSessionCatalog() {
- stdx::lock_guard<stdx::mutex> _lk(_sessionCloneMutex);
+ stdx::lock_guard<Latch> _lk(_sessionCloneMutex);
return _lastFetchedOplog || !_lastFetchedOplogBuffer.empty() ||
!_sessionOplogIterators.empty() || _currentOplogIterator;
}
bool SessionCatalogMigrationSource::_fetchNextOplogFromSessionCatalog(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_sessionCloneMutex);
+ stdx::unique_lock<Latch> lk(_sessionCloneMutex);
if (!_lastFetchedOplogBuffer.empty()) {
_lastFetchedOplog = _lastFetchedOplogBuffer.back();
@@ -333,7 +333,7 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op
EntryAtOpTimeType entryAtOpTimeType;
{
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
if (_newWriteOpTimeList.empty()) {
_lastFetchedNewWriteOplog.reset();
@@ -368,7 +368,7 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op
}
{
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
_lastFetchedNewWriteOplog = newWriteOplogEntry;
_newWriteOpTimeList.pop_front();
}
@@ -378,7 +378,7 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op
void SessionCatalogMigrationSource::notifyNewWriteOpTime(repl::OpTime opTime,
EntryAtOpTimeType entryAtOpTimeType) {
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
_newWriteOpTimeList.emplace_back(opTime, entryAtOpTimeType);
if (_newOplogNotification) {
diff --git a/src/mongo/db/s/session_catalog_migration_source.h b/src/mongo/db/s/session_catalog_migration_source.h
index 06093d4c8e8..df0d9d80259 100644
--- a/src/mongo/db/s/session_catalog_migration_source.h
+++ b/src/mongo/db/s/session_catalog_migration_source.h
@@ -37,9 +37,9 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/session_txn_record_gen.h"
#include "mongo/db/transaction_history_iterator.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -231,7 +231,8 @@ private:
// Protects _sessionCatalogCursor, _sessionOplogIterators, _currentOplogIterator,
// _lastFetchedOplogBuffer, _lastFetchedOplog
- stdx::mutex _sessionCloneMutex;
+ Mutex _sessionCloneMutex =
+ MONGO_MAKE_LATCH("SessionCatalogMigrationSource::_sessionCloneMutex");
// List of remaining session records that needs to be cloned.
std::vector<std::unique_ptr<SessionOplogIterator>> _sessionOplogIterators;
@@ -248,7 +249,7 @@ private:
boost::optional<repl::OplogEntry> _lastFetchedOplog;
// Protects _newWriteTsList, _lastFetchedNewWriteOplog, _state, _newOplogNotification
- stdx::mutex _newOplogMutex;
+ Mutex _newOplogMutex = MONGO_MAKE_LATCH("SessionCatalogMigrationSource::_newOplogMutex");
// Stores oplog opTime of new writes that are coming in.
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index dec279a747f..2cbbb83480c 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -341,7 +341,7 @@ ShardServerCatalogCacheLoader::~ShardServerCatalogCacheLoader() {
// Prevent further scheduling, then interrupt ongoing tasks.
_threadPool.shutdown();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_contexts.interrupt(ErrorCodes::InterruptedAtShutdown);
++_term;
}
@@ -355,7 +355,7 @@ void ShardServerCatalogCacheLoader::notifyOfCollectionVersionUpdate(const Namesp
}
void ShardServerCatalogCacheLoader::initializeReplicaSetRole(bool isPrimary) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_role == ReplicaSetRole::None);
if (isPrimary) {
@@ -366,7 +366,7 @@ void ShardServerCatalogCacheLoader::initializeReplicaSetRole(bool isPrimary) {
}
void ShardServerCatalogCacheLoader::onStepDown() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_role != ReplicaSetRole::None);
_contexts.interrupt(ErrorCodes::PrimarySteppedDown);
++_term;
@@ -374,7 +374,7 @@ void ShardServerCatalogCacheLoader::onStepDown() {
}
void ShardServerCatalogCacheLoader::onStepUp() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_role != ReplicaSetRole::None);
++_term;
_role = ReplicaSetRole::Primary;
@@ -387,7 +387,7 @@ std::shared_ptr<Notification<void>> ShardServerCatalogCacheLoader::getChunksSinc
bool isPrimary;
long long term;
std::tie(isPrimary, term) = [&] {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return std::make_tuple(_role == ReplicaSetRole::Primary, _term);
}();
@@ -403,7 +403,7 @@ std::shared_ptr<Notification<void>> ShardServerCatalogCacheLoader::getChunksSinc
// We may have missed an OperationContextGroup interrupt since this operation
// began but before the OperationContext was added to the group. So we'll check
// that we're still in the same _term.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
uassert(ErrorCodes::InterruptedDueToReplStateChange,
"Unable to refresh routing table because replica set state changed or "
"the node is shutting down.",
@@ -430,7 +430,7 @@ void ShardServerCatalogCacheLoader::getDatabase(
bool isPrimary;
long long term;
std::tie(isPrimary, term) = [&] {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return std::make_tuple(_role == ReplicaSetRole::Primary, _term);
}();
@@ -446,7 +446,7 @@ void ShardServerCatalogCacheLoader::getDatabase(
// We may have missed an OperationContextGroup interrupt since this operation began
// but before the OperationContext was added to the group. So we'll check that we're
// still in the same _term.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
uassert(ErrorCodes::InterruptedDueToReplStateChange,
"Unable to refresh database because replica set state changed or the node "
"is shutting down.",
@@ -466,7 +466,7 @@ void ShardServerCatalogCacheLoader::getDatabase(
void ShardServerCatalogCacheLoader::waitForCollectionFlush(OperationContext* opCtx,
const NamespaceString& nss) {
- stdx::unique_lock<stdx::mutex> lg(_mutex);
+ stdx::unique_lock<Latch> lg(_mutex);
const auto initialTerm = _term;
boost::optional<uint64_t> taskNumToWait;
@@ -517,7 +517,7 @@ void ShardServerCatalogCacheLoader::waitForCollectionFlush(OperationContext* opC
void ShardServerCatalogCacheLoader::waitForDatabaseFlush(OperationContext* opCtx,
StringData dbName) {
- stdx::unique_lock<stdx::mutex> lg(_mutex);
+ stdx::unique_lock<Latch> lg(_mutex);
const auto initialTerm = _term;
boost::optional<uint64_t> taskNumToWait;
@@ -599,7 +599,7 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
// Get the max version the loader has.
const ChunkVersion maxLoaderVersion = [&] {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto taskListIt = _collAndChunkTaskLists.find(nss);
if (taskListIt != _collAndChunkTaskLists.end() &&
@@ -670,7 +670,7 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
}
const auto termAfterRefresh = [&] {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _term;
}();
@@ -827,7 +827,7 @@ std::pair<bool, CollectionAndChangedChunks> ShardServerCatalogCacheLoader::_getE
const NamespaceString& nss,
const ChunkVersion& catalogCacheSinceVersion,
const long long term) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto taskListIt = _collAndChunkTaskLists.find(nss);
if (taskListIt == _collAndChunkTaskLists.end()) {
@@ -862,7 +862,7 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleCollAndChun
OperationContext* opCtx, const NamespaceString& nss, collAndChunkTask task) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto& list = _collAndChunkTaskLists[nss];
auto wasEmpty = list.empty();
@@ -884,7 +884,7 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleDbTask(Oper
DBTask task) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto& list = _dbTaskLists[dbName.toString()];
auto wasEmpty = list.empty();
@@ -918,7 +918,7 @@ void ShardServerCatalogCacheLoader::_runCollAndChunksTasks(const NamespaceString
}
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// If task completed successfully, remove it from work queue
if (taskFinished) {
@@ -940,7 +940,7 @@ void ShardServerCatalogCacheLoader::_runCollAndChunksTasks(const NamespaceString
<< " caller to refresh this namespace.";
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_collAndChunkTaskLists.erase(nss);
}
return;
@@ -967,7 +967,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
}
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// If task completed successfully, remove it from work queue
if (taskFinished) {
@@ -989,7 +989,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
<< " caller to refresh this namespace.";
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_dbTaskLists.erase(name);
}
return;
@@ -1002,7 +1002,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata(
OperationContext* opCtx, const NamespaceString& nss) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
const collAndChunkTask& task = _collAndChunkTaskLists[nss].front();
invariant(task.dropped || !task.collectionAndChangedChunks->changedChunks.empty());
@@ -1038,7 +1038,7 @@ void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata(
void ShardServerCatalogCacheLoader::_updatePersistedDbMetadata(OperationContext* opCtx,
StringData dbName) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
const DBTask& task = _dbTaskLists[dbName.toString()].front();
@@ -1203,7 +1203,7 @@ void ShardServerCatalogCacheLoader::DbTaskList::pop_front() {
}
void ShardServerCatalogCacheLoader::CollAndChunkTaskList::waitForActiveTaskCompletion(
- stdx::unique_lock<stdx::mutex>& lg) {
+ stdx::unique_lock<Latch>& lg) {
// Increase the use_count of the condition variable shared pointer, because the entire task list
// might get deleted during the unlocked interval
auto condVar = _activeTaskCompletedCondVar;
@@ -1211,7 +1211,7 @@ void ShardServerCatalogCacheLoader::CollAndChunkTaskList::waitForActiveTaskCompl
}
void ShardServerCatalogCacheLoader::DbTaskList::waitForActiveTaskCompletion(
- stdx::unique_lock<stdx::mutex>& lg) {
+ stdx::unique_lock<Latch>& lg) {
// Increase the use_count of the condition variable shared pointer, because the entire task list
// might get deleted during the unlocked interval
auto condVar = _activeTaskCompletedCondVar;
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.h b/src/mongo/db/s/shard_server_catalog_cache_loader.h
index 9e998415793..86b1739254b 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.h
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.h
@@ -202,7 +202,7 @@ private:
* same task object on which it was called because it might have been deleted during the
* unlocked period.
*/
- void waitForActiveTaskCompletion(stdx::unique_lock<stdx::mutex>& lg);
+ void waitForActiveTaskCompletion(stdx::unique_lock<Latch>& lg);
/**
* Checks whether 'term' matches the term of the latest task in the task list. This is
@@ -312,7 +312,7 @@ private:
* same task object on which it was called because it might have been deleted during the
* unlocked period.
*/
- void waitForActiveTaskCompletion(stdx::unique_lock<stdx::mutex>& lg);
+ void waitForActiveTaskCompletion(stdx::unique_lock<Latch>& lg);
/**
* Checks whether 'term' matches the term of the latest task in the task list. This is
@@ -482,7 +482,7 @@ private:
NamespaceMetadataChangeNotifications _namespaceNotifications;
// Protects the class state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardServerCatalogCacheLoader::_mutex");
// This value is bumped every time the set of currently scheduled tasks should no longer be
// running. This includes, replica set state transitions and shutdown.
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index 0abb64e96cc..b3f38be0881 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -308,7 +308,7 @@ void ShardingInitializationMongoD::initializeFromShardIdentity(
auto const shardingState = ShardingState::get(opCtx);
auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
- stdx::unique_lock<stdx::mutex> ul(_initSynchronizationMutex);
+ stdx::unique_lock<Latch> ul(_initSynchronizationMutex);
if (shardingState->enabled()) {
uassert(40371, "", shardingState->shardId() == shardIdentity.getShardName());
diff --git a/src/mongo/db/s/sharding_initialization_mongod.h b/src/mongo/db/s/sharding_initialization_mongod.h
index a205d68d1b2..496a6e072fe 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.h
+++ b/src/mongo/db/s/sharding_initialization_mongod.h
@@ -113,7 +113,8 @@ public:
private:
// This mutex ensures that only one thread at a time executes the sharding
// initialization/teardown sequence
- stdx::mutex _initSynchronizationMutex;
+ Mutex _initSynchronizationMutex =
+ MONGO_MAKE_LATCH("ShardingInitializationMongod::_initSynchronizationMutex");
// Function for initializing the sharding environment components (i.e. everything on the Grid)
ShardingEnvironmentInitFunc _initFunc;
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index b9c7e634a53..37e5f8930fa 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -57,7 +57,7 @@ ShardingState* ShardingState::get(OperationContext* operationContext) {
}
void ShardingState::setInitialized(ShardId shardId, OID clusterId) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
invariant(_getInitializationState() == InitializationState::kNew);
_shardId = std::move(shardId);
@@ -71,7 +71,7 @@ void ShardingState::setInitialized(Status failedStatus) {
invariant(!failedStatus.isOK());
log() << "Failed to initialize sharding components" << causedBy(failedStatus);
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
invariant(_getInitializationState() == InitializationState::kNew);
_initializationStatus = std::move(failedStatus);
@@ -79,7 +79,7 @@ void ShardingState::setInitialized(Status failedStatus) {
}
boost::optional<Status> ShardingState::initializationStatus() {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
if (_getInitializationState() == InitializationState::kNew)
return boost::none;
@@ -105,13 +105,13 @@ Status ShardingState::canAcceptShardedCommands() const {
ShardId ShardingState::shardId() {
invariant(enabled());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _shardId;
}
OID ShardingState::clusterId() {
invariant(enabled());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _clusterId;
}
diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h
index 4b78d0bdfb4..ab3430fb5ec 100644
--- a/src/mongo/db/s/sharding_state.h
+++ b/src/mongo/db/s/sharding_state.h
@@ -32,8 +32,8 @@
#include <string>
#include "mongo/bson/oid.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/shard_id.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -136,7 +136,7 @@ private:
}
// Protects state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardingState::_mutex");
// State of the initialization of the sharding state along with any potential errors
AtomicWord<unsigned> _initializationState{static_cast<uint32_t>(InitializationState::kNew)};
diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp
index e45bb27a0f4..3e5d531618e 100644
--- a/src/mongo/db/s/transaction_coordinator.cpp
+++ b/src/mongo/db/s/transaction_coordinator.cpp
@@ -140,7 +140,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// _participantsDurable (optional)
// Output: _participantsDurable = true
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_participants);
_step = Step::kWritingParticipantList;
@@ -167,7 +167,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
.thenRunOn(Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor())
.then([this] {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_participantsDurable = true;
}
@@ -178,7 +178,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// _decision (optional)
// Output: _decision is set
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_participantsDurable);
_step = Step::kWaitingForVotes;
@@ -196,7 +196,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
_serviceContext, *_sendPrepareScheduler, _lsid, _txnNumber, *_participants)
.then([this](PrepareVoteConsensus consensus) mutable {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_decision = consensus.decision();
}
@@ -219,7 +219,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// _decisionDurable (optional)
// Output: _decisionDurable = true
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_decision);
_step = Step::kWritingDecision;
@@ -243,7 +243,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
})
.then([this] {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_decisionDurable = true;
}
@@ -251,7 +251,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// Input: _decisionDurable
// Output: (none)
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_decisionDurable);
_step = Step::kWaitingForDecisionAcks;
@@ -292,7 +292,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// Do a best-effort attempt (i.e., writeConcern w:1) to delete the coordinator's durable
// state.
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_step = Step::kDeletingCoordinatorDoc;
@@ -354,7 +354,7 @@ SharedSemiFuture<CommitDecision> TransactionCoordinator::getDecision() const {
}
Future<void> TransactionCoordinator::onCompletion() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (_completionPromisesFired)
return Future<void>::makeReady();
@@ -373,7 +373,7 @@ void TransactionCoordinator::cancelIfCommitNotYetStarted() {
}
bool TransactionCoordinator::_reserveKickOffCommitPromise() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (_kickOffCommitPromiseSet)
return false;
@@ -394,7 +394,7 @@ void TransactionCoordinator::_done(Status status) {
LOG(3) << txn::txnIdToString(_lsid, _txnNumber) << " Two-phase commit completed with "
<< redact(status);
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
const auto tickSource = _serviceContext->getTickSource();
@@ -507,7 +507,7 @@ std::string TransactionCoordinator::_twoPhaseCommitInfoForLog(
}
TransactionCoordinator::Step TransactionCoordinator::getStep() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _step;
}
@@ -516,7 +516,7 @@ void TransactionCoordinator::reportState(BSONObjBuilder& parent) const {
TickSource* tickSource = _serviceContext->getTickSource();
TickSource::Tick currentTick = tickSource->getTicks();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
BSONObjBuilder lsidBuilder(doc.subobjStart("lsid"));
_lsid.serialize(&lsidBuilder);
@@ -563,7 +563,7 @@ std::string TransactionCoordinator::toString(Step step) const {
}
void TransactionCoordinator::_updateAssociatedClient(Client* client) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_transactionCoordinatorMetricsObserver->updateLastClientInfo(client);
}
diff --git a/src/mongo/db/s/transaction_coordinator.h b/src/mongo/db/s/transaction_coordinator.h
index e52cdb12fa3..ebc055bd575 100644
--- a/src/mongo/db/s/transaction_coordinator.h
+++ b/src/mongo/db/s/transaction_coordinator.h
@@ -166,7 +166,7 @@ private:
std::unique_ptr<txn::AsyncWorkScheduler> _sendPrepareScheduler;
// Protects the state below
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TransactionCoordinator::_mutex");
// Tracks which step of the 2PC coordination is currently (or was most recently) executing
Step _step{Step::kInactive};
diff --git a/src/mongo/db/s/transaction_coordinator_catalog.cpp b/src/mongo/db/s/transaction_coordinator_catalog.cpp
index 87e2252435e..2ff5ca5be39 100644
--- a/src/mongo/db/s/transaction_coordinator_catalog.cpp
+++ b/src/mongo/db/s/transaction_coordinator_catalog.cpp
@@ -51,14 +51,14 @@ void TransactionCoordinatorCatalog::exitStepUp(Status status) {
<< causedBy(status);
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_stepUpCompletionStatus);
_stepUpCompletionStatus = std::move(status);
_stepUpCompleteCV.notify_all();
}
void TransactionCoordinatorCatalog::onStepDown() {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
std::vector<std::shared_ptr<TransactionCoordinator>> coordinatorsToCancel;
for (auto&& [sessionId, coordinatorsForSession] : _coordinatorsBySession) {
@@ -82,7 +82,7 @@ void TransactionCoordinatorCatalog::insert(OperationContext* opCtx,
LOG(3) << "Inserting coordinator " << lsid.getId() << ':' << txnNumber
<< " into in-memory catalog";
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
if (!forStepUp) {
_waitForStepUpToComplete(ul, opCtx);
}
@@ -110,7 +110,7 @@ void TransactionCoordinatorCatalog::insert(OperationContext* opCtx,
std::shared_ptr<TransactionCoordinator> TransactionCoordinatorCatalog::get(
OperationContext* opCtx, const LogicalSessionId& lsid, TxnNumber txnNumber) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
_waitForStepUpToComplete(ul, opCtx);
std::shared_ptr<TransactionCoordinator> coordinatorToReturn;
@@ -130,7 +130,7 @@ std::shared_ptr<TransactionCoordinator> TransactionCoordinatorCatalog::get(
boost::optional<std::pair<TxnNumber, std::shared_ptr<TransactionCoordinator>>>
TransactionCoordinatorCatalog::getLatestOnSession(OperationContext* opCtx,
const LogicalSessionId& lsid) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
_waitForStepUpToComplete(ul, opCtx);
const auto& coordinatorsForSessionIter = _coordinatorsBySession.find(lsid);
@@ -153,7 +153,7 @@ void TransactionCoordinatorCatalog::_remove(const LogicalSessionId& lsid, TxnNum
LOG(3) << "Removing coordinator " << lsid.getId() << ':' << txnNumber
<< " from in-memory catalog";
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const auto& coordinatorsForSessionIter = _coordinatorsBySession.find(lsid);
@@ -178,7 +178,7 @@ void TransactionCoordinatorCatalog::_remove(const LogicalSessionId& lsid, TxnNum
}
void TransactionCoordinatorCatalog::join() {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
while (!_noActiveCoordinatorsCV.wait_for(
ul, stdx::chrono::seconds{5}, [this] { return _coordinatorsBySession.empty(); })) {
@@ -189,11 +189,11 @@ void TransactionCoordinatorCatalog::join() {
}
std::string TransactionCoordinatorCatalog::toString() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _toString(lk);
}
-void TransactionCoordinatorCatalog::_waitForStepUpToComplete(stdx::unique_lock<stdx::mutex>& lk,
+void TransactionCoordinatorCatalog::_waitForStepUpToComplete(stdx::unique_lock<Latch>& lk,
OperationContext* opCtx) {
invariant(lk.owns_lock());
opCtx->waitForConditionOrInterrupt(
@@ -216,7 +216,7 @@ std::string TransactionCoordinatorCatalog::_toString(WithLock wl) const {
}
void TransactionCoordinatorCatalog::filter(FilterPredicate predicate, FilterVisitor visitor) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto sessionIt = _coordinatorsBySession.begin(); sessionIt != _coordinatorsBySession.end();
++sessionIt) {
auto& lsid = sessionIt->first;
diff --git a/src/mongo/db/s/transaction_coordinator_catalog.h b/src/mongo/db/s/transaction_coordinator_catalog.h
index 5768c69bb3c..057c5dfb575 100644
--- a/src/mongo/db/s/transaction_coordinator_catalog.h
+++ b/src/mongo/db/s/transaction_coordinator_catalog.h
@@ -125,7 +125,7 @@ private:
* Blocks in an interruptible wait until the catalog is not marked as having a stepup in
* progress.
*/
- void _waitForStepUpToComplete(stdx::unique_lock<stdx::mutex>& lk, OperationContext* opCtx);
+ void _waitForStepUpToComplete(stdx::unique_lock<Latch>& lk, OperationContext* opCtx);
/**
* Removes the coordinator with the given session id and transaction number from the catalog, if
@@ -142,7 +142,7 @@ private:
std::string _toString(WithLock wl) const;
// Protects the state below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TransactionCoordinatorCatalog::_mutex");
// Contains TransactionCoordinator objects by session id and transaction number. May contain
// more than one coordinator per session. All coordinators for a session that do not correspond
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.cpp b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
index 9c0d51b07b4..627b4fd2aab 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
@@ -60,14 +60,14 @@ AsyncWorkScheduler::AsyncWorkScheduler(ServiceContext* serviceContext)
AsyncWorkScheduler::~AsyncWorkScheduler() {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_quiesced(lg));
}
if (!_parent)
return;
- stdx::lock_guard<stdx::mutex> lg(_parent->_mutex);
+ stdx::lock_guard<Latch> lg(_parent->_mutex);
_parent->_childSchedulers.erase(_itToRemove);
_parent->_notifyAllTasksComplete(lg);
_parent = nullptr;
@@ -129,7 +129,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
auto pf = makePromiseFuture<ResponseStatus>();
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
uassertStatusOK(_shutdownStatus);
auto scheduledCommandHandle =
@@ -157,7 +157,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
} else {
promise->setError([&] {
if (status == ErrorCodes::CallbackCanceled) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
return _shutdownStatus.isOK() ? status : _shutdownStatus;
}
return status;
@@ -172,7 +172,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
return std::move(pf.future).tapAll(
[this, it = std::move(it)](StatusWith<ResponseStatus> s) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
});
@@ -182,7 +182,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
std::unique_ptr<AsyncWorkScheduler> AsyncWorkScheduler::makeChildScheduler() {
auto child = stdx::make_unique<AsyncWorkScheduler>(_serviceContext);
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_shutdownStatus.isOK())
child->shutdown(_shutdownStatus);
@@ -195,7 +195,7 @@ std::unique_ptr<AsyncWorkScheduler> AsyncWorkScheduler::makeChildScheduler() {
void AsyncWorkScheduler::shutdown(Status status) {
invariant(!status.isOK());
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_shutdownStatus.isOK())
return;
@@ -216,7 +216,7 @@ void AsyncWorkScheduler::shutdown(Status status) {
}
void AsyncWorkScheduler::join() {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
_allListsEmptyCV.wait(ul, [&] {
return _activeOpContexts.empty() && _activeHandles.empty() && _childSchedulers.empty();
});
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.h b/src/mongo/db/s/transaction_coordinator_futures_util.h
index eb769319aad..a1f25c84744 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.h
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.h
@@ -78,7 +78,7 @@ public:
auto pf = makePromiseFuture<ReturnType>();
auto taskCompletionPromise = std::make_shared<Promise<ReturnType>>(std::move(pf.promise));
try {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
uassertStatusOK(_shutdownStatus);
auto scheduledWorkHandle = uassertStatusOK(_executor->scheduleWorkAt(
@@ -119,7 +119,7 @@ public:
return std::move(pf.future).tapAll(
[this, it = std::move(it)](StatusOrStatusWith<ReturnType> s) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
});
@@ -210,7 +210,7 @@ private:
ChildIteratorsList::iterator _itToRemove;
// Mutex to protect the shared state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AsyncWorkScheduler::_mutex");
// If shutdown() is called, this contains the first status that was passed to it and is an
// indication that no more operations can be scheduled
@@ -294,7 +294,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
* The first few fields have fixed values. *
******************************************************/
// Protects all state in the SharedBlock.
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("SharedBlock::mutex");
// If any response returns an error prior to a response setting shouldStopIteration to
// ShouldStopIteration::kYes, the promise will be set with that error rather than the global
@@ -332,7 +332,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
for (auto&& localFut : futures) {
std::move(localFut)
.then([sharedBlock](IndividualResult res) {
- stdx::unique_lock<stdx::mutex> lk(sharedBlock->mutex);
+ stdx::unique_lock<Latch> lk(sharedBlock->mutex);
if (sharedBlock->shouldStopIteration == ShouldStopIteration::kNo &&
sharedBlock->status.isOK()) {
sharedBlock->shouldStopIteration =
@@ -340,14 +340,14 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
}
})
.onError([sharedBlock](Status s) {
- stdx::unique_lock<stdx::mutex> lk(sharedBlock->mutex);
+ stdx::unique_lock<Latch> lk(sharedBlock->mutex);
if (sharedBlock->shouldStopIteration == ShouldStopIteration::kNo &&
sharedBlock->status.isOK()) {
sharedBlock->status = s;
}
})
.getAsync([sharedBlock](Status s) {
- stdx::unique_lock<stdx::mutex> lk(sharedBlock->mutex);
+ stdx::unique_lock<Latch> lk(sharedBlock->mutex);
sharedBlock->numOutstandingResponses--;
if (sharedBlock->numOutstandingResponses == 0) {
// Unlock before emplacing the result in case any continuations do expensive
diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp
index 969bfd4338d..f033e14c14a 100644
--- a/src/mongo/db/s/transaction_coordinator_service.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service.cpp
@@ -173,7 +173,7 @@ void TransactionCoordinatorService::onStepUp(OperationContext* opCtx,
Milliseconds recoveryDelayForTesting) {
joinPreviousRound();
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(!_catalogAndScheduler);
_catalogAndScheduler = std::make_shared<CatalogAndScheduler>(opCtx->getServiceContext());
@@ -236,7 +236,7 @@ void TransactionCoordinatorService::onStepUp(OperationContext* opCtx,
void TransactionCoordinatorService::onStepDown() {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_catalogAndScheduler)
return;
@@ -251,7 +251,7 @@ void TransactionCoordinatorService::onShardingInitialization(OperationContext* o
if (!isPrimary)
return;
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(!_catalogAndScheduler);
_catalogAndScheduler = std::make_shared<CatalogAndScheduler>(opCtx->getServiceContext());
@@ -262,7 +262,7 @@ void TransactionCoordinatorService::onShardingInitialization(OperationContext* o
std::shared_ptr<TransactionCoordinatorService::CatalogAndScheduler>
TransactionCoordinatorService::_getCatalogAndScheduler(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
uassert(
ErrorCodes::NotMaster, "Transaction coordinator is not a primary", _catalogAndScheduler);
diff --git a/src/mongo/db/s/transaction_coordinator_service.h b/src/mongo/db/s/transaction_coordinator_service.h
index 89031214474..aa1a8bfdd12 100644
--- a/src/mongo/db/s/transaction_coordinator_service.h
+++ b/src/mongo/db/s/transaction_coordinator_service.h
@@ -146,7 +146,7 @@ private:
std::shared_ptr<CatalogAndScheduler> _catalogAndSchedulerToCleanup;
// Protects the state below
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TransactionCoordinatorService::_mutex");
// The catalog + scheduler instantiated at the last step-up attempt. When nullptr, it means
// onStepUp has not been called yet after the last stepDown (or construction).
diff --git a/src/mongo/db/s/wait_for_majority_service.cpp b/src/mongo/db/s/wait_for_majority_service.cpp
index 0625a84b611..f41ed83c630 100644
--- a/src/mongo/db/s/wait_for_majority_service.cpp
+++ b/src/mongo/db/s/wait_for_majority_service.cpp
@@ -141,7 +141,7 @@ SharedSemiFuture<void> WaitForMajorityService::waitUntilMajority(const repl::OpT
void WaitForMajorityService::_periodicallyWaitForMajority(ServiceContext* service) {
ThreadClient tc("waitForMajority", service);
- stdx::unique_lock lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (!_inShutDown) {
auto opCtx = tc->makeOperationContext();
diff --git a/src/mongo/db/s/wait_for_majority_service.h b/src/mongo/db/s/wait_for_majority_service.h
index 970b475d0d3..90ec771bd40 100644
--- a/src/mongo/db/s/wait_for_majority_service.h
+++ b/src/mongo/db/s/wait_for_majority_service.h
@@ -36,7 +36,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/service_context.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/future.h"
@@ -74,7 +74,7 @@ private:
*/
void _periodicallyWaitForMajority(ServiceContext* service);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WaitForMajorityService::_mutex");
// Contains an ordered list of opTimes to wait to be majority comitted.
OpTimeWaitingMap _queuedOpTimes;
diff --git a/src/mongo/db/s/wait_for_majority_service_test.cpp b/src/mongo/db/s/wait_for_majority_service_test.cpp
index d904d253af1..ca89ac04c8b 100644
--- a/src/mongo/db/s/wait_for_majority_service_test.cpp
+++ b/src/mongo/db/s/wait_for_majority_service_test.cpp
@@ -32,7 +32,7 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/s/wait_for_majority_service.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -64,7 +64,7 @@ public:
}
void finishWaitingOneOpTime() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_isTestReady = true;
_isTestReadyCV.notify_one();
@@ -74,7 +74,7 @@ public:
}
Status waitForWriteConcernStub(OperationContext* opCtx, const repl::OpTime& opTime) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_waitForMajorityCallCount++;
_callCountChangedCV.notify_one();
@@ -97,7 +97,7 @@ public:
}
const repl::OpTime& getLastOpTimeWaited() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastOpTimeWaited;
}
@@ -109,7 +109,7 @@ public:
private:
WaitForMajorityService _waitForMajorityService;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WaitForMajorityServiceTest::_mutex");
stdx::condition_variable _isTestReadyCV;
stdx::condition_variable _finishWaitingOneOpTimeCV;
stdx::condition_variable _callCountChangedCV;
diff --git a/src/mongo/db/server_recovery.cpp b/src/mongo/db/server_recovery.cpp
index f7133127f40..c44515a3358 100644
--- a/src/mongo/db/server_recovery.cpp
+++ b/src/mongo/db/server_recovery.cpp
@@ -48,17 +48,17 @@ bool SizeRecoveryState::collectionNeedsSizeAdjustment(const std::string& ident)
}
bool SizeRecoveryState::collectionAlwaysNeedsSizeAdjustment(const std::string& ident) const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _collectionsAlwaysNeedingSizeAdjustment.count(ident) > 0;
}
void SizeRecoveryState::markCollectionAsAlwaysNeedsSizeAdjustment(const std::string& ident) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_collectionsAlwaysNeedingSizeAdjustment.insert(ident);
}
void SizeRecoveryState::clearStateBeforeRecovery() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_collectionsAlwaysNeedingSizeAdjustment.clear();
}
} // namespace mongo
diff --git a/src/mongo/db/server_recovery.h b/src/mongo/db/server_recovery.h
index fbd89f56360..3b9d87a8065 100644
--- a/src/mongo/db/server_recovery.h
+++ b/src/mongo/db/server_recovery.h
@@ -33,7 +33,7 @@
#include <string>
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
/**
@@ -81,7 +81,7 @@ public:
void clearStateBeforeRecovery();
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("SizeRecoveryState::_mutex");
std::set<std::string> _collectionsAlwaysNeedingSizeAdjustment;
};
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index c7ed9d53083..c3d486fa100 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -95,7 +95,7 @@ ServiceContext::ServiceContext()
_preciseClockSource(stdx::make_unique<SystemClockSource>()) {}
ServiceContext::~ServiceContext() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& client : _clients) {
severe() << "Client " << client->desc() << " still exists while destroying ServiceContext@"
<< static_cast<void*>(this);
@@ -160,7 +160,7 @@ ServiceContext::UniqueClient ServiceContext::makeClient(std::string desc,
std::unique_ptr<Client> client(new Client(std::move(desc), this, std::move(session)));
onCreate(client.get(), _clientObservers);
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_clients.insert(client.get()).second);
}
return UniqueClient(client.release());
@@ -224,7 +224,7 @@ void ServiceContext::setServiceExecutor(std::unique_ptr<transport::ServiceExecut
void ServiceContext::ClientDeleter::operator()(Client* client) const {
ServiceContext* const service = client->getServiceContext();
{
- stdx::lock_guard<stdx::mutex> lk(service->_mutex);
+ stdx::lock_guard<Latch> lk(service->_mutex);
invariant(service->_clients.erase(client));
}
onDestroy(client, service->_clientObservers);
@@ -290,7 +290,7 @@ Client* ServiceContext::LockedClientsCursor::next() {
}
void ServiceContext::setKillAllOperations() {
- stdx::lock_guard<stdx::mutex> clientLock(_mutex);
+ stdx::lock_guard<Latch> clientLock(_mutex);
// Ensure that all newly created operation contexts will immediately be in the interrupted state
_globalKill.store(true);
@@ -331,17 +331,17 @@ void ServiceContext::unsetKillAllOperations() {
}
void ServiceContext::registerKillOpListener(KillOpListenerInterface* listener) {
- stdx::lock_guard<stdx::mutex> clientLock(_mutex);
+ stdx::lock_guard<Latch> clientLock(_mutex);
_killOpListeners.push_back(listener);
}
void ServiceContext::waitForStartupComplete() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_startupCompleteCondVar.wait(lk, [this] { return _startupComplete; });
}
void ServiceContext::notifyStartupComplete() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_startupComplete = true;
lk.unlock();
_startupCompleteCondVar.notify_all();
diff --git a/src/mongo/db/service_context.h b/src/mongo/db/service_context.h
index ff2a6342903..fb350baad55 100644
--- a/src/mongo/db/service_context.h
+++ b/src/mongo/db/service_context.h
@@ -37,10 +37,10 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/list.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/transport/service_executor.h"
#include "mongo/transport/session.h"
@@ -163,7 +163,7 @@ public:
Client* next();
private:
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
ClientSet::const_iterator _curr;
ClientSet::const_iterator _end;
};
@@ -530,7 +530,7 @@ private:
std::unique_ptr<ClientObserver> _observer;
};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ServiceContext::_mutex");
/**
* The periodic runner.
diff --git a/src/mongo/db/service_context_test_fixture.cpp b/src/mongo/db/service_context_test_fixture.cpp
index 4ac6fc023e2..d4248a7409f 100644
--- a/src/mongo/db/service_context_test_fixture.cpp
+++ b/src/mongo/db/service_context_test_fixture.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/client.h"
#include "mongo/db/op_observer_registry.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/diagnostic_info.h"
namespace mongo {
diff --git a/src/mongo/db/service_liaison_mock.cpp b/src/mongo/db/service_liaison_mock.cpp
index 10fd0322d8e..960be4e37d3 100644
--- a/src/mongo/db/service_liaison_mock.cpp
+++ b/src/mongo/db/service_liaison_mock.cpp
@@ -43,12 +43,12 @@ MockServiceLiaisonImpl::MockServiceLiaisonImpl() {
}
LogicalSessionIdSet MockServiceLiaisonImpl::getActiveOpSessions() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _activeSessions;
}
LogicalSessionIdSet MockServiceLiaisonImpl::getOpenCursorSessions(OperationContext* opCtx) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _cursorSessions;
}
@@ -65,32 +65,32 @@ void MockServiceLiaisonImpl::scheduleJob(PeriodicRunner::PeriodicJob job) {
void MockServiceLiaisonImpl::addCursorSession(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cursorSessions.insert(std::move(lsid));
}
void MockServiceLiaisonImpl::removeCursorSession(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cursorSessions.erase(lsid);
}
void MockServiceLiaisonImpl::clearCursorSession() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cursorSessions.clear();
}
void MockServiceLiaisonImpl::add(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cursorSessions.insert(std::move(lsid));
}
void MockServiceLiaisonImpl::remove(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_activeSessions.erase(lsid);
}
void MockServiceLiaisonImpl::clear() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_activeSessions.clear();
}
diff --git a/src/mongo/db/service_liaison_mock.h b/src/mongo/db/service_liaison_mock.h
index 6d500ae5682..76af1a8ebf0 100644
--- a/src/mongo/db/service_liaison_mock.h
+++ b/src/mongo/db/service_liaison_mock.h
@@ -33,8 +33,8 @@
#include "mongo/db/service_liaison.h"
#include "mongo/executor/async_timer_mock.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/periodic_runner.h"
#include "mongo/util/time_support.h"
@@ -87,7 +87,7 @@ private:
boost::optional<SessionKiller::Matcher> _matcher;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MockServiceLiaisonImpl::_mutex");
LogicalSessionIdSet _activeSessions;
LogicalSessionIdSet _cursorSessions;
};
diff --git a/src/mongo/db/service_liaison_mongod.cpp b/src/mongo/db/service_liaison_mongod.cpp
index 94e1fbd9217..6e26c6f16e7 100644
--- a/src/mongo/db/service_liaison_mongod.cpp
+++ b/src/mongo/db/service_liaison_mongod.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/cursor_manager.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/service_liaison_mongod.h b/src/mongo/db/service_liaison_mongod.h
index b1060425f6f..3cf8864b5eb 100644
--- a/src/mongo/db/service_liaison_mongod.h
+++ b/src/mongo/db/service_liaison_mongod.h
@@ -69,7 +69,7 @@ protected:
*/
ServiceContext* _context() override;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ServiceLiaisonMongod::_mutex");
std::vector<PeriodicJobAnchor> _jobs;
};
diff --git a/src/mongo/db/service_liaison_mongos.cpp b/src/mongo/db/service_liaison_mongos.cpp
index 666ca06ea68..9abe73ea5c5 100644
--- a/src/mongo/db/service_liaison_mongos.cpp
+++ b/src/mongo/db/service_liaison_mongos.cpp
@@ -34,9 +34,9 @@
#include "mongo/db/service_liaison_mongos.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/grid.h"
#include "mongo/s/query/cluster_cursor_manager.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/service_liaison_mongos.h b/src/mongo/db/service_liaison_mongos.h
index ab40801557d..22fc7032d73 100644
--- a/src/mongo/db/service_liaison_mongos.h
+++ b/src/mongo/db/service_liaison_mongos.h
@@ -69,7 +69,7 @@ protected:
*/
ServiceContext* _context() override;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ServiceLiaisonMongos::_mutex");
std::vector<PeriodicJobAnchor> _jobs;
};
diff --git a/src/mongo/db/session_catalog.cpp b/src/mongo/db/session_catalog.cpp
index 59108a84b21..838c805b48f 100644
--- a/src/mongo/db/session_catalog.cpp
+++ b/src/mongo/db/session_catalog.cpp
@@ -48,7 +48,7 @@ const auto operationSessionDecoration =
} // namespace
SessionCatalog::~SessionCatalog() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
for (const auto& entry : _sessions) {
ObservableSession session(lg, entry.second->session);
invariant(!session.currentOperation());
@@ -57,7 +57,7 @@ SessionCatalog::~SessionCatalog() {
}
void SessionCatalog::reset_forTest() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_sessions.clear();
}
@@ -78,7 +78,7 @@ SessionCatalog::ScopedCheckedOutSession SessionCatalog::_checkOutSession(Operati
invariant(!opCtx->lockState()->inAWriteUnitOfWork());
invariant(!opCtx->lockState()->isLocked());
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
auto sri = _getOrCreateSessionRuntimeInfo(ul, opCtx, *opCtx->getLogicalSessionId());
// Wait until the session is no longer checked out and until the previously scheduled kill has
@@ -105,7 +105,7 @@ SessionCatalog::SessionToKill SessionCatalog::checkOutSessionForKill(OperationCo
invariant(!operationSessionDecoration(opCtx));
invariant(!opCtx->getTxnNumber());
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
auto sri = _getOrCreateSessionRuntimeInfo(ul, opCtx, killToken.lsidToKill);
invariant(ObservableSession(ul, sri->session)._killed());
@@ -129,7 +129,7 @@ void SessionCatalog::scanSession(const LogicalSessionId& lsid,
std::unique_ptr<SessionRuntimeInfo> sessionToReap;
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto it = _sessions.find(lsid);
if (it != _sessions.end()) {
auto& sri = it->second;
@@ -150,7 +150,7 @@ void SessionCatalog::scanSessions(const SessionKiller::Matcher& matcher,
std::vector<std::unique_ptr<SessionRuntimeInfo>> sessionsToReap;
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
LOG(2) << "Beginning scanSessions. Scanning " << _sessions.size() << " sessions.";
@@ -172,7 +172,7 @@ void SessionCatalog::scanSessions(const SessionKiller::Matcher& matcher,
}
SessionCatalog::KillToken SessionCatalog::killSession(const LogicalSessionId& lsid) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto it = _sessions.find(lsid);
uassert(ErrorCodes::NoSuchSession, "Session not found", it != _sessions.end());
@@ -181,7 +181,7 @@ SessionCatalog::KillToken SessionCatalog::killSession(const LogicalSessionId& ls
}
size_t SessionCatalog::size() const {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
return _sessions.size();
}
@@ -197,7 +197,7 @@ SessionCatalog::SessionRuntimeInfo* SessionCatalog::_getOrCreateSessionRuntimeIn
void SessionCatalog::_releaseSession(SessionRuntimeInfo* sri,
boost::optional<KillToken> killToken) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
// Make sure we have exactly the same session on the map and that it is still associated with an
// operation context (meaning checked-out)
diff --git a/src/mongo/db/session_catalog.h b/src/mongo/db/session_catalog.h
index 029a3881c1f..ce301933d40 100644
--- a/src/mongo/db/session_catalog.h
+++ b/src/mongo/db/session_catalog.h
@@ -37,8 +37,8 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/session.h"
#include "mongo/db/session_killer.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -153,7 +153,7 @@ private:
void _releaseSession(SessionRuntimeInfo* sri, boost::optional<KillToken> killToken);
// Protects the state below
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("SessionCatalog::_mutex");
// Owns the Session objects for all current Sessions.
SessionRuntimeInfoMap _sessions;
diff --git a/src/mongo/db/session_catalog_test.cpp b/src/mongo/db/session_catalog_test.cpp
index a5512625dbf..151924e2709 100644
--- a/src/mongo/db/session_catalog_test.cpp
+++ b/src/mongo/db/session_catalog_test.cpp
@@ -599,9 +599,9 @@ TEST_F(SessionCatalogTestWithDefaultOpCtx, ConcurrentCheckOutAndKill) {
// The main thread won't check in the session until it's killed.
{
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cond;
- stdx::unique_lock<stdx::mutex> lock(m);
+ stdx::unique_lock<Latch> lock(m);
ASSERT_EQ(ErrorCodes::InternalError,
_opCtx->waitForConditionOrInterruptNoAssert(cond, lock));
}
diff --git a/src/mongo/db/session_killer.cpp b/src/mongo/db/session_killer.cpp
index 2f92bf6dbf2..c7acd2d074c 100644
--- a/src/mongo/db/session_killer.cpp
+++ b/src/mongo/db/session_killer.cpp
@@ -50,7 +50,7 @@ SessionKiller::SessionKiller(ServiceContext* sc, KillFunc killer)
Client::setCurrent(sc->makeClient("SessionKiller"));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// While we're not in shutdown
while (!_inShutdown) {
@@ -72,7 +72,7 @@ SessionKiller::SessionKiller(ServiceContext* sc, KillFunc killer)
SessionKiller::~SessionKiller() {
DESTRUCTOR_GUARD([&] {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
_killerCV.notify_one();
@@ -138,7 +138,7 @@ SessionKiller* SessionKiller::get(OperationContext* ctx) {
std::shared_ptr<SessionKiller::Result> SessionKiller::kill(
OperationContext* opCtx, const KillAllSessionsByPatternSet& toKill) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Save a shared_ptr to the current reapResults (I.e. the next thing to get killed).
auto reapResults = _reapResults;
@@ -164,7 +164,7 @@ std::shared_ptr<SessionKiller::Result> SessionKiller::kill(
return {reapResults.result, reapResults.result->get_ptr()};
}
-void SessionKiller::_periodicKill(OperationContext* opCtx, stdx::unique_lock<stdx::mutex>& lk) {
+void SessionKiller::_periodicKill(OperationContext* opCtx, stdx::unique_lock<Latch>& lk) {
// Pull our current workload onto the stack. Swap it for empties.
decltype(_nextToReap) nextToReap;
decltype(_reapResults) reapResults;
diff --git a/src/mongo/db/session_killer.h b/src/mongo/db/session_killer.h
index 5df03edcaee..57faebefff9 100644
--- a/src/mongo/db/session_killer.h
+++ b/src/mongo/db/session_killer.h
@@ -36,9 +36,9 @@
#include "mongo/base/status_with.h"
#include "mongo/db/kill_sessions.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/util/net/hostandport.h"
@@ -125,13 +125,13 @@ private:
std::shared_ptr<boost::optional<Result>> result;
};
- void _periodicKill(OperationContext* opCtx, stdx::unique_lock<stdx::mutex>& lk);
+ void _periodicKill(OperationContext* opCtx, stdx::unique_lock<Latch>& lk);
KillFunc _killFunc;
stdx::thread _thread;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SessionKiller::_mutex");
stdx::condition_variable _callerCV;
stdx::condition_variable _killerCV;
diff --git a/src/mongo/db/sessions_collection_config_server.cpp b/src/mongo/db/sessions_collection_config_server.cpp
index 17741f31dda..f89916aace2 100644
--- a/src/mongo/db/sessions_collection_config_server.cpp
+++ b/src/mongo/db/sessions_collection_config_server.cpp
@@ -96,7 +96,7 @@ Status SessionsCollectionConfigServer::setupSessionsCollection(OperationContext*
return {ErrorCodes::ShardingStateNotInitialized, "sharding state is not yet initialized"};
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
{
auto res = _shardCollectionIfNeeded(opCtx);
if (!res.isOK()) {
diff --git a/src/mongo/db/sessions_collection_config_server.h b/src/mongo/db/sessions_collection_config_server.h
index bdfac76abff..701d055772a 100644
--- a/src/mongo/db/sessions_collection_config_server.h
+++ b/src/mongo/db/sessions_collection_config_server.h
@@ -33,7 +33,7 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/sessions_collection_sharded.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -64,7 +64,7 @@ private:
Status _shardCollectionIfNeeded(OperationContext* opCtx);
Status _generateIndexesIfNeeded(OperationContext* opCtx);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SessionsCollectionConfigServer::_mutex");
};
} // namespace mongo
diff --git a/src/mongo/db/sessions_collection_mock.cpp b/src/mongo/db/sessions_collection_mock.cpp
index de53f9a30ce..6d7ccfbc09a 100644
--- a/src/mongo/db/sessions_collection_mock.cpp
+++ b/src/mongo/db/sessions_collection_mock.cpp
@@ -59,22 +59,22 @@ Status MockSessionsCollectionImpl::removeRecords(const LogicalSessionIdSet& sess
}
void MockSessionsCollectionImpl::add(LogicalSessionRecord record) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sessions.insert({record.getId(), std::move(record)});
}
void MockSessionsCollectionImpl::remove(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sessions.erase(lsid);
}
bool MockSessionsCollectionImpl::has(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _sessions.find(lsid) != _sessions.end();
}
void MockSessionsCollectionImpl::clearSessions() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sessions.clear();
}
@@ -92,7 +92,7 @@ Status MockSessionsCollectionImpl::_refreshSessions(const LogicalSessionRecordSe
}
Status MockSessionsCollectionImpl::_removeRecords(const LogicalSessionIdSet& sessions) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
for (auto& lsid : sessions) {
_sessions.erase(lsid);
}
@@ -103,7 +103,7 @@ Status MockSessionsCollectionImpl::_removeRecords(const LogicalSessionIdSet& ses
StatusWith<LogicalSessionIdSet> MockSessionsCollectionImpl::findRemovedSessions(
OperationContext* opCtx, const LogicalSessionIdSet& sessions) {
LogicalSessionIdSet lsids;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
for (auto& lsid : sessions) {
if (_sessions.find(lsid) == _sessions.end()) {
lsids.emplace(lsid);
diff --git a/src/mongo/db/sessions_collection_mock.h b/src/mongo/db/sessions_collection_mock.h
index 4f0f3e408c4..e9137c787db 100644
--- a/src/mongo/db/sessions_collection_mock.h
+++ b/src/mongo/db/sessions_collection_mock.h
@@ -31,8 +31,8 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/sessions_collection.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
@@ -88,7 +88,7 @@ private:
Status _refreshSessions(const LogicalSessionRecordSet& sessions);
Status _removeRecords(const LogicalSessionIdSet& sessions);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MockSessionsCollectionImpl::_mutex");
SessionMap _sessions;
RefreshHook _refresh;
diff --git a/src/mongo/db/sessions_collection_rs.h b/src/mongo/db/sessions_collection_rs.h
index fbd8a4f74a2..bb4452116d6 100644
--- a/src/mongo/db/sessions_collection_rs.h
+++ b/src/mongo/db/sessions_collection_rs.h
@@ -36,6 +36,7 @@
#include "mongo/client/remote_command_targeter.h"
#include "mongo/db/logical_session_id.h"
#include "mongo/db/sessions_collection.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
diff --git a/src/mongo/db/snapshot_window_util.cpp b/src/mongo/db/snapshot_window_util.cpp
index 75d59c4abbd..0cf9a0d9022 100644
--- a/src/mongo/db/snapshot_window_util.cpp
+++ b/src/mongo/db/snapshot_window_util.cpp
@@ -38,7 +38,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/snapshot_window_options.h"
#include "mongo/db/storage/storage_engine.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
@@ -55,7 +55,7 @@ namespace SnapshotWindowUtil {
// another, since they act on and modify the same storage parameters. Further guards the static
// variables "_snapshotWindowLastDecreasedAt" and "_snapshotWindowLastIncreasedAt" used in
// increaseTargetSnapshotWindowSize() and decreaseSnapshowWindow().
-stdx::mutex snapshotWindowMutex;
+Mutex snapshotWindowMutex;
namespace {
@@ -92,7 +92,7 @@ void increaseTargetSnapshotWindowSize(OperationContext* opCtx) {
return;
}
- stdx::unique_lock<stdx::mutex> lock(snapshotWindowMutex);
+ stdx::unique_lock<Latch> lock(snapshotWindowMutex);
// Tracks the last time that the snapshot window was increased so that it does not go up so fast
// that the storage engine does not have time to improve snapshot availability.
@@ -150,7 +150,7 @@ void decreaseTargetSnapshotWindowSize(OperationContext* opCtx) {
return;
}
- stdx::unique_lock<stdx::mutex> lock(snapshotWindowMutex);
+ stdx::unique_lock<Latch> lock(snapshotWindowMutex);
StorageEngine* engine = opCtx->getServiceContext()->getStorageEngine();
if (engine && engine->isCacheUnderPressure(opCtx)) {
diff --git a/src/mongo/db/stats/server_write_concern_metrics.cpp b/src/mongo/db/stats/server_write_concern_metrics.cpp
index c36431ca3f3..bfc14025d73 100644
--- a/src/mongo/db/stats/server_write_concern_metrics.cpp
+++ b/src/mongo/db/stats/server_write_concern_metrics.cpp
@@ -58,7 +58,7 @@ void ServerWriteConcernMetrics::recordWriteConcernForInserts(
return;
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_insertMetrics.recordWriteConcern(writeConcernOptions, numInserts);
}
@@ -68,7 +68,7 @@ void ServerWriteConcernMetrics::recordWriteConcernForUpdate(
return;
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_updateMetrics.recordWriteConcern(writeConcernOptions);
}
@@ -78,7 +78,7 @@ void ServerWriteConcernMetrics::recordWriteConcernForDelete(
return;
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_deleteMetrics.recordWriteConcern(writeConcernOptions);
}
@@ -87,7 +87,7 @@ BSONObj ServerWriteConcernMetrics::toBSON() const {
return BSONObj();
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
BSONObjBuilder builder;
diff --git a/src/mongo/db/stats/server_write_concern_metrics.h b/src/mongo/db/stats/server_write_concern_metrics.h
index e6faaaf1641..32fa98d4042 100644
--- a/src/mongo/db/stats/server_write_concern_metrics.h
+++ b/src/mongo/db/stats/server_write_concern_metrics.h
@@ -96,7 +96,7 @@ private:
StringMap<std::uint64_t> wTagCounts;
};
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ServerWriteConcernMetrics::_mutex");
WriteConcernMetricsForOperationType _insertMetrics;
WriteConcernMetricsForOperationType _updateMetrics;
WriteConcernMetricsForOperationType _deleteMetrics;
diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine.cpp b/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
index 5419a88535b..c4bd6edc05f 100644
--- a/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
+++ b/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
@@ -92,7 +92,7 @@ std::unique_ptr<mongo::RecordStore> KVEngine::getRecordStore(OperationContext* o
}
bool KVEngine::trySwapMaster(StringStore& newMaster, uint64_t version) {
- stdx::lock_guard<stdx::mutex> lock(_masterLock);
+ stdx::lock_guard<Latch> lock(_masterLock);
invariant(!newMaster.hasBranch() && !_master.hasBranch());
if (_masterVersion != version)
return false;
diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine.h b/src/mongo/db/storage/biggie/biggie_kv_engine.h
index 5d0d3c1be46..f3d4f19c811 100644
--- a/src/mongo/db/storage/biggie/biggie_kv_engine.h
+++ b/src/mongo/db/storage/biggie/biggie_kv_engine.h
@@ -155,7 +155,7 @@ public:
* Returns a pair of the current version and copy of tree of the master.
*/
std::pair<uint64_t, StringStore> getMasterInfo() {
- stdx::lock_guard<stdx::mutex> lock(_masterLock);
+ stdx::lock_guard<Latch> lock(_masterLock);
return std::make_pair(_masterVersion, _master);
}
@@ -171,7 +171,7 @@ private:
std::map<std::string, bool> _idents; // TODO : replace with a query to _master.
std::unique_ptr<VisibilityManager> _visibilityManager;
- mutable stdx::mutex _masterLock;
+ mutable Mutex _masterLock = MONGO_MAKE_LATCH("KVEngine::_masterLock");
StringStore _master;
uint64_t _masterVersion = 0;
};
diff --git a/src/mongo/db/storage/biggie/biggie_record_store.cpp b/src/mongo/db/storage/biggie/biggie_record_store.cpp
index 8cd4ae9d893..6db2596ff40 100644
--- a/src/mongo/db/storage/biggie/biggie_record_store.cpp
+++ b/src/mongo/db/storage/biggie/biggie_record_store.cpp
@@ -120,7 +120,7 @@ bool RecordStore::isCapped() const {
}
void RecordStore::setCappedCallback(CappedCallback* cb) {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
_cappedCallback = cb;
}
@@ -311,7 +311,7 @@ void RecordStore::cappedTruncateAfter(OperationContext* opCtx, RecordId end, boo
auto endIt = workingCopy->upper_bound(_postfix);
while (recordIt != endIt) {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
if (_cappedCallback) {
// Documents are guaranteed to have a RecordId at the end of the KeyString, unlike
// unique indexes.
@@ -404,11 +404,11 @@ void RecordStore::_cappedDeleteAsNeeded(OperationContext* opCtx, StringStore* wo
auto recordIt = workingCopy->lower_bound(_prefix);
// Ensure only one thread at a time can do deletes, otherwise they'll conflict.
- stdx::lock_guard<stdx::mutex> cappedDeleterLock(_cappedDeleterMutex);
+ stdx::lock_guard<Latch> cappedDeleterLock(_cappedDeleterMutex);
while (_cappedAndNeedDelete(opCtx, workingCopy)) {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
RecordId rid = RecordId(extractRecordId(recordIt->first));
if (_isOplog && _visibilityManager->isFirstHidden(rid)) {
diff --git a/src/mongo/db/storage/biggie/biggie_record_store.h b/src/mongo/db/storage/biggie/biggie_record_store.h
index 5f62b5ab5e8..a52c42b8eac 100644
--- a/src/mongo/db/storage/biggie/biggie_record_store.h
+++ b/src/mongo/db/storage/biggie/biggie_record_store.h
@@ -38,7 +38,7 @@
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace biggie {
@@ -144,10 +144,11 @@ private:
std::string _prefix;
std::string _postfix;
- mutable stdx::mutex _cappedCallbackMutex; // Guards _cappedCallback
+ mutable Mutex _cappedCallbackMutex =
+ MONGO_MAKE_LATCH("RecordStore::_cappedCallbackMutex"); // Guards _cappedCallback
CappedCallback* _cappedCallback;
- mutable stdx::mutex _cappedDeleterMutex;
+ mutable Mutex _cappedDeleterMutex = MONGO_MAKE_LATCH("RecordStore::_cappedDeleterMutex");
AtomicWord<long long> _highestRecordId{1};
AtomicWord<long long> _numRecords{0};
diff --git a/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp b/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp
index e638638aa4d..5b356256f51 100644
--- a/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp
+++ b/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp
@@ -56,7 +56,7 @@ public:
virtual void rollback() {
_visibilityManager->dealtWithRecord(_rid);
- stdx::lock_guard<stdx::mutex> lk(_rs->_cappedCallbackMutex);
+ stdx::lock_guard<Latch> lk(_rs->_cappedCallbackMutex);
if (_rs->_cappedCallback)
_rs->_cappedCallback->notifyCappedWaitersIfNeeded();
}
@@ -68,7 +68,7 @@ private:
};
void VisibilityManager::dealtWithRecord(RecordId rid) {
- stdx::lock_guard<stdx::mutex> lock(_stateLock);
+ stdx::lock_guard<Latch> lock(_stateLock);
_uncommittedRecords.erase(rid);
_opsBecameVisibleCV.notify_all();
}
@@ -76,7 +76,7 @@ void VisibilityManager::dealtWithRecord(RecordId rid) {
void VisibilityManager::addUncommittedRecord(OperationContext* opCtx,
RecordStore* rs,
RecordId rid) {
- stdx::lock_guard<stdx::mutex> lock(_stateLock);
+ stdx::lock_guard<Latch> lock(_stateLock);
_uncommittedRecords.insert(rid);
opCtx->recoveryUnit()->registerChange(new VisibilityManagerChange(this, rs, rid));
@@ -85,13 +85,13 @@ void VisibilityManager::addUncommittedRecord(OperationContext* opCtx,
}
RecordId VisibilityManager::getAllCommittedRecord() {
- stdx::lock_guard<stdx::mutex> lock(_stateLock);
+ stdx::lock_guard<Latch> lock(_stateLock);
return _uncommittedRecords.empty() ? _highestSeen
: RecordId(_uncommittedRecords.begin()->repr() - 1);
}
bool VisibilityManager::isFirstHidden(RecordId rid) {
- stdx::lock_guard<stdx::mutex> lock(_stateLock);
+ stdx::lock_guard<Latch> lock(_stateLock);
if (_uncommittedRecords.empty())
return false;
return *_uncommittedRecords.begin() == rid;
@@ -100,7 +100,7 @@ bool VisibilityManager::isFirstHidden(RecordId rid) {
void VisibilityManager::waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) {
invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork());
- stdx::unique_lock<stdx::mutex> lock(_stateLock);
+ stdx::unique_lock<Latch> lock(_stateLock);
const RecordId waitFor = _highestSeen;
opCtx->waitForConditionOrInterrupt(_opsBecameVisibleCV, lock, [&] {
return _uncommittedRecords.empty() || *_uncommittedRecords.begin() > waitFor;
diff --git a/src/mongo/db/storage/biggie/biggie_visibility_manager.h b/src/mongo/db/storage/biggie/biggie_visibility_manager.h
index 387b7edc0d0..e0d798aa13b 100644
--- a/src/mongo/db/storage/biggie/biggie_visibility_manager.h
+++ b/src/mongo/db/storage/biggie/biggie_visibility_manager.h
@@ -76,7 +76,8 @@ public:
void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx);
private:
- mutable stdx::mutex _stateLock; // Protects the values below.
+ mutable Mutex _stateLock =
+ MONGO_MAKE_LATCH("VisibilityManager::_stateLock"); // Protects the values below.
RecordId _highestSeen = RecordId();
// Used to wait for all earlier oplog writes to be visible.
diff --git a/src/mongo/db/storage/durable_catalog_impl.cpp b/src/mongo/db/storage/durable_catalog_impl.cpp
index 69e263b7e9d..8e3d75c6640 100644
--- a/src/mongo/db/storage/durable_catalog_impl.cpp
+++ b/src/mongo/db/storage/durable_catalog_impl.cpp
@@ -149,7 +149,7 @@ public:
virtual void commit(boost::optional<Timestamp>) {}
virtual void rollback() {
- stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock);
+ stdx::lock_guard<Latch> lk(_catalog->_identsLock);
_catalog->_idents.erase(_ident);
}
@@ -164,7 +164,7 @@ public:
virtual void commit(boost::optional<Timestamp>) {}
virtual void rollback() {
- stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock);
+ stdx::lock_guard<Latch> lk(_catalog->_identsLock);
_catalog->_idents[_ident] = _entry;
}
@@ -469,7 +469,7 @@ void DurableCatalogImpl::init(OperationContext* opCtx) {
}
std::vector<NamespaceString> DurableCatalogImpl::getAllCollections() const {
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
std::vector<NamespaceString> result;
for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) {
result.push_back(NamespaceString(it->first));
@@ -485,7 +485,7 @@ Status DurableCatalogImpl::_addEntry(OperationContext* opCtx,
const string ident = _newUniqueIdent(nss, "collection");
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
Entry& old = _idents[nss.toString()];
if (!old.ident.empty()) {
return Status(ErrorCodes::NamespaceExists, "collection already exists");
@@ -515,7 +515,7 @@ Status DurableCatalogImpl::_addEntry(OperationContext* opCtx,
}
std::string DurableCatalogImpl::getCollectionIdent(const NamespaceString& nss) const {
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
NSToIdentMap::const_iterator it = _idents.find(nss.toString());
invariant(it != _idents.end());
return it->second.ident;
@@ -534,7 +534,7 @@ BSONObj DurableCatalogImpl::_findEntry(OperationContext* opCtx,
RecordId* out) const {
RecordId dl;
{
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
NSToIdentMap::const_iterator it = _idents.find(nss.toString());
invariant(it != _idents.end(), str::stream() << "Did not find collection. Ns: " << nss);
dl = it->second.storedLoc;
@@ -632,7 +632,7 @@ Status DurableCatalogImpl::_replaceEntry(OperationContext* opCtx,
fassert(28522, status.isOK());
}
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
const NSToIdentMap::iterator fromIt = _idents.find(fromNss.toString());
invariant(fromIt != _idents.end());
@@ -648,7 +648,7 @@ Status DurableCatalogImpl::_replaceEntry(OperationContext* opCtx,
Status DurableCatalogImpl::_removeEntry(OperationContext* opCtx, const NamespaceString& nss) {
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
const NSToIdentMap::iterator it = _idents.find(nss.toString());
if (it == _idents.end()) {
return Status(ErrorCodes::NamespaceNotFound, "collection not found");
@@ -667,7 +667,7 @@ std::vector<std::string> DurableCatalogImpl::getAllIdentsForDB(StringData db) co
std::vector<std::string> v;
{
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) {
NamespaceString ns(it->first);
if (ns.db() != db)
@@ -735,7 +735,7 @@ StatusWith<std::string> DurableCatalogImpl::newOrphanedIdent(OperationContext* o
NamespaceString::kOrphanCollectionPrefix + identNs)
.ns();
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
Entry& old = _idents[ns];
if (!old.ident.empty()) {
return Status(ErrorCodes::NamespaceExists,
diff --git a/src/mongo/db/storage/durable_catalog_impl.h b/src/mongo/db/storage/durable_catalog_impl.h
index a00680456bc..5b00df9f1e0 100644
--- a/src/mongo/db/storage/durable_catalog_impl.h
+++ b/src/mongo/db/storage/durable_catalog_impl.h
@@ -40,7 +40,7 @@
#include "mongo/db/storage/bson_collection_catalog_entry.h"
#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/kv/kv_prefix.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -272,7 +272,7 @@ private:
};
typedef std::map<std::string, Entry> NSToIdentMap;
NSToIdentMap _idents;
- mutable stdx::mutex _identsLock;
+ mutable Mutex _identsLock = MONGO_MAKE_LATCH("DurableCatalogImpl::_identsLock");
// Manages the feature document that may be present in the DurableCatalogImpl. '_featureTracker'
// is guaranteed to be non-null after DurableCatalogImpl::init() is called.
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
index 6e921284506..6eed77b808e 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
@@ -44,7 +44,7 @@ namespace mongo {
RecoveryUnit* EphemeralForTestEngine::newRecoveryUnit() {
return new EphemeralForTestRecoveryUnit([this]() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
JournalListener::Token token = _journalListener->getToken();
_journalListener->onDurable(token);
});
@@ -56,14 +56,14 @@ Status EphemeralForTestEngine::createRecordStore(OperationContext* opCtx,
const CollectionOptions& options) {
// Register the ident in the `_dataMap` (for `getAllIdents`). Remainder of work done in
// `getRecordStore`.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dataMap[ident] = {};
return Status::OK();
}
std::unique_ptr<RecordStore> EphemeralForTestEngine::getRecordStore(
OperationContext* opCtx, StringData ns, StringData ident, const CollectionOptions& options) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (options.capped) {
return stdx::make_unique<EphemeralForTestRecordStore>(
ns,
@@ -78,7 +78,7 @@ std::unique_ptr<RecordStore> EphemeralForTestEngine::getRecordStore(
std::unique_ptr<RecordStore> EphemeralForTestEngine::makeTemporaryRecordStore(
OperationContext* opCtx, StringData ident) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dataMap[ident] = {};
return stdx::make_unique<EphemeralForTestRecordStore>(ident, &_dataMap[ident]);
}
@@ -89,7 +89,7 @@ Status EphemeralForTestEngine::createSortedDataInterface(OperationContext* opCtx
const IndexDescriptor* desc) {
// Register the ident in `_dataMap` (for `getAllIdents`). Remainder of work done in
// `getSortedDataInterface`.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dataMap[ident] = {};
return Status::OK();
}
@@ -97,7 +97,7 @@ Status EphemeralForTestEngine::createSortedDataInterface(OperationContext* opCtx
SortedDataInterface* EphemeralForTestEngine::getSortedDataInterface(OperationContext* opCtx,
StringData ident,
const IndexDescriptor* desc) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return getEphemeralForTestBtreeImpl(Ordering::make(desc->keyPattern()),
desc->unique(),
desc->parentNS(),
@@ -107,7 +107,7 @@ SortedDataInterface* EphemeralForTestEngine::getSortedDataInterface(OperationCon
}
Status EphemeralForTestEngine::dropIdent(OperationContext* opCtx, StringData ident) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dataMap.erase(ident);
return Status::OK();
}
@@ -119,7 +119,7 @@ int64_t EphemeralForTestEngine::getIdentSize(OperationContext* opCtx, StringData
std::vector<std::string> EphemeralForTestEngine::getAllIdents(OperationContext* opCtx) const {
std::vector<std::string> all;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (DataMap::const_iterator it = _dataMap.begin(); it != _dataMap.end(); ++it) {
all.push_back(it->first);
}
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
index 04480585d29..f61f903ba98 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
@@ -33,7 +33,7 @@
#include "mongo/db/storage/journal_listener.h"
#include "mongo/db/storage/kv/kv_engine.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/string_map.h"
namespace mongo {
@@ -103,13 +103,12 @@ public:
virtual bool hasIdent(OperationContext* opCtx, StringData ident) const {
return _dataMap.find(ident) != _dataMap.end();
- ;
}
std::vector<std::string> getAllIdents(OperationContext* opCtx) const;
void setJournalListener(JournalListener* jl) final {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_journalListener = jl;
}
@@ -128,7 +127,7 @@ public:
private:
typedef StringMap<std::shared_ptr<void>> DataMap;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("EphemeralForTestEngine::_mutex");
DataMap _dataMap; // All actual data is owned in here
// Notified when we write as everything is considered "journalled" since repl depends on it.
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
index 87701b81eb1..f1947d5ab70 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
@@ -35,7 +35,7 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
diff --git a/src/mongo/db/storage/flow_control.cpp b/src/mongo/db/storage/flow_control.cpp
index cf3f7f1c7ab..52893fef0ce 100644
--- a/src/mongo/db/storage/flow_control.cpp
+++ b/src/mongo/db/storage/flow_control.cpp
@@ -172,7 +172,7 @@ double FlowControl::_getLocksPerOp() {
Sample backOne;
std::size_t numSamples;
{
- stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex);
+ stdx::lock_guard<Latch> lk(_sampledOpsMutex);
numSamples = _sampledOpsApplied.size();
if (numSamples >= 2) {
backTwo = _sampledOpsApplied[numSamples - 2];
@@ -406,7 +406,7 @@ std::int64_t FlowControl::_approximateOpsBetween(Timestamp prevTs, Timestamp cur
std::int64_t prevApplied = -1;
std::int64_t currApplied = -1;
- stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex);
+ stdx::lock_guard<Latch> lk(_sampledOpsMutex);
for (auto&& sample : _sampledOpsApplied) {
if (prevApplied == -1 && prevTs.asULL() <= std::get<0>(sample)) {
prevApplied = std::get<1>(sample);
@@ -434,7 +434,7 @@ void FlowControl::sample(Timestamp timestamp, std::uint64_t opsApplied) {
return;
}
- stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex);
+ stdx::lock_guard<Latch> lk(_sampledOpsMutex);
_numOpsSinceStartup += opsApplied;
if (_numOpsSinceStartup - _lastSample <
static_cast<std::size_t>(gFlowControlSamplePeriod.load())) {
@@ -476,7 +476,7 @@ void FlowControl::sample(Timestamp timestamp, std::uint64_t opsApplied) {
void FlowControl::_trimSamples(const Timestamp trimTo) {
int numTrimmed = 0;
- stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex);
+ stdx::lock_guard<Latch> lk(_sampledOpsMutex);
// Always leave at least two samples for calculating `locksPerOp`.
while (_sampledOpsApplied.size() > 2 &&
std::get<0>(_sampledOpsApplied.front()) < trimTo.asULL()) {
diff --git a/src/mongo/db/storage/flow_control.h b/src/mongo/db/storage/flow_control.h
index 64f0d0b1d00..17b465b9d21 100644
--- a/src/mongo/db/storage/flow_control.h
+++ b/src/mongo/db/storage/flow_control.h
@@ -37,7 +37,7 @@
#include "mongo/db/repl/replication_coordinator_fwd.h"
#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -125,7 +125,7 @@ private:
// Use an int64_t as this is serialized to bson which does not support unsigned 64-bit numbers.
AtomicWord<std::int64_t> _isLaggedTimeMicros{0};
- mutable stdx::mutex _sampledOpsMutex;
+ mutable Mutex _sampledOpsMutex = MONGO_MAKE_LATCH("FlowControl::_sampledOpsMutex");
std::deque<Sample> _sampledOpsApplied;
// These values are used in the sampling process.
diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
index ef5b441d989..44337fffc49 100644
--- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
+++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
@@ -46,7 +46,7 @@ KVDropPendingIdentReaper::KVDropPendingIdentReaper(KVEngine* engine) : _engine(e
void KVDropPendingIdentReaper::addDropPendingIdent(const Timestamp& dropTimestamp,
const NamespaceString& nss,
StringData ident) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
const auto equalRange = _dropPendingIdents.equal_range(dropTimestamp);
const auto& lowerBound = equalRange.first;
const auto& upperBound = equalRange.second;
@@ -65,7 +65,7 @@ void KVDropPendingIdentReaper::addDropPendingIdent(const Timestamp& dropTimestam
}
boost::optional<Timestamp> KVDropPendingIdentReaper::getEarliestDropTimestamp() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto it = _dropPendingIdents.cbegin();
if (it == _dropPendingIdents.cend()) {
return boost::none;
@@ -74,7 +74,7 @@ boost::optional<Timestamp> KVDropPendingIdentReaper::getEarliestDropTimestamp()
}
std::set<std::string> KVDropPendingIdentReaper::getAllIdents() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
std::set<std::string> idents;
for (const auto& entry : _dropPendingIdents) {
const auto& identInfo = entry.second;
@@ -87,7 +87,7 @@ std::set<std::string> KVDropPendingIdentReaper::getAllIdents() const {
void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, const Timestamp& ts) {
DropPendingIdents toDrop;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (auto it = _dropPendingIdents.cbegin();
it != _dropPendingIdents.cend() && it->first < ts;
++it) {
@@ -125,7 +125,7 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons
{
// Entries must be removed AFTER drops are completed, so that getEarliestDropTimestamp()
// returns appropriate results.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (const auto& timestampAndIdentInfo : toDrop) {
const auto& dropTimestamp = timestampAndIdentInfo.first;
// This may return zero if _dropPendingIdents was cleared using clearDropPendingState().
@@ -135,7 +135,7 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons
}
void KVDropPendingIdentReaper::clearDropPendingState() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_dropPendingIdents.clear();
}
diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h
index c249d9af0ba..75f13690a3d 100644
--- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h
+++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h
@@ -38,7 +38,7 @@
#include "mongo/bson/timestamp.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/storage/kv/kv_engine.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -113,7 +113,7 @@ private:
KVEngine* const _engine;
// Guards access to member variables below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("KVDropPendingIdentReaper::_mutex");
// Drop-pending idents. Ordered by drop timestamp.
DropPendingIdents _dropPendingIdents;
diff --git a/src/mongo/db/storage/kv/kv_prefix.cpp b/src/mongo/db/storage/kv/kv_prefix.cpp
index 6b88dc22c3b..1a54a82f6a1 100644
--- a/src/mongo/db/storage/kv/kv_prefix.cpp
+++ b/src/mongo/db/storage/kv/kv_prefix.cpp
@@ -31,7 +31,7 @@
namespace mongo {
int64_t KVPrefix::_nextValue = 0;
-stdx::mutex KVPrefix::_nextValueMutex;
+Mutex KVPrefix::_nextValueMutex = MONGO_MAKE_LATCH();
const KVPrefix KVPrefix::kNotPrefixed = KVPrefix(-1);
std::string KVPrefix::toString() const {
@@ -54,7 +54,7 @@ std::string KVPrefix::toString() const {
return;
}
- stdx::lock_guard<stdx::mutex> lk(_nextValueMutex);
+ stdx::lock_guard<Latch> lk(_nextValueMutex);
_nextValue = largestPrefix._value + 1;
}
@@ -67,7 +67,7 @@ std::string KVPrefix::toString() const {
}
/* static */ KVPrefix KVPrefix::generateNextPrefix() {
- stdx::lock_guard<stdx::mutex> lk(_nextValueMutex);
+ stdx::lock_guard<Latch> lk(_nextValueMutex);
return KVPrefix(_nextValue++);
}
} // namespace mongo
diff --git a/src/mongo/db/storage/kv/kv_prefix.h b/src/mongo/db/storage/kv/kv_prefix.h
index 6a785dc19db..45a1e891c0e 100644
--- a/src/mongo/db/storage/kv/kv_prefix.h
+++ b/src/mongo/db/storage/kv/kv_prefix.h
@@ -33,7 +33,7 @@
#include "mongo/bson/util/builder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/storage/storage_options.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -93,7 +93,7 @@ private:
explicit KVPrefix(int64_t value) : _value(value) {}
int64_t _value;
- static stdx::mutex _nextValueMutex;
+ static Mutex _nextValueMutex;
static int64_t _nextValue;
};
diff --git a/src/mongo/db/storage/kv/storage_engine_test.cpp b/src/mongo/db/storage/kv/storage_engine_test.cpp
index 25f4fd1d106..4e6efc0c636 100644
--- a/src/mongo/db/storage/kv/storage_engine_test.cpp
+++ b/src/mongo/db/storage/kv/storage_engine_test.cpp
@@ -430,13 +430,13 @@ TEST_F(TimestampKVEngineTest, TimestampListeners) {
}
TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
bool changes[4] = {false, false, false, false};
TimestampListener first(checkpoint, [&](Timestamp timestamp) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
+ stdx::lock_guard<Latch> lock(mutex);
if (!changes[0]) {
changes[0] = true;
cv.notify_all();
@@ -444,7 +444,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
});
TimestampListener second(oldest, [&](Timestamp timestamp) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
+ stdx::lock_guard<Latch> lock(mutex);
if (!changes[1]) {
changes[1] = true;
cv.notify_all();
@@ -452,7 +452,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
});
TimestampListener third(stable, [&](Timestamp timestamp) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
+ stdx::lock_guard<Latch> lock(mutex);
if (!changes[2]) {
changes[2] = true;
cv.notify_all();
@@ -460,7 +460,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
});
TimestampListener fourth(stable, [&](Timestamp timestamp) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
+ stdx::lock_guard<Latch> lock(mutex);
if (!changes[3]) {
changes[3] = true;
cv.notify_all();
@@ -473,7 +473,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
_storageEngine->getTimestampMonitor()->addListener(&fourth);
// Wait until all 4 listeners get notified at least once.
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] {
for (auto const& change : changes) {
if (!change) {
diff --git a/src/mongo/db/storage/mobile/mobile_kv_engine.h b/src/mongo/db/storage/mobile/mobile_kv_engine.h
index 9e8e0a53333..7efaa0f33b3 100644
--- a/src/mongo/db/storage/mobile/mobile_kv_engine.h
+++ b/src/mongo/db/storage/mobile/mobile_kv_engine.h
@@ -35,7 +35,7 @@
#include "mongo/db/storage/kv/kv_engine.h"
#include "mongo/db/storage/mobile/mobile_options.h"
#include "mongo/db/storage/mobile/mobile_session_pool.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
#include "mongo/util/string_map.h"
@@ -125,7 +125,7 @@ public:
std::vector<std::string> getAllIdents(OperationContext* opCtx) const override;
void setJournalListener(JournalListener* jl) override {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_journalListener = jl;
}
@@ -144,7 +144,7 @@ public:
private:
void maybeVacuum(Client* client, Date_t deadline);
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MobileKVEngine::_mutex");
void _initDBPath(const std::string& path);
std::int32_t _setSQLitePragma(const std::string& pragma, sqlite3* session);
diff --git a/src/mongo/db/storage/mobile/mobile_record_store.cpp b/src/mongo/db/storage/mobile/mobile_record_store.cpp
index a28d26dcb3d..67deb71f1aa 100644
--- a/src/mongo/db/storage/mobile/mobile_record_store.cpp
+++ b/src/mongo/db/storage/mobile/mobile_record_store.cpp
@@ -233,7 +233,7 @@ void MobileRecordStore::_initDataSizeIfNeeded_inlock(OperationContext* opCtx) co
}
long long MobileRecordStore::dataSize(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_dataSizeMutex);
+ stdx::lock_guard<Latch> lock(_dataSizeMutex);
_initDataSizeIfNeeded_inlock(opCtx);
return _dataSize;
}
@@ -255,7 +255,7 @@ void MobileRecordStore::_initNumRecsIfNeeded_inlock(OperationContext* opCtx) con
}
long long MobileRecordStore::numRecords(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_numRecsMutex);
+ stdx::lock_guard<Latch> lock(_numRecsMutex);
_initNumRecsIfNeeded_inlock(opCtx);
return _numRecs;
}
@@ -451,7 +451,7 @@ public:
void commit(boost::optional<Timestamp>) override {}
void rollback() override {
- stdx::lock_guard<stdx::mutex> lock(_rs->_numRecsMutex);
+ stdx::lock_guard<Latch> lock(_rs->_numRecsMutex);
_rs->_numRecs -= _diff;
}
@@ -461,7 +461,7 @@ private:
};
void MobileRecordStore::_changeNumRecs(OperationContext* opCtx, int64_t diff) {
- stdx::lock_guard<stdx::mutex> lock(_numRecsMutex);
+ stdx::lock_guard<Latch> lock(_numRecsMutex);
opCtx->recoveryUnit()->registerChange(new NumRecsChange(this, diff));
_initNumRecsIfNeeded_inlock(opCtx);
_numRecs += diff;
@@ -472,7 +472,7 @@ bool MobileRecordStore::_resetNumRecsIfNeeded(OperationContext* opCtx, int64_t n
int64_t currNumRecs = numRecords(opCtx);
if (currNumRecs != newNumRecs) {
wasReset = true;
- stdx::lock_guard<stdx::mutex> lock(_numRecsMutex);
+ stdx::lock_guard<Latch> lock(_numRecsMutex);
_numRecs = newNumRecs;
}
return wasReset;
@@ -488,7 +488,7 @@ public:
void commit(boost::optional<Timestamp>) override {}
void rollback() override {
- stdx::lock_guard<stdx::mutex> lock(_rs->_dataSizeMutex);
+ stdx::lock_guard<Latch> lock(_rs->_dataSizeMutex);
_rs->_dataSize -= _diff;
}
@@ -498,7 +498,7 @@ private:
};
void MobileRecordStore::_changeDataSize(OperationContext* opCtx, int64_t diff) {
- stdx::lock_guard<stdx::mutex> lock(_dataSizeMutex);
+ stdx::lock_guard<Latch> lock(_dataSizeMutex);
opCtx->recoveryUnit()->registerChange(new DataSizeChange(this, diff));
_initDataSizeIfNeeded_inlock(opCtx);
_dataSize += diff;
@@ -510,7 +510,7 @@ bool MobileRecordStore::_resetDataSizeIfNeeded(OperationContext* opCtx, int64_t
if (currDataSize != _dataSize) {
wasReset = true;
- stdx::lock_guard<stdx::mutex> lock(_dataSizeMutex);
+ stdx::lock_guard<Latch> lock(_dataSizeMutex);
_dataSize = newDataSize;
}
return wasReset;
diff --git a/src/mongo/db/storage/mobile/mobile_record_store.h b/src/mongo/db/storage/mobile/mobile_record_store.h
index ac2d7e4e229..6b7f23ca0da 100644
--- a/src/mongo/db/storage/mobile/mobile_record_store.h
+++ b/src/mongo/db/storage/mobile/mobile_record_store.h
@@ -179,7 +179,7 @@ private:
bool _resetNumRecsIfNeeded(OperationContext* opCtx, int64_t newNumRecs);
mutable int64_t _numRecs;
- mutable stdx::mutex _numRecsMutex;
+ mutable Mutex _numRecsMutex = MONGO_MAKE_LATCH("MobileRecordStore::_numRecsMutex");
mutable bool _isNumRecsInitialized = false;
/**
@@ -200,7 +200,7 @@ private:
bool _resetDataSizeIfNeeded(OperationContext* opCtx, int64_t newDataSize);
mutable int64_t _dataSize;
- mutable stdx::mutex _dataSizeMutex;
+ mutable Mutex _dataSizeMutex = MONGO_MAKE_LATCH("MobileRecordStore::_dataSizeMutex");
mutable bool _isDataSizeInitialized = false;
};
diff --git a/src/mongo/db/storage/mobile/mobile_session_pool.cpp b/src/mongo/db/storage/mobile/mobile_session_pool.cpp
index 1e73885d8b0..a37d1028698 100644
--- a/src/mongo/db/storage/mobile/mobile_session_pool.cpp
+++ b/src/mongo/db/storage/mobile/mobile_session_pool.cpp
@@ -43,7 +43,7 @@
#include "mongo/db/storage/mobile/mobile_session_pool.h"
#include "mongo/db/storage/mobile/mobile_sqlite_statement.h"
#include "mongo/db/storage/mobile/mobile_util.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -105,7 +105,7 @@ MobileSessionPool::~MobileSessionPool() {
}
std::unique_ptr<MobileSession> MobileSessionPool::getSession(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// We should never be able to get here after _shuttingDown is set, because no new operations
// should be allowed to start.
@@ -141,13 +141,13 @@ void MobileSessionPool::releaseSession(MobileSession* session) {
if (!failedDropsQueue.isEmpty())
failedDropsQueue.execAndDequeueOp(session);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_sessions.push_back(session->getSession());
_releasedSessionNotifier.notify_one();
}
void MobileSessionPool::shutDown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shuttingDown = true;
// Retrieve the operation context from the thread's client if the client exists.
diff --git a/src/mongo/db/storage/mobile/mobile_session_pool.h b/src/mongo/db/storage/mobile/mobile_session_pool.h
index 08586e0ece8..031953cdfb3 100644
--- a/src/mongo/db/storage/mobile/mobile_session_pool.h
+++ b/src/mongo/db/storage/mobile/mobile_session_pool.h
@@ -37,7 +37,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mobile/mobile_options.h"
#include "mongo/db/storage/mobile/mobile_session.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
class MobileSession;
@@ -58,7 +58,7 @@ public:
private:
AtomicWord<bool> _isEmpty;
- stdx::mutex _queueMutex;
+ Mutex _queueMutex = MONGO_MAKE_LATCH("MobileDelayedOpQueue::_queueMutex");
std::queue<std::string> _opQueryQueue;
};
@@ -107,7 +107,7 @@ private:
sqlite3* _popSession_inlock();
// This is used to lock the _sessions vector.
- stdx::mutex _mutex;
+    Mutex _mutex = MONGO_MAKE_LATCH("MobileSessionPool::_mutex");
stdx::condition_variable _releasedSessionNotifier;
std::string _path;
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index 8527910addf..aec7bc83e1f 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -864,7 +864,7 @@ StorageEngineImpl::TimestampMonitor::TimestampMonitor(KVEngine* engine, Periodic
StorageEngineImpl::TimestampMonitor::~TimestampMonitor() {
log() << "Timestamp monitor shutting down";
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
invariant(_listeners.empty());
}
@@ -876,7 +876,7 @@ void StorageEngineImpl::TimestampMonitor::startup() {
"TimestampMonitor",
[&](Client* client) {
{
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
if (_listeners.empty()) {
return;
}
@@ -943,7 +943,7 @@ void StorageEngineImpl::TimestampMonitor::startup() {
}
void StorageEngineImpl::TimestampMonitor::notifyAll(TimestampType type, Timestamp newTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
for (auto& listener : _listeners) {
if (listener->getType() == type) {
listener->notify(newTimestamp);
@@ -952,7 +952,7 @@ void StorageEngineImpl::TimestampMonitor::notifyAll(TimestampType type, Timestam
}
void StorageEngineImpl::TimestampMonitor::addListener(TimestampListener* listener) {
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
if (std::find(_listeners.begin(), _listeners.end(), listener) != _listeners.end()) {
bool listenerAlreadyRegistered = true;
invariant(!listenerAlreadyRegistered);
@@ -961,7 +961,7 @@ void StorageEngineImpl::TimestampMonitor::addListener(TimestampListener* listene
}
void StorageEngineImpl::TimestampMonitor::removeListener(TimestampListener* listener) {
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
if (std::find(_listeners.begin(), _listeners.end(), listener) == _listeners.end()) {
bool listenerNotRegistered = true;
invariant(!listenerNotRegistered);
diff --git a/src/mongo/db/storage/storage_engine_impl.h b/src/mongo/db/storage/storage_engine_impl.h
index be500784520..097c16ac2bc 100644
--- a/src/mongo/db/storage/storage_engine_impl.h
+++ b/src/mongo/db/storage/storage_engine_impl.h
@@ -44,9 +44,9 @@
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/storage/storage_engine_interface.h"
#include "mongo/db/storage/temporary_record_store.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/periodic_runner.h"
namespace mongo {
@@ -286,7 +286,7 @@ public:
MonitoredTimestamps _currentTimestamps;
// Protects access to _listeners below.
- stdx::mutex _monitorMutex;
+ Mutex _monitorMutex = MONGO_MAKE_LATCH("TimestampMonitor::_monitorMutex");
std::vector<TimestampListener*> _listeners;
// Periodic runner that the timestamp monitor schedules its job on.
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 1695bced3fc..a284f4a2eb5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -186,7 +186,7 @@ public:
while (!_shuttingDown.load()) {
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
MONGO_IDLE_THREAD_BLOCK;
// Check every 10 seconds or sooner in the debug builds
_condvar.wait_for(lock, stdx::chrono::seconds(kDebugBuild ? 1 : 10));
@@ -201,7 +201,7 @@ public:
void shutdown() {
_shuttingDown.store(true);
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
// Wake up the session sweeper thread early, we do not want the shutdown
// to wait for us too long.
_condvar.notify_one();
@@ -213,7 +213,7 @@ private:
WiredTigerSessionCache* _sessionCache;
AtomicWord<bool> _shuttingDown{false};
- stdx::mutex _mutex; // protects _condvar
+ Mutex _mutex = MONGO_MAKE_LATCH("WiredTigerSessionSweeper::_mutex"); // protects _condvar
// The session sweeper thread idles on this condition variable for a particular time duration
// between cleaning up expired sessions. It can be triggered early to expediate shutdown.
stdx::condition_variable _condvar;
@@ -291,7 +291,7 @@ public:
while (!_shuttingDown.load()) {
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
MONGO_IDLE_THREAD_BLOCK;
_condvar.wait_for(lock,
stdx::chrono::seconds(static_cast<std::int64_t>(
@@ -362,7 +362,7 @@ public:
if (oplogNeededForRollback.isOK()) {
// Now that the checkpoint is durable, publish the oplog needed to recover
// from it.
- stdx::lock_guard<stdx::mutex> lk(_oplogNeededForCrashRecoveryMutex);
+ stdx::lock_guard<Latch> lk(_oplogNeededForCrashRecoveryMutex);
_oplogNeededForCrashRecovery.store(
oplogNeededForRollback.getValue().asULL());
}
@@ -407,7 +407,7 @@ public:
_hasTriggeredFirstStableCheckpoint = true;
log() << "Triggering the first stable checkpoint. Initial Data: " << initialData
<< " PrevStable: " << prevStable << " CurrStable: " << currStable;
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condvar.notify_one();
}
}
@@ -421,14 +421,14 @@ public:
* _oplogNeededForCrashRecovery will not change during assignment.
*/
void assignOplogNeededForCrashRecoveryTo(boost::optional<Timestamp>* timestamp) {
- stdx::lock_guard<stdx::mutex> lk(_oplogNeededForCrashRecoveryMutex);
+ stdx::lock_guard<Latch> lk(_oplogNeededForCrashRecoveryMutex);
*timestamp = Timestamp(_oplogNeededForCrashRecovery.load());
}
void shutdown() {
_shuttingDown.store(true);
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
// Wake up the checkpoint thread early, to take a final checkpoint before shutting
// down, if one has not coincidentally just been taken.
_condvar.notify_one();
@@ -440,7 +440,7 @@ private:
WiredTigerKVEngine* _wiredTigerKVEngine;
WiredTigerSessionCache* _sessionCache;
-    stdx::mutex _mutex;  // protects _condvar
+    Mutex _mutex = MONGO_MAKE_LATCH("WiredTigerCheckpointThread::_mutex");  // protects _condvar
// The checkpoint thread idles on this condition variable for a particular time duration between
// taking checkpoints. It can be triggered early to expediate immediate checkpointing.
stdx::condition_variable _condvar;
@@ -449,7 +450,8 @@ private:
bool _hasTriggeredFirstStableCheckpoint = false;
- stdx::mutex _oplogNeededForCrashRecoveryMutex;
+ Mutex _oplogNeededForCrashRecoveryMutex =
+ MONGO_MAKE_LATCH("WiredTigerCheckpointThread::_oplogNeededForCrashRecoveryMutex");
AtomicWord<std::uint64_t> _oplogNeededForCrashRecovery;
};
@@ -1025,7 +1027,7 @@ StatusWith<std::vector<std::string>> WiredTigerKVEngine::beginNonBlockingBackup(
uassert(51034, "Cannot open backup cursor with in-memory mode.", !isEphemeral());
// Oplog truncation thread won't remove oplog since the checkpoint pinned by the backup cursor.
- stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex);
+ stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex);
_checkpointThread->assignOplogNeededForCrashRecoveryTo(&_oplogPinnedByBackup);
auto pinOplogGuard = makeGuard([&] { _oplogPinnedByBackup = boost::none; });
@@ -1060,7 +1062,7 @@ StatusWith<std::vector<std::string>> WiredTigerKVEngine::beginNonBlockingBackup(
void WiredTigerKVEngine::endNonBlockingBackup(OperationContext* opCtx) {
_backupSession.reset();
// Oplog truncation thread can now remove the pinned oplog.
- stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex);
+ stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex);
_oplogPinnedByBackup = boost::none;
_backupCursor = nullptr;
}
@@ -1101,7 +1103,7 @@ void WiredTigerKVEngine::syncSizeInfo(bool sync) const {
void WiredTigerKVEngine::setOldestActiveTransactionTimestampCallback(
StorageEngine::OldestActiveTransactionTimestampCallback callback) {
- stdx::lock_guard<stdx::mutex> lk(_oldestActiveTransactionTimestampCallbackMutex);
+ stdx::lock_guard<Latch> lk(_oldestActiveTransactionTimestampCallbackMutex);
_oldestActiveTransactionTimestampCallback = std::move(callback);
};
@@ -1396,7 +1398,7 @@ Status WiredTigerKVEngine::dropIdent(OperationContext* opCtx, StringData ident)
if (ret == EBUSY) {
// this is expected, queue it up
{
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
_identToDrop.push_front(uri);
}
_sessionCache->closeCursorsForQueuedDrops();
@@ -1415,7 +1417,7 @@ std::list<WiredTigerCachedCursor> WiredTigerKVEngine::filterCursorsWithQueuedDro
std::list<WiredTigerCachedCursor>* cache) {
std::list<WiredTigerCachedCursor> toDrop;
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
if (_identToDrop.empty())
return toDrop;
@@ -1449,7 +1451,7 @@ bool WiredTigerKVEngine::haveDropsQueued() const {
_previousCheckedDropsQueued = now;
// Don't wait for the mutex: if we can't get it, report that no drops are queued.
- stdx::unique_lock<stdx::mutex> lk(_identToDropMutex, stdx::defer_lock);
+ stdx::unique_lock<Latch> lk(_identToDropMutex, stdx::defer_lock);
return lk.try_lock() && !_identToDrop.empty();
}
@@ -1459,7 +1461,7 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() {
WiredTigerSession session(_conn);
{
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
numInQueue = _identToDrop.size();
}
@@ -1472,7 +1474,7 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() {
for (int i = 0; i < numToDelete; i++) {
string uri;
{
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
if (_identToDrop.empty())
break;
uri = _identToDrop.front();
@@ -1483,7 +1485,7 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() {
LOG(1) << "WT queued drop of " << uri << " res " << ret;
if (ret == EBUSY) {
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
_identToDrop.push_back(uri);
} else {
invariantWTOK(ret);
@@ -1875,7 +1877,7 @@ StatusWith<Timestamp> WiredTigerKVEngine::getOplogNeededForRollback() const {
auto stableTimestamp = _stableTimestamp.load();
// Only one thread can set or execute this callback.
- stdx::lock_guard<stdx::mutex> lk(_oldestActiveTransactionTimestampCallbackMutex);
+ stdx::lock_guard<Latch> lk(_oldestActiveTransactionTimestampCallbackMutex);
boost::optional<Timestamp> oldestActiveTransactionTimestamp;
if (_oldestActiveTransactionTimestampCallback) {
auto status = _oldestActiveTransactionTimestampCallback(Timestamp(stableTimestamp));
@@ -1908,7 +1910,7 @@ boost::optional<Timestamp> WiredTigerKVEngine::getOplogNeededForCrashRecovery()
Timestamp WiredTigerKVEngine::getPinnedOplog() const {
{
- stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex);
+ stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex);
if (!storageGlobalParams.allowOplogTruncation) {
// If oplog truncation is not allowed, then return the min timestamp so that no history
// is
@@ -1951,14 +1953,14 @@ bool WiredTigerKVEngine::supportsReadConcernMajority() const {
void WiredTigerKVEngine::startOplogManager(OperationContext* opCtx,
const std::string& uri,
WiredTigerRecordStore* oplogRecordStore) {
- stdx::lock_guard<stdx::mutex> lock(_oplogManagerMutex);
+ stdx::lock_guard<Latch> lock(_oplogManagerMutex);
if (_oplogManagerCount == 0)
_oplogManager->start(opCtx, uri, oplogRecordStore);
_oplogManagerCount++;
}
void WiredTigerKVEngine::haltOplogManager() {
- stdx::unique_lock<stdx::mutex> lock(_oplogManagerMutex);
+ stdx::unique_lock<Latch> lock(_oplogManagerMutex);
invariant(_oplogManagerCount > 0);
_oplogManagerCount--;
if (_oplogManagerCount == 0) {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index b80f0698a59..9dff80d35b4 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -43,8 +43,8 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/elapsed_tracker.h"
namespace mongo {
@@ -411,7 +411,8 @@ private:
std::uint64_t _getCheckpointTimestamp() const;
- mutable stdx::mutex _oldestActiveTransactionTimestampCallbackMutex;
+    mutable Mutex _oldestActiveTransactionTimestampCallbackMutex =
+        MONGO_MAKE_LATCH("WiredTigerKVEngine::_oldestActiveTransactionTimestampCallbackMutex");
StorageEngine::OldestActiveTransactionTimestampCallback
_oldestActiveTransactionTimestampCallback;
@@ -422,7 +423,7 @@ private:
ClockSource* const _clockSource;
// Mutex to protect use of _oplogManagerCount by this instance of KV engine.
- mutable stdx::mutex _oplogManagerMutex;
+    mutable Mutex _oplogManagerMutex = MONGO_MAKE_LATCH("WiredTigerKVEngine::_oplogManagerMutex");
std::size_t _oplogManagerCount = 0;
std::unique_ptr<WiredTigerOplogManager> _oplogManager;
@@ -453,15 +454,16 @@ private:
std::string _rsOptions;
std::string _indexOptions;
- mutable stdx::mutex _dropAllQueuesMutex;
- mutable stdx::mutex _identToDropMutex;
+ mutable Mutex _dropAllQueuesMutex = MONGO_MAKE_LATCH("WiredTigerKVEngine::_dropAllQueuesMutex");
+ mutable Mutex _identToDropMutex = MONGO_MAKE_LATCH("WiredTigerKVEngine::_identToDropMutex");
std::list<std::string> _identToDrop;
mutable Date_t _previousCheckedDropsQueued;
std::unique_ptr<WiredTigerSession> _backupSession;
WT_CURSOR* _backupCursor;
- mutable stdx::mutex _oplogPinnedByBackupMutex;
+ mutable Mutex _oplogPinnedByBackupMutex =
+ MONGO_MAKE_LATCH("WiredTigerKVEngine::_oplogPinnedByBackupMutex");
boost::optional<Timestamp> _oplogPinnedByBackup;
Timestamp _recoveryTimestamp;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
index 4403c93f296..788310ab273 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
@@ -36,7 +36,7 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/idle_thread_block.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -73,7 +73,7 @@ void WiredTigerOplogManager::start(OperationContext* opCtx,
// Need to obtain the mutex before starting the thread, as otherwise it may race ahead
// see _shuttingDown as true and quit prematurely.
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
_oplogJournalThread = stdx::thread(&WiredTigerOplogManager::_oplogJournalThreadLoop,
this,
WiredTigerRecoveryUnit::get(opCtx)->getSessionCache(),
@@ -85,7 +85,7 @@ void WiredTigerOplogManager::start(OperationContext* opCtx,
void WiredTigerOplogManager::halt() {
{
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
invariant(_isRunning);
_shuttingDown = true;
_isRunning = false;
@@ -119,7 +119,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible(
// Close transaction before we wait.
opCtx->recoveryUnit()->abandonSnapshot();
- stdx::unique_lock<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::unique_lock<Latch> lk(_oplogVisibilityStateMutex);
// Prevent any scheduled journal flushes from being delayed and blocking this wait excessively.
_opsWaitingForVisibility++;
@@ -147,7 +147,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible(
}
void WiredTigerOplogManager::triggerJournalFlush() {
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
if (!_opsWaitingForJournal) {
_opsWaitingForJournal = true;
_opsWaitingForJournalCV.notify_one();
@@ -162,7 +162,7 @@ void WiredTigerOplogManager::_oplogJournalThreadLoop(WiredTigerSessionCache* ses
// forward cursors. The timestamp is used to hide oplog entries that might be committed but
// have uncommitted entries ahead of them.
while (true) {
- stdx::unique_lock<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::unique_lock<Latch> lk(_oplogVisibilityStateMutex);
{
MONGO_IDLE_THREAD_BLOCK;
_opsWaitingForJournalCV.wait(lk,
@@ -239,7 +239,7 @@ std::uint64_t WiredTigerOplogManager::getOplogReadTimestamp() const {
}
void WiredTigerOplogManager::setOplogReadTimestamp(Timestamp ts) {
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
_setOplogReadTimestamp(lk, ts.asULL());
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h
index 9a82985fc28..d4ee1cbf83b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h
@@ -30,8 +30,8 @@
#pragma once
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -60,7 +60,7 @@ public:
void halt();
bool isRunning() {
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
return _isRunning && !_shuttingDown;
}
@@ -89,7 +89,8 @@ private:
void _setOplogReadTimestamp(WithLock, uint64_t newTimestamp);
stdx::thread _oplogJournalThread;
- mutable stdx::mutex _oplogVisibilityStateMutex;
+ mutable Mutex _oplogVisibilityStateMutex =
+ MONGO_MAKE_LATCH("WiredTigerOplogManager::_oplogVisibilityStateMutex");
mutable stdx::condition_variable
_opsWaitingForJournalCV; // Signaled to trigger a journal flush.
mutable stdx::condition_variable
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index d04a8edf30a..153e25d5e42 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -147,7 +147,7 @@ public:
_oplogStones->_currentRecords.store(0);
_oplogStones->_currentBytes.store(0);
- stdx::lock_guard<stdx::mutex> lk(_oplogStones->_mutex);
+ stdx::lock_guard<Latch> lk(_oplogStones->_mutex);
_oplogStones->_stones.clear();
}
@@ -159,7 +159,7 @@ private:
WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* opCtx, WiredTigerRecordStore* rs)
: _rs(rs) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(rs->isCapped());
invariant(rs->cappedMaxSize() > 0);
@@ -184,13 +184,13 @@ WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* opCtx, WiredTi
}
bool WiredTigerRecordStore::OplogStones::isDead() {
- stdx::lock_guard<stdx::mutex> lk(_oplogReclaimMutex);
+ stdx::lock_guard<Latch> lk(_oplogReclaimMutex);
return _isDead;
}
void WiredTigerRecordStore::OplogStones::kill() {
{
- stdx::lock_guard<stdx::mutex> lk(_oplogReclaimMutex);
+ stdx::lock_guard<Latch> lk(_oplogReclaimMutex);
_isDead = true;
}
_oplogReclaimCv.notify_one();
@@ -198,11 +198,11 @@ void WiredTigerRecordStore::OplogStones::kill() {
void WiredTigerRecordStore::OplogStones::awaitHasExcessStonesOrDead() {
// Wait until kill() is called or there are too many oplog stones.
- stdx::unique_lock<stdx::mutex> lock(_oplogReclaimMutex);
+ stdx::unique_lock<Latch> lock(_oplogReclaimMutex);
while (!_isDead) {
{
MONGO_IDLE_THREAD_BLOCK;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (hasExcessStones_inlock()) {
// There are now excess oplog stones. However, there it may be necessary to keep
// additional oplog.
@@ -225,7 +225,7 @@ void WiredTigerRecordStore::OplogStones::awaitHasExcessStonesOrDead() {
boost::optional<WiredTigerRecordStore::OplogStones::Stone>
WiredTigerRecordStore::OplogStones::peekOldestStoneIfNeeded() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!hasExcessStones_inlock()) {
return {};
@@ -235,12 +235,12 @@ WiredTigerRecordStore::OplogStones::peekOldestStoneIfNeeded() const {
}
void WiredTigerRecordStore::OplogStones::popOldestStone() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stones.pop_front();
}
void WiredTigerRecordStore::OplogStones::createNewStoneIfNeeded(RecordId lastRecord) {
- stdx::unique_lock<stdx::mutex> lk(_mutex, stdx::try_to_lock);
+ stdx::unique_lock<Latch> lk(_mutex, stdx::try_to_lock);
if (!lk) {
// Someone else is either already creating a new stone or popping the oldest one. In the
// latter case, we let the next insert trigger the new stone's creation.
@@ -281,7 +281,7 @@ void WiredTigerRecordStore::OplogStones::clearStonesOnCommit(OperationContext* o
void WiredTigerRecordStore::OplogStones::updateStonesAfterCappedTruncateAfter(
int64_t recordsRemoved, int64_t bytesRemoved, RecordId firstRemovedId) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
int64_t numStonesToRemove = 0;
int64_t recordsInStonesToRemove = 0;
@@ -311,7 +311,7 @@ void WiredTigerRecordStore::OplogStones::updateStonesAfterCappedTruncateAfter(
void WiredTigerRecordStore::OplogStones::setMinBytesPerStone(int64_t size) {
invariant(size > 0);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Only allow changing the minimum bytes per stone if no data has been inserted.
invariant(_stones.size() == 0 && _currentRecords.load() == 0);
@@ -472,7 +472,7 @@ void WiredTigerRecordStore::OplogStones::_pokeReclaimThreadIfNeeded() {
}
void WiredTigerRecordStore::OplogStones::adjust(int64_t maxSize) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const unsigned int oplogStoneSize =
std::max(gOplogStoneSizeMB * 1024 * 1024, BSONObjMaxInternalSize);
@@ -727,7 +727,7 @@ WiredTigerRecordStore::WiredTigerRecordStore(WiredTigerKVEngine* kvEngine,
WiredTigerRecordStore::~WiredTigerRecordStore() {
{
- stdx::lock_guard<stdx::mutex> lk(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> lk(_cappedCallbackMutex);
_shuttingDown = true;
}
@@ -798,7 +798,7 @@ const char* WiredTigerRecordStore::name() const {
}
bool WiredTigerRecordStore::inShutdown() const {
- stdx::lock_guard<stdx::mutex> lk(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> lk(_cappedCallbackMutex);
return _shuttingDown;
}
@@ -1075,7 +1075,7 @@ int64_t WiredTigerRecordStore::_cappedDeleteAsNeeded_inlock(OperationContext* op
++docsRemoved;
sizeSaved += old_value.size;
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
if (_shuttingDown)
break;
@@ -1349,12 +1349,12 @@ bool WiredTigerRecordStore::isOpHidden_forTest(const RecordId& id) const {
}
bool WiredTigerRecordStore::haveCappedWaiters() {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
return _cappedCallback && _cappedCallback->haveCappedWaiters();
}
void WiredTigerRecordStore::notifyCappedWaitersIfNeeded() {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
// This wakes up cursors blocking in await_data.
if (_cappedCallback) {
_cappedCallback->notifyCappedWaitersIfNeeded();
@@ -1710,7 +1710,7 @@ void WiredTigerRecordStore::_initNextIdIfNeeded(OperationContext* opCtx) {
}
// Only one thread needs to do this.
- stdx::lock_guard<stdx::mutex> lk(_initNextIdMutex);
+ stdx::lock_guard<Latch> lk(_initNextIdMutex);
if (_nextIdNum.load() > 0) {
return;
}
@@ -1831,7 +1831,7 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* opCtx,
// Compute the number and associated sizes of the records to delete.
{
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
do {
if (_cappedCallback) {
uassertStatusOK(
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 9737bd700a3..c7ddb0c24ae 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -43,8 +43,8 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/fail_point_service.h"
@@ -220,7 +220,7 @@ public:
Status updateCappedSize(OperationContext* opCtx, long long cappedSize) final;
void setCappedCallback(CappedCallback* cb) {
- stdx::lock_guard<stdx::mutex> lk(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> lk(_cappedCallbackMutex);
_cappedCallback = cb;
}
@@ -366,16 +366,19 @@ private:
RecordId _cappedFirstRecord;
AtomicWord<long long> _cappedSleep;
AtomicWord<long long> _cappedSleepMS;
+
+ // guards _cappedCallback and _shuttingDown
+ mutable Mutex _cappedCallbackMutex =
+ MONGO_MAKE_LATCH("WiredTigerRecordStore::_cappedCallbackMutex");
CappedCallback* _cappedCallback;
bool _shuttingDown;
- mutable stdx::mutex _cappedCallbackMutex; // guards _cappedCallback and _shuttingDown
// See comment in ::cappedDeleteAsNeeded
int _cappedDeleteCheckCount;
mutable stdx::timed_mutex _cappedDeleterMutex;
// Protects initialization of the _nextIdNum.
- mutable stdx::mutex _initNextIdMutex;
+ mutable Mutex _initNextIdMutex = MONGO_MAKE_LATCH("WiredTigerRecordStore::_initNextIdMutex");
AtomicWord<long long> _nextIdNum{0};
WiredTigerSizeStorer* _sizeStorer; // not owned, can be NULL
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
index cd6207313c9..1c47b70f40f 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
@@ -56,7 +56,7 @@ namespace mongo {
namespace {
std::set<NamespaceString> _backgroundThreadNamespaces;
-stdx::mutex _backgroundThreadMutex;
+Mutex _backgroundThreadMutex;
class OplogTruncaterThread : public BackgroundJob {
public:
@@ -153,7 +153,7 @@ bool initRsOplogBackgroundThread(StringData ns) {
return false;
}
- stdx::lock_guard<stdx::mutex> lock(_backgroundThreadMutex);
+ stdx::lock_guard<Latch> lock(_backgroundThreadMutex);
NamespaceString nss(ns);
if (_backgroundThreadNamespaces.count(nss)) {
log() << "OplogTruncaterThread " << ns << " already started";
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
index 99697caac08..0dcb844f1d0 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
@@ -33,8 +33,8 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -104,7 +104,7 @@ public:
//
size_t numStones() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _stones.size();
}
@@ -134,7 +134,7 @@ private:
WiredTigerRecordStore* _rs;
- stdx::mutex _oplogReclaimMutex;
+ Mutex _oplogReclaimMutex;
stdx::condition_variable _oplogReclaimCv;
// True if '_rs' has been destroyed, e.g. due to repairDatabase being called on the "local"
@@ -151,7 +151,8 @@ private:
// oplog during start up, if any.
AtomicWord<bool> _processBySampling; // Whether the oplog was sampled or scanned.
- mutable stdx::mutex _mutex; // Protects against concurrent access to the deque of oplog stones.
+ // Protects against concurrent access to the deque of oplog stones.
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("OplogStones::_mutex");
std::deque<OplogStones::Stone> _stones; // front = oldest, back = newest.
};
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
index 1fee696bc79..b86748b09f1 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
@@ -256,7 +256,7 @@ void WiredTigerSessionCache::waitUntilDurable(bool forceCheckpoint, bool stableC
UniqueWiredTigerSession session = getSession();
WT_SESSION* s = session->getSession();
{
- stdx::unique_lock<stdx::mutex> lk(_journalListenerMutex);
+ stdx::unique_lock<Latch> lk(_journalListenerMutex);
JournalListener::Token token = _journalListener->getToken();
auto config = stableCheckpoint ? "use_timestamp=true" : "use_timestamp=false";
invariantWTOK(s->checkpoint(s, config));
@@ -269,7 +269,7 @@ void WiredTigerSessionCache::waitUntilDurable(bool forceCheckpoint, bool stableC
uint32_t start = _lastSyncTime.load();
// Do the remainder in a critical section that ensures only a single thread at a time
// will attempt to synchronize.
- stdx::unique_lock<stdx::mutex> lk(_lastSyncMutex);
+ stdx::unique_lock<Latch> lk(_lastSyncMutex);
uint32_t current = _lastSyncTime.loadRelaxed(); // synchronized with writes through mutex
if (current != start) {
// Someone else synced already since we read lastSyncTime, so we're done!
@@ -281,7 +281,7 @@ void WiredTigerSessionCache::waitUntilDurable(bool forceCheckpoint, bool stableC
// This gets the token (OpTime) from the last write, before flushing (either the journal, or a
// checkpoint), and then reports that token (OpTime) as a durable write.
- stdx::unique_lock<stdx::mutex> jlk(_journalListenerMutex);
+ stdx::unique_lock<Latch> jlk(_journalListenerMutex);
JournalListener::Token token = _journalListener->getToken();
// Initialize on first use.
@@ -304,7 +304,7 @@ void WiredTigerSessionCache::waitUntilDurable(bool forceCheckpoint, bool stableC
void WiredTigerSessionCache::waitUntilPreparedUnitOfWorkCommitsOrAborts(OperationContext* opCtx,
std::uint64_t lastCount) {
invariant(opCtx);
- stdx::unique_lock<stdx::mutex> lk(_prepareCommittedOrAbortedMutex);
+ stdx::unique_lock<Latch> lk(_prepareCommittedOrAbortedMutex);
if (lastCount == _prepareCommitOrAbortCounter.loadRelaxed()) {
opCtx->waitForConditionOrInterrupt(_prepareCommittedOrAbortedCond, lk, [&] {
return _prepareCommitOrAbortCounter.loadRelaxed() > lastCount;
@@ -313,14 +313,14 @@ void WiredTigerSessionCache::waitUntilPreparedUnitOfWorkCommitsOrAborts(Operatio
}
void WiredTigerSessionCache::notifyPreparedUnitOfWorkHasCommittedOrAborted() {
- stdx::unique_lock<stdx::mutex> lk(_prepareCommittedOrAbortedMutex);
+ stdx::unique_lock<Latch> lk(_prepareCommittedOrAbortedMutex);
_prepareCommitOrAbortCounter.fetchAndAdd(1);
_prepareCommittedOrAbortedCond.notify_all();
}
void WiredTigerSessionCache::closeAllCursors(const std::string& uri) {
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
for (SessionCache::iterator i = _sessions.begin(); i != _sessions.end(); i++) {
(*i)->closeAllCursors(uri);
}
@@ -330,14 +330,14 @@ void WiredTigerSessionCache::closeCursorsForQueuedDrops() {
// Increment the cursor epoch so that all cursors from this epoch are closed.
_cursorEpoch.fetchAndAdd(1);
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
for (SessionCache::iterator i = _sessions.begin(); i != _sessions.end(); i++) {
(*i)->closeCursorsForQueuedDrops(_engine);
}
}
size_t WiredTigerSessionCache::getIdleSessionsCount() {
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
return _sessions.size();
}
@@ -349,7 +349,7 @@ void WiredTigerSessionCache::closeExpiredIdleSessions(int64_t idleTimeMillis) {
auto cutoffTime = _clockSource->now() - Milliseconds(idleTimeMillis);
{
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
// Discard all sessions that became idle before the cutoff time
for (auto it = _sessions.begin(); it != _sessions.end();) {
auto session = *it;
@@ -369,7 +369,7 @@ void WiredTigerSessionCache::closeAll() {
SessionCache swap;
{
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
_epoch.fetchAndAdd(1);
_sessions.swap(swap);
}
@@ -389,7 +389,7 @@ UniqueWiredTigerSession WiredTigerSessionCache::getSession() {
invariant(!(_shuttingDown.loadRelaxed() & kShuttingDownMask));
{
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
if (!_sessions.empty()) {
// Get the most recently used session so that if we discard sessions, we're
// discarding older ones
@@ -456,7 +456,7 @@ void WiredTigerSessionCache::releaseSession(WiredTigerSession* session) {
session->setIdleExpireTime(_clockSource->now());
if (session->_getEpoch() == currentEpoch) { // check outside of lock to reduce contention
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
if (session->_getEpoch() == _epoch.load()) { // recheck inside the lock for correctness
returnedToCache = true;
_sessions.push_back(session);
@@ -473,7 +473,7 @@ void WiredTigerSessionCache::releaseSession(WiredTigerSession* session) {
void WiredTigerSessionCache::setJournalListener(JournalListener* jl) {
- stdx::unique_lock<stdx::mutex> lk(_journalListenerMutex);
+ stdx::unique_lock<Latch> lk(_journalListenerMutex);
_journalListener = jl;
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
index aff9ef8d874..5c44b4cbd08 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
@@ -37,7 +37,7 @@
#include "mongo/db/storage/journal_listener.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/spin_lock.h"
namespace mongo {
@@ -324,7 +324,7 @@ private:
AtomicWord<unsigned> _shuttingDown;
static const uint32_t kShuttingDownMask = 1 << 31;
- stdx::mutex _cacheLock;
+ Mutex _cacheLock = MONGO_MAKE_LATCH("WiredTigerSessionCache::_cacheLock");
typedef std::vector<WiredTigerSession*> SessionCache;
SessionCache _sessions;
@@ -336,15 +336,16 @@ private:
// Counter and critical section mutex for waitUntilDurable
AtomicWord<unsigned> _lastSyncTime;
- stdx::mutex _lastSyncMutex;
+ Mutex _lastSyncMutex = MONGO_MAKE_LATCH("WiredTigerSessionCache::_lastSyncMutex");
// Mutex and cond var for waiting on prepare commit or abort.
- stdx::mutex _prepareCommittedOrAbortedMutex;
+ Mutex _prepareCommittedOrAbortedMutex =
+ MONGO_MAKE_LATCH("WiredTigerSessionCache::_prepareCommittedOrAbortedMutex");
stdx::condition_variable _prepareCommittedOrAbortedCond;
AtomicWord<std::uint64_t> _prepareCommitOrAbortCounter{0};
// Protects _journalListener.
- stdx::mutex _journalListenerMutex;
+ Mutex _journalListenerMutex = MONGO_MAKE_LATCH("WiredTigerSessionCache::_journalListenerMutex");
// Notified when we commit to the journal.
JournalListener* _journalListener = &NoOpJournalListener::instance;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
index 1cf95a4ad7d..e0847eb0444 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
@@ -64,7 +64,7 @@ WiredTigerSizeStorer::WiredTigerSizeStorer(WT_CONNECTION* conn,
}
WiredTigerSizeStorer::~WiredTigerSizeStorer() {
- stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
+ stdx::lock_guard<Latch> cursorLock(_cursorMutex);
_cursor->close(_cursor);
}
@@ -74,7 +74,7 @@ void WiredTigerSizeStorer::store(StringData uri, std::shared_ptr<SizeInfo> sizeI
return;
// Ordering is important: as the entry may be flushed concurrently, set the dirty flag last.
- stdx::lock_guard<stdx::mutex> lk(_bufferMutex);
+ stdx::lock_guard<Latch> lk(_bufferMutex);
auto& entry = _buffer[uri];
// During rollback it is possible to get a new SizeInfo. In that case clear the dirty flag,
// so the SizeInfo can be destructed without triggering the dirty check invariant.
@@ -90,13 +90,13 @@ void WiredTigerSizeStorer::store(StringData uri, std::shared_ptr<SizeInfo> sizeI
std::shared_ptr<WiredTigerSizeStorer::SizeInfo> WiredTigerSizeStorer::load(StringData uri) const {
{
// Check if we can satisfy the read from the buffer.
- stdx::lock_guard<stdx::mutex> bufferLock(_bufferMutex);
+ stdx::lock_guard<Latch> bufferLock(_bufferMutex);
Buffer::const_iterator it = _buffer.find(uri);
if (it != _buffer.end())
return it->second;
}
- stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
+ stdx::lock_guard<Latch> cursorLock(_cursorMutex);
// Intentionally ignoring return value.
ON_BLOCK_EXIT([&] { _cursor->reset(_cursor); });
@@ -123,7 +123,7 @@ std::shared_ptr<WiredTigerSizeStorer::SizeInfo> WiredTigerSizeStorer::load(Strin
void WiredTigerSizeStorer::flush(bool syncToDisk) {
Buffer buffer;
{
- stdx::lock_guard<stdx::mutex> bufferLock(_bufferMutex);
+ stdx::lock_guard<Latch> bufferLock(_bufferMutex);
_buffer.swap(buffer);
}
@@ -131,13 +131,13 @@ void WiredTigerSizeStorer::flush(bool syncToDisk) {
return; // Nothing to do.
Timer t;
- stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
+ stdx::lock_guard<Latch> cursorLock(_cursorMutex);
{
// On failure, place entries back into the map, unless a newer value already exists.
ON_BLOCK_EXIT([this, &buffer]() {
this->_cursor->reset(this->_cursor);
if (!buffer.empty()) {
- stdx::lock_guard<stdx::mutex> bufferLock(this->_bufferMutex);
+ stdx::lock_guard<Latch> bufferLock(this->_bufferMutex);
for (auto& it : buffer)
this->_buffer.try_emplace(it.first, it.second);
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
index 0063b2b6cfe..c143b9e87f5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
@@ -36,7 +36,7 @@
#include "mongo/base/string_data.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/string_map.h"
namespace mongo {
@@ -98,12 +98,13 @@ private:
const WiredTigerSession _session;
const bool _readOnly;
// Guards _cursor. Acquire *before* _bufferMutex.
- mutable stdx::mutex _cursorMutex;
+ mutable Mutex _cursorMutex = MONGO_MAKE_LATCH("WiredTigerSessionStorer::_cursorMutex");
WT_CURSOR* _cursor; // pointer is const after constructor
using Buffer = StringMap<std::shared_ptr<SizeInfo>>;
- mutable stdx::mutex _bufferMutex; // Guards _buffer
+ mutable Mutex _bufferMutex =
+ MONGO_MAKE_LATCH("WiredTigerSessionStorer::_bufferMutex"); // Guards _buffer
Buffer _buffer;
};
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
index 7216bc1727b..dd7c6ce52b5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
@@ -42,14 +42,14 @@
namespace mongo {
void WiredTigerSnapshotManager::setCommittedSnapshot(const Timestamp& timestamp) {
- stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_committedSnapshotMutex);
invariant(!_committedSnapshot || *_committedSnapshot <= timestamp);
_committedSnapshot = timestamp;
}
void WiredTigerSnapshotManager::setLocalSnapshot(const Timestamp& timestamp) {
- stdx::lock_guard<stdx::mutex> lock(_localSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_localSnapshotMutex);
if (timestamp.isNull())
_localSnapshot = boost::none;
else
@@ -57,12 +57,12 @@ void WiredTigerSnapshotManager::setLocalSnapshot(const Timestamp& timestamp) {
}
boost::optional<Timestamp> WiredTigerSnapshotManager::getLocalSnapshot() {
- stdx::lock_guard<stdx::mutex> lock(_localSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_localSnapshotMutex);
return _localSnapshot;
}
void WiredTigerSnapshotManager::dropAllSnapshots() {
- stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_committedSnapshotMutex);
_committedSnapshot = boost::none;
}
@@ -71,7 +71,7 @@ boost::optional<Timestamp> WiredTigerSnapshotManager::getMinSnapshotForNextCommi
return boost::none;
}
- stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_committedSnapshotMutex);
return _committedSnapshot;
}
@@ -81,7 +81,7 @@ Timestamp WiredTigerSnapshotManager::beginTransactionOnCommittedSnapshot(
RoundUpPreparedTimestamps roundUpPreparedTimestamps) const {
WiredTigerBeginTxnBlock txnOpen(session, prepareConflictBehavior, roundUpPreparedTimestamps);
- stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_committedSnapshotMutex);
uassert(ErrorCodes::ReadConcernMajorityNotAvailableYet,
"Committed view disappeared while running operation",
_committedSnapshot);
@@ -99,7 +99,7 @@ Timestamp WiredTigerSnapshotManager::beginTransactionOnLocalSnapshot(
RoundUpPreparedTimestamps roundUpPreparedTimestamps) const {
WiredTigerBeginTxnBlock txnOpen(session, prepareConflictBehavior, roundUpPreparedTimestamps);
- stdx::lock_guard<stdx::mutex> lock(_localSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_localSnapshotMutex);
invariant(_localSnapshot);
LOG(3) << "begin_transaction on local snapshot " << _localSnapshot.get().toString();
auto status = txnOpen.setReadSnapshot(_localSnapshot.get());
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
index 75c9777a502..1726a7d4c2b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
@@ -35,7 +35,7 @@
#include "mongo/bson/timestamp.h"
#include "mongo/db/storage/snapshot_manager.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -91,11 +91,13 @@ public:
private:
// Snapshot to use for reads at a commit timestamp.
- mutable stdx::mutex _committedSnapshotMutex; // Guards _committedSnapshot.
+ mutable Mutex _committedSnapshotMutex = // Guards _committedSnapshot.
+ MONGO_MAKE_LATCH("WiredTigerSnapshotManager::_committedSnapshotMutex");
boost::optional<Timestamp> _committedSnapshot;
// Snapshot to use for reads at a local stable timestamp.
- mutable stdx::mutex _localSnapshotMutex; // Guards _localSnapshot.
+ mutable Mutex _localSnapshotMutex = // Guards _localSnapshot.
+ MONGO_MAKE_LATCH("WiredTigerSnapshotManager::_localSnapshotMutex");
boost::optional<Timestamp> _localSnapshot;
};
} // namespace mongo
diff --git a/src/mongo/db/time_proof_service.cpp b/src/mongo/db/time_proof_service.cpp
index 756d0397d5f..7e29f0b2254 100644
--- a/src/mongo/db/time_proof_service.cpp
+++ b/src/mongo/db/time_proof_service.cpp
@@ -57,7 +57,7 @@ TimeProofService::Key TimeProofService::generateRandomKey() {
}
TimeProofService::TimeProof TimeProofService::getProof(LogicalTime time, const Key& key) {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
auto timeCeil = LogicalTime(Timestamp(time.asTimestamp().asULL() | kRangeMask));
if (_cache && _cache->hasProof(timeCeil, key)) {
return _cache->_proof;
@@ -82,7 +82,7 @@ Status TimeProofService::checkProof(LogicalTime time, const TimeProof& proof, co
}
void TimeProofService::resetCache() {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
if (_cache) {
_cache = boost::none;
}
diff --git a/src/mongo/db/time_proof_service.h b/src/mongo/db/time_proof_service.h
index f7ca66ab3c5..43b6d97a681 100644
--- a/src/mongo/db/time_proof_service.h
+++ b/src/mongo/db/time_proof_service.h
@@ -32,7 +32,7 @@
#include "mongo/base/status.h"
#include "mongo/crypto/sha1_block.h"
#include "mongo/db/logical_time.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -90,7 +90,7 @@ private:
};
// protects _cache
- stdx::mutex _cacheMutex;
+ Mutex _cacheMutex = MONGO_MAKE_LATCH("TimeProofService::_cacheMutex");
// one-entry cache
boost::optional<CacheEntry> _cache;
diff --git a/src/mongo/db/traffic_recorder.cpp b/src/mongo/db/traffic_recorder.cpp
index 4252cc1cfb5..f13388e1892 100644
--- a/src/mongo/db/traffic_recorder.cpp
+++ b/src/mongo/db/traffic_recorder.cpp
@@ -133,7 +133,7 @@ public:
db.getCursor().write<LittleEndian<uint32_t>>(size);
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_written += size;
}
@@ -150,7 +150,7 @@ public:
} catch (...) {
auto status = exceptionToStatus();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_result = status;
}
});
@@ -173,7 +173,7 @@ public:
// If we couldn't push our packet begin the process of failing the recording
_pcqPipe.producer.close();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If the result was otherwise okay, mark it as failed due to the queue blocking. If
// it failed for another reason, don't overwrite that.
@@ -187,7 +187,7 @@ public:
}
Status shutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_inShutdown) {
_inShutdown = true;
@@ -203,7 +203,7 @@ public:
}
BSONObj getStats() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_trafficStats.setBufferedBytes(_pcqPipe.controller.getStats().queueDepth);
_trafficStats.setCurrentFileSize(_written);
return _trafficStats.toBSON();
@@ -251,7 +251,7 @@ private:
MultiProducerSingleConsumerQueue<TrafficRecordingPacket, CostFunction>::Pipe _pcqPipe;
stdx::thread _thread;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("Recording::_mutex");
bool _inShutdown = false;
TrafficRecorderStats _trafficStats;
size_t _written = 0;
@@ -282,7 +282,7 @@ void TrafficRecorder::start(const StartRecordingTraffic& options) {
!gTrafficRecordingDirectory.empty());
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
uassert(ErrorCodes::BadValue, "Traffic recording already active", !_recording);
@@ -299,7 +299,7 @@ void TrafficRecorder::stop() {
_shouldRecord.store(false);
auto recording = [&] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
uassert(ErrorCodes::BadValue, "Traffic recording not active", _recording);
@@ -314,7 +314,7 @@ void TrafficRecorder::observe(const transport::SessionHandle& ts,
const Message& message) {
if (shouldAlwaysRecordTraffic) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_recording) {
StartRecordingTraffic options;
@@ -347,7 +347,7 @@ void TrafficRecorder::observe(const transport::SessionHandle& ts,
}
// We couldn't queue
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If the recording isn't the one we have in hand bail (its been ended, or a new one has
// been created
@@ -360,7 +360,7 @@ void TrafficRecorder::observe(const transport::SessionHandle& ts,
}
std::shared_ptr<TrafficRecorder::Recording> TrafficRecorder::_getCurrentRecording() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _recording;
}
diff --git a/src/mongo/db/traffic_recorder.h b/src/mongo/db/traffic_recorder.h
index 8bd261cbfb4..964b95fdf80 100644
--- a/src/mongo/db/traffic_recorder.h
+++ b/src/mongo/db/traffic_recorder.h
@@ -34,8 +34,8 @@
#include "mongo/db/service_context.h"
#include "mongo/db/traffic_recorder_gen.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/message.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/transport/session.h"
namespace mongo {
@@ -72,7 +72,7 @@ private:
AtomicWord<bool> _shouldRecord;
// The mutex only protects the last recording shared_ptr
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TrafficRecorder::_mutex");
std::shared_ptr<Recording> _recording;
};
diff --git a/src/mongo/db/ttl_collection_cache.cpp b/src/mongo/db/ttl_collection_cache.cpp
index 94eedea7f27..64181c34154 100644
--- a/src/mongo/db/ttl_collection_cache.cpp
+++ b/src/mongo/db/ttl_collection_cache.cpp
@@ -46,19 +46,19 @@ TTLCollectionCache& TTLCollectionCache::get(ServiceContext* ctx) {
}
void TTLCollectionCache::registerCollection(const NamespaceString& collectionNS) {
- stdx::lock_guard<stdx::mutex> lock(_ttlCollectionsLock);
+ stdx::lock_guard<Latch> lock(_ttlCollectionsLock);
_ttlCollections.push_back(collectionNS.ns());
}
void TTLCollectionCache::unregisterCollection(const NamespaceString& collectionNS) {
- stdx::lock_guard<stdx::mutex> lock(_ttlCollectionsLock);
+ stdx::lock_guard<Latch> lock(_ttlCollectionsLock);
auto collIter = std::find(_ttlCollections.begin(), _ttlCollections.end(), collectionNS.ns());
fassert(40220, collIter != _ttlCollections.end());
_ttlCollections.erase(collIter);
}
std::vector<std::string> TTLCollectionCache::getCollections() {
- stdx::lock_guard<stdx::mutex> lock(_ttlCollectionsLock);
+ stdx::lock_guard<Latch> lock(_ttlCollectionsLock);
return _ttlCollections;
}
}; // namespace mongo
diff --git a/src/mongo/db/ttl_collection_cache.h b/src/mongo/db/ttl_collection_cache.h
index 80839d272bf..80584528c8c 100644
--- a/src/mongo/db/ttl_collection_cache.h
+++ b/src/mongo/db/ttl_collection_cache.h
@@ -34,7 +34,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
/**
* Caches the set of collections containing a TTL index.
@@ -51,7 +51,7 @@ public:
std::vector<std::string> getCollections();
private:
- std::vector<std::string> _ttlCollections;
- stdx::mutex _ttlCollectionsLock;
+ Mutex _ttlCollectionsLock = MONGO_MAKE_LATCH("TTLCollectionCache::_ttlCollectionsLock");
+ std::vector<std::string> _ttlCollections; // namespace strings of collections with TTL indexes
};
} // namespace mongo
diff --git a/src/mongo/db/views/view_catalog.cpp b/src/mongo/db/views/view_catalog.cpp
index 6019a012b1a..f56422e36b5 100644
--- a/src/mongo/db/views/view_catalog.cpp
+++ b/src/mongo/db/views/view_catalog.cpp
@@ -87,7 +87,7 @@ Status ViewCatalog::reload(OperationContext* opCtx, ViewCatalogLookupBehavior lo
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _reload(lk, opCtx, ViewCatalogLookupBehavior::kValidateDurableViews);
}
@@ -147,7 +147,7 @@ Status ViewCatalog::_reload(WithLock,
}
void ViewCatalog::clear() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_viewMap.clear();
_viewGraph.clear();
@@ -172,7 +172,7 @@ void ViewCatalog::iterate(OperationContext* opCtx, ViewIteratorCallback callback
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_requireValidCatalog(lk);
for (auto&& view : _viewMap) {
callback(*view.second);
@@ -389,7 +389,7 @@ Status ViewCatalog::createView(OperationContext* opCtx,
invariant(opCtx->lockState()->isCollectionLockedForMode(
NamespaceString(viewName.db(), NamespaceString::kSystemDotViewsCollectionName), MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (viewName.db() != viewOn.db())
return Status(ErrorCodes::BadValue,
@@ -422,7 +422,7 @@ Status ViewCatalog::modifyView(OperationContext* opCtx,
const BSONArray& pipeline) {
invariant(opCtx->lockState()->isDbLockedForMode(viewName.db(), MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (viewName.db() != viewOn.db())
return Status(ErrorCodes::BadValue,
@@ -461,7 +461,7 @@ Status ViewCatalog::dropView(OperationContext* opCtx, const NamespaceString& vie
invariant(opCtx->lockState()->isCollectionLockedForMode(
NamespaceString(viewName.db(), NamespaceString::kSystemDotViewsCollectionName), MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_requireValidCatalog(lk);
ON_BLOCK_EXIT([this] { _ignoreExternalChange = false; });
@@ -515,7 +515,7 @@ std::shared_ptr<ViewDefinition> ViewCatalog::lookup(OperationContext* opCtx, Str
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_valid && opCtx->getClient()->isFromUserConnection()) {
// We want to avoid lookups on invalid collection names.
if (!NamespaceString::validCollectionName(ns)) {
@@ -537,7 +537,7 @@ std::shared_ptr<ViewDefinition> ViewCatalog::lookupWithoutValidatingDurableViews
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lookup(lk, opCtx, ns, ViewCatalogLookupBehavior::kAllowInvalidDurableViews);
}
@@ -547,7 +547,7 @@ StatusWith<ResolvedView> ViewCatalog::resolveView(OperationContext* opCtx,
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_requireValidCatalog(lock);
diff --git a/src/mongo/db/views/view_catalog.h b/src/mongo/db/views/view_catalog.h
index 45b5b1fef8e..af1e24cdebf 100644
--- a/src/mongo/db/views/view_catalog.h
+++ b/src/mongo/db/views/view_catalog.h
@@ -43,8 +43,8 @@
#include "mongo/db/views/resolved_view.h"
#include "mongo/db/views/view.h"
#include "mongo/db/views/view_graph.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/string_map.h"
@@ -199,7 +199,7 @@ private:
*/
void _requireValidCatalog(WithLock);
- stdx::mutex _mutex; // Protects all members.
+ Mutex _mutex = MONGO_MAKE_LATCH("ViewCatalog::_mutex"); // Protects all members.
ViewMap _viewMap;
ViewMap _viewMapBackup;
std::unique_ptr<DurableViewCatalog> _durable;
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index b35bbda4d32..7ef0add11dc 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -199,12 +199,12 @@ class PendingValue {
public:
PendingValue(int initialValue) : _value(initialValue) {}
void set(int newValue) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_value = newValue;
_condition.notify_all();
}
void await(int expectedValue) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_value != expectedValue) {
_condition.wait(lk);
}
@@ -212,7 +212,7 @@ public:
private:
int _value;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("PendingValue::_mutex");
mutable stdx::condition_variable _condition;
};
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index a426df6a2a9..44e3b6e0e5a 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -51,9 +51,9 @@
#include "mongo/db/storage/storage_engine_init.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/dbtests/framework_options.h"
+#include "mongo/platform/mutex.h"
#include "mongo/scripting/dbdirectclient_factory.h"
#include "mongo/scripting/engine.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/exit.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/dbtests/mock/mock_conn_registry.cpp b/src/mongo/dbtests/mock/mock_conn_registry.cpp
index 0717e8951aa..e2d4e532472 100644
--- a/src/mongo/dbtests/mock/mock_conn_registry.cpp
+++ b/src/mongo/dbtests/mock/mock_conn_registry.cpp
@@ -60,7 +60,7 @@ ConnectionString::ConnectionHook* MockConnRegistry::getConnStrHook() {
}
void MockConnRegistry::addServer(MockRemoteDBServer* server) {
- stdx::lock_guard<stdx::mutex> sl(_registryMutex);
+ stdx::lock_guard<Latch> sl(_registryMutex);
const std::string hostName(server->getServerAddress());
fassert(16533, _registry.count(hostName) == 0);
@@ -69,17 +69,17 @@ void MockConnRegistry::addServer(MockRemoteDBServer* server) {
}
bool MockConnRegistry::removeServer(const std::string& hostName) {
- stdx::lock_guard<stdx::mutex> sl(_registryMutex);
+ stdx::lock_guard<Latch> sl(_registryMutex);
return _registry.erase(hostName) == 1;
}
void MockConnRegistry::clear() {
- stdx::lock_guard<stdx::mutex> sl(_registryMutex);
+ stdx::lock_guard<Latch> sl(_registryMutex);
_registry.clear();
}
std::unique_ptr<MockDBClientConnection> MockConnRegistry::connect(const std::string& connStr) {
- stdx::lock_guard<stdx::mutex> sl(_registryMutex);
+ stdx::lock_guard<Latch> sl(_registryMutex);
fassert(16534, _registry.count(connStr) == 1);
return stdx::make_unique<MockDBClientConnection>(_registry[connStr], true);
}
diff --git a/src/mongo/dbtests/mock/mock_conn_registry.h b/src/mongo/dbtests/mock/mock_conn_registry.h
index 5796a0b7f73..feb8eb86517 100644
--- a/src/mongo/dbtests/mock/mock_conn_registry.h
+++ b/src/mongo/dbtests/mock/mock_conn_registry.h
@@ -114,7 +114,7 @@ private:
MockConnHook _mockConnStrHook;
// protects _registry
- stdx::mutex _registryMutex;
+ Mutex _registryMutex = MONGO_MAKE_LATCH("MockConnRegistry::_registryMutex");
stdx::unordered_map<std::string, MockRemoteDBServer*> _registry;
};
} // namespace mongo
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index b419c09e036..cf7363061bc 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -239,7 +239,7 @@ private:
Hotel(int nRooms) : _nRooms(nRooms), _checkedIn(0), _maxRooms(0) {}
void checkIn() {
- stdx::lock_guard<stdx::mutex> lk(_frontDesk);
+ stdx::lock_guard<Latch> lk(_frontDesk);
_checkedIn++;
verify(_checkedIn <= _nRooms);
if (_checkedIn > _maxRooms)
@@ -247,12 +247,12 @@ private:
}
void checkOut() {
- stdx::lock_guard<stdx::mutex> lk(_frontDesk);
+ stdx::lock_guard<Latch> lk(_frontDesk);
_checkedIn--;
verify(_checkedIn >= 0);
}
- stdx::mutex _frontDesk;
+ Mutex _frontDesk = MONGO_MAKE_LATCH("Hotel::_frontDesk");
int _nRooms;
int _checkedIn;
int _maxRooms;
diff --git a/src/mongo/embedded/index_builds_coordinator_embedded.cpp b/src/mongo/embedded/index_builds_coordinator_embedded.cpp
index 110838b6dcc..db72fbe7608 100644
--- a/src/mongo/embedded/index_builds_coordinator_embedded.cpp
+++ b/src/mongo/embedded/index_builds_coordinator_embedded.cpp
@@ -68,7 +68,7 @@ IndexBuildsCoordinatorEmbedded::startIndexBuild(OperationContext* opCtx,
}
auto replState = [&]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _allIndexBuilds.find(buildUUID);
invariant(it != _allIndexBuilds.end());
return it->second;
diff --git a/src/mongo/embedded/periodic_runner_embedded.cpp b/src/mongo/embedded/periodic_runner_embedded.cpp
index 41ae8a49e93..68c94fa541d 100644
--- a/src/mongo/embedded/periodic_runner_embedded.cpp
+++ b/src/mongo/embedded/periodic_runner_embedded.cpp
@@ -52,14 +52,14 @@ PeriodicRunnerEmbedded::PeriodicRunnerEmbedded(ServiceContext* svc, ClockSource*
auto PeriodicRunnerEmbedded::makeJob(PeriodicJob job) -> JobAnchor {
auto impl = std::make_shared<PeriodicJobImpl>(std::move(job), this->_clockSource, this);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_jobs.push_back(impl);
std::push_heap(_jobs.begin(), _jobs.end(), PeriodicJobSorter());
return JobAnchor(impl);
}
bool PeriodicRunnerEmbedded::tryPump() {
- stdx::unique_lock<stdx::mutex> lock(_mutex, stdx::try_to_lock);
+ stdx::unique_lock<Latch> lock(_mutex, stdx::try_to_lock);
if (!lock.owns_lock())
return false;
@@ -71,7 +71,7 @@ bool PeriodicRunnerEmbedded::tryPump() {
PeriodicJobImpl::ExecutionStatus jobExecStatus;
{
- stdx::lock_guard<stdx::mutex> jobLock(job._mutex);
+ stdx::lock_guard<Latch> jobLock(job._mutex);
jobExecStatus = job._execStatus;
}
@@ -104,7 +104,7 @@ bool PeriodicRunnerEmbedded::tryPump() {
// only variable that can be changed from other threads.
PeriodicJobImpl::ExecutionStatus jobExecStatus;
{
- stdx::lock_guard<stdx::mutex> jobLock(job._mutex);
+ stdx::lock_guard<Latch> jobLock(job._mutex);
jobExecStatus = job._execStatus;
}
@@ -142,19 +142,19 @@ PeriodicRunnerEmbedded::PeriodicJobImpl::PeriodicJobImpl(PeriodicJob job,
: _job(std::move(job)), _clockSource(source), _periodicRunner(runner) {}
void PeriodicRunnerEmbedded::PeriodicJobImpl::start() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_execStatus == PeriodicJobImpl::ExecutionStatus::kNotScheduled);
_execStatus = PeriodicJobImpl::ExecutionStatus::kRunning;
}
void PeriodicRunnerEmbedded::PeriodicJobImpl::pause() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_execStatus == PeriodicJobImpl::ExecutionStatus::kRunning);
_execStatus = PeriodicJobImpl::ExecutionStatus::kPaused;
}
void PeriodicRunnerEmbedded::PeriodicJobImpl::resume() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_execStatus == PeriodicJobImpl::ExecutionStatus::kPaused);
_execStatus = PeriodicJobImpl::ExecutionStatus::kRunning;
}
@@ -162,21 +162,21 @@ void PeriodicRunnerEmbedded::PeriodicJobImpl::resume() {
void PeriodicRunnerEmbedded::PeriodicJobImpl::stop() {
// Also take the master lock, the job lock is not held while executing the job and we must make
// sure the user can invalidate it after this call.
- stdx::lock_guard<stdx::mutex> masterLock(_periodicRunner->_mutex);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> masterLock(_periodicRunner->_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (isAlive(lk)) {
_stopWithMasterAndJobLock(masterLock, lk);
}
}
Milliseconds PeriodicRunnerEmbedded::PeriodicJobImpl::getPeriod() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _job.interval;
}
void PeriodicRunnerEmbedded::PeriodicJobImpl::setPeriod(Milliseconds ms) {
- stdx::lock_guard<stdx::mutex> masterLk(_periodicRunner->_mutex);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> masterLk(_periodicRunner->_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_job.interval = ms;
diff --git a/src/mongo/embedded/periodic_runner_embedded.h b/src/mongo/embedded/periodic_runner_embedded.h
index 6d82c50db44..a8549fb0bba 100644
--- a/src/mongo/embedded/periodic_runner_embedded.h
+++ b/src/mongo/embedded/periodic_runner_embedded.h
@@ -33,7 +33,7 @@
#include <vector>
#include "mongo/db/service_context_fwd.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/periodic_runner.h"
@@ -88,7 +88,7 @@ private:
// The mutex is protecting _execStatus, the variable that can be accessed from other
// threads.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicJobImpl::_mutex");
// The current execution status of the job.
ExecutionStatus _execStatus{ExecutionStatus::kNotScheduled};
@@ -102,7 +102,7 @@ private:
std::vector<std::shared_ptr<PeriodicJobImpl>> _jobs;
std::vector<std::shared_ptr<PeriodicJobImpl>> _Pausedjobs;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicRunnerEmbedded::_mutex");
};
} // namespace mongo
diff --git a/src/mongo/executor/async_multicaster.cpp b/src/mongo/executor/async_multicaster.cpp
index 24e72527d51..eb13a1ce385 100644
--- a/src/mongo/executor/async_multicaster.cpp
+++ b/src/mongo/executor/async_multicaster.cpp
@@ -37,8 +37,8 @@
#include "mongo/base/status.h"
#include "mongo/db/operation_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
namespace mongo {
@@ -60,7 +60,7 @@ std::vector<AsyncMulticaster::Reply> AsyncMulticaster::multicast(
struct State {
State(size_t leftToDo) : leftToDo(leftToDo) {}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("State::mutex");
stdx::condition_variable cv;
size_t leftToDo;
size_t running = 0;
@@ -71,7 +71,7 @@ std::vector<AsyncMulticaster::Reply> AsyncMulticaster::multicast(
auto state = std::make_shared<State>(servers.size());
for (const auto& server : servers) {
- stdx::unique_lock<stdx::mutex> lk(state->mutex);
+ stdx::unique_lock<Latch> lk(state->mutex);
// spin up no more than maxConcurrency tasks at once
opCtx->waitForConditionOrInterrupt(
state->cv, lk, [&] { return state->running < _options.maxConcurrency; });
@@ -80,7 +80,7 @@ std::vector<AsyncMulticaster::Reply> AsyncMulticaster::multicast(
uassertStatusOK(_executor->scheduleRemoteCommand(
RemoteCommandRequest{server, theDbName, theCmdObj, opCtx, timeoutMillis},
[state](const TaskExecutor::RemoteCommandCallbackArgs& cbData) {
- stdx::lock_guard<stdx::mutex> lk(state->mutex);
+ stdx::lock_guard<Latch> lk(state->mutex);
state->out.emplace_back(
std::forward_as_tuple(cbData.request.target, cbData.response));
@@ -96,7 +96,7 @@ std::vector<AsyncMulticaster::Reply> AsyncMulticaster::multicast(
}));
}
- stdx::unique_lock<stdx::mutex> lk(state->mutex);
+ stdx::unique_lock<Latch> lk(state->mutex);
opCtx->waitForConditionOrInterrupt(state->cv, lk, [&] { return state->leftToDo == 0; });
return std::move(state->out);
diff --git a/src/mongo/executor/async_multicaster.h b/src/mongo/executor/async_multicaster.h
index c2bc9e0be93..63eaaa4993d 100644
--- a/src/mongo/executor/async_multicaster.h
+++ b/src/mongo/executor/async_multicaster.h
@@ -34,7 +34,7 @@
#include "mongo/executor/remote_command_response.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
diff --git a/src/mongo/executor/async_timer_mock.cpp b/src/mongo/executor/async_timer_mock.cpp
index 0ba1712a189..c11b3ee4c83 100644
--- a/src/mongo/executor/async_timer_mock.cpp
+++ b/src/mongo/executor/async_timer_mock.cpp
@@ -47,7 +47,7 @@ void AsyncTimerMockImpl::cancel() {
void AsyncTimerMockImpl::asyncWait(AsyncTimerInterface::Handler handler) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_timeLeft != kZeroMilliseconds) {
_handlers.push_back(handler);
return;
@@ -65,7 +65,7 @@ void AsyncTimerMockImpl::fastForward(Milliseconds time) {
// While holding the lock, change the time and remove
// handlers that have expired
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (time >= _timeLeft) {
_timeLeft = kZeroMilliseconds;
tmp.swap(_handlers);
@@ -81,7 +81,7 @@ void AsyncTimerMockImpl::fastForward(Milliseconds time) {
}
Milliseconds AsyncTimerMockImpl::timeLeft() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _timeLeft;
}
@@ -90,7 +90,7 @@ void AsyncTimerMockImpl::expireAfter(Milliseconds expiration) {
// While holding the lock, reset the time and remove all handlers
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_timeLeft = expiration;
tmp.swap(_handlers);
}
@@ -102,14 +102,14 @@ void AsyncTimerMockImpl::expireAfter(Milliseconds expiration) {
}
int AsyncTimerMockImpl::jobs() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _handlers.size();
}
void AsyncTimerMockImpl::_callAllHandlers(std::error_code ec) {
std::vector<AsyncTimerInterface::Handler> tmp;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
tmp.swap(_handlers);
}
diff --git a/src/mongo/executor/async_timer_mock.h b/src/mongo/executor/async_timer_mock.h
index 13463b679d7..5e3a83e3275 100644
--- a/src/mongo/executor/async_timer_mock.h
+++ b/src/mongo/executor/async_timer_mock.h
@@ -32,7 +32,7 @@
#include <vector>
#include "mongo/executor/async_timer_interface.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_set.h"
namespace mongo {
@@ -84,7 +84,7 @@ public:
private:
void _callAllHandlers(std::error_code ec);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AsyncTimerMockImpl::_mutex");
Milliseconds _timeLeft;
std::vector<AsyncTimerInterface::Handler> _handlers;
};
diff --git a/src/mongo/executor/connection_pool.cpp b/src/mongo/executor/connection_pool.cpp
index c2705ae9e1d..a2006875884 100644
--- a/src/mongo/executor/connection_pool.cpp
+++ b/src/mongo/executor/connection_pool.cpp
@@ -186,7 +186,7 @@ protected:
size_t target = 0;
};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("LimitController::_mutex");
stdx::unordered_map<PoolId, PoolData> _poolData;
};
diff --git a/src/mongo/executor/connection_pool.h b/src/mongo/executor/connection_pool.h
index 70ee3c652c5..d94196e8e06 100644
--- a/src/mongo/executor/connection_pool.h
+++ b/src/mongo/executor/connection_pool.h
@@ -34,8 +34,8 @@
#include "mongo/executor/egress_tag_closer.h"
#include "mongo/executor/egress_tag_closer_manager.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/transport/session.h"
#include "mongo/transport/transport_layer.h"
@@ -255,7 +255,7 @@ private:
std::shared_ptr<ControllerInterface> _controller;
// The global mutex for specific pool access and the generation counter
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ConnectionPool::_mutex");
PoolId _nextPoolId = 0;
stdx::unordered_map<HostAndPort, std::shared_ptr<SpecificPool>> _pools;
diff --git a/src/mongo/executor/connection_pool_tl.cpp b/src/mongo/executor/connection_pool_tl.cpp
index e2f7711cca7..c3816eab43c 100644
--- a/src/mongo/executor/connection_pool_tl.cpp
+++ b/src/mongo/executor/connection_pool_tl.cpp
@@ -56,7 +56,7 @@ void TLTypeFactory::shutdown() {
// Stop any attempt to schedule timers in the future
_inShutdown.store(true);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
log() << "Killing all outstanding egress activity.";
for (auto collar : _collars) {
@@ -65,12 +65,12 @@ void TLTypeFactory::shutdown() {
}
void TLTypeFactory::fasten(Type* type) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_collars.insert(type);
}
void TLTypeFactory::release(Type* type) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_collars.erase(type);
type->_wasReleased = true;
diff --git a/src/mongo/executor/connection_pool_tl.h b/src/mongo/executor/connection_pool_tl.h
index 7297713b92b..f5bf54ff081 100644
--- a/src/mongo/executor/connection_pool_tl.h
+++ b/src/mongo/executor/connection_pool_tl.h
@@ -79,7 +79,7 @@ private:
std::unique_ptr<NetworkConnectionHook> _onConnectHook;
const ConnectionPool::Options _connPoolOptions;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TLTypeFactory::_mutex");
AtomicWord<bool> _inShutdown{false};
stdx::unordered_set<Type*> _collars;
};
diff --git a/src/mongo/executor/egress_tag_closer_manager.cpp b/src/mongo/executor/egress_tag_closer_manager.cpp
index 5ec544843f5..60473466e7f 100644
--- a/src/mongo/executor/egress_tag_closer_manager.cpp
+++ b/src/mongo/executor/egress_tag_closer_manager.cpp
@@ -48,19 +48,19 @@ EgressTagCloserManager& EgressTagCloserManager::get(ServiceContext* svc) {
}
void EgressTagCloserManager::add(EgressTagCloser* etc) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_egressTagClosers.insert(etc);
}
void EgressTagCloserManager::remove(EgressTagCloser* etc) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_egressTagClosers.erase(etc);
}
void EgressTagCloserManager::dropConnections(transport::Session::TagMask tags) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto etc : _egressTagClosers) {
etc->dropConnections(tags);
@@ -68,7 +68,7 @@ void EgressTagCloserManager::dropConnections(transport::Session::TagMask tags) {
}
void EgressTagCloserManager::dropConnections(const HostAndPort& hostAndPort) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto etc : _egressTagClosers) {
etc->dropConnections(hostAndPort);
@@ -78,7 +78,7 @@ void EgressTagCloserManager::dropConnections(const HostAndPort& hostAndPort) {
void EgressTagCloserManager::mutateTags(
const HostAndPort& hostAndPort,
const stdx::function<transport::Session::TagMask(transport::Session::TagMask)>& mutateFunc) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto etc : _egressTagClosers) {
etc->mutateTags(hostAndPort, mutateFunc);
diff --git a/src/mongo/executor/egress_tag_closer_manager.h b/src/mongo/executor/egress_tag_closer_manager.h
index 0899b915389..e45cad5335e 100644
--- a/src/mongo/executor/egress_tag_closer_manager.h
+++ b/src/mongo/executor/egress_tag_closer_manager.h
@@ -31,8 +31,8 @@
#include "mongo/db/service_context.h"
#include "mongo/executor/egress_tag_closer.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/transport/session.h"
#include "mongo/util/net/hostandport.h"
@@ -64,7 +64,7 @@ public:
const stdx::function<transport::Session::TagMask(transport::Session::TagMask)>& mutateFunc);
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("EgressTagCloserManager::_mutex");
stdx::unordered_set<EgressTagCloser*> _egressTagClosers;
};
diff --git a/src/mongo/executor/network_interface_integration_test.cpp b/src/mongo/executor/network_interface_integration_test.cpp
index f897d814409..644ea630973 100644
--- a/src/mongo/executor/network_interface_integration_test.cpp
+++ b/src/mongo/executor/network_interface_integration_test.cpp
@@ -166,14 +166,14 @@ public:
RemoteCommandResponse response;
};
IsMasterData waitForIsMaster() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_isMasterCond.wait(lk, [this] { return _isMasterResult != boost::none; });
return std::move(*_isMasterResult);
}
bool hasIsMaster() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isMasterResult != boost::none;
}
@@ -185,7 +185,7 @@ private:
Status validateHost(const HostAndPort& host,
const BSONObj& request,
const RemoteCommandResponse& isMasterReply) override {
- stdx::lock_guard<stdx::mutex> lk(_parent->_mutex);
+ stdx::lock_guard<Latch> lk(_parent->_mutex);
_parent->_isMasterResult = IsMasterData{request, isMasterReply};
_parent->_isMasterCond.notify_all();
return Status::OK();
@@ -203,7 +203,7 @@ private:
NetworkInterfaceTest* _parent;
};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("NetworkInterfaceTest::_mutex");
stdx::condition_variable _isMasterCond;
boost::optional<IsMasterData> _isMasterResult;
};
diff --git a/src/mongo/executor/network_interface_mock.h b/src/mongo/executor/network_interface_mock.h
index 05742d67cc0..58c908b2851 100644
--- a/src/mongo/executor/network_interface_mock.h
+++ b/src/mongo/executor/network_interface_mock.h
@@ -35,10 +35,10 @@
#include <vector>
#include "mongo/executor/network_interface.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/metadata/metadata_hook.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/list.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/util/clock_source.h"
@@ -361,7 +361,7 @@ private:
// Mutex that synchronizes access to mutable data in this class and its subclasses.
// Fields guarded by the mutex are labled (M), below, and those that are read-only
// in multi-threaded execution, and so unsynchronized, are labeled (R).
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
// Condition signaled to indicate that the network processing thread should wake up.
stdx::condition_variable _shouldWakeNetworkCondition; // (M)
diff --git a/src/mongo/executor/network_interface_perf_test.cpp b/src/mongo/executor/network_interface_perf_test.cpp
index 66ad4c596df..c861e33bd0c 100644
--- a/src/mongo/executor/network_interface_perf_test.cpp
+++ b/src/mongo/executor/network_interface_perf_test.cpp
@@ -66,7 +66,7 @@ int timeNetworkTestMillis(std::size_t operations, NetworkInterface* net) {
auto server = fixture.getServers()[0];
std::atomic<int> remainingOps(operations); // NOLINT
- stdx::mutex mtx;
+ auto mtx = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
Timer t;
@@ -81,7 +81,7 @@ int timeNetworkTestMillis(std::size_t operations, NetworkInterface* net) {
if (--remainingOps) {
return func();
}
- stdx::unique_lock<stdx::mutex> lk(mtx);
+ stdx::unique_lock<Latch> lk(mtx);
cv.notify_one();
};
@@ -93,7 +93,7 @@ int timeNetworkTestMillis(std::size_t operations, NetworkInterface* net) {
func();
- stdx::unique_lock<stdx::mutex> lk(mtx);
+ stdx::unique_lock<Latch> lk(mtx);
cv.wait(lk, [&] { return remainingOps.load() == 0; });
return t.millis();
diff --git a/src/mongo/executor/network_interface_thread_pool.cpp b/src/mongo/executor/network_interface_thread_pool.cpp
index 787bd0a6dac..f40a298aea1 100644
--- a/src/mongo/executor/network_interface_thread_pool.cpp
+++ b/src/mongo/executor/network_interface_thread_pool.cpp
@@ -49,7 +49,7 @@ NetworkInterfaceThreadPool::~NetworkInterfaceThreadPool() {
void NetworkInterfaceThreadPool::_dtorImpl() {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_tasks.empty())
return;
@@ -63,7 +63,7 @@ void NetworkInterfaceThreadPool::_dtorImpl() {
}
void NetworkInterfaceThreadPool::startup() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_started) {
severe() << "Attempting to start pool, but it has already started";
fassertFailed(34358);
@@ -75,7 +75,7 @@ void NetworkInterfaceThreadPool::startup() {
void NetworkInterfaceThreadPool::shutdown() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
@@ -84,7 +84,7 @@ void NetworkInterfaceThreadPool::shutdown() {
void NetworkInterfaceThreadPool::join() {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_joining) {
severe() << "Attempted to join pool more than once";
@@ -100,13 +100,13 @@ void NetworkInterfaceThreadPool::join() {
_net->signalWorkAvailable();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_joiningCondition.wait(
lk, [&] { return _tasks.empty() && (_consumeState == ConsumeState::kNeutral); });
}
void NetworkInterfaceThreadPool::schedule(Task task) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown) {
lk.unlock();
task({ErrorCodes::ShutdownInProgress, "Shutdown in progress"});
@@ -127,7 +127,7 @@ void NetworkInterfaceThreadPool::schedule(Task task) {
* allows us to use the network interface's threads as our own pool, which should reduce context
* switches if our tasks are getting scheduled by network interface tasks.
*/
-void NetworkInterfaceThreadPool::_consumeTasks(stdx::unique_lock<stdx::mutex> lk) {
+void NetworkInterfaceThreadPool::_consumeTasks(stdx::unique_lock<Latch> lk) {
if ((_consumeState != ConsumeState::kNeutral) || _tasks.empty())
return;
@@ -140,7 +140,7 @@ void NetworkInterfaceThreadPool::_consumeTasks(stdx::unique_lock<stdx::mutex> lk
_consumeState = ConsumeState::kScheduled;
lk.unlock();
auto ret = _net->schedule([this](Status status) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_consumeState != ConsumeState::kScheduled)
return;
@@ -149,7 +149,7 @@ void NetworkInterfaceThreadPool::_consumeTasks(stdx::unique_lock<stdx::mutex> lk
invariant(ret.isOK() || ErrorCodes::isShutdownError(ret.code()));
}
-void NetworkInterfaceThreadPool::_consumeTasksInline(stdx::unique_lock<stdx::mutex> lk) noexcept {
+void NetworkInterfaceThreadPool::_consumeTasksInline(stdx::unique_lock<Latch> lk) noexcept {
_consumeState = ConsumeState::kConsuming;
const auto consumingTasksGuard = makeGuard([&] { _consumeState = ConsumeState::kNeutral; });
diff --git a/src/mongo/executor/network_interface_thread_pool.h b/src/mongo/executor/network_interface_thread_pool.h
index 51771393032..3a295b26a5b 100644
--- a/src/mongo/executor/network_interface_thread_pool.h
+++ b/src/mongo/executor/network_interface_thread_pool.h
@@ -32,8 +32,8 @@
#include <cstdint>
#include <vector>
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/thread_pool_interface.h"
namespace mongo {
@@ -60,14 +60,14 @@ public:
void schedule(Task task) override;
private:
- void _consumeTasks(stdx::unique_lock<stdx::mutex> lk);
- void _consumeTasksInline(stdx::unique_lock<stdx::mutex> lk) noexcept;
+ void _consumeTasks(stdx::unique_lock<Latch> lk);
+ void _consumeTasksInline(stdx::unique_lock<Latch> lk) noexcept;
void _dtorImpl();
NetworkInterface* const _net;
// Protects all of the pool state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("NetworkInterfaceThreadPool::_mutex");
stdx::condition_variable _joiningCondition;
std::vector<Task> _tasks;
bool _started = false;
diff --git a/src/mongo/executor/network_interface_tl.cpp b/src/mongo/executor/network_interface_tl.cpp
index 2cc0fc9244f..d8961be0341 100644
--- a/src/mongo/executor/network_interface_tl.cpp
+++ b/src/mongo/executor/network_interface_tl.cpp
@@ -65,7 +65,7 @@ std::string NetworkInterfaceTL::getDiagnosticString() {
void NetworkInterfaceTL::appendConnectionStats(ConnectionPoolStats* stats) const {
auto pool = [&] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _pool.get();
}();
if (pool)
@@ -74,7 +74,7 @@ void NetworkInterfaceTL::appendConnectionStats(ConnectionPoolStats* stats) const
NetworkInterface::Counters NetworkInterfaceTL::getCounters() const {
invariant(getTestCommandsEnabled());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _counters;
}
@@ -83,7 +83,7 @@ std::string NetworkInterfaceTL::getHostName() {
}
void NetworkInterfaceTL::startup() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_svcCtx) {
_tl = _svcCtx->getTransportLayer();
}
@@ -144,19 +144,19 @@ bool NetworkInterfaceTL::inShutdown() const {
}
void NetworkInterfaceTL::waitForWork() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
MONGO_IDLE_THREAD_BLOCK;
_workReadyCond.wait(lk, [this] { return _isExecutorRunnable; });
}
void NetworkInterfaceTL::waitForWorkUntil(Date_t when) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
MONGO_IDLE_THREAD_BLOCK;
_workReadyCond.wait_until(lk, when.toSystemTimePoint(), [this] { return _isExecutorRunnable; });
}
void NetworkInterfaceTL::signalWorkAvailable() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_isExecutorRunnable) {
_isExecutorRunnable = true;
_workReadyCond.notify_one();
@@ -401,7 +401,7 @@ void NetworkInterfaceTL::_onAcquireConn(std::shared_ptr<CommandState> state,
}
if (getTestCommandsEnabled()) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_counters.timedOut++;
}
@@ -449,7 +449,7 @@ void NetworkInterfaceTL::_onAcquireConn(std::shared_ptr<CommandState> state,
}
if (getTestCommandsEnabled()) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (swr.isOK() && swr.getValue().status.isOK()) {
_counters.succeeded++;
} else {
@@ -467,7 +467,7 @@ void NetworkInterfaceTL::_onAcquireConn(std::shared_ptr<CommandState> state,
void NetworkInterfaceTL::cancelCommand(const TaskExecutor::CallbackHandle& cbHandle,
const BatonHandle& baton) {
- stdx::unique_lock<stdx::mutex> lk(_inProgressMutex);
+ stdx::unique_lock<Latch> lk(_inProgressMutex);
auto it = _inProgress.find(cbHandle);
if (it == _inProgress.end()) {
return;
@@ -485,7 +485,7 @@ void NetworkInterfaceTL::cancelCommand(const TaskExecutor::CallbackHandle& cbHan
}
if (getTestCommandsEnabled()) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_counters.canceled++;
}
@@ -528,7 +528,7 @@ Status NetworkInterfaceTL::setAlarm(const TaskExecutor::CallbackHandle& cbHandle
std::make_shared<AlarmState>(when, cbHandle, _reactor->makeTimer(), std::move(pf.promise));
{
- stdx::lock_guard<stdx::mutex> lk(_inProgressMutex);
+ stdx::lock_guard<Latch> lk(_inProgressMutex);
// If a user has already scheduled an alarm with a handle, make sure they intentionally
// override it by canceling and setting a new one.
@@ -546,7 +546,7 @@ Status NetworkInterfaceTL::setAlarm(const TaskExecutor::CallbackHandle& cbHandle
}
void NetworkInterfaceTL::cancelAlarm(const TaskExecutor::CallbackHandle& cbHandle) {
- stdx::unique_lock<stdx::mutex> lk(_inProgressMutex);
+ stdx::unique_lock<Latch> lk(_inProgressMutex);
auto iter = _inProgressAlarms.find(cbHandle);
@@ -566,7 +566,7 @@ void NetworkInterfaceTL::cancelAlarm(const TaskExecutor::CallbackHandle& cbHandl
void NetworkInterfaceTL::_cancelAllAlarms() {
auto alarms = [&] {
- stdx::unique_lock<stdx::mutex> lk(_inProgressMutex);
+ stdx::unique_lock<Latch> lk(_inProgressMutex);
return std::exchange(_inProgressAlarms, {});
}();
@@ -599,7 +599,7 @@ void NetworkInterfaceTL::_answerAlarm(Status status, std::shared_ptr<AlarmState>
// Erase the AlarmState from the map.
{
- stdx::lock_guard<stdx::mutex> lk(_inProgressMutex);
+ stdx::lock_guard<Latch> lk(_inProgressMutex);
auto iter = _inProgressAlarms.find(state->cbHandle);
if (iter == _inProgressAlarms.end()) {
diff --git a/src/mongo/executor/network_interface_tl.h b/src/mongo/executor/network_interface_tl.h
index 15fdf391876..ee27fdd410f 100644
--- a/src/mongo/executor/network_interface_tl.h
+++ b/src/mongo/executor/network_interface_tl.h
@@ -147,7 +147,7 @@ private:
std::unique_ptr<transport::TransportLayer> _ownedTransportLayer;
transport::ReactorHandle _reactor;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("NetworkInterfaceTL::_mutex");
ConnectionPool::Options _connPoolOpts;
std::unique_ptr<NetworkConnectionHook> _onConnectHook;
std::shared_ptr<ConnectionPool> _pool;
@@ -165,7 +165,7 @@ private:
AtomicWord<State> _state;
stdx::thread _ioThread;
- stdx::mutex _inProgressMutex;
+ Mutex _inProgressMutex = MONGO_MAKE_LATCH("NetworkInterfaceTL::_inProgressMutex");
stdx::unordered_map<TaskExecutor::CallbackHandle, std::weak_ptr<CommandState>> _inProgress;
stdx::unordered_map<TaskExecutor::CallbackHandle, std::shared_ptr<AlarmState>>
_inProgressAlarms;
diff --git a/src/mongo/executor/scoped_task_executor.cpp b/src/mongo/executor/scoped_task_executor.cpp
index 6f2b4823139..e6b92999b05 100644
--- a/src/mongo/executor/scoped_task_executor.cpp
+++ b/src/mongo/executor/scoped_task_executor.cpp
@@ -226,7 +226,7 @@ private:
[id, work = std::forward<Work>(work), self = shared_from_this()](const auto& cargs) {
using ArgsT = std::decay_t<decltype(cargs)>;
- stdx::unique_lock<stdx::mutex> lk(self->_mutex);
+ stdx::unique_lock<Latch> lk(self->_mutex);
auto doWorkAndNotify = [&](const ArgsT& x) noexcept {
lk.unlock();
@@ -302,7 +302,7 @@ private:
}
}
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ScopedTaskExecutor::_mutex");
bool _inShutdown = false;
std::shared_ptr<TaskExecutor> _executor;
size_t _id = 0;
diff --git a/src/mongo/executor/scoped_task_executor.h b/src/mongo/executor/scoped_task_executor.h
index 1c42e867fc7..202cae241af 100644
--- a/src/mongo/executor/scoped_task_executor.h
+++ b/src/mongo/executor/scoped_task_executor.h
@@ -34,8 +34,8 @@
#include "mongo/base/status.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/if_constexpr.h"
diff --git a/src/mongo/executor/thread_pool_mock.cpp b/src/mongo/executor/thread_pool_mock.cpp
index 191537cebff..fb809990e49 100644
--- a/src/mongo/executor/thread_pool_mock.cpp
+++ b/src/mongo/executor/thread_pool_mock.cpp
@@ -43,7 +43,7 @@ ThreadPoolMock::ThreadPoolMock(NetworkInterfaceMock* net, int32_t prngSeed, Opti
: _options(std::move(options)), _prng(prngSeed), _net(net) {}
ThreadPoolMock::~ThreadPoolMock() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_joining)
return;
@@ -53,13 +53,13 @@ ThreadPoolMock::~ThreadPoolMock() {
void ThreadPoolMock::startup() {
LOG(1) << "Starting pool";
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_started);
invariant(!_worker.joinable());
_started = true;
_worker = stdx::thread([this] {
_options.onCreateThread();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
LOG(1) << "Starting to consume tasks";
while (!_joining) {
@@ -77,17 +77,17 @@ void ThreadPoolMock::startup() {
}
void ThreadPoolMock::shutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shutdown(lk);
}
void ThreadPoolMock::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_join(lk);
}
void ThreadPoolMock::schedule(Task task) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown) {
lk.unlock();
@@ -98,7 +98,7 @@ void ThreadPoolMock::schedule(Task task) {
_tasks.emplace_back(std::move(task));
}
-void ThreadPoolMock::_consumeOneTask(stdx::unique_lock<stdx::mutex>& lk) {
+void ThreadPoolMock::_consumeOneTask(stdx::unique_lock<Latch>& lk) {
auto next = static_cast<size_t>(_prng.nextInt64(static_cast<int64_t>(_tasks.size())));
if (next + 1 != _tasks.size()) {
std::swap(_tasks[next], _tasks.back());
@@ -114,14 +114,14 @@ void ThreadPoolMock::_consumeOneTask(stdx::unique_lock<stdx::mutex>& lk) {
lk.lock();
}
-void ThreadPoolMock::_shutdown(stdx::unique_lock<stdx::mutex>& lk) {
+void ThreadPoolMock::_shutdown(stdx::unique_lock<Latch>& lk) {
LOG(1) << "Shutting down pool";
_inShutdown = true;
_net->signalWorkAvailable();
}
-void ThreadPoolMock::_join(stdx::unique_lock<stdx::mutex>& lk) {
+void ThreadPoolMock::_join(stdx::unique_lock<Latch>& lk) {
LOG(1) << "Joining pool";
_joining = true;
diff --git a/src/mongo/executor/thread_pool_mock.h b/src/mongo/executor/thread_pool_mock.h
index e3baaa07273..7f95d480e7a 100644
--- a/src/mongo/executor/thread_pool_mock.h
+++ b/src/mongo/executor/thread_pool_mock.h
@@ -32,9 +32,9 @@
#include <cstdint>
#include <vector>
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/thread_pool_interface.h"
@@ -73,14 +73,14 @@ public:
void schedule(Task task) override;
private:
- void _consumeOneTask(stdx::unique_lock<stdx::mutex>& lk);
- void _shutdown(stdx::unique_lock<stdx::mutex>& lk);
- void _join(stdx::unique_lock<stdx::mutex>& lk);
+ void _consumeOneTask(stdx::unique_lock<Latch>& lk);
+ void _shutdown(stdx::unique_lock<Latch>& lk);
+ void _join(stdx::unique_lock<Latch>& lk);
// These are the options with which the pool was configured at construction time.
const Options _options;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ThreadPoolMock::_mutex");
stdx::thread _worker;
std::vector<Task> _tasks;
PseudoRandom _prng;
diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp
index 808b2a7350c..f07e1d476c5 100644
--- a/src/mongo/executor/thread_pool_task_executor.cpp
+++ b/src/mongo/executor/thread_pool_task_executor.cpp
@@ -140,20 +140,20 @@ ThreadPoolTaskExecutor::ThreadPoolTaskExecutor(std::unique_ptr<ThreadPoolInterfa
ThreadPoolTaskExecutor::~ThreadPoolTaskExecutor() {
shutdown();
- auto lk = _join(stdx::unique_lock<stdx::mutex>(_mutex));
+ auto lk = _join(stdx::unique_lock<Latch>(_mutex));
invariant(_state == shutdownComplete);
}
void ThreadPoolTaskExecutor::startup() {
_net->startup();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_state == preStart);
_setState_inlock(running);
_pool->startup();
}
void ThreadPoolTaskExecutor::shutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown_inlock()) {
invariant(_networkInProgressQueue.empty());
invariant(_sleepersQueue.empty());
@@ -176,10 +176,10 @@ void ThreadPoolTaskExecutor::shutdown() {
}
void ThreadPoolTaskExecutor::join() {
- _join(stdx::unique_lock<stdx::mutex>(_mutex));
+ _join(stdx::unique_lock<Latch>(_mutex));
}
-stdx::unique_lock<stdx::mutex> ThreadPoolTaskExecutor::_join(stdx::unique_lock<stdx::mutex> lk) {
+stdx::unique_lock<Latch> ThreadPoolTaskExecutor::_join(stdx::unique_lock<Latch> lk) {
_stateChange.wait(lk, [this] {
// All tasks are spliced into the _poolInProgressQueue immediately after we accept them.
// This occurs in scheduleIntoPool_inlock.
@@ -223,7 +223,7 @@ stdx::unique_lock<stdx::mutex> ThreadPoolTaskExecutor::_join(stdx::unique_lock<s
EventHandle event;
setEventForHandle(&event, std::move(eventState));
signalEvent_inlock(event, std::move(lk));
- lk = stdx::unique_lock<stdx::mutex>(_mutex);
+ lk = stdx::unique_lock<Latch>(_mutex);
}
lk.unlock();
_net->shutdown();
@@ -237,7 +237,7 @@ stdx::unique_lock<stdx::mutex> ThreadPoolTaskExecutor::_join(stdx::unique_lock<s
}
void ThreadPoolTaskExecutor::appendDiagnosticBSON(BSONObjBuilder* b) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// ThreadPool details
// TODO: fill in
@@ -264,7 +264,7 @@ StatusWith<TaskExecutor::EventHandle> ThreadPoolTaskExecutor::makeEvent() {
auto el = makeSingletonEventList();
EventHandle event;
setEventForHandle(&event, el.front());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_inShutdown_inlock()) {
return {ErrorCodes::ShutdownInProgress, "Shutdown in progress"};
}
@@ -273,7 +273,7 @@ StatusWith<TaskExecutor::EventHandle> ThreadPoolTaskExecutor::makeEvent() {
}
void ThreadPoolTaskExecutor::signalEvent(const EventHandle& event) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
signalEvent_inlock(event, std::move(lk));
}
@@ -284,7 +284,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::onEvent(const E
}
// Unsure if we'll succeed yet, so pass an empty CallbackFn.
auto wq = makeSingletonWorkQueue({}, nullptr);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto eventState = checked_cast<EventState*>(getEventFromHandle(event));
auto cbHandle = enqueueCallbackState_inlock(&eventState->waiters, &wq);
if (!cbHandle.isOK()) {
@@ -304,7 +304,7 @@ StatusWith<stdx::cv_status> ThreadPoolTaskExecutor::waitForEvent(OperationContex
invariant(opCtx);
invariant(event.isValid());
auto eventState = checked_cast<EventState*>(getEventFromHandle(event));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// std::condition_variable::wait() can wake up spuriously, so we have to loop until the event
// is signalled or we time out.
@@ -323,7 +323,7 @@ StatusWith<stdx::cv_status> ThreadPoolTaskExecutor::waitForEvent(OperationContex
void ThreadPoolTaskExecutor::waitForEvent(const EventHandle& event) {
invariant(event.isValid());
auto eventState = checked_cast<EventState*>(getEventFromHandle(event));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (!eventState->isSignaledFlag) {
eventState->isSignaledCondition.wait(lk);
@@ -334,7 +334,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleWork(Ca
// Unsure if we'll succeed yet, so pass an empty CallbackFn.
auto wq = makeSingletonWorkQueue({}, nullptr);
WorkQueue temp;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto cbHandle = enqueueCallbackState_inlock(&temp, &wq);
if (!cbHandle.isOK()) {
return cbHandle;
@@ -352,7 +352,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleWorkAt(
}
auto wq = makeSingletonWorkQueue(std::move(work), nullptr, when);
wq.front()->isTimerOperation = true;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto cbHandle = enqueueCallbackState_inlock(&_sleepersQueue, &wq);
if (!cbHandle.isOK()) {
return cbHandle;
@@ -366,7 +366,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleWorkAt(
}
auto cbState = checked_cast<CallbackState*>(getCallbackFromHandle(cbHandle));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (cbState->canceled.load()) {
return;
}
@@ -455,7 +455,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC
},
baton);
wq.front()->isNetworkOperation = true;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto swCbHandle = enqueueCallbackState_inlock(&_networkInProgressQueue, &wq);
if (!swCbHandle.isOK())
return swCbHandle;
@@ -471,7 +471,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC
CallbackFn newCb = [cb, scheduledRequest, response](const CallbackArgs& cbData) {
remoteCommandFinished(cbData, cb, scheduledRequest, response);
};
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown_inlock()) {
return;
}
@@ -491,7 +491,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC
void ThreadPoolTaskExecutor::cancel(const CallbackHandle& cbHandle) {
invariant(cbHandle.isValid());
auto cbState = checked_cast<CallbackState*>(getCallbackFromHandle(cbHandle));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown_inlock()) {
return;
}
@@ -527,7 +527,7 @@ void ThreadPoolTaskExecutor::wait(const CallbackHandle& cbHandle, Interruptible*
if (cbState->isFinished.load()) {
return;
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!cbState->finishedCondition) {
cbState->finishedCondition.emplace();
}
@@ -569,7 +569,7 @@ ThreadPoolTaskExecutor::EventList ThreadPoolTaskExecutor::makeSingletonEventList
}
void ThreadPoolTaskExecutor::signalEvent_inlock(const EventHandle& event,
- stdx::unique_lock<stdx::mutex> lk) {
+ stdx::unique_lock<Latch> lk) {
invariant(event.isValid());
auto eventState = checked_cast<EventState*>(getEventFromHandle(event));
invariant(!eventState->isSignaledFlag);
@@ -580,20 +580,20 @@ void ThreadPoolTaskExecutor::signalEvent_inlock(const EventHandle& event,
}
void ThreadPoolTaskExecutor::scheduleIntoPool_inlock(WorkQueue* fromQueue,
- stdx::unique_lock<stdx::mutex> lk) {
+ stdx::unique_lock<Latch> lk) {
scheduleIntoPool_inlock(fromQueue, fromQueue->begin(), fromQueue->end(), std::move(lk));
}
void ThreadPoolTaskExecutor::scheduleIntoPool_inlock(WorkQueue* fromQueue,
const WorkQueue::iterator& iter,
- stdx::unique_lock<stdx::mutex> lk) {
+ stdx::unique_lock<Latch> lk) {
scheduleIntoPool_inlock(fromQueue, iter, std::next(iter), std::move(lk));
}
void ThreadPoolTaskExecutor::scheduleIntoPool_inlock(WorkQueue* fromQueue,
const WorkQueue::iterator& begin,
const WorkQueue::iterator& end,
- stdx::unique_lock<stdx::mutex> lk) {
+ stdx::unique_lock<Latch> lk) {
dassert(fromQueue != &_poolInProgressQueue);
std::vector<std::shared_ptr<CallbackState>> todo(begin, end);
_poolInProgressQueue.splice(_poolInProgressQueue.end(), *fromQueue, begin, end);
@@ -626,7 +626,7 @@ void ThreadPoolTaskExecutor::scheduleIntoPool_inlock(WorkQueue* fromQueue,
} else {
_pool->schedule([this, cbState](auto status) {
if (ErrorCodes::isCancelationError(status.code())) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
cbState->canceled.store(1);
} else {
@@ -659,7 +659,7 @@ void ThreadPoolTaskExecutor::runCallback(std::shared_ptr<CallbackState> cbStateA
callback(std::move(args));
}
cbStateArg->isFinished.store(true);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_poolInProgressQueue.erase(cbStateArg->iter);
if (cbStateArg->finishedCondition) {
cbStateArg->finishedCondition->notify_all();
diff --git a/src/mongo/executor/thread_pool_task_executor.h b/src/mongo/executor/thread_pool_task_executor.h
index 8285785d748..ad3ff832866 100644
--- a/src/mongo/executor/thread_pool_task_executor.h
+++ b/src/mongo/executor/thread_pool_task_executor.h
@@ -32,9 +32,9 @@
#include <memory>
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/list.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/transport/baton.h"
#include "mongo/util/fail_point_service.h"
@@ -149,13 +149,13 @@ private:
/**
* Signals the given event.
*/
- void signalEvent_inlock(const EventHandle& event, stdx::unique_lock<stdx::mutex> lk);
+ void signalEvent_inlock(const EventHandle& event, stdx::unique_lock<Latch> lk);
/**
* Schedules all items from "fromQueue" into the thread pool and moves them into
* _poolInProgressQueue.
*/
- void scheduleIntoPool_inlock(WorkQueue* fromQueue, stdx::unique_lock<stdx::mutex> lk);
+ void scheduleIntoPool_inlock(WorkQueue* fromQueue, stdx::unique_lock<Latch> lk);
/**
* Schedules the given item from "fromQueue" into the thread pool and moves it into
@@ -163,7 +163,7 @@ private:
*/
void scheduleIntoPool_inlock(WorkQueue* fromQueue,
const WorkQueue::iterator& iter,
- stdx::unique_lock<stdx::mutex> lk);
+ stdx::unique_lock<Latch> lk);
/**
* Schedules entries from "begin" through "end" in "fromQueue" into the thread pool
@@ -172,7 +172,7 @@ private:
void scheduleIntoPool_inlock(WorkQueue* fromQueue,
const WorkQueue::iterator& begin,
const WorkQueue::iterator& end,
- stdx::unique_lock<stdx::mutex> lk);
+ stdx::unique_lock<Latch> lk);
/**
* Executes the callback specified by "cbState".
@@ -181,7 +181,7 @@ private:
bool _inShutdown_inlock() const;
void _setState_inlock(State newState);
- stdx::unique_lock<stdx::mutex> _join(stdx::unique_lock<stdx::mutex> lk);
+ stdx::unique_lock<Latch> _join(stdx::unique_lock<Latch> lk);
// The network interface used for remote command execution and waiting.
std::shared_ptr<NetworkInterface> _net;
@@ -190,7 +190,7 @@ private:
std::shared_ptr<ThreadPoolInterface> _pool;
// Mutex guarding all remaining fields.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ThreadPoolTaskExecutor::_mutex");
// Queue containing all items currently scheduled into the thread pool but not yet completed.
WorkQueue _poolInProgressQueue;
diff --git a/src/mongo/idl/mutable_observer_registry.h b/src/mongo/idl/mutable_observer_registry.h
index 89ceaf2b2fb..2dfc0241f34 100644
--- a/src/mongo/idl/mutable_observer_registry.h
+++ b/src/mongo/idl/mutable_observer_registry.h
@@ -32,7 +32,7 @@
#include <vector>
#include "mongo/base/status.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/functional.h"
namespace mongo {
@@ -66,7 +66,7 @@ public:
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MutableObserverRegistry::_mutex");
std::vector<unique_function<void(const T&)>> _registry;
};
diff --git a/src/mongo/logger/console.cpp b/src/mongo/logger/console.cpp
index ce8c053779c..18e509a998f 100644
--- a/src/mongo/logger/console.cpp
+++ b/src/mongo/logger/console.cpp
@@ -44,7 +44,7 @@ namespace mongo {
namespace {
stdx::mutex& consoleMutex() {
- static stdx::mutex instance;
+ static stdx::mutex instance; // NOLINT
return instance;
}
diff --git a/src/mongo/logger/console.h b/src/mongo/logger/console.h
index 7becb076ef7..ecc6b6556ab 100644
--- a/src/mongo/logger/console.h
+++ b/src/mongo/logger/console.h
@@ -31,7 +31,7 @@
#include <iosfwd>
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
diff --git a/src/mongo/logger/log_component_settings.cpp b/src/mongo/logger/log_component_settings.cpp
index f1da736be2c..6adba4da495 100644
--- a/src/mongo/logger/log_component_settings.cpp
+++ b/src/mongo/logger/log_component_settings.cpp
@@ -61,7 +61,7 @@ LogSeverity LogComponentSettings::getMinimumLogSeverity(LogComponent component)
void LogComponentSettings::setMinimumLoggedSeverity(LogComponent component, LogSeverity severity) {
dassert(int(component) >= 0 && int(component) < LogComponent::kNumLogComponents);
- stdx::lock_guard<stdx::mutex> lk(_mtx);
+ stdx::lock_guard<Latch> lk(_mtx);
_setMinimumLoggedSeverityInLock(component, severity);
}
@@ -99,7 +99,7 @@ void LogComponentSettings::_setMinimumLoggedSeverityInLock(LogComponent componen
void LogComponentSettings::clearMinimumLoggedSeverity(LogComponent component) {
dassert(int(component) >= 0 && int(component) < LogComponent::kNumLogComponents);
- stdx::lock_guard<stdx::mutex> lk(_mtx);
+ stdx::lock_guard<Latch> lk(_mtx);
// LogComponent::kDefault must always be configured.
if (component == LogComponent::kDefault) {
diff --git a/src/mongo/logger/log_component_settings.h b/src/mongo/logger/log_component_settings.h
index 23440129d54..3b85f174b31 100644
--- a/src/mongo/logger/log_component_settings.h
+++ b/src/mongo/logger/log_component_settings.h
@@ -32,7 +32,7 @@
#include "mongo/logger/log_component.h"
#include "mongo/logger/log_severity.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace logger {
@@ -87,7 +87,7 @@ private:
// A mutex to synchronize writes to the severity arrays. This mutex is to synchronize changes to
// the entire array, and the atomics are to synchronize individual elements.
- stdx::mutex _mtx;
+ Mutex _mtx = MONGO_MAKE_LATCH("LogComponentSettings::_mtx");
// True if a log severity is explicitly set for a component.
// This differentiates between unconfigured components and components that happen to have
diff --git a/src/mongo/logger/log_severity_limiter.h b/src/mongo/logger/log_severity_limiter.h
index 689de448926..027393c2adf 100644
--- a/src/mongo/logger/log_severity_limiter.h
+++ b/src/mongo/logger/log_severity_limiter.h
@@ -31,7 +31,7 @@
#include "mongo/logger/log_severity.h"
#include "mongo/logger/logstream_builder.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/time_support.h"
@@ -79,7 +79,7 @@ public:
LogSeverity nextFor(const KeyT& key) {
auto now = Date_t::now();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto& cutoff = _cutoffByKey[key];
if (now > cutoff) {
@@ -97,7 +97,7 @@ private:
LogSeverity _limitedLogSeverity;
LogSeverity _normalLogSeverity;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("LogSeverityLimiter::_mutex");
stdx::unordered_map<KeyT, Date_t> _cutoffByKey;
};
diff --git a/src/mongo/logger/ramlog.cpp b/src/mongo/logger/ramlog.cpp
index ac2d12fdd44..1da701c93f7 100644
--- a/src/mongo/logger/ramlog.cpp
+++ b/src/mongo/logger/ramlog.cpp
@@ -170,7 +170,7 @@ Status RamLogAppender::append(const logger::MessageEventEphemeral& event) {
RamLog* RamLog::get(const std::string& name) {
if (!_namedLock) {
// Guaranteed to happen before multi-threaded operation.
- _namedLock = new stdx::mutex();
+ _namedLock = new stdx::mutex(); // NOLINT
}
stdx::lock_guard<stdx::mutex> lk(*_namedLock);
@@ -215,7 +215,7 @@ MONGO_INITIALIZER(RamLogCatalog)(InitializerContext*) {
return Status(ErrorCodes::InternalError,
"Inconsistent intiailization of RamLogCatalog.");
}
- _namedLock = new stdx::mutex();
+ _namedLock = new stdx::mutex(); // NOLINT
_named = new RM();
}
diff --git a/src/mongo/logger/ramlog.h b/src/mongo/logger/ramlog.h
index 306dc36bff4..7a3ff8cdaaa 100644
--- a/src/mongo/logger/ramlog.h
+++ b/src/mongo/logger/ramlog.h
@@ -39,7 +39,7 @@
#include "mongo/logger/appender.h"
#include "mongo/logger/message_event.h"
#include "mongo/logger/tee.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
@@ -115,7 +115,7 @@ private:
const char* getLine_inlock(unsigned lineNumber) const;
- stdx::mutex _mutex; // Guards all non-static data.
+ stdx::mutex _mutex; // Guards all non-static data. // NOLINT
char lines[N][C];
unsigned h; // current position
unsigned n; // number of lines stores 0 o N
diff --git a/src/mongo/logger/rotatable_file_writer.h b/src/mongo/logger/rotatable_file_writer.h
index 83fe4716c9a..ab6fe578281 100644
--- a/src/mongo/logger/rotatable_file_writer.h
+++ b/src/mongo/logger/rotatable_file_writer.h
@@ -34,7 +34,7 @@
#include <string>
#include "mongo/base/status.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace logger {
@@ -118,7 +118,7 @@ public:
Status _openFileStream(bool append);
RotatableFileWriter* _writer;
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
};
/**
@@ -128,7 +128,7 @@ public:
private:
friend class RotatableFileWriter::Use;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("RotatableFileWriter::_mutex");
std::string _fileName;
std::unique_ptr<std::ostream> _stream;
};
diff --git a/src/mongo/s/balancer_configuration.cpp b/src/mongo/s/balancer_configuration.cpp
index 9dacc96a258..ef7e27aa6a9 100644
--- a/src/mongo/s/balancer_configuration.cpp
+++ b/src/mongo/s/balancer_configuration.cpp
@@ -96,7 +96,7 @@ BalancerConfiguration::BalancerConfiguration()
BalancerConfiguration::~BalancerConfiguration() = default;
BalancerSettingsType::BalancerMode BalancerConfiguration::getBalancerMode() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
return _balancerSettings.getMode();
}
@@ -148,7 +148,7 @@ Status BalancerConfiguration::enableAutoSplit(OperationContext* opCtx, bool enab
}
bool BalancerConfiguration::shouldBalance() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
if (_balancerSettings.getMode() == BalancerSettingsType::kOff ||
_balancerSettings.getMode() == BalancerSettingsType::kAutoSplitOnly) {
return false;
@@ -158,7 +158,7 @@ bool BalancerConfiguration::shouldBalance() const {
}
bool BalancerConfiguration::shouldBalanceForAutoSplit() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
if (_balancerSettings.getMode() == BalancerSettingsType::kOff) {
return false;
}
@@ -167,12 +167,12 @@ bool BalancerConfiguration::shouldBalanceForAutoSplit() const {
}
MigrationSecondaryThrottleOptions BalancerConfiguration::getSecondaryThrottle() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
return _balancerSettings.getSecondaryThrottle();
}
bool BalancerConfiguration::waitForDelete() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
return _balancerSettings.waitForDelete();
}
@@ -214,7 +214,7 @@ Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* opCtx)
return settingsObjStatus.getStatus();
}
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
_balancerSettings = std::move(settings);
return Status::OK();
diff --git a/src/mongo/s/balancer_configuration.h b/src/mongo/s/balancer_configuration.h
index 7bea190a61e..10b174e43e0 100644
--- a/src/mongo/s/balancer_configuration.h
+++ b/src/mongo/s/balancer_configuration.h
@@ -34,8 +34,8 @@
#include <cstdint>
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -283,7 +283,8 @@ private:
Status _refreshAutoSplitSettings(OperationContext* opCtx);
// The latest read balancer settings and a mutex to protect its swaps
- mutable stdx::mutex _balancerSettingsMutex;
+ mutable Mutex _balancerSettingsMutex =
+ MONGO_MAKE_LATCH("BalancerConfiguration::_balancerSettingsMutex");
BalancerSettingsType _balancerSettings;
// Max chunk size after which a chunk would be considered jumbo and won't be moved. This value
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
index 5dae286da5a..1a22526aa14 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
@@ -132,7 +132,7 @@ StatusWith<LockpingsType> DistLockCatalogMock::getPing(OperationContext* opCtx,
GetPingFunc checkerFunc = noGetPingSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _getPingReturnValue;
checkerFunc = _getPingChecker;
}
@@ -146,7 +146,7 @@ Status DistLockCatalogMock::ping(OperationContext* opCtx, StringData processID,
PingFunc checkerFunc = noPingFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _pingReturnValue;
checkerFunc = _pingChecker;
}
@@ -167,7 +167,7 @@ StatusWith<LocksType> DistLockCatalogMock::grabLock(OperationContext* opCtx,
GrabLockFunc checkerFunc = noGrabLockFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _grabLockReturnValue;
checkerFunc = _grabLockChecker;
}
@@ -188,7 +188,7 @@ StatusWith<LocksType> DistLockCatalogMock::overtakeLock(OperationContext* opCtx,
OvertakeLockFunc checkerFunc = noOvertakeLockFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _overtakeLockReturnValue;
checkerFunc = _overtakeLockChecker;
}
@@ -202,7 +202,7 @@ Status DistLockCatalogMock::unlock(OperationContext* opCtx, const OID& lockSessi
UnlockFunc checkerFunc = noUnLockFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _unlockReturnValue;
checkerFunc = _unlockChecker;
}
@@ -218,7 +218,7 @@ Status DistLockCatalogMock::unlock(OperationContext* opCtx,
UnlockFunc checkerFunc = noUnLockFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _unlockReturnValue;
checkerFunc = _unlockChecker;
}
@@ -234,7 +234,7 @@ StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogMock::getServerInfo(
GetServerInfoFunc checkerFunc = noGetServerInfoSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _getServerInfoReturnValue;
checkerFunc = _getServerInfoChecker;
}
@@ -249,7 +249,7 @@ StatusWith<LocksType> DistLockCatalogMock::getLockByTS(OperationContext* opCtx,
GetLockByTSFunc checkerFunc = noGetLockByTSSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _getLockByTSReturnValue;
checkerFunc = _getLockByTSChecker;
}
@@ -263,7 +263,7 @@ StatusWith<LocksType> DistLockCatalogMock::getLockByName(OperationContext* opCtx
GetLockByNameFunc checkerFunc = noGetLockByNameSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _getLockByNameReturnValue;
checkerFunc = _getLockByNameChecker;
}
@@ -277,7 +277,7 @@ Status DistLockCatalogMock::stopPing(OperationContext* opCtx, StringData process
StopPingFunc checkerFunc = noStopPingFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _stopPingReturnValue;
checkerFunc = _stopPingChecker;
}
@@ -288,67 +288,67 @@ Status DistLockCatalogMock::stopPing(OperationContext* opCtx, StringData process
void DistLockCatalogMock::expectGrabLock(DistLockCatalogMock::GrabLockFunc checkerFunc,
StatusWith<LocksType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_grabLockChecker = checkerFunc;
_grabLockReturnValue = returnThis;
}
void DistLockCatalogMock::expectNoGrabLock() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_grabLockChecker = noGrabLockFuncSet;
_grabLockReturnValue = kLocksTypeBadRetValue;
}
void DistLockCatalogMock::expectUnLock(DistLockCatalogMock::UnlockFunc checkerFunc,
Status returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_unlockChecker = checkerFunc;
_unlockReturnValue = returnThis;
}
void DistLockCatalogMock::expectPing(DistLockCatalogMock::PingFunc checkerFunc, Status returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_pingChecker = checkerFunc;
_pingReturnValue = returnThis;
}
void DistLockCatalogMock::expectStopPing(StopPingFunc checkerFunc, Status returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stopPingChecker = checkerFunc;
_stopPingReturnValue = returnThis;
}
void DistLockCatalogMock::expectGetLockByTS(GetLockByTSFunc checkerFunc,
StatusWith<LocksType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_getLockByTSChecker = checkerFunc;
_getLockByTSReturnValue = returnThis;
}
void DistLockCatalogMock::expectGetLockByName(GetLockByNameFunc checkerFunc,
StatusWith<LocksType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_getLockByNameChecker = checkerFunc;
_getLockByNameReturnValue = returnThis;
}
void DistLockCatalogMock::expectOvertakeLock(OvertakeLockFunc checkerFunc,
StatusWith<LocksType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_overtakeLockChecker = checkerFunc;
_overtakeLockReturnValue = returnThis;
}
void DistLockCatalogMock::expectGetPing(GetPingFunc checkerFunc,
StatusWith<LockpingsType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_getPingChecker = checkerFunc;
_getPingReturnValue = returnThis;
}
void DistLockCatalogMock::expectGetServerInfo(GetServerInfoFunc checkerFunc,
StatusWith<DistLockCatalog::ServerInfo> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_getServerInfoChecker = checkerFunc;
_getServerInfoReturnValue = returnThis;
}
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.h b/src/mongo/s/catalog/dist_lock_catalog_mock.h
index d407a9c523f..1649e767215 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.h
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.h
@@ -30,11 +30,11 @@
#pragma once
#include "mongo/base/status_with.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/dist_lock_catalog.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -190,7 +190,7 @@ public:
private:
// Protects all the member variables.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DistLockCatalogMock::_mutex");
GrabLockFunc _grabLockChecker;
StatusWith<LocksType> _grabLockReturnValue;
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
index 1acf415e603..6d412a01f8d 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
@@ -93,7 +93,7 @@ void ReplSetDistLockManager::startUp() {
void ReplSetDistLockManager::shutDown(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_isShutDown = true;
_shutDownCV.notify_all();
}
@@ -117,7 +117,7 @@ std::string ReplSetDistLockManager::getProcessID() {
}
bool ReplSetDistLockManager::isShutDown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isShutDown;
}
@@ -146,7 +146,7 @@ void ReplSetDistLockManager::doTask() {
std::deque<std::pair<DistLockHandle, boost::optional<std::string>>> toUnlockBatch;
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
toUnlockBatch.swap(_unlockList);
}
@@ -181,7 +181,7 @@ void ReplSetDistLockManager::doTask() {
}
MONGO_IDLE_THREAD_BLOCK;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shutDownCV.wait_for(lk, _pingInterval.toSystemDuration(), [this] { return _isShutDown; });
}
}
@@ -224,7 +224,7 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* opCtx,
const auto& serverInfo = serverInfoStatus.getValue();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto pingIter = _pingHistory.find(lockDoc.getName());
if (pingIter == _pingHistory.end()) {
@@ -507,7 +507,7 @@ Status ReplSetDistLockManager::checkStatus(OperationContext* opCtx,
void ReplSetDistLockManager::queueUnlock(const DistLockHandle& lockSessionID,
const boost::optional<std::string>& name) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_unlockList.push_back(std::make_pair(lockSessionID, name));
}
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.h b/src/mongo/s/catalog/replset_dist_lock_manager.h
index 1814bd96677..1dfd878703b 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.h
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.h
@@ -34,12 +34,12 @@
#include <string>
#include "mongo/base/string_data.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/dist_lock_catalog.h"
#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/dist_lock_ping_info.h"
#include "mongo/stdx/chrono.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_map.h"
@@ -132,7 +132,7 @@ private:
const Milliseconds _pingInterval; // (I)
const Milliseconds _lockExpiration; // (I)
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ReplSetDistLockManager::_mutex");
std::unique_ptr<stdx::thread> _execThread; // (S)
// Contains the list of locks queued for unlocking. Cases when unlock operation can
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
index 499aa102f9b..516d396f67d 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
@@ -35,6 +35,7 @@
#include <vector>
#include "mongo/bson/json.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/dist_lock_catalog_mock.h"
#include "mongo/s/catalog/replset_dist_lock_manager.h"
@@ -45,7 +46,6 @@
#include "mongo/s/shard_server_test_fixture.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/system_tick_source.h"
#include "mongo/util/tick_source_mock.h"
@@ -413,7 +413,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
getMockCatalog()->expectGetLockByName([](StringData) {},
{ErrorCodes::LockNotFound, "not found!"});
- stdx::mutex unlockMutex;
+ auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
OID unlockSessionIDPassed;
int unlockCallCount = 0;
@@ -421,7 +421,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
getMockCatalog()->expectUnLock(
[&unlockMutex, &unlockCV, &unlockCallCount, &unlockSessionIDPassed](
const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
unlockCV.notify_all();
@@ -435,7 +435,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
if (unlockCallCount == 0) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
@@ -558,7 +558,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
},
{ErrorCodes::ExceededMemoryLimit, "bad remote server"});
- stdx::mutex unlockMutex;
+ auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
int unlockCallCount = 0;
OID unlockSessionIDPassed;
@@ -566,7 +566,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
getMockCatalog()->expectUnLock(
[&unlockMutex, &unlockCV, &unlockCallCount, &unlockSessionIDPassed](
const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
unlockCV.notify_all();
@@ -580,7 +580,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
if (unlockCallCount == 0) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
@@ -609,13 +609,13 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
* 3. Check that correct process is being pinged.
*/
TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
- stdx::mutex testMutex;
+ auto testMutex = MONGO_MAKE_LATCH();
stdx::condition_variable ping3TimesCV;
std::vector<std::string> processIDList;
getMockCatalog()->expectPing(
[&testMutex, &ping3TimesCV, &processIDList](StringData processIDArg, Date_t ping) {
- stdx::lock_guard<stdx::mutex> lk(testMutex);
+ stdx::lock_guard<Latch> lk(testMutex);
processIDList.push_back(processIDArg.toString());
if (processIDList.size() >= 3) {
@@ -626,7 +626,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
if (processIDList.size() < 3) {
didTimeout = ping3TimesCV.wait_for(lk, kJoinTimeout.toSystemDuration()) ==
stdx::cv_status::timeout;
@@ -659,7 +659,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
* 4. Check that lockSessionID used on all unlock is the same as the one used to grab lock.
*/
TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
- stdx::mutex unlockMutex;
+ auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
const unsigned int kUnlockErrorCount = 3;
std::vector<OID> lockSessionIDPassed;
@@ -667,13 +667,13 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
getMockCatalog()->expectUnLock(
[this, &unlockMutex, &unlockCV, &kUnlockErrorCount, &lockSessionIDPassed](
const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
lockSessionIDPassed.push_back(lockSessionID);
if (lockSessionIDPassed.size() >= kUnlockErrorCount) {
getMockCatalog()->expectUnLock(
[&lockSessionIDPassed, &unlockMutex, &unlockCV](const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
lockSessionIDPassed.push_back(lockSessionID);
unlockCV.notify_all();
},
@@ -705,7 +705,7 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
if (lockSessionIDPassed.size() < kUnlockErrorCount) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
@@ -739,7 +739,7 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
* 5. Check that the lock session id used when lock was called matches with unlock.
*/
TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
- stdx::mutex testMutex;
+ auto testMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
std::vector<OID> lockSessionIDPassed;
std::map<OID, int> unlockIDMap; // id -> count
@@ -761,14 +761,14 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
getMockCatalog()->expectUnLock(
[this, &unlockIDMap, &testMutex, &unlockCV, &mapEntriesGreaterThanTwo](
const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
unlockIDMap[lockSessionID]++;
// Wait until we see at least 2 unique lockSessionID more than twice.
if (unlockIDMap.size() >= 2 && mapEntriesGreaterThanTwo(unlockIDMap)) {
getMockCatalog()->expectUnLock(
[&testMutex, &unlockCV](const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
unlockCV.notify_all();
},
Status::OK());
@@ -792,7 +792,7 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
StringData processId,
Date_t time,
StringData why) {
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
lockSessionIDPassed.push_back(lockSessionIDArg);
},
retLockDoc);
@@ -804,7 +804,7 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
if (unlockIDMap.size() < 2 || !mapEntriesGreaterThanTwo(unlockIDMap)) {
didTimeout =
@@ -1739,11 +1739,11 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingResultsInError) {
OID unlockSessionIDPassed;
- stdx::mutex unlockMutex;
+ auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
getMockCatalog()->expectUnLock(
[&unlockSessionIDPassed, &unlockMutex, &unlockCV](const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
unlockSessionIDPassed = lockSessionID;
unlockCV.notify_all();
},
@@ -1756,7 +1756,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingResultsInError) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
if (!unlockSessionIDPassed.isSet()) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index a26142eb958..aa948ab03ba 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -171,7 +171,7 @@ ShardingCatalogClientImpl::ShardingCatalogClientImpl(
ShardingCatalogClientImpl::~ShardingCatalogClientImpl() = default;
void ShardingCatalogClientImpl::startup() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_started) {
return;
}
@@ -183,7 +183,7 @@ void ShardingCatalogClientImpl::startup() {
void ShardingCatalogClientImpl::shutDown(OperationContext* opCtx) {
LOG(1) << "ShardingCatalogClientImpl::shutDown() called.";
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index ab4e9506594..2ebc0fe3377 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -31,9 +31,9 @@
#include "mongo/client/connection_string.h"
#include "mongo/db/repl/optime.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -215,7 +215,7 @@ private:
// (R) Read only, can only be written during initialization.
//
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardingCatalogClientImpl::_mutex");
// Distributed lock manager singleton.
std::unique_ptr<DistLockManager> _distLockManager; // (R)
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index c2206848332..055f156de34 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -127,7 +127,7 @@ StatusWith<CachedDatabaseInfo> CatalogCache::getDatabase(OperationContext* opCtx
"SERVER-37398.");
try {
while (true) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
auto& dbEntry = _databases[dbName];
if (!dbEntry) {
@@ -217,7 +217,7 @@ CatalogCache::RefreshResult CatalogCache::_getCollectionRoutingInfoAt(
const auto dbInfo = std::move(swDbInfo.getValue());
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
const auto itDb = _collectionsByDb.find(nss.db());
if (itDb == _collectionsByDb.end()) {
@@ -312,7 +312,7 @@ StatusWith<CachedCollectionRoutingInfo> CatalogCache::getShardedCollectionRoutin
void CatalogCache::onStaleDatabaseVersion(const StringData dbName,
const DatabaseVersion& databaseVersion) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
const auto itDbEntry = _databases.find(dbName);
if (itDbEntry == _databases.end()) {
@@ -345,7 +345,7 @@ void CatalogCache::onStaleShardVersion(CachedCollectionRoutingInfo&& ccriToInval
// We received StaleShardVersion for a collection we thought was sharded. Either a migration
// occurred to or from a shard we contacted, or the collection was dropped.
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
const auto nss = ccri._cm->getns();
const auto itDb = _collectionsByDb.find(nss.db());
@@ -369,7 +369,7 @@ void CatalogCache::onStaleShardVersion(CachedCollectionRoutingInfo&& ccriToInval
void CatalogCache::checkEpochOrThrow(const NamespaceString& nss,
ChunkVersion targetCollectionVersion) const {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
const auto itDb = _collectionsByDb.find(nss.db());
uassert(StaleConfigInfo(nss, targetCollectionVersion, boost::none),
str::stream() << "could not act as router for " << nss.ns()
@@ -397,7 +397,7 @@ void CatalogCache::checkEpochOrThrow(const NamespaceString& nss,
}
void CatalogCache::invalidateDatabaseEntry(const StringData dbName) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto itDbEntry = _databases.find(dbName);
if (itDbEntry == _databases.end()) {
// The database was dropped.
@@ -407,7 +407,7 @@ void CatalogCache::invalidateDatabaseEntry(const StringData dbName) {
}
void CatalogCache::invalidateShardedCollection(const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto itDb = _collectionsByDb.find(nss.db());
if (itDb == _collectionsByDb.end()) {
@@ -421,7 +421,7 @@ void CatalogCache::invalidateShardedCollection(const NamespaceString& nss) {
}
void CatalogCache::purgeCollection(const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto itDb = _collectionsByDb.find(nss.db());
if (itDb == _collectionsByDb.end()) {
@@ -432,13 +432,13 @@ void CatalogCache::purgeCollection(const NamespaceString& nss) {
}
void CatalogCache::purgeDatabase(StringData dbName) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_databases.erase(dbName);
_collectionsByDb.erase(dbName);
}
void CatalogCache::purgeAllDatabases() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_databases.clear();
_collectionsByDb.clear();
}
@@ -449,7 +449,7 @@ void CatalogCache::report(BSONObjBuilder* builder) const {
size_t numDatabaseEntries;
size_t numCollectionEntries{0};
{
- stdx::lock_guard<stdx::mutex> ul(_mutex);
+ stdx::lock_guard<Latch> ul(_mutex);
numDatabaseEntries = _databases.size();
for (const auto& entry : _collectionsByDb) {
numCollectionEntries += entry.second.size();
@@ -506,7 +506,7 @@ void CatalogCache::_scheduleDatabaseRefresh(WithLock lk,
const auto refreshCallback = [ this, dbName, dbEntry, onRefreshFailed, onRefreshCompleted ](
OperationContext * opCtx, StatusWith<DatabaseType> swDbt) noexcept {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!swDbt.isOK()) {
onRefreshFailed(lg, swDbt.getStatus());
@@ -617,12 +617,12 @@ void CatalogCache::_scheduleCollectionRefresh(WithLock lk,
onRefreshCompleted(Status::OK(), newRoutingInfo.get());
} catch (const DBException& ex) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
onRefreshFailed(lg, ex.toStatus());
return;
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
collEntry->needsRefresh = false;
collEntry->refreshCompletionNotification->set(Status::OK());
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index e2d67ff68cd..75308568453 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -31,13 +31,13 @@
#include "mongo/base/string_data.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog_cache_loader.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/database_version_gen.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/string_map.h"
@@ -387,7 +387,7 @@ private:
using CollectionsByDbMap = StringMap<CollectionInfoMap>;
// Mutex to serialize access to the structures below
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("CatalogCache::_mutex");
// Map from DB name to the info for that database
DatabaseInfoMap _databases;
diff --git a/src/mongo/s/chunk_writes_tracker.cpp b/src/mongo/s/chunk_writes_tracker.cpp
index 807c526532d..abb20746650 100644
--- a/src/mongo/s/chunk_writes_tracker.cpp
+++ b/src/mongo/s/chunk_writes_tracker.cpp
@@ -52,7 +52,7 @@ bool ChunkWritesTracker::shouldSplit(uint64_t maxChunkSize) {
}
bool ChunkWritesTracker::acquireSplitLock() {
- stdx::lock_guard<stdx::mutex> lk(_mtx);
+ stdx::lock_guard<Latch> lk(_mtx);
if (!_isLockedForSplitting) {
_isLockedForSplitting = true;
diff --git a/src/mongo/s/chunk_writes_tracker.h b/src/mongo/s/chunk_writes_tracker.h
index 141879375c4..85309c5b205 100644
--- a/src/mongo/s/chunk_writes_tracker.h
+++ b/src/mongo/s/chunk_writes_tracker.h
@@ -30,7 +30,7 @@
#pragma once
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -89,7 +89,7 @@ private:
/**
* Protects _splitState when starting a split.
*/
- stdx::mutex _mtx;
+ Mutex _mtx = MONGO_MAKE_LATCH("ChunkWritesTracker::_mtx");
/**
* Whether or not a current split is in progress for this chunk.
diff --git a/src/mongo/s/client/rs_local_client.cpp b/src/mongo/s/client/rs_local_client.cpp
index fdb386a3bcf..e1ec4917f8f 100644
--- a/src/mongo/s/client/rs_local_client.cpp
+++ b/src/mongo/s/client/rs_local_client.cpp
@@ -56,7 +56,7 @@ void RSLocalClient::_updateLastOpTimeFromClient(OperationContext* opCtx,
return;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (lastOpTimeFromClient >= _lastOpTime) {
// It's always possible for lastOpTimeFromClient to be less than _lastOpTime if another
// thread started and completed a write through this ShardLocal (updating _lastOpTime)
@@ -66,7 +66,7 @@ void RSLocalClient::_updateLastOpTimeFromClient(OperationContext* opCtx,
}
repl::OpTime RSLocalClient::_getLastOpTime() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastOpTime;
}
diff --git a/src/mongo/s/client/rs_local_client.h b/src/mongo/s/client/rs_local_client.h
index 7bba5c7eaa0..7dabd19d454 100644
--- a/src/mongo/s/client/rs_local_client.h
+++ b/src/mongo/s/client/rs_local_client.h
@@ -30,8 +30,8 @@
#pragma once
#include "mongo/db/repl/optime.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/client/shard.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -86,7 +86,7 @@ private:
repl::OpTime _getLastOpTime();
// Guards _lastOpTime below.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("RSLocalClient::_mutex");
// Stores the optime that was generated by the last operation to perform a write that was run
// through _runCommand. Used in _exhaustiveFindOnConfig for waiting for that optime to be
diff --git a/src/mongo/s/client/shard_connection.cpp b/src/mongo/s/client/shard_connection.cpp
index 5ac9fe97bd0..e4c3e9a8dd6 100644
--- a/src/mongo/s/client/shard_connection.cpp
+++ b/src/mongo/s/client/shard_connection.cpp
@@ -63,19 +63,19 @@ class ClientConnections;
class ActiveClientConnections {
public:
void add(const ClientConnections* cc) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_clientConnections.insert(cc);
}
void remove(const ClientConnections* cc) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_clientConnections.erase(cc);
}
void appendInfo(BSONObjBuilder* b) const;
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ActiveClientConnections::_mutex");
std::set<const ClientConnections*> _clientConnections;
} activeClientConnections;
@@ -331,7 +331,7 @@ void ActiveClientConnections::appendInfo(BSONObjBuilder* b) const {
BSONArrayBuilder arr(64 * 1024);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (const auto* conn : _clientConnections) {
BSONObjBuilder bb(arr.subobjStart());
conn->appendInfo(bb);
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index dfa120f49c3..38c4bc6976d 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -49,6 +49,7 @@
#include "mongo/executor/task_executor.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/executor/thread_pool_task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/metadata/egress_metadata_hook_list.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_shard.h"
@@ -56,7 +57,6 @@
#include "mongo/s/client/shard_factory.h"
#include "mongo/s/grid.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/log.h"
#include "mongo/util/map_util.h"
@@ -199,12 +199,12 @@ void ShardRegistry::updateReplSetHosts(const ConnectionString& newConnString) {
newConnString.type() == ConnectionString::CUSTOM); // For dbtests
// to prevent update config shard connection string during init
- stdx::unique_lock<stdx::mutex> lock(_reloadMutex);
+ stdx::unique_lock<Latch> lock(_reloadMutex);
_data.rebuildShardIfExists(newConnString, _shardFactory.get());
}
void ShardRegistry::init() {
- stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
+ stdx::unique_lock<Latch> reloadLock(_reloadMutex);
invariant(_initConfigServerCS.isValid());
auto configShard =
_shardFactory->createShard(ShardRegistry::kConfigServerShardId, _initConfigServerCS);
@@ -279,12 +279,12 @@ void ShardRegistry::_internalReload(const CallbackArgs& cbArgs) {
}
bool ShardRegistry::isUp() const {
- stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
+ stdx::unique_lock<Latch> reloadLock(_reloadMutex);
return _isUp;
}
bool ShardRegistry::reload(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
+ stdx::unique_lock<Latch> reloadLock(_reloadMutex);
if (_reloadState == ReloadState::Reloading) {
// Another thread is already in the process of reloading so no need to do duplicate work.
@@ -436,7 +436,7 @@ ShardRegistryData::ShardRegistryData(OperationContext* opCtx, ShardFactory* shar
}
void ShardRegistryData::swap(ShardRegistryData& other) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lookup.swap(other._lookup);
_rsLookup.swap(other._rsLookup);
_hostLookup.swap(other._hostLookup);
@@ -444,29 +444,29 @@ void ShardRegistryData::swap(ShardRegistryData& other) {
}
shared_ptr<Shard> ShardRegistryData::getConfigShard() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _configShard;
}
void ShardRegistryData::addConfigShard(std::shared_ptr<Shard> shard) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_configShard = shard;
_addShard(lk, shard, true);
}
shared_ptr<Shard> ShardRegistryData::findByRSName(const string& name) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto i = _rsLookup.find(name);
return (i != _rsLookup.end()) ? i->second : nullptr;
}
shared_ptr<Shard> ShardRegistryData::findByHostAndPort(const HostAndPort& hostAndPort) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return mapFindWithDefault(_hostLookup, hostAndPort);
}
shared_ptr<Shard> ShardRegistryData::findByShardId(const ShardId& shardId) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _findByShardId(lk, shardId);
}
@@ -479,7 +479,7 @@ void ShardRegistryData::toBSON(BSONObjBuilder* result) const {
// Need to copy, then sort by shardId.
std::vector<std::pair<ShardId, std::string>> shards;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
shards.reserve(_lookup.size());
for (auto&& shard : _lookup) {
shards.emplace_back(shard.first, shard.second->getConnString().toString());
@@ -495,7 +495,7 @@ void ShardRegistryData::toBSON(BSONObjBuilder* result) const {
}
void ShardRegistryData::getAllShardIds(std::set<ShardId>& seen) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto i = _lookup.begin(); i != _lookup.end(); ++i) {
const auto& s = i->second;
if (s->getId().toString() == "config") {
@@ -506,7 +506,7 @@ void ShardRegistryData::getAllShardIds(std::set<ShardId>& seen) const {
}
void ShardRegistryData::shardIdSetDifference(std::set<ShardId>& diff) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto i = _lookup.begin(); i != _lookup.end(); ++i) {
invariant(i->second);
auto res = diff.find(i->second->getId());
@@ -518,7 +518,7 @@ void ShardRegistryData::shardIdSetDifference(std::set<ShardId>& diff) const {
void ShardRegistryData::rebuildShardIfExists(const ConnectionString& newConnString,
ShardFactory* factory) {
- stdx::unique_lock<stdx::mutex> updateConnStringLock(_mutex);
+ stdx::unique_lock<Latch> updateConnStringLock(_mutex);
auto it = _rsLookup.find(newConnString.getSetName());
if (it == _rsLookup.end()) {
return;
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index 5456f7bd06d..43820a6f414 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -36,9 +36,9 @@
#include "mongo/db/jsobj.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/client/shard.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -114,7 +114,7 @@ private:
void _rebuildShard(WithLock, ConnectionString const& newConnString, ShardFactory* factory);
// Protects the lookup maps below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ShardRegistryData::_mutex");
using ShardMap = stdx::unordered_map<ShardId, std::shared_ptr<Shard>, ShardId::Hasher>;
@@ -286,7 +286,7 @@ private:
ShardRegistryData _data;
// Protects the _reloadState and _initConfigServerCS during startup.
- mutable stdx::mutex _reloadMutex;
+ mutable Mutex _reloadMutex = MONGO_MAKE_LATCH("ShardRegistry::_reloadMutex");
stdx::condition_variable _inReloadCV;
enum class ReloadState {
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index 5e5f2617d47..253cf246c35 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -143,7 +143,7 @@ void ShardRemote::updateReplSetMonitor(const HostAndPort& remoteHost,
}
void ShardRemote::updateLastCommittedOpTime(LogicalTime lastCommittedOpTime) {
- stdx::lock_guard<stdx::mutex> lk(_lastCommittedOpTimeMutex);
+ stdx::lock_guard<Latch> lk(_lastCommittedOpTimeMutex);
// A secondary may return a lastCommittedOpTime less than the latest seen so far.
if (lastCommittedOpTime > _lastCommittedOpTime) {
@@ -152,7 +152,7 @@ void ShardRemote::updateLastCommittedOpTime(LogicalTime lastCommittedOpTime) {
}
LogicalTime ShardRemote::getLastCommittedOpTime() const {
- stdx::lock_guard<stdx::mutex> lk(_lastCommittedOpTimeMutex);
+ stdx::lock_guard<Latch> lk(_lastCommittedOpTimeMutex);
return _lastCommittedOpTime;
}
diff --git a/src/mongo/s/client/shard_remote.h b/src/mongo/s/client/shard_remote.h
index 3b19fd8ab0f..cf1b7b2d3f8 100644
--- a/src/mongo/s/client/shard_remote.h
+++ b/src/mongo/s/client/shard_remote.h
@@ -34,7 +34,7 @@
#include "mongo/s/client/shard.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -133,7 +133,8 @@ private:
/**
* Protects _lastCommittedOpTime.
*/
- mutable stdx::mutex _lastCommittedOpTimeMutex;
+ mutable Mutex _lastCommittedOpTimeMutex =
+ MONGO_MAKE_LATCH("ShardRemote::_lastCommittedOpTimeMutex");
/**
* Logical time representing the latest opTime timestamp known to be in this shard's majority
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index b9dff2f77cd..81cdb7a5ca2 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -63,14 +63,14 @@ namespace {
class ConnectionShardStatus {
public:
bool hasAnySequenceSet(DBClientBase* conn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
return seenConnIt != _map.end() && seenConnIt->second.size() > 0;
}
bool getSequence(DBClientBase* conn, const string& ns, unsigned long long* sequence) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
if (seenConnIt == _map.end())
@@ -85,18 +85,18 @@ public:
}
void setSequence(DBClientBase* conn, const string& ns, const unsigned long long& s) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_map[conn->getConnectionId()][ns] = s;
}
void reset(DBClientBase* conn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_map.erase(conn->getConnectionId());
}
private:
// protects _map
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ConnectionShardStatus::_mutex");
// a map from a connection into ChunkManager's sequence number for each namespace
typedef map<unsigned long long, map<string, unsigned long long>> SequenceMap;
diff --git a/src/mongo/s/cluster_identity_loader.cpp b/src/mongo/s/cluster_identity_loader.cpp
index ed61976820a..1962272ca5d 100644
--- a/src/mongo/s/cluster_identity_loader.cpp
+++ b/src/mongo/s/cluster_identity_loader.cpp
@@ -56,14 +56,14 @@ ClusterIdentityLoader* ClusterIdentityLoader::get(OperationContext* operationCon
}
OID ClusterIdentityLoader::getClusterId() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_initializationState == InitializationState::kInitialized && _lastLoadResult.isOK());
return _lastLoadResult.getValue();
}
Status ClusterIdentityLoader::loadClusterId(OperationContext* opCtx,
const repl::ReadConcernLevel& readConcernLevel) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_initializationState == InitializationState::kInitialized) {
invariant(_lastLoadResult.isOK());
return Status::OK();
@@ -105,7 +105,7 @@ StatusWith<OID> ClusterIdentityLoader::_fetchClusterIdFromConfig(
}
void ClusterIdentityLoader::discardCachedClusterId() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_initializationState == InitializationState::kUninitialized) {
return;
diff --git a/src/mongo/s/cluster_identity_loader.h b/src/mongo/s/cluster_identity_loader.h
index b5ee563d253..df2ed8b40d6 100644
--- a/src/mongo/s/cluster_identity_loader.h
+++ b/src/mongo/s/cluster_identity_loader.h
@@ -33,8 +33,8 @@
#include "mongo/bson/oid.h"
#include "mongo/db/repl/read_concern_args.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -94,7 +94,7 @@ private:
StatusWith<OID> _fetchClusterIdFromConfig(OperationContext* opCtx,
const repl::ReadConcernLevel& readConcernLevel);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ClusterIdentityLoader::_mutex");
stdx::condition_variable _inReloadCV;
// Used to ensure that only one thread at a time attempts to reload the cluster ID from the
diff --git a/src/mongo/s/cluster_last_error_info.cpp b/src/mongo/s/cluster_last_error_info.cpp
index 4dd79d95fb4..2fe697df461 100644
--- a/src/mongo/s/cluster_last_error_info.cpp
+++ b/src/mongo/s/cluster_last_error_info.cpp
@@ -40,12 +40,12 @@ const Client::Decoration<std::shared_ptr<ClusterLastErrorInfo>> ClusterLastError
Client::declareDecoration<std::shared_ptr<ClusterLastErrorInfo>>();
void ClusterLastErrorInfo::addShardHost(const std::string& shardHost) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_cur->shardHostsWritten.insert(shardHost);
}
void ClusterLastErrorInfo::addHostOpTime(ConnectionString connStr, HostOpTime stat) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_cur->hostOpTimes[connStr] = stat;
}
@@ -56,13 +56,13 @@ void ClusterLastErrorInfo::addHostOpTimes(const HostOpTimeMap& hostOpTimes) {
}
void ClusterLastErrorInfo::newRequest() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
std::swap(_cur, _prev);
_cur->clear();
}
void ClusterLastErrorInfo::disableForCommand() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
RequestInfo* temp = _cur;
_cur = _prev;
_prev = temp;
diff --git a/src/mongo/s/cluster_last_error_info.h b/src/mongo/s/cluster_last_error_info.h
index 0cc07fa27ac..af13045099d 100644
--- a/src/mongo/s/cluster_last_error_info.h
+++ b/src/mongo/s/cluster_last_error_info.h
@@ -63,7 +63,7 @@ public:
* gets shards used on the previous request
*/
std::set<std::string>* getPrevShardHosts() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return &_prev->shardHostsWritten;
}
@@ -71,7 +71,7 @@ public:
* Gets the shards, hosts, and opTimes the client last wrote to with write commands.
*/
const HostOpTimeMap& getPrevHostOpTimes() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _prev->hostOpTimes;
}
@@ -89,7 +89,7 @@ private:
};
// Protects _infos, _cur, and _prev.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ClusterLastErrorInfo::_mutex");
// We use 2 so we can flip for getLastError type operations.
RequestInfo _infos[2];
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
index 97e2ccef518..bda2bc6e929 100644
--- a/src/mongo/s/grid.cpp
+++ b/src/mongo/s/grid.cpp
@@ -96,12 +96,12 @@ void Grid::setShardingInitialized() {
}
Grid::CustomConnectionPoolStatsFn Grid::getCustomConnectionPoolStatsFn() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _customConnectionPoolStatsFn;
}
void Grid::setCustomConnectionPoolStatsFn(CustomConnectionPoolStatsFn statsFn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_customConnectionPoolStatsFn || !statsFn);
_customConnectionPoolStatsFn = std::move(statsFn);
}
@@ -117,7 +117,7 @@ void Grid::setAllowLocalHost(bool allow) {
repl::OpTime Grid::configOpTime() const {
invariant(serverGlobalParams.clusterRole != ClusterRole::ConfigServer);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _configOpTime;
}
@@ -141,7 +141,7 @@ boost::optional<repl::OpTime> Grid::advanceConfigOpTime(OperationContext* opCtx,
boost::optional<repl::OpTime> Grid::_advanceConfigOpTime(const repl::OpTime& opTime) {
invariant(serverGlobalParams.clusterRole != ClusterRole::ConfigServer);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_configOpTime < opTime) {
repl::OpTime prev = _configOpTime;
_configOpTime = opTime;
diff --git a/src/mongo/s/grid.h b/src/mongo/s/grid.h
index 794a93f262f..a1e47db40cf 100644
--- a/src/mongo/s/grid.h
+++ b/src/mongo/s/grid.h
@@ -30,12 +30,12 @@
#pragma once
#include "mongo/db/repl/optime.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/stdx/functional.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -191,7 +191,7 @@ private:
AtomicWord<bool> _shardingInitialized{false};
// Protects _configOpTime.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("Grid::_mutex");
// Last known highest opTime from the config server that should be used when doing reads.
// This value is updated any time a shard or mongos talks to a config server or a shard.
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index 2d88c7b4f53..056c95f1306 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -115,12 +115,12 @@ AsyncResultsMerger::AsyncResultsMerger(OperationContext* opCtx,
}
AsyncResultsMerger::~AsyncResultsMerger() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_remotesExhausted(lk) || _lifecycleState == kKillComplete);
}
bool AsyncResultsMerger::remotesExhausted() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _remotesExhausted(lk);
}
@@ -135,7 +135,7 @@ bool AsyncResultsMerger::_remotesExhausted(WithLock) const {
}
Status AsyncResultsMerger::setAwaitDataTimeout(Milliseconds awaitDataTimeout) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_tailableMode != TailableModeEnum::kTailableAndAwaitData) {
return Status(ErrorCodes::BadValue,
@@ -155,12 +155,12 @@ Status AsyncResultsMerger::setAwaitDataTimeout(Milliseconds awaitDataTimeout) {
}
bool AsyncResultsMerger::ready() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _ready(lk);
}
void AsyncResultsMerger::detachFromOperationContext() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_opCtx = nullptr;
// If we were about ready to return a boost::none because a tailable cursor reached the end of
// the batch, that should no longer apply to the next use - when we are reattached to a
@@ -170,13 +170,13 @@ void AsyncResultsMerger::detachFromOperationContext() {
}
void AsyncResultsMerger::reattachToOperationContext(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_opCtx);
_opCtx = opCtx;
}
void AsyncResultsMerger::addNewShardCursors(std::vector<RemoteCursor>&& newCursors) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Create a new entry in the '_remotes' list for each new shard, and add the first cursor batch
// to its buffer. This ensures the shard's initial high water mark is respected, if it exists.
for (auto&& remote : newCursors) {
@@ -189,7 +189,7 @@ void AsyncResultsMerger::addNewShardCursors(std::vector<RemoteCursor>&& newCurso
}
BSONObj AsyncResultsMerger::getHighWaterMark() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto minPromisedSortKey = _getMinPromisedSortKey(lk);
if (!minPromisedSortKey.isEmpty() && !_ready(lk)) {
_highWaterMark = minPromisedSortKey;
@@ -272,7 +272,7 @@ bool AsyncResultsMerger::_readyUnsorted(WithLock) {
}
StatusWith<ClusterQueryResult> AsyncResultsMerger::nextReady() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
dassert(_ready(lk));
if (_lifecycleState != kAlive) {
return Status(ErrorCodes::IllegalOperation, "AsyncResultsMerger killed");
@@ -400,7 +400,7 @@ Status AsyncResultsMerger::_askForNextBatch(WithLock, size_t remoteIndex) {
auto callbackStatus =
_executor->scheduleRemoteCommand(request, [this, remoteIndex](auto const& cbData) {
- stdx::lock_guard<stdx::mutex> lk(this->_mutex);
+ stdx::lock_guard<Latch> lk(this->_mutex);
this->_handleBatchResponse(lk, cbData, remoteIndex);
});
@@ -413,7 +413,7 @@ Status AsyncResultsMerger::_askForNextBatch(WithLock, size_t remoteIndex) {
}
Status AsyncResultsMerger::scheduleGetMores() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _scheduleGetMores(lk);
}
@@ -447,7 +447,7 @@ Status AsyncResultsMerger::_scheduleGetMores(WithLock lk) {
* 3. Remotes that reached maximum retries will be in 'exhausted' state.
*/
StatusWith<executor::TaskExecutor::EventHandle> AsyncResultsMerger::nextEvent() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_lifecycleState != kAlive) {
// Can't schedule further network operations if the ARM is being killed.
@@ -714,7 +714,7 @@ void AsyncResultsMerger::_scheduleKillCursors(WithLock, OperationContext* opCtx)
}
executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_killCompleteEvent.isValid()) {
invariant(_lifecycleState != kAlive);
diff --git a/src/mongo/s/query/async_results_merger.h b/src/mongo/s/query/async_results_merger.h
index 3cf357dca6b..e0e0f2e94c1 100644
--- a/src/mongo/s/query/async_results_merger.h
+++ b/src/mongo/s/query/async_results_merger.h
@@ -37,9 +37,9 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/db/cursor_id.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/query/async_results_merger_params_gen.h"
#include "mongo/s/query/cluster_query_result.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
@@ -451,7 +451,7 @@ private:
AsyncResultsMergerParams _params;
// Must be acquired before accessing any data members (other than _params, which is read-only).
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("AsyncResultsMerger::_mutex");
// Data tracking the state of our communication with each of the remote nodes.
std::vector<RemoteCursorData> _remotes;
diff --git a/src/mongo/s/query/blocking_results_merger_test.cpp b/src/mongo/s/query/blocking_results_merger_test.cpp
index 5d07b0e2c75..c99aff31fcf 100644
--- a/src/mongo/s/query/blocking_results_merger_test.cpp
+++ b/src/mongo/s/query/blocking_results_merger_test.cpp
@@ -157,13 +157,13 @@ TEST_F(ResultsMergerTestFixture, ShouldBeAbleToBlockUntilNextResultIsReadyWithDe
future.default_timed_get();
// Used for synchronizing the background thread with this thread.
- stdx::mutex mutex;
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ auto mutex = MONGO_MAKE_LATCH();
+ stdx::unique_lock<Latch> lk(mutex);
// Issue a blocking wait for the next result asynchronously on a different thread.
future = launchAsync([&]() {
// Block until the main thread has responded to the getMore.
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
auto next = unittest::assertGet(blockingMerger.next(
operationContext(), RouterExecStage::ExecContext::kGetMoreNoResultsYet));
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index f5b3290a59a..8c901ef8afb 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -254,7 +254,7 @@ ClusterCursorManager::~ClusterCursorManager() {
void ClusterCursorManager::shutdown(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
killAllCursors(opCtx);
@@ -270,7 +270,7 @@ StatusWith<CursorId> ClusterCursorManager::registerCursor(
// Read the clock out of the lock.
const auto now = _clockSource->now();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown) {
lk.unlock();
@@ -333,7 +333,7 @@ StatusWith<ClusterCursorManager::PinnedCursor> ClusterCursorManager::checkOutCur
OperationContext* opCtx,
AuthzCheckFn authChecker,
AuthCheck checkSessionAuth) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_inShutdown) {
return Status(ErrorCodes::ShutdownInProgress,
@@ -396,7 +396,7 @@ void ClusterCursorManager::checkInCursor(std::unique_ptr<ClusterClientCursor> cu
cursor->detachFromOperationContext();
cursor->setLastUseDate(now);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
CursorEntry* entry = _getEntry(lk, nss, cursorId);
invariant(entry);
@@ -421,7 +421,7 @@ Status ClusterCursorManager::checkAuthForKillCursors(OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId,
AuthzCheckFn authChecker) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto entry = _getEntry(lk, nss, cursorId);
if (!entry) {
@@ -449,7 +449,7 @@ Status ClusterCursorManager::killCursor(OperationContext* opCtx,
CursorId cursorId) {
invariant(opCtx);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
CursorEntry* entry = _getEntry(lk, nss, cursorId);
if (!entry) {
@@ -473,7 +473,7 @@ Status ClusterCursorManager::killCursor(OperationContext* opCtx,
return Status::OK();
}
-void ClusterCursorManager::detachAndKillCursor(stdx::unique_lock<stdx::mutex> lk,
+void ClusterCursorManager::detachAndKillCursor(stdx::unique_lock<Latch> lk,
OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId) {
@@ -488,7 +488,7 @@ void ClusterCursorManager::detachAndKillCursor(stdx::unique_lock<stdx::mutex> lk
std::size_t ClusterCursorManager::killMortalCursorsInactiveSince(OperationContext* opCtx,
Date_t cutoff) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto pred = [cutoff](CursorId cursorId, const CursorEntry& entry) -> bool {
bool res = entry.getLifetimeType() == CursorLifetime::Mortal &&
@@ -506,14 +506,14 @@ std::size_t ClusterCursorManager::killMortalCursorsInactiveSince(OperationContex
}
void ClusterCursorManager::killAllCursors(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto pred = [](CursorId, const CursorEntry&) -> bool { return true; };
killCursorsSatisfying(std::move(lk), opCtx, std::move(pred));
}
std::size_t ClusterCursorManager::killCursorsSatisfying(
- stdx::unique_lock<stdx::mutex> lk,
+ stdx::unique_lock<Latch> lk,
OperationContext* opCtx,
std::function<bool(CursorId, const CursorEntry&)> pred) {
invariant(opCtx);
@@ -568,7 +568,7 @@ std::size_t ClusterCursorManager::killCursorsSatisfying(
}
ClusterCursorManager::Stats ClusterCursorManager::stats() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
Stats stats;
@@ -601,7 +601,7 @@ ClusterCursorManager::Stats ClusterCursorManager::stats() const {
}
void ClusterCursorManager::appendActiveSessions(LogicalSessionIdSet* lsids) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& nsContainerPair : _namespaceToContainerMap) {
for (const auto& cursorIdEntryPair : nsContainerPair.second.entryMap) {
@@ -642,7 +642,7 @@ std::vector<GenericCursor> ClusterCursorManager::getIdleCursors(
const OperationContext* opCtx, MongoProcessInterface::CurrentOpUserMode userMode) const {
std::vector<GenericCursor> cursors;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
AuthorizationSession* ctxAuth = AuthorizationSession::get(opCtx->getClient());
@@ -690,7 +690,7 @@ std::pair<Status, int> ClusterCursorManager::killCursorsWithMatchingSessions(
stdx::unordered_set<CursorId> ClusterCursorManager::getCursorsForSession(
LogicalSessionId lsid) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
stdx::unordered_set<CursorId> cursorIds;
@@ -715,7 +715,7 @@ stdx::unordered_set<CursorId> ClusterCursorManager::getCursorsForSession(
boost::optional<NamespaceString> ClusterCursorManager::getNamespaceForCursorId(
CursorId cursorId) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const auto it = _cursorIdPrefixToNamespaceMap.find(extractPrefixFromCursorId(cursorId));
if (it == _cursorIdPrefixToNamespaceMap.end()) {
diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h
index 3998f7cf00d..e2d58bcfb1a 100644
--- a/src/mongo/s/query/cluster_cursor_manager.h
+++ b/src/mongo/s/query/cluster_cursor_manager.h
@@ -38,10 +38,10 @@
#include "mongo/db/kill_sessions.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/session_killer.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
#include "mongo/s/query/cluster_client_cursor.h"
#include "mongo/s/query/cluster_client_cursor_params.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/time_support.h"
@@ -508,7 +508,7 @@ private:
/**
* Will detach a cursor, release the lock and then call kill() on it.
*/
- void detachAndKillCursor(stdx::unique_lock<stdx::mutex> lk,
+ void detachAndKillCursor(stdx::unique_lock<Latch> lk,
OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId);
@@ -544,7 +544,7 @@ private:
*
* Returns the number of cursors killed.
*/
- std::size_t killCursorsSatisfying(stdx::unique_lock<stdx::mutex> lk,
+ std::size_t killCursorsSatisfying(stdx::unique_lock<Latch> lk,
OperationContext* opCtx,
std::function<bool(CursorId, const CursorEntry&)> pred);
@@ -695,7 +695,7 @@ private:
ClockSource* _clockSource;
// Synchronizes access to all private state variables below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ClusterCursorManager::_mutex");
bool _inShutdown{false};
diff --git a/src/mongo/s/query/establish_cursors.h b/src/mongo/s/query/establish_cursors.h
index 97e72225072..95f6e7ae9d0 100644
--- a/src/mongo/s/query/establish_cursors.h
+++ b/src/mongo/s/query/establish_cursors.h
@@ -37,9 +37,9 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/db/cursor_id.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/query/async_results_merger_params_gen.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
diff --git a/src/mongo/s/router_transactions_metrics.cpp b/src/mongo/s/router_transactions_metrics.cpp
index ddc8406354f..b3cad2b1edc 100644
--- a/src/mongo/s/router_transactions_metrics.cpp
+++ b/src/mongo/s/router_transactions_metrics.cpp
@@ -223,7 +223,7 @@ void RouterTransactionsMetrics::incrementCommitSuccessful(TransactionRouter::Com
void RouterTransactionsMetrics::incrementAbortCauseMap(std::string abortCause) {
invariant(!abortCause.empty());
- stdx::lock_guard<stdx::mutex> lock(_abortCauseMutex);
+ stdx::lock_guard<Latch> lock(_abortCauseMutex);
auto it = _abortCauseMap.find(abortCause);
if (it == _abortCauseMap.end()) {
_abortCauseMap.emplace(std::pair<std::string, std::int64_t>(std::move(abortCause), 1));
@@ -263,7 +263,7 @@ void RouterTransactionsMetrics::updateStats(RouterTransactionsStats* stats) {
BSONObjBuilder bob;
{
- stdx::lock_guard<stdx::mutex> lock(_abortCauseMutex);
+ stdx::lock_guard<Latch> lock(_abortCauseMutex);
for (auto const& abortCauseEntry : _abortCauseMap) {
bob.append(abortCauseEntry.first, abortCauseEntry.second);
}
diff --git a/src/mongo/s/router_transactions_metrics.h b/src/mongo/s/router_transactions_metrics.h
index ed496fe394c..5c52a8e20d0 100644
--- a/src/mongo/s/router_transactions_metrics.h
+++ b/src/mongo/s/router_transactions_metrics.h
@@ -147,7 +147,7 @@ private:
CommitStats _recoverWithTokenCommitStats;
// Mutual exclusion for _abortCauseMap
- stdx::mutex _abortCauseMutex;
+ Mutex _abortCauseMutex = MONGO_MAKE_LATCH("RouterTransactionsMetrics::_abortCauseMutex");
// Map tracking the total number of each abort cause for any multi-statement transaction that
// was aborted through this router.
diff --git a/src/mongo/s/sharding_task_executor.h b/src/mongo/s/sharding_task_executor.h
index 4e96d412149..fd35c8131ca 100644
--- a/src/mongo/s/sharding_task_executor.h
+++ b/src/mongo/s/sharding_task_executor.h
@@ -33,9 +33,9 @@
#include "mongo/base/status_with.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/list.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
namespace executor {
diff --git a/src/mongo/s/sharding_task_executor_pool_controller.h b/src/mongo/s/sharding_task_executor_pool_controller.h
index c077578892f..d9b82233974 100644
--- a/src/mongo/s/sharding_task_executor_pool_controller.h
+++ b/src/mongo/s/sharding_task_executor_pool_controller.h
@@ -35,7 +35,7 @@
#include "mongo/client/replica_set_change_notifier.h"
#include "mongo/executor/connection_pool.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
@@ -194,7 +194,7 @@ private:
ReplicaSetChangeListenerHandle _listener;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardingTaskExecutorPoolController::_mutex");
// Entires to _poolDatas are added by addHost() and removed by removeHost()
stdx::unordered_map<PoolId, PoolData> _poolDatas;
diff --git a/src/mongo/scripting/deadline_monitor.h b/src/mongo/scripting/deadline_monitor.h
index 03abfcbdac6..40c8217cb07 100644
--- a/src/mongo/scripting/deadline_monitor.h
+++ b/src/mongo/scripting/deadline_monitor.h
@@ -31,8 +31,8 @@
#include <cstdint>
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/idle_thread_block.h"
@@ -84,7 +84,7 @@ public:
~DeadlineMonitor() {
{
// ensure the monitor thread has been stopped before destruction
- stdx::lock_guard<stdx::mutex> lk(_deadlineMutex);
+ stdx::lock_guard<Latch> lk(_deadlineMutex);
_inShutdown = true;
_newDeadlineAvailable.notify_one();
}
@@ -105,7 +105,7 @@ public:
} else {
deadline = Date_t::max();
}
- stdx::lock_guard<stdx::mutex> lk(_deadlineMutex);
+ stdx::lock_guard<Latch> lk(_deadlineMutex);
if (_tasks.find(task) == _tasks.end()) {
_tasks.emplace(task, deadline);
@@ -123,7 +123,7 @@ public:
* @return true if the task was found and erased
*/
bool stopDeadline(_Task* const task) {
- stdx::lock_guard<stdx::mutex> lk(_deadlineMutex);
+ stdx::lock_guard<Latch> lk(_deadlineMutex);
return _tasks.erase(task);
}
@@ -135,7 +135,7 @@ private:
*/
void deadlineMonitorThread() {
setThreadName("DeadlineMonitor");
- stdx::unique_lock<stdx::mutex> lk(_deadlineMutex);
+ stdx::unique_lock<Latch> lk(_deadlineMutex);
Date_t lastInterruptCycle = Date_t::now();
while (!_inShutdown) {
// get the next interval to wait
@@ -187,8 +187,9 @@ private:
}
using TaskDeadlineMap = stdx::unordered_map<_Task*, Date_t>;
- TaskDeadlineMap _tasks; // map of running tasks with deadlines
- stdx::mutex _deadlineMutex; // protects all non-const members, except _monitorThread
+ TaskDeadlineMap _tasks; // map of running tasks with deadlines
+ // protects all non-const members, except _monitorThread
+ Mutex _deadlineMutex = MONGO_MAKE_LATCH("DeadlineMonitor::_deadlineMutex");
stdx::condition_variable _newDeadlineAvailable; // Signaled for timeout, start and stop
stdx::thread _monitorThread; // the deadline monitor thread
Date_t _nearestDeadlineWallclock = Date_t::max(); // absolute time of the nearest deadline
diff --git a/src/mongo/scripting/deadline_monitor_test.cpp b/src/mongo/scripting/deadline_monitor_test.cpp
index a9dd1a546ce..6a9ca6cc2f2 100644
--- a/src/mongo/scripting/deadline_monitor_test.cpp
+++ b/src/mongo/scripting/deadline_monitor_test.cpp
@@ -45,20 +45,20 @@ class TaskGroup {
public:
TaskGroup() : _c(), _killCount(0), _targetKillCount(0) {}
void noteKill() {
- stdx::lock_guard<stdx::mutex> lk(_m);
+ stdx::lock_guard<Latch> lk(_m);
++_killCount;
if (_killCount >= _targetKillCount)
_c.notify_one();
}
void waitForKillCount(uint64_t target) {
- stdx::unique_lock<stdx::mutex> lk(_m);
+ stdx::unique_lock<Latch> lk(_m);
_targetKillCount = target;
while (_killCount < _targetKillCount)
_c.wait(lk);
}
private:
- stdx::mutex _m;
+ Mutex _m = MONGO_MAKE_LATCH("TaskGroup::_m");
stdx::condition_variable _c;
uint64_t _killCount;
uint64_t _targetKillCount;
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
index 4fb071471eb..676b20c6f18 100644
--- a/src/mongo/scripting/engine.cpp
+++ b/src/mongo/scripting/engine.cpp
@@ -332,7 +332,7 @@ namespace {
class ScopeCache {
public:
void release(const string& poolName, const std::shared_ptr<Scope>& scope) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (scope->hasOutOfMemoryException()) {
// make some room
@@ -358,7 +358,7 @@ public:
}
std::shared_ptr<Scope> tryAcquire(OperationContext* opCtx, const string& poolName) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (Pools::iterator it = _pools.begin(); it != _pools.end(); ++it) {
if (it->poolName == poolName) {
@@ -374,7 +374,7 @@ public:
}
void clear() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_pools.clear();
}
@@ -391,7 +391,7 @@ private:
typedef std::deque<ScopeAndPool> Pools; // More-recently used Scopes are kept at the front.
Pools _pools; // protected by _mutex
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ScopeCache::_mutex");
};
ScopeCache scopeCache;
diff --git a/src/mongo/scripting/mozjs/PosixNSPR.cpp b/src/mongo/scripting/mozjs/PosixNSPR.cpp
index 55a821450e6..42fdb5aa839 100644
--- a/src/mongo/scripting/mozjs/PosixNSPR.cpp
+++ b/src/mongo/scripting/mozjs/PosixNSPR.cpp
@@ -22,9 +22,9 @@
#include <js/Utility.h>
#include <vm/PosixNSPR.h>
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/chrono.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/thread_name.h"
#include "mongo/util/time_support.h"
@@ -185,11 +185,11 @@ PRStatus PR_CallOnceWithArg(PRCallOnceType* once, PRCallOnceWithArgFN func, void
}
class nspr::Lock {
- mongo::stdx::mutex mutex_;
+ mongo::Mutex mutex_;
public:
Lock() {}
- mongo::stdx::mutex& mutex() {
+ mongo::Mutex& mutex() {
return mutex_;
}
};
@@ -265,8 +265,8 @@ uint32_t PR_TicksPerSecond() {
PRStatus PR_WaitCondVar(PRCondVar* cvar, uint32_t timeout) {
if (timeout == PR_INTERVAL_NO_TIMEOUT) {
try {
- mongo::stdx::unique_lock<mongo::stdx::mutex> lk(cvar->lock()->mutex(),
- mongo::stdx::adopt_lock_t());
+ mongo::stdx::unique_lock<mongo::Mutex> lk(cvar->lock()->mutex(),
+ mongo::stdx::adopt_lock_t());
cvar->cond().wait(lk);
lk.release();
@@ -277,8 +277,8 @@ PRStatus PR_WaitCondVar(PRCondVar* cvar, uint32_t timeout) {
}
} else {
try {
- mongo::stdx::unique_lock<mongo::stdx::mutex> lk(cvar->lock()->mutex(),
- mongo::stdx::adopt_lock_t());
+ mongo::stdx::unique_lock<mongo::Mutex> lk(cvar->lock()->mutex(),
+ mongo::stdx::adopt_lock_t());
cvar->cond().wait_for(lk, mongo::Microseconds(timeout).toSystemDuration());
lk.release();
diff --git a/src/mongo/scripting/mozjs/countdownlatch.cpp b/src/mongo/scripting/mozjs/countdownlatch.cpp
index 9f0abbed741..d9100b4bde0 100644
--- a/src/mongo/scripting/mozjs/countdownlatch.cpp
+++ b/src/mongo/scripting/mozjs/countdownlatch.cpp
@@ -31,10 +31,10 @@
#include "mongo/scripting/mozjs/countdownlatch.h"
+#include "mongo/platform/mutex.h"
#include "mongo/scripting/mozjs/implscope.h"
#include "mongo/scripting/mozjs/objectwrapper.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
@@ -61,17 +61,17 @@ public:
int32_t make(int32_t count) {
uassert(ErrorCodes::JSInterpreterFailure, "argument must be >= 0", count >= 0);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
int32_t desc = ++_counter;
- _latches.insert(std::make_pair(desc, std::make_shared<Latch>(count)));
+ _latches.insert(std::make_pair(desc, std::make_shared<CountDownLatch>(count)));
return desc;
}
void await(int32_t desc) {
- std::shared_ptr<Latch> latch = get(desc);
- stdx::unique_lock<stdx::mutex> lock(latch->mutex);
+ auto latch = get(desc);
+ stdx::unique_lock<Latch> lock(latch->mutex);
while (latch->count != 0) {
latch->cv.wait(lock);
@@ -79,8 +79,8 @@ public:
}
void countDown(int32_t desc) {
- std::shared_ptr<Latch> latch = get(desc);
- stdx::unique_lock<stdx::mutex> lock(latch->mutex);
+ auto latch = get(desc);
+ stdx::unique_lock<Latch> lock(latch->mutex);
if (latch->count > 0)
latch->count--;
@@ -90,8 +90,8 @@ public:
}
int32_t getCount(int32_t desc) {
- std::shared_ptr<Latch> latch = get(desc);
- stdx::unique_lock<stdx::mutex> lock(latch->mutex);
+ auto latch = get(desc);
+ stdx::unique_lock<Latch> lock(latch->mutex);
return latch->count;
}
@@ -100,16 +100,16 @@ private:
/**
* Latches for communication between threads
*/
- struct Latch {
- Latch(int32_t count) : count(count) {}
+ struct CountDownLatch {
+ CountDownLatch(int32_t count) : count(count) {}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("Latch::mutex");
stdx::condition_variable cv;
int32_t count;
};
- std::shared_ptr<Latch> get(int32_t desc) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ std::shared_ptr<CountDownLatch> get(int32_t desc) {
+ stdx::lock_guard<Latch> lock(_mutex);
auto iter = _latches.find(desc);
uassert(ErrorCodes::JSInterpreterFailure,
@@ -119,9 +119,9 @@ private:
return iter->second;
}
- using Map = stdx::unordered_map<int32_t, std::shared_ptr<Latch>>;
+ using Map = stdx::unordered_map<int32_t, std::shared_ptr<CountDownLatch>>;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CountDownLatchHolder::_mutex");
Map _latches;
int32_t _counter;
};
diff --git a/src/mongo/scripting/mozjs/engine.cpp b/src/mongo/scripting/mozjs/engine.cpp
index 1cb2e6ca3c3..c09c97a3f0b 100644
--- a/src/mongo/scripting/mozjs/engine.cpp
+++ b/src/mongo/scripting/mozjs/engine.cpp
@@ -82,7 +82,7 @@ mongo::Scope* MozJSScriptEngine::createScopeForCurrentThread() {
}
void MozJSScriptEngine::interrupt(unsigned opId) {
- stdx::lock_guard<stdx::mutex> intLock(_globalInterruptLock);
+ stdx::lock_guard<Latch> intLock(_globalInterruptLock);
OpIdToScopeMap::iterator iScope = _opToScopeMap.find(opId);
if (iScope == _opToScopeMap.end()) {
// got interrupt request for a scope that no longer exists
@@ -109,7 +109,7 @@ std::string MozJSScriptEngine::printKnownOps_inlock() {
}
void MozJSScriptEngine::interruptAll() {
- stdx::lock_guard<stdx::mutex> interruptLock(_globalInterruptLock);
+ stdx::lock_guard<Latch> interruptLock(_globalInterruptLock);
for (auto&& iScope : _opToScopeMap) {
iScope.second->kill();
@@ -141,7 +141,7 @@ void MozJSScriptEngine::setJSHeapLimitMB(int limit) {
}
void MozJSScriptEngine::registerOperation(OperationContext* opCtx, MozJSImplScope* scope) {
- stdx::lock_guard<stdx::mutex> giLock(_globalInterruptLock);
+ stdx::lock_guard<Latch> giLock(_globalInterruptLock);
auto opId = opCtx->getOpID();
@@ -155,7 +155,7 @@ void MozJSScriptEngine::registerOperation(OperationContext* opCtx, MozJSImplScop
}
void MozJSScriptEngine::unregisterOperation(unsigned int opId) {
- stdx::lock_guard<stdx::mutex> giLock(_globalInterruptLock);
+ stdx::lock_guard<Latch> giLock(_globalInterruptLock);
LOG(2) << "ImplScope " << static_cast<const void*>(this) << " unregistered for op " << opId;
diff --git a/src/mongo/scripting/mozjs/engine.h b/src/mongo/scripting/mozjs/engine.h
index 789137b1663..d49103a7996 100644
--- a/src/mongo/scripting/mozjs/engine.h
+++ b/src/mongo/scripting/mozjs/engine.h
@@ -31,9 +31,9 @@
#include <jsapi.h>
+#include "mongo/platform/mutex.h"
#include "mongo/scripting/deadline_monitor.h"
#include "mongo/scripting/engine.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/mutex.h"
@@ -91,7 +91,7 @@ private:
/**
* This mutex protects _opToScopeMap
*/
- stdx::mutex _globalInterruptLock;
+ Mutex _globalInterruptLock = MONGO_MAKE_LATCH("MozJSScriptEngine::_globalInterruptLock");
using OpIdToScopeMap = stdx::unordered_map<unsigned, MozJSImplScope*>;
OpIdToScopeMap _opToScopeMap; // map of mongo op ids to scopes (protected by
diff --git a/src/mongo/scripting/mozjs/implscope.cpp b/src/mongo/scripting/mozjs/implscope.cpp
index 1d16da68f19..44b311693f0 100644
--- a/src/mongo/scripting/mozjs/implscope.cpp
+++ b/src/mongo/scripting/mozjs/implscope.cpp
@@ -40,13 +40,13 @@
#include "mongo/base/error_codes.h"
#include "mongo/db/operation_context.h"
#include "mongo/platform/decimal128.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/stack_locator.h"
#include "mongo/scripting/jsexception.h"
#include "mongo/scripting/mozjs/objectwrapper.h"
#include "mongo/scripting/mozjs/valuereader.h"
#include "mongo/scripting/mozjs/valuewriter.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -93,7 +93,7 @@ const int kStackChunkSize = 8192;
* Runtime's can race on first creation (on some function statics), so we just
* serialize the initial Runtime creation.
*/
-stdx::mutex gRuntimeCreationMutex;
+Mutex gRuntimeCreationMutex;
bool gFirstRuntimeCreated = false;
bool closeToMaxMemory() {
@@ -146,7 +146,7 @@ void MozJSImplScope::unregisterOperation() {
void MozJSImplScope::kill() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If we are on the right thread, in the middle of an operation, and we have a registered
// opCtx, then we should check the opCtx for interrupts.
@@ -168,7 +168,7 @@ void MozJSImplScope::interrupt() {
}
bool MozJSImplScope::isKillPending() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return !_killStatus.isOK();
}
@@ -195,7 +195,7 @@ bool MozJSImplScope::_interruptCallback(JSContext* cx) {
// Check our initial kill status (which might be fine).
auto status = [&scope]() -> Status {
- stdx::lock_guard<stdx::mutex> lk(scope->_mutex);
+ stdx::lock_guard<Latch> lk(scope->_mutex);
return scope->_killStatus;
}();
@@ -291,7 +291,7 @@ MozJSImplScope::MozRuntime::MozRuntime(const MozJSScriptEngine* engine) {
}
{
- stdx::unique_lock<stdx::mutex> lk(gRuntimeCreationMutex);
+ stdx::unique_lock<Latch> lk(gRuntimeCreationMutex);
if (gFirstRuntimeCreated) {
// If we've already made a runtime, just proceed
@@ -786,7 +786,7 @@ void MozJSImplScope::gc() {
}
void MozJSImplScope::sleep(Milliseconds ms) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
uassert(ErrorCodes::JSUncatchableError,
"sleep was interrupted by kill",
@@ -865,7 +865,7 @@ void MozJSImplScope::setStatus(Status status) {
bool MozJSImplScope::_checkErrorState(bool success, bool reportError, bool assertOnError) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_killStatus.isOK()) {
success = false;
setStatus(_killStatus);
diff --git a/src/mongo/scripting/mozjs/implscope.h b/src/mongo/scripting/mozjs/implscope.h
index b0eb7c55c14..9a087b6da06 100644
--- a/src/mongo/scripting/mozjs/implscope.h
+++ b/src/mongo/scripting/mozjs/implscope.h
@@ -414,7 +414,7 @@ private:
std::vector<JS::PersistentRootedValue> _funcs;
InternedStringTable _internedStrings;
Status _killStatus;
- mutable std::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MozJSImplScope::_mutex");
stdx::condition_variable _sleepCondition;
std::string _error;
unsigned int _opId; // op id for this scope
diff --git a/src/mongo/scripting/mozjs/jsthread.cpp b/src/mongo/scripting/mozjs/jsthread.cpp
index a46e60c9fe1..0968733b91f 100644
--- a/src/mongo/scripting/mozjs/jsthread.cpp
+++ b/src/mongo/scripting/mozjs/jsthread.cpp
@@ -37,12 +37,12 @@
#include <cstdio>
#include "mongo/db/jsobj.h"
+#include "mongo/platform/mutex.h"
#include "mongo/scripting/mozjs/implscope.h"
#include "mongo/scripting/mozjs/valuereader.h"
#include "mongo/scripting/mozjs/valuewriter.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/log.h"
#include "mongo/util/stacktrace.h"
@@ -160,12 +160,12 @@ private:
SharedData() = default;
void setErrorStatus(Status status) {
- stdx::lock_guard<stdx::mutex> lck(_statusMutex);
+ stdx::lock_guard<Latch> lck(_statusMutex);
_status = std::move(status);
}
Status getErrorStatus() {
- stdx::lock_guard<stdx::mutex> lck(_statusMutex);
+ stdx::lock_guard<Latch> lck(_statusMutex);
return _status;
}
@@ -179,7 +179,7 @@ private:
std::string _stack;
private:
- stdx::mutex _statusMutex;
+ Mutex _statusMutex = MONGO_MAKE_LATCH("SharedData::_statusMutex");
Status _status = Status::OK();
};
diff --git a/src/mongo/scripting/mozjs/proxyscope.cpp b/src/mongo/scripting/mozjs/proxyscope.cpp
index fbe83f9ed83..fc3a38d0927 100644
--- a/src/mongo/scripting/mozjs/proxyscope.cpp
+++ b/src/mongo/scripting/mozjs/proxyscope.cpp
@@ -285,7 +285,7 @@ void MozJSProxyScope::runWithoutInterruptionExceptAtGlobalShutdown(Closure&& clo
}
void MozJSProxyScope::runOnImplThread(unique_function<void()> f) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_function = std::move(f);
invariant(_state == State::Idle);
@@ -322,7 +322,7 @@ void MozJSProxyScope::runOnImplThread(unique_function<void()> f) {
void MozJSProxyScope::shutdownThread() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_state == State::Idle);
@@ -369,7 +369,7 @@ void MozJSProxyScope::implThread(void* arg) {
const auto unbindImplScope = makeGuard([&proxy] { proxy->_implScope = nullptr; });
while (true) {
- stdx::unique_lock<stdx::mutex> lk(proxy->_mutex);
+ stdx::unique_lock<Latch> lk(proxy->_mutex);
{
MONGO_IDLE_THREAD_BLOCK;
proxy->_implCondvar.wait(lk, [proxy] {
diff --git a/src/mongo/scripting/mozjs/proxyscope.h b/src/mongo/scripting/mozjs/proxyscope.h
index 9d11a857923..4bada54579a 100644
--- a/src/mongo/scripting/mozjs/proxyscope.h
+++ b/src/mongo/scripting/mozjs/proxyscope.h
@@ -32,9 +32,9 @@
#include "vm/PosixNSPR.h"
#include "mongo/client/dbclient_cursor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/scripting/mozjs/engine.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/functional.h"
@@ -195,7 +195,7 @@ private:
* This mutex protects _function, _state and _status as channels for
* function invocation and exception handling
*/
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MozJSProxyScope::_mutex");
unique_function<void()> _function;
State _state;
Status _status;
diff --git a/src/mongo/shell/bench.cpp b/src/mongo/shell/bench.cpp
index e682a08f2bd..06493a8c364 100644
--- a/src/mongo/shell/bench.cpp
+++ b/src/mongo/shell/bench.cpp
@@ -764,7 +764,7 @@ BenchRunState::~BenchRunState() {
}
void BenchRunState::waitForState(State awaitedState) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
switch (awaitedState) {
case BRS_RUNNING:
@@ -792,7 +792,7 @@ void BenchRunState::tellWorkersToCollectStats() {
}
void BenchRunState::assertFinished() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
verify(0 == _numUnstartedWorkers + _numActiveWorkers);
}
@@ -805,7 +805,7 @@ bool BenchRunState::shouldWorkerCollectStats() const {
}
void BenchRunState::onWorkerStarted() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
verify(_numUnstartedWorkers > 0);
--_numUnstartedWorkers;
++_numActiveWorkers;
@@ -815,7 +815,7 @@ void BenchRunState::onWorkerStarted() {
}
void BenchRunState::onWorkerFinished() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
verify(_numActiveWorkers > 0);
--_numActiveWorkers;
if (_numActiveWorkers + _numUnstartedWorkers == 0) {
@@ -1376,7 +1376,7 @@ void BenchRunWorker::run() {
BenchRunner::BenchRunner(BenchRunConfig* config) : _brState(config->parallel), _config(config) {
_oid.init();
- stdx::lock_guard<stdx::mutex> lk(_staticMutex);
+ stdx::lock_guard<Latch> lk(_staticMutex);
_activeRuns[_oid] = this;
}
@@ -1438,7 +1438,7 @@ void BenchRunner::stop() {
}
{
- stdx::lock_guard<stdx::mutex> lk(_staticMutex);
+ stdx::lock_guard<Latch> lk(_staticMutex);
_activeRuns.erase(_oid);
}
}
@@ -1449,7 +1449,7 @@ BenchRunner* BenchRunner::createWithConfig(const BSONObj& configArgs) {
}
BenchRunner* BenchRunner::get(OID oid) {
- stdx::lock_guard<stdx::mutex> lk(_staticMutex);
+ stdx::lock_guard<Latch> lk(_staticMutex);
return _activeRuns[oid];
}
@@ -1523,7 +1523,7 @@ BSONObj BenchRunner::finish(BenchRunner* runner) {
return zoo;
}
-stdx::mutex BenchRunner::_staticMutex;
+Mutex BenchRunner::_staticMutex = MONGO_MAKE_LATCH("BenchRunner");
std::map<OID, BenchRunner*> BenchRunner::_activeRuns;
/**
diff --git a/src/mongo/shell/bench.h b/src/mongo/shell/bench.h
index f73d2149abe..170023917ac 100644
--- a/src/mongo/shell/bench.h
+++ b/src/mongo/shell/bench.h
@@ -38,8 +38,8 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/ops/write_ops_parsers.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/timer.h"
@@ -449,7 +449,7 @@ public:
void onWorkerFinished();
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("BenchRunState::_mutex");
stdx::condition_variable _stateChangeCondition;
@@ -599,7 +599,7 @@ public:
private:
// TODO: Same as for createWithConfig.
- static stdx::mutex _staticMutex;
+ static Mutex _staticMutex;
static std::map<OID, BenchRunner*> _activeRuns;
OID _oid;
diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp
index ce086bb03ee..8e743861d3c 100644
--- a/src/mongo/shell/dbshell.cpp
+++ b/src/mongo/shell/dbshell.cpp
@@ -158,7 +158,7 @@ private:
// This needs to use a mutex rather than an atomic bool because we need to ensure that no more
// logging will happen once we return from disable().
- static inline stdx::mutex mx;
+ static inline Mutex mx = MONGO_MAKE_LATCH("ShellConsoleAppender::mx");
static inline bool loggingEnabled = true;
};
diff --git a/src/mongo/shell/shell_utils.cpp b/src/mongo/shell/shell_utils.cpp
index 28b2747b071..df0e8aba43c 100644
--- a/src/mongo/shell/shell_utils.cpp
+++ b/src/mongo/shell/shell_utils.cpp
@@ -50,13 +50,13 @@
#include "mongo/client/dbclient_base.h"
#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/hasher.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
#include "mongo/scripting/engine.h"
#include "mongo/shell/bench.h"
#include "mongo/shell/shell_options.h"
#include "mongo/shell/shell_utils_extended.h"
#include "mongo/shell/shell_utils_launcher.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
#include "mongo/util/processinfo.h"
@@ -443,14 +443,14 @@ void ConnectionRegistry::registerConnection(DBClientBase& client) {
BSONObj info;
if (client.runCommand("admin", BSON("whatsmyuri" << 1), info)) {
std::string connstr = client.getServerAddress();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_connectionUris[connstr].insert(info["you"].str());
}
}
void ConnectionRegistry::killOperationsOnAllConnections(bool withPrompt) const {
Prompter prompter("do you want to kill the current op(s) on the server?");
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto& connection : _connectionUris) {
auto status = ConnectionString::parse(connection.first);
if (!status.isOK()) {
@@ -545,6 +545,6 @@ bool fileExists(const std::string& file) {
}
-stdx::mutex& mongoProgramOutputMutex(*(new stdx::mutex()));
+Mutex& mongoProgramOutputMutex(*(new Mutex()));
} // namespace shell_utils
} // namespace mongo
diff --git a/src/mongo/shell/shell_utils.h b/src/mongo/shell/shell_utils.h
index 0f9c7a7615b..5fb2b844eb6 100644
--- a/src/mongo/shell/shell_utils.h
+++ b/src/mongo/shell/shell_utils.h
@@ -35,7 +35,7 @@
#include <string>
#include "mongo/db/jsobj.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
@@ -82,14 +82,14 @@ public:
private:
std::map<std::string, std::set<std::string>> _connectionUris;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ConnectionRegistry::_mutex");
};
extern ConnectionRegistry connectionRegistry;
// This mutex helps the shell serialize output on exit, to avoid deadlocks at shutdown. So
// it also protects the global dbexitCalled.
-extern stdx::mutex& mongoProgramOutputMutex;
+extern Mutex& mongoProgramOutputMutex;
// Helper to tell if a file exists cross platform
// TODO: Remove this when we have a cross platform file utility library
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index d5d8e010d87..7537d710165 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -137,7 +137,7 @@ void safeClose(int fd) {
}
}
-stdx::mutex _createProcessMtx;
+Mutex _createProcessMtx;
} // namespace
ProgramOutputMultiplexer programOutputLogger;
@@ -242,7 +242,7 @@ void ProgramOutputMultiplexer::appendLine(int port,
ProcessId pid,
const std::string& name,
const std::string& line) {
- stdx::lock_guard<stdx::mutex> lk(mongoProgramOutputMutex);
+ stdx::lock_guard<Latch> lk(mongoProgramOutputMutex);
boost::iostreams::tee_device<std::ostream, std::stringstream> teeDevice(cout, _buffer);
boost::iostreams::stream<decltype(teeDevice)> teeStream(teeDevice);
if (port > 0) {
@@ -253,12 +253,12 @@ void ProgramOutputMultiplexer::appendLine(int port,
}
string ProgramOutputMultiplexer::str() const {
- stdx::lock_guard<stdx::mutex> lk(mongoProgramOutputMutex);
+ stdx::lock_guard<Latch> lk(mongoProgramOutputMutex);
return _buffer.str();
}
void ProgramOutputMultiplexer::clear() {
- stdx::lock_guard<stdx::mutex> lk(mongoProgramOutputMutex);
+ stdx::lock_guard<Latch> lk(mongoProgramOutputMutex);
_buffer.str("");
}
@@ -407,7 +407,7 @@ void ProgramRunner::start() {
//
// Holding the lock for the duration of those events prevents the leaks and thus the
// associated deadlocks.
- stdx::lock_guard<stdx::mutex> lk(_createProcessMtx);
+ stdx::lock_guard<Latch> lk(_createProcessMtx);
int status = pipe(pipeEnds);
if (status != 0) {
const auto ewd = errnoWithDescription();
diff --git a/src/mongo/shell/shell_utils_launcher.h b/src/mongo/shell/shell_utils_launcher.h
index bad1d2bdba7..c93e77ec34a 100644
--- a/src/mongo/shell/shell_utils_launcher.h
+++ b/src/mongo/shell/shell_utils_launcher.h
@@ -37,8 +37,8 @@
#include <vector>
#include "mongo/bson/bsonobj.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/process_id.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/stdx/unordered_set.h"
diff --git a/src/mongo/stdx/condition_variable.h b/src/mongo/stdx/condition_variable.h
index a27567ee13b..21baa7d26cf 100644
--- a/src/mongo/stdx/condition_variable.h
+++ b/src/mongo/stdx/condition_variable.h
@@ -206,7 +206,7 @@ private:
AtomicWord<unsigned long long> _notifyableCount;
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
std::list<Notifyable*> _notifyables;
};
diff --git a/src/mongo/stdx/condition_variable_bm.cpp b/src/mongo/stdx/condition_variable_bm.cpp
index f306e999dfc..f23ba73984e 100644
--- a/src/mongo/stdx/condition_variable_bm.cpp
+++ b/src/mongo/stdx/condition_variable_bm.cpp
@@ -60,7 +60,7 @@ volatile bool alwaysTrue = true;
void BM_stdWaitWithTruePredicate(benchmark::State& state) {
std::condition_variable cv; // NOLINT
- stdx::mutex mutex;
+ stdx::mutex mutex; // NOLINT
stdx::unique_lock<stdx::mutex> lk(mutex);
for (auto _ : state) {
@@ -71,7 +71,7 @@ void BM_stdWaitWithTruePredicate(benchmark::State& state) {
void BM_stdxWaitWithTruePredicate(benchmark::State& state) {
stdx::condition_variable cv;
- stdx::mutex mutex;
+ stdx::mutex mutex; // NOLINT
stdx::unique_lock<stdx::mutex> lk(mutex);
for (auto _ : state) {
diff --git a/src/mongo/tools/bridge.cpp b/src/mongo/tools/bridge.cpp
index b3908037c4b..589b6841019 100644
--- a/src/mongo/tools/bridge.cpp
+++ b/src/mongo/tools/bridge.cpp
@@ -40,12 +40,12 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
#include "mongo/rpc/factory.h"
#include "mongo/rpc/message.h"
#include "mongo/rpc/reply_builder_interface.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/tools/bridge_commands.h"
#include "mongo/tools/mongobridge_options.h"
@@ -116,7 +116,7 @@ public:
HostSettings getHostSettings(boost::optional<HostAndPort> host) {
if (host) {
- stdx::lock_guard<stdx::mutex> lk(_settingsMutex);
+ stdx::lock_guard<Latch> lk(_settingsMutex);
return (_settings)[*host];
}
return {};
@@ -132,7 +132,7 @@ public:
private:
static const ServiceContext::Decoration<BridgeContext> _get;
- stdx::mutex _settingsMutex;
+ Mutex _settingsMutex = MONGO_MAKE_LATCH("BridgeContext::_settingsMutex");
HostSettingsMap _settings;
};
diff --git a/src/mongo/tools/bridge_commands.cpp b/src/mongo/tools/bridge_commands.cpp
index a94153904d4..aa3bc583a15 100644
--- a/src/mongo/tools/bridge_commands.cpp
+++ b/src/mongo/tools/bridge_commands.cpp
@@ -48,7 +48,7 @@ const char kHostFieldName[] = "host";
class CmdDelayMessagesFrom final : public BridgeCommand {
public:
- Status run(const BSONObj& cmdObj, stdx::mutex* settingsMutex, HostSettingsMap* settings) final {
+ Status run(const BSONObj& cmdObj, Mutex* settingsMutex, HostSettingsMap* settings) final {
invariant(settingsMutex);
invariant(settings);
@@ -69,7 +69,7 @@ public:
HostAndPort host(hostName);
{
- stdx::lock_guard<stdx::mutex> lk(*settingsMutex);
+ stdx::lock_guard<Latch> lk(*settingsMutex);
auto& hostSettings = (*settings)[host];
hostSettings.state = HostSettings::State::kForward;
hostSettings.delay = Milliseconds{newDelay};
@@ -80,7 +80,7 @@ public:
class CmdAcceptConnectionsFrom final : public BridgeCommand {
public:
- Status run(const BSONObj& cmdObj, stdx::mutex* settingsMutex, HostSettingsMap* settings) final {
+ Status run(const BSONObj& cmdObj, Mutex* settingsMutex, HostSettingsMap* settings) final {
invariant(settingsMutex);
invariant(settings);
@@ -92,7 +92,7 @@ public:
HostAndPort host(hostName);
{
- stdx::lock_guard<stdx::mutex> lk(*settingsMutex);
+ stdx::lock_guard<Latch> lk(*settingsMutex);
auto& hostSettings = (*settings)[host];
hostSettings.state = HostSettings::State::kForward;
}
@@ -102,7 +102,7 @@ public:
class CmdRejectConnectionsFrom final : public BridgeCommand {
public:
- Status run(const BSONObj& cmdObj, stdx::mutex* settingsMutex, HostSettingsMap* settings) final {
+ Status run(const BSONObj& cmdObj, Mutex* settingsMutex, HostSettingsMap* settings) final {
invariant(settingsMutex);
invariant(settings);
@@ -114,7 +114,7 @@ public:
HostAndPort host(hostName);
{
- stdx::lock_guard<stdx::mutex> lk(*settingsMutex);
+ stdx::lock_guard<Latch> lk(*settingsMutex);
auto& hostSettings = (*settings)[host];
hostSettings.state = HostSettings::State::kHangUp;
}
@@ -124,7 +124,7 @@ public:
class CmdDiscardMessagesFrom final : public BridgeCommand {
public:
- Status run(const BSONObj& cmdObj, stdx::mutex* settingsMutex, HostSettingsMap* settings) final {
+ Status run(const BSONObj& cmdObj, Mutex* settingsMutex, HostSettingsMap* settings) final {
invariant(settingsMutex);
invariant(settings);
@@ -151,7 +151,7 @@ public:
HostAndPort host(hostName);
{
- stdx::lock_guard<stdx::mutex> lk(*settingsMutex);
+ stdx::lock_guard<Latch> lk(*settingsMutex);
auto& hostSettings = (*settings)[host];
hostSettings.state = HostSettings::State::kDiscard;
hostSettings.loss = newLoss;
diff --git a/src/mongo/tools/bridge_commands.h b/src/mongo/tools/bridge_commands.h
index 1df953a8be4..8b32fbba86c 100644
--- a/src/mongo/tools/bridge_commands.h
+++ b/src/mongo/tools/bridge_commands.h
@@ -29,7 +29,7 @@
#pragma once
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/time_support.h"
@@ -58,9 +58,7 @@ public:
virtual ~BridgeCommand() = 0;
- virtual Status run(const BSONObj& cmdObj,
- stdx::mutex* settingsMutex,
- HostSettingsMap* settings) = 0;
+ virtual Status run(const BSONObj& cmdObj, Mutex* settingsMutex, HostSettingsMap* settings) = 0;
};
} // namespace mongo
diff --git a/src/mongo/transport/baton_asio_linux.h b/src/mongo/transport/baton_asio_linux.h
index 0db7fda5230..930cf52c67d 100644
--- a/src/mongo/transport/baton_asio_linux.h
+++ b/src/mongo/transport/baton_asio_linux.h
@@ -38,7 +38,7 @@
#include "mongo/base/checked_cast.h"
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/transport/baton.h"
#include "mongo/transport/session_asio.h"
@@ -158,7 +158,7 @@ public:
auto pf = makePromiseFuture<void>();
auto id = timer.id();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_opCtx) {
return kDetached;
@@ -178,7 +178,7 @@ public:
bool cancelSession(Session& session) noexcept override {
const auto id = session.id();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_sessions.find(id) == _sessions.end()) {
return false;
@@ -192,7 +192,7 @@ public:
bool cancelTimer(const ReactorTimer& timer) noexcept override {
const auto id = timer.id();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_timersById.find(id) == _timersById.end()) {
return false;
@@ -211,7 +211,7 @@ public:
}
void schedule(Task func) noexcept override {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_opCtx) {
func(kDetached);
@@ -261,7 +261,7 @@ public:
promise.emplaceValue();
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_scheduled.size()) {
auto toRun = std::exchange(_scheduled, {});
@@ -273,7 +273,7 @@ public:
}
});
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// If anything was scheduled, run it now. No need to poll
if (_scheduled.size()) {
@@ -375,7 +375,7 @@ private:
auto id = session.id();
auto pf = makePromiseFuture<void>();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_opCtx) {
return kDetached;
@@ -395,7 +395,7 @@ private:
decltype(_timers) timers;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_opCtx->getBaton().get() == this);
_opCtx->setBaton(nullptr);
@@ -439,10 +439,10 @@ private:
* the eventfd. If not, we run inline.
*/
template <typename Callback>
- void _safeExecute(stdx::unique_lock<stdx::mutex> lk, Callback&& cb) {
+ void _safeExecute(stdx::unique_lock<Latch> lk, Callback&& cb) {
if (_inPoll) {
_scheduled.push_back([cb = std::forward<Callback>(cb), this](Status) mutable {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
cb();
});
@@ -456,7 +456,7 @@ private:
return EventFDHolder::getForClient(_opCtx->getClient());
}
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("BatonASIO::_mutex");
OperationContext* _opCtx;
diff --git a/src/mongo/transport/service_entry_point_impl.h b/src/mongo/transport/service_entry_point_impl.h
index 87a8d815c91..2adc90be390 100644
--- a/src/mongo/transport/service_entry_point_impl.h
+++ b/src/mongo/transport/service_entry_point_impl.h
@@ -31,9 +31,9 @@
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/list.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/variant.h"
#include "mongo/transport/service_entry_point.h"
#include "mongo/transport/service_executor_reserved.h"
@@ -81,7 +81,7 @@ private:
ServiceContext* const _svcCtx;
AtomicWord<std::size_t> _nWorkers;
- mutable stdx::mutex _sessionsMutex;
+ mutable Mutex _sessionsMutex = MONGO_MAKE_LATCH("ServiceEntryPointImpl::_sessionsMutex");
stdx::condition_variable _shutdownCondition;
SSMList _sessions;
diff --git a/src/mongo/transport/service_executor_adaptive.cpp b/src/mongo/transport/service_executor_adaptive.cpp
index 9e34f16f60d..2f193bd16af 100644
--- a/src/mongo/transport/service_executor_adaptive.cpp
+++ b/src/mongo/transport/service_executor_adaptive.cpp
@@ -160,7 +160,7 @@ Status ServiceExecutorAdaptive::shutdown(Milliseconds timeout) {
_scheduleCondition.notify_one();
_controllerThread.join();
- stdx::unique_lock<stdx::mutex> lk(_threadsMutex);
+ stdx::unique_lock<Latch> lk(_threadsMutex);
_reactorHandle->stop();
bool result =
_deathCondition.wait_for(lk, timeout.toSystemDuration(), [&] { return _threads.empty(); });
@@ -285,7 +285,7 @@ bool ServiceExecutorAdaptive::_isStarved() const {
* by schedule().
*/
void ServiceExecutorAdaptive::_controllerThreadRoutine() {
- stdx::mutex noopLock;
+ auto noopLock = MONGO_MAKE_LATCH();
setThreadName("worker-controller"_sd);
// Setup the timers/timeout values for stuck thread detection.
@@ -294,7 +294,7 @@ void ServiceExecutorAdaptive::_controllerThreadRoutine() {
// Get the initial values for our utilization percentage calculations
auto getTimerTotals = [this]() {
- stdx::unique_lock<stdx::mutex> lk(_threadsMutex);
+ stdx::unique_lock<Latch> lk(_threadsMutex);
auto first = _getThreadTimerTotal(ThreadTimer::kExecuting, lk);
auto second = _getThreadTimerTotal(ThreadTimer::kRunning, lk);
return std::make_pair(first, second);
@@ -428,7 +428,7 @@ void ServiceExecutorAdaptive::_controllerThreadRoutine() {
}
void ServiceExecutorAdaptive::_startWorkerThread(ThreadCreationReason reason) {
- stdx::unique_lock<stdx::mutex> lk(_threadsMutex);
+ stdx::unique_lock<Latch> lk(_threadsMutex);
auto it = _threads.emplace(_threads.begin(), _tickSource);
auto num = _threads.size();
@@ -452,7 +452,7 @@ void ServiceExecutorAdaptive::_startWorkerThread(ThreadCreationReason reason) {
}
Milliseconds ServiceExecutorAdaptive::_getThreadJitter() const {
- static stdx::mutex jitterMutex;
+ static auto jitterMutex = MONGO_MAKE_LATCH();
static std::default_random_engine randomEngine = [] {
std::random_device seed;
return std::default_random_engine(seed());
@@ -464,7 +464,7 @@ Milliseconds ServiceExecutorAdaptive::_getThreadJitter() const {
std::uniform_int_distribution<> jitterDist(-jitterParam, jitterParam);
- stdx::lock_guard<stdx::mutex> lk(jitterMutex);
+ stdx::lock_guard<Latch> lk(jitterMutex);
auto jitter = jitterDist(randomEngine);
if (jitter > _config->workerThreadRunTime().count())
jitter = 0;
@@ -485,8 +485,8 @@ void ServiceExecutorAdaptive::_accumulateTaskMetrics(MetricsArray* outArray,
}
}
-void ServiceExecutorAdaptive::_accumulateAllTaskMetrics(
- MetricsArray* outputMetricsArray, const stdx::unique_lock<stdx::mutex>& lk) const {
+void ServiceExecutorAdaptive::_accumulateAllTaskMetrics(MetricsArray* outputMetricsArray,
+ const stdx::unique_lock<Latch>& lk) const {
_accumulateTaskMetrics(outputMetricsArray, _accumulatedMetrics);
for (auto& thread : _threads) {
_accumulateTaskMetrics(outputMetricsArray, thread.threadMetrics);
@@ -494,7 +494,7 @@ void ServiceExecutorAdaptive::_accumulateAllTaskMetrics(
}
TickSource::Tick ServiceExecutorAdaptive::_getThreadTimerTotal(
- ThreadTimer which, const stdx::unique_lock<stdx::mutex>& lk) const {
+ ThreadTimer which, const stdx::unique_lock<Latch>& lk) const {
TickSource::Tick accumulator;
switch (which) {
case ThreadTimer::kRunning:
@@ -539,7 +539,7 @@ void ServiceExecutorAdaptive::_workerThreadRoutine(
_accumulateTaskMetrics(&_accumulatedMetrics, state->threadMetrics);
{
- stdx::lock_guard<stdx::mutex> lk(_threadsMutex);
+ stdx::lock_guard<Latch> lk(_threadsMutex);
_threads.erase(state);
}
_deathCondition.notify_one();
@@ -631,7 +631,7 @@ StringData ServiceExecutorAdaptive::_threadStartedByToString(
}
void ServiceExecutorAdaptive::appendStats(BSONObjBuilder* bob) const {
- stdx::unique_lock<stdx::mutex> lk(_threadsMutex);
+ stdx::unique_lock<Latch> lk(_threadsMutex);
*bob << kExecutorLabel << kExecutorName //
<< kTotalQueued << _totalQueued.load() //
<< kTotalExecuted << _totalExecuted.load() //
diff --git a/src/mongo/transport/service_executor_adaptive.h b/src/mongo/transport/service_executor_adaptive.h
index 7beaca91026..27b38f5b665 100644
--- a/src/mongo/transport/service_executor_adaptive.h
+++ b/src/mongo/transport/service_executor_adaptive.h
@@ -34,6 +34,7 @@
#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/list.h"
#include "mongo/stdx/thread.h"
@@ -138,7 +139,7 @@ private:
CumulativeTickTimer(TickSource* ts) : _timer(ts) {}
TickSource::Tick markStopped() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_running);
_running = false;
auto curTime = _timer.sinceStartTicks();
@@ -147,14 +148,14 @@ private:
}
void markRunning() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_running);
_timer.reset();
_running = true;
}
TickSource::Tick totalTime() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_running)
return _accumulator;
return _timer.sinceStartTicks() + _accumulator;
@@ -162,7 +163,7 @@ private:
private:
TickTimer _timer;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("CumulativeTickTimer::_mutex");
TickSource::Tick _accumulator = 0;
bool _running = false;
};
@@ -202,15 +203,15 @@ private:
void _accumulateTaskMetrics(MetricsArray* outArray, const MetricsArray& inputArray) const;
void _accumulateAllTaskMetrics(MetricsArray* outputMetricsArray,
- const stdx::unique_lock<stdx::mutex>& lk) const;
+ const stdx::unique_lock<Latch>& lk) const;
TickSource::Tick _getThreadTimerTotal(ThreadTimer which,
- const stdx::unique_lock<stdx::mutex>& lk) const;
+ const stdx::unique_lock<Latch>& lk) const;
ReactorHandle _reactorHandle;
std::unique_ptr<Options> _config;
- mutable stdx::mutex _threadsMutex;
+ mutable Mutex _threadsMutex = MONGO_MAKE_LATCH("ServiceExecutorAdaptive::_threadsMutex");
ThreadList _threads;
std::array<int64_t, static_cast<size_t>(ThreadCreationReason::kMax)> _threadStartCounters;
diff --git a/src/mongo/transport/service_executor_adaptive_test.cpp b/src/mongo/transport/service_executor_adaptive_test.cpp
index 5f8a0192a8e..4c9fd26951f 100644
--- a/src/mongo/transport/service_executor_adaptive_test.cpp
+++ b/src/mongo/transport/service_executor_adaptive_test.cpp
@@ -114,11 +114,11 @@ protected:
std::shared_ptr<asio::io_context> asioIoCtx;
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("ServiceExecutorAdaptiveFixture::mutex");
AtomicWord<int> waitFor{-1};
stdx::condition_variable cond;
stdx::function<void()> notifyCallback = [this] {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
invariant(waitFor.load() != -1);
waitFor.fetchAndSubtract(1);
cond.notify_one();
@@ -126,7 +126,7 @@ protected:
};
void waitForCallback(int expected, boost::optional<Milliseconds> timeout = boost::none) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
invariant(waitFor.load() != -1);
if (timeout) {
ASSERT_TRUE(cond.wait_for(
@@ -163,8 +163,8 @@ protected:
* that those threads retire when they become idle.
*/
TEST_F(ServiceExecutorAdaptiveFixture, TestStuckTask) {
- stdx::mutex blockedMutex;
- stdx::unique_lock<stdx::mutex> blockedLock(blockedMutex);
+ auto blockedMutex = MONGO_MAKE_LATCH();
+ stdx::unique_lock<Latch> blockedLock(blockedMutex);
auto exec = makeAndStartExecutor<TestOptions>();
auto guard = makeGuard([&] {
@@ -178,7 +178,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckTask) {
ASSERT_OK(exec->schedule(
[this, &blockedMutex] {
notifyCallback();
- stdx::unique_lock<stdx::mutex> lk(blockedMutex);
+ stdx::unique_lock<Latch> lk(blockedMutex);
notifyCallback();
},
ServiceExecutor::kEmptyFlags,
@@ -208,8 +208,8 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckTask) {
* threads are running a task for longer than the stuckThreadTimeout.
*/
TEST_F(ServiceExecutorAdaptiveFixture, TestStuckThreads) {
- stdx::mutex blockedMutex;
- stdx::unique_lock<stdx::mutex> blockedLock(blockedMutex);
+ auto blockedMutex = MONGO_MAKE_LATCH();
+ stdx::unique_lock<Latch> blockedLock(blockedMutex);
auto exec = makeAndStartExecutor<TestOptions>();
auto guard = makeGuard([&] {
@@ -221,7 +221,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckThreads) {
auto blockedTask = [this, &blockedMutex] {
log() << "waiting on blocked mutex";
notifyCallback();
- stdx::unique_lock<stdx::mutex> lk(blockedMutex);
+ stdx::unique_lock<Latch> lk(blockedMutex);
notifyCallback();
};
@@ -260,8 +260,8 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckThreads) {
TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
auto exec = makeAndStartExecutor<TestOptions>();
- // Mutex so we don't attempt to call schedule and shutdown concurrently
- stdx::mutex scheduleMutex;
+ // Mutex so we don't attempt to call schedule and shutdown concurrently
+ auto scheduleMutex = MONGO_MAKE_LATCH();
auto guard = makeGuard([&] { ASSERT_OK(exec->shutdown(config->workerThreadRunTime() * 2)); });
@@ -274,7 +274,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
stdx::this_thread::sleep_for(config->maxQueueLatency().toSystemDuration() * 5);
{
- stdx::unique_lock<stdx::mutex> lock(scheduleMutex);
+ stdx::unique_lock<Latch> lock(scheduleMutex);
if (scheduleNew) {
ASSERT_OK(exec->schedule(task,
@@ -298,7 +298,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
stdx::this_thread::sleep_for(config->workerThreadRunTime().toSystemDuration() * 2);
ASSERT_EQ(exec->threadsRunning(), 2);
- stdx::unique_lock<stdx::mutex> lock(scheduleMutex);
+ stdx::unique_lock<Latch> lock(scheduleMutex);
scheduleNew = false;
}
@@ -310,7 +310,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestRecursion) {
auto exec = makeAndStartExecutor<RecursionOptions>();
AtomicWord<int> remainingTasks{config->recursionLimit() - 1};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
stdx::function<void()> task;
@@ -334,7 +334,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestRecursion) {
log() << "Completing task recursively";
};
- stdx::unique_lock<stdx::mutex> lock(mutex);
+ stdx::unique_lock<Latch> lock(mutex);
ASSERT_OK(exec->schedule(
task, ServiceExecutor::kEmptyFlags, ServiceExecutorTaskName::kSSMProcessMessage));
@@ -352,8 +352,8 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestRecursion) {
* with new normal tasks
*/
TEST_F(ServiceExecutorAdaptiveFixture, TestDeferredTasks) {
- stdx::mutex blockedMutex;
- stdx::unique_lock<stdx::mutex> blockedLock(blockedMutex);
+ auto blockedMutex = MONGO_MAKE_LATCH();
+ stdx::unique_lock<Latch> blockedLock(blockedMutex);
auto exec = makeAndStartExecutor<TestOptions>();
auto guard = makeGuard([&] {
@@ -366,7 +366,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestDeferredTasks) {
log() << "Scheduling a blocking task";
ASSERT_OK(exec->schedule(
[this, &blockedMutex] {
- stdx::unique_lock<stdx::mutex> lk(blockedMutex);
+ stdx::unique_lock<Latch> lk(blockedMutex);
notifyCallback();
},
ServiceExecutor::kEmptyFlags,
diff --git a/src/mongo/transport/service_executor_reserved.cpp b/src/mongo/transport/service_executor_reserved.cpp
index 24820ab1d91..902bf98d7c3 100644
--- a/src/mongo/transport/service_executor_reserved.cpp
+++ b/src/mongo/transport/service_executor_reserved.cpp
@@ -62,7 +62,7 @@ ServiceExecutorReserved::ServiceExecutorReserved(ServiceContext* ctx,
Status ServiceExecutorReserved::start() {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_stillRunning.store(true);
_numStartingThreads = _reservedThreads;
}
@@ -80,7 +80,7 @@ Status ServiceExecutorReserved::start() {
Status ServiceExecutorReserved::_startWorker() {
log() << "Starting new worker thread for " << _name << " service executor";
return launchServiceWorkerThread([this] {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_numRunningWorkerThreads.addAndFetch(1);
auto numRunningGuard = makeGuard([&] {
_numRunningWorkerThreads.subtractAndFetch(1);
@@ -142,7 +142,7 @@ Status ServiceExecutorReserved::_startWorker() {
Status ServiceExecutorReserved::shutdown(Milliseconds timeout) {
LOG(3) << "Shutting down reserved executor";
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_stillRunning.store(false);
_threadWakeup.notify_all();
@@ -178,7 +178,7 @@ Status ServiceExecutorReserved::schedule(Task task,
return Status::OK();
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_readyTasks.push_back(std::move(task));
_threadWakeup.notify_one();
@@ -186,7 +186,7 @@ Status ServiceExecutorReserved::schedule(Task task,
}
void ServiceExecutorReserved::appendStats(BSONObjBuilder* bob) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
*bob << kExecutorLabel << kExecutorName << kThreadsRunning
<< static_cast<int>(_numRunningWorkerThreads.loadRelaxed()) << kReadyThreads
<< static_cast<int>(_numReadyThreads) << kStartingThreads
diff --git a/src/mongo/transport/service_executor_reserved.h b/src/mongo/transport/service_executor_reserved.h
index d83a07566f5..8a71090bf63 100644
--- a/src/mongo/transport/service_executor_reserved.h
+++ b/src/mongo/transport/service_executor_reserved.h
@@ -33,8 +33,8 @@
#include "mongo/base/status.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/transport/service_executor.h"
#include "mongo/transport/service_executor_task_names.h"
@@ -74,7 +74,7 @@ private:
AtomicWord<bool> _stillRunning{false};
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ServiceExecutorReserved::_mutex");
stdx::condition_variable _threadWakeup;
stdx::condition_variable _shutdownCondition;
diff --git a/src/mongo/transport/service_executor_synchronous.cpp b/src/mongo/transport/service_executor_synchronous.cpp
index 79fc88e0033..25104fd46dd 100644
--- a/src/mongo/transport/service_executor_synchronous.cpp
+++ b/src/mongo/transport/service_executor_synchronous.cpp
@@ -67,7 +67,7 @@ Status ServiceExecutorSynchronous::shutdown(Milliseconds timeout) {
_stillRunning.store(false);
- stdx::unique_lock<stdx::mutex> lock(_shutdownMutex);
+ stdx::unique_lock<Latch> lock(_shutdownMutex);
bool result = _shutdownCondition.wait_for(lock, timeout.toSystemDuration(), [this]() {
return _numRunningWorkerThreads.load() == 0;
});
diff --git a/src/mongo/transport/service_executor_synchronous.h b/src/mongo/transport/service_executor_synchronous.h
index ebe381d9fe2..1f0e2f6dd33 100644
--- a/src/mongo/transport/service_executor_synchronous.h
+++ b/src/mongo/transport/service_executor_synchronous.h
@@ -33,8 +33,8 @@
#include "mongo/base/status.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/transport/service_executor.h"
#include "mongo/transport/service_executor_task_names.h"
@@ -66,7 +66,7 @@ private:
AtomicWord<bool> _stillRunning{false};
- mutable stdx::mutex _shutdownMutex;
+ mutable Mutex _shutdownMutex = MONGO_MAKE_LATCH("ServiceExecutorSynchronous::_shutdownMutex");
stdx::condition_variable _shutdownCondition;
AtomicWord<size_t> _numRunningWorkerThreads{0};
diff --git a/src/mongo/transport/service_executor_test.cpp b/src/mongo/transport/service_executor_test.cpp
index f3c05a72a2d..817703218f9 100644
--- a/src/mongo/transport/service_executor_test.cpp
+++ b/src/mongo/transport/service_executor_test.cpp
@@ -178,13 +178,13 @@ protected:
void scheduleBasicTask(ServiceExecutor* exec, bool expectSuccess) {
stdx::condition_variable cond;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
auto task = [&cond, &mutex] {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cond.notify_all();
};
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
auto status = exec->schedule(
std::move(task), ServiceExecutor::kEmptyFlags, ServiceExecutorTaskName::kSSMStartSession);
if (expectSuccess) {
diff --git a/src/mongo/transport/service_state_machine.h b/src/mongo/transport/service_state_machine.h
index a48f4db321d..5d065d8cb39 100644
--- a/src/mongo/transport/service_state_machine.h
+++ b/src/mongo/transport/service_state_machine.h
@@ -35,9 +35,9 @@
#include "mongo/config.h"
#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/transport/message_compressor_base.h"
#include "mongo/transport/service_entry_point.h"
diff --git a/src/mongo/transport/service_state_machine_test.cpp b/src/mongo/transport/service_state_machine_test.cpp
index f10fff2d0ef..6c61236769f 100644
--- a/src/mongo/transport/service_state_machine_test.cpp
+++ b/src/mongo/transport/service_state_machine_test.cpp
@@ -281,19 +281,19 @@ private:
class SimpleEvent {
public:
void signal() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_signaled = true;
_cond.notify_one();
}
void wait() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cond.wait(lk, [this] { return _signaled; });
_signaled = false;
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SimpleEvent::_mutex");
stdx::condition_variable _cond;
bool _signaled = false;
};
diff --git a/src/mongo/transport/session_asio.h b/src/mongo/transport/session_asio.h
index eca9811ffb1..7e7a9809236 100644
--- a/src/mongo/transport/session_asio.h
+++ b/src/mongo/transport/session_asio.h
@@ -223,7 +223,7 @@ protected:
#ifdef MONGO_CONFIG_SSL
// The unique_lock here is held by TransportLayerASIO to synchronize with the asyncConnect
// timeout callback. It will be unlocked before the SSL actually handshake begins.
- Future<void> handshakeSSLForEgressWithLock(stdx::unique_lock<stdx::mutex> lk,
+ Future<void> handshakeSSLForEgressWithLock(stdx::unique_lock<Latch> lk,
const HostAndPort& target) {
if (!_tl->_egressSSLContext) {
return Future<void>::makeReady(Status(ErrorCodes::SSLHandshakeFailed,
@@ -255,8 +255,8 @@ protected:
// For synchronous connections where we don't have an async timer, just take a dummy lock and
// pass it to the WithLock version of handshakeSSLForEgress
Future<void> handshakeSSLForEgress(const HostAndPort& target) {
- stdx::mutex mutex;
- return handshakeSSLForEgressWithLock(stdx::unique_lock<stdx::mutex>(mutex), target);
+ auto mutex = MONGO_MAKE_LATCH();
+ return handshakeSSLForEgressWithLock(stdx::unique_lock<Latch>(mutex), target);
}
#endif
diff --git a/src/mongo/transport/transport_layer_asio.cpp b/src/mongo/transport/transport_layer_asio.cpp
index 2f91a6d52c5..25c5f9d906e 100644
--- a/src/mongo/transport/transport_layer_asio.cpp
+++ b/src/mongo/transport/transport_layer_asio.cpp
@@ -530,7 +530,7 @@ Future<SessionHandle> TransportLayerASIO::asyncConnect(HostAndPort peer,
AtomicWord<bool> done{false};
Promise<SessionHandle> promise;
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("AsyncConnectState::mutex");
GenericSocket socket;
ASIOReactorTimer timeoutTimer;
WrappedResolver resolver;
@@ -562,7 +562,7 @@ Future<SessionHandle> TransportLayerASIO::asyncConnect(HostAndPort peer,
connector->resolvedEndpoint));
std::error_code ec;
- stdx::lock_guard<stdx::mutex> lk(connector->mutex);
+ stdx::lock_guard<Latch> lk(connector->mutex);
connector->resolver.cancel();
if (connector->session) {
connector->session->end();
@@ -583,7 +583,7 @@ Future<SessionHandle> TransportLayerASIO::asyncConnect(HostAndPort peer,
<< " took " << timeAfter - timeBefore;
}
- stdx::lock_guard<stdx::mutex> lk(connector->mutex);
+ stdx::lock_guard<Latch> lk(connector->mutex);
connector->resolvedEndpoint = results.front();
connector->socket.open(connector->resolvedEndpoint->protocol());
@@ -595,7 +595,7 @@ Future<SessionHandle> TransportLayerASIO::asyncConnect(HostAndPort peer,
return connector->socket.async_connect(*connector->resolvedEndpoint, UseFuture{});
})
.then([this, connector, sslMode]() -> Future<void> {
- stdx::unique_lock<stdx::mutex> lk(connector->mutex);
+ stdx::unique_lock<Latch> lk(connector->mutex);
connector->session =
std::make_shared<ASIOSession>(this, std::move(connector->socket), false);
connector->session->ensureAsync();
@@ -780,7 +780,7 @@ Status TransportLayerASIO::setup() {
}
Status TransportLayerASIO::start() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_running.store(true);
if (_listenerOptions.isIngress()) {
@@ -819,7 +819,7 @@ Status TransportLayerASIO::start() {
}
void TransportLayerASIO::shutdown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_running.store(false);
// Loop through the acceptors and cancel their calls to async_accept. This will prevent new
diff --git a/src/mongo/transport/transport_layer_asio.h b/src/mongo/transport/transport_layer_asio.h
index e37f72c40aa..c6c5f46c18c 100644
--- a/src/mongo/transport/transport_layer_asio.h
+++ b/src/mongo/transport/transport_layer_asio.h
@@ -35,9 +35,9 @@
#include "mongo/base/status_with.h"
#include "mongo/config.h"
#include "mongo/db/server_options.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/transport/transport_layer.h"
#include "mongo/transport/transport_mode.h"
@@ -160,7 +160,7 @@ private:
SSLParams::SSLModes _sslMode() const;
#endif
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("TransportLayerASIO::_mutex");
// There are three reactors that are used by TransportLayerASIO. The _ingressReactor contains
// all the accepted sockets and all ingress networking activity. The _acceptorReactor contains
diff --git a/src/mongo/transport/transport_layer_asio_test.cpp b/src/mongo/transport/transport_layer_asio_test.cpp
index 08dcd99dcae..53f979d9cd8 100644
--- a/src/mongo/transport/transport_layer_asio_test.cpp
+++ b/src/mongo/transport/transport_layer_asio_test.cpp
@@ -48,7 +48,7 @@ namespace {
class ServiceEntryPointUtil : public ServiceEntryPoint {
public:
void startSession(transport::SessionHandle session) override {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sessions.push_back(std::move(session));
log() << "started session";
_cv.notify_one();
@@ -58,7 +58,7 @@ public:
log() << "end all sessions";
std::vector<transport::SessionHandle> old_sessions;
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
old_sessions.swap(_sessions);
}
old_sessions.clear();
@@ -75,7 +75,7 @@ public:
void appendStats(BSONObjBuilder*) const override {}
size_t numOpenSessions() const override {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return _sessions.size();
}
@@ -88,12 +88,12 @@ public:
}
void waitForConnect() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_cv.wait(lock, [&] { return !_sessions.empty(); });
}
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ServiceEntryPointUtil::_mutex");
stdx::condition_variable _cv;
std::vector<transport::SessionHandle> _sessions;
transport::TransportLayer* _transport = nullptr;
@@ -107,7 +107,7 @@ public:
SockAddr sa{"localhost", _port, AF_INET};
s.connect(sa);
log() << "connection: port " << _port;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cv.wait(lk, [&] { return _stop; });
log() << "connection: Rx stop request";
}};
@@ -115,7 +115,7 @@ public:
void stop() {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_stop = true;
}
log() << "connection: Tx stop request";
@@ -125,7 +125,7 @@ public:
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SimpleConnectionThread::_mutex");
stdx::condition_variable _cv;
stdx::thread _thr;
bool _stop = false;
@@ -196,7 +196,7 @@ public:
}
bool waitForTimeout(boost::optional<Milliseconds> timeout = boost::none) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
bool ret = true;
if (timeout) {
ret = _cond.wait_for(lk, timeout->toSystemDuration(), [this] { return _finished; });
@@ -210,7 +210,7 @@ public:
protected:
void notifyComplete() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_finished = true;
_cond.notify_one();
}
@@ -221,7 +221,7 @@ protected:
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("TimeoutSEP::_mutex");
stdx::condition_variable _cond;
bool _finished = false;
diff --git a/src/mongo/transport/transport_layer_manager.cpp b/src/mongo/transport/transport_layer_manager.cpp
index 97a44e104f4..3db9e9f17a5 100644
--- a/src/mongo/transport/transport_layer_manager.cpp
+++ b/src/mongo/transport/transport_layer_manager.cpp
@@ -53,7 +53,7 @@ TransportLayerManager::TransportLayerManager() = default;
template <typename Callable>
void TransportLayerManager::_foreach(Callable&& cb) const {
{
- stdx::lock_guard<stdx::mutex> lk(_tlsMutex);
+ stdx::lock_guard<Latch> lk(_tlsMutex);
for (auto&& tl : _tls) {
cb(tl.get());
}
@@ -111,7 +111,7 @@ Status TransportLayerManager::setup() {
Status TransportLayerManager::addAndStartTransportLayer(std::unique_ptr<TransportLayer> tl) {
auto ptr = tl.get();
{
- stdx::lock_guard<stdx::mutex> lk(_tlsMutex);
+ stdx::lock_guard<Latch> lk(_tlsMutex);
_tls.emplace_back(std::move(tl));
}
return ptr->start();
diff --git a/src/mongo/transport/transport_layer_manager.h b/src/mongo/transport/transport_layer_manager.h
index 1dd5ef38527..3bc0e6ba5c6 100644
--- a/src/mongo/transport/transport_layer_manager.h
+++ b/src/mongo/transport/transport_layer_manager.h
@@ -32,7 +32,7 @@
#include <vector>
#include "mongo/base/status.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/transport/session.h"
#include "mongo/transport/transport_layer.h"
#include "mongo/util/time_support.h"
@@ -91,7 +91,7 @@ public:
static std::unique_ptr<TransportLayer> makeAndStartDefaultEgressTransportLayer();
BatonHandle makeBaton(OperationContext* opCtx) const override {
- stdx::lock_guard<stdx::mutex> lk(_tlsMutex);
+ stdx::lock_guard<Latch> lk(_tlsMutex);
// TODO: figure out what to do about managers with more than one transport layer.
invariant(_tls.size() == 1);
return _tls[0]->makeBaton(opCtx);
@@ -101,7 +101,7 @@ private:
template <typename Callable>
void _foreach(Callable&& cb) const;
- mutable stdx::mutex _tlsMutex;
+ mutable Mutex _tlsMutex = MONGO_MAKE_LATCH("TransportLayerManager::_tlsMutex");
std::vector<std::unique_ptr<TransportLayer>> _tls;
};
diff --git a/src/mongo/unittest/barrier.h b/src/mongo/unittest/barrier.h
index 6b3d102fc6f..de21587ea3b 100644
--- a/src/mongo/unittest/barrier.h
+++ b/src/mongo/unittest/barrier.h
@@ -29,8 +29,8 @@
#pragma once
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
namespace unittest {
@@ -60,7 +60,7 @@ private:
size_t _threadCount;
size_t _threadsWaiting;
uint64_t _generation;
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
stdx::condition_variable _condition;
};
diff --git a/src/mongo/unittest/unittest.cpp b/src/mongo/unittest/unittest.cpp
index c36799e79fd..b1d6258e5a1 100644
--- a/src/mongo/unittest/unittest.cpp
+++ b/src/mongo/unittest/unittest.cpp
@@ -44,9 +44,9 @@
#include "mongo/logger/logger.h"
#include "mongo/logger/message_event_utf8_encoder.h"
#include "mongo/logger/message_log_domain.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/stacktrace.h"
@@ -251,7 +251,7 @@ public:
}
private:
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
bool _enabled = false;
logger::MessageEventDetailsEncoder _encoder;
std::vector<std::string>* _lines;
diff --git a/src/mongo/util/alarm.cpp b/src/mongo/util/alarm.cpp
index b3236a9ef5b..95a3a88dfd3 100644
--- a/src/mongo/util/alarm.cpp
+++ b/src/mongo/util/alarm.cpp
@@ -50,7 +50,7 @@ public:
return {ErrorCodes::ShutdownInProgress, "The alarm scheduler was shutdown"};
}
- stdx::unique_lock<stdx::mutex> lk(service->_mutex);
+ stdx::unique_lock<Latch> lk(service->_mutex);
if (_done) {
return {ErrorCodes::AlarmAlreadyFulfilled, "The alarm has already been canceled"};
}
@@ -80,7 +80,7 @@ AlarmSchedulerPrecise::~AlarmSchedulerPrecise() {
}
AlarmScheduler::Alarm AlarmSchedulerPrecise::alarmAt(Date_t date) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_shutdown) {
Alarm ret;
ret.future = Future<void>::makeReady(
@@ -107,7 +107,7 @@ void AlarmSchedulerPrecise::processExpiredAlarms(
std::vector<Promise<void>> toExpire;
AlarmMapIt it;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
for (it = _alarms.begin(); it != _alarms.end();) {
if (hook && !(*hook)(processed + 1)) {
break;
@@ -135,22 +135,22 @@ void AlarmSchedulerPrecise::processExpiredAlarms(
}
Date_t AlarmSchedulerPrecise::nextAlarm() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return (_alarms.empty()) ? Date_t::max() : _alarms.begin()->first;
}
void AlarmSchedulerPrecise::clearAllAlarms() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_clearAllAlarmsImpl(lk);
}
void AlarmSchedulerPrecise::clearAllAlarmsAndShutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shutdown = true;
_clearAllAlarmsImpl(lk);
}
-void AlarmSchedulerPrecise::_clearAllAlarmsImpl(stdx::unique_lock<stdx::mutex>& lk) {
+void AlarmSchedulerPrecise::_clearAllAlarmsImpl(stdx::unique_lock<Latch>& lk) {
std::vector<Promise<void>> toExpire;
for (AlarmMapIt it = _alarms.begin(); it != _alarms.end();) {
toExpire.push_back(std::move(it->second.promise));
diff --git a/src/mongo/util/alarm.h b/src/mongo/util/alarm.h
index 449284a3b21..9727a133e69 100644
--- a/src/mongo/util/alarm.h
+++ b/src/mongo/util/alarm.h
@@ -32,7 +32,7 @@
#include <memory>
#include "mongo/base/status.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/functional.h"
#include "mongo/util/future.h"
@@ -185,9 +185,9 @@ private:
using AlarmMap = std::multimap<Date_t, AlarmData>;
using AlarmMapIt = AlarmMap::iterator;
- void _clearAllAlarmsImpl(stdx::unique_lock<stdx::mutex>& lk);
+ void _clearAllAlarmsImpl(stdx::unique_lock<Latch>& lk);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AlarmSchedulerPrecise::_mutex");
bool _shutdown = false;
AlarmMap _alarms;
};
diff --git a/src/mongo/util/alarm_runner_background_thread.cpp b/src/mongo/util/alarm_runner_background_thread.cpp
index 4d22f84e87d..d0a27927246 100644
--- a/src/mongo/util/alarm_runner_background_thread.cpp
+++ b/src/mongo/util/alarm_runner_background_thread.cpp
@@ -34,13 +34,13 @@
namespace mongo {
void AlarmRunnerBackgroundThread::start() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_running = true;
_thread = stdx::thread(&AlarmRunnerBackgroundThread::_threadRoutine, this);
}
void AlarmRunnerBackgroundThread::shutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_running = false;
lk.unlock();
_condVar.notify_one();
@@ -56,7 +56,7 @@ AlarmRunnerBackgroundThread::_initializeSchedulers(std::vector<AlarmSchedulerHan
invariant(!schedulers.empty());
const auto registerHook = [this](Date_t next, const std::shared_ptr<AlarmScheduler>& which) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (next >= _nextAlarm) {
return;
}
@@ -81,7 +81,7 @@ AlarmRunnerBackgroundThread::_initializeSchedulers(std::vector<AlarmSchedulerHan
}
void AlarmRunnerBackgroundThread::_threadRoutine() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_running) {
const auto clockSource = _schedulers.front()->clockSource();
const auto now = clockSource->now();
diff --git a/src/mongo/util/alarm_runner_background_thread.h b/src/mongo/util/alarm_runner_background_thread.h
index 179f6350480..251b6ad89cd 100644
--- a/src/mongo/util/alarm_runner_background_thread.h
+++ b/src/mongo/util/alarm_runner_background_thread.h
@@ -64,7 +64,7 @@ private:
void _threadRoutine();
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AlarmRunnerBackgroundThread::_mutex");
stdx::condition_variable _condVar;
bool _running = false;
Date_t _nextAlarm = Date_t::max();
diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp
index 3627a413f1b..e6abff78d97 100644
--- a/src/mongo/util/background.cpp
+++ b/src/mongo/util/background.cpp
@@ -34,9 +34,9 @@
#include "mongo/util/background.h"
#include "mongo/config.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/idle_thread_block.h"
#include "mongo/util/concurrency/mutex.h"
@@ -79,7 +79,7 @@ private:
void _runTask(PeriodicTask* task);
// _mutex protects the _shutdownRequested flag and the _tasks vector.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicTaskRunner::_mutex");
// The condition variable is used to sleep for the interval between task
// executions, and is notified when the _shutdownRequested flag is toggled.
@@ -128,7 +128,7 @@ bool runnerDestroyed = false;
struct BackgroundJob::JobStatus {
JobStatus() : state(NotStarted) {}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("JobStatus::mutex");
stdx::condition_variable done;
State state;
};
@@ -153,7 +153,7 @@ void BackgroundJob::jobBody() {
{
// It is illegal to access any state owned by this BackgroundJob after leaving this
// scope, with the exception of the call to 'delete this' below.
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
_status->state = Done;
_status->done.notify_all();
}
@@ -163,7 +163,7 @@ void BackgroundJob::jobBody() {
}
void BackgroundJob::go() {
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
massert(17234,
str::stream() << "backgroundJob already running: " << name(),
_status->state != Running);
@@ -177,7 +177,7 @@ void BackgroundJob::go() {
}
Status BackgroundJob::cancel() {
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
if (_status->state == Running)
return Status(ErrorCodes::IllegalOperation, "Cannot cancel a running BackgroundJob");
@@ -193,7 +193,7 @@ Status BackgroundJob::cancel() {
bool BackgroundJob::wait(unsigned msTimeOut) {
verify(!_selfDelete); // you cannot call wait on a self-deleting job
const auto deadline = Date_t::now() + Milliseconds(msTimeOut);
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
while (_status->state != Done) {
if (msTimeOut) {
if (stdx::cv_status::timeout ==
@@ -207,12 +207,12 @@ bool BackgroundJob::wait(unsigned msTimeOut) {
}
BackgroundJob::State BackgroundJob::getState() const {
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
return _status->state;
}
bool BackgroundJob::running() const {
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
return _status->state == Running;
}
@@ -267,12 +267,12 @@ Status PeriodicTask::stopRunningPeriodicTasks(int gracePeriodMillis) {
}
void PeriodicTaskRunner::add(PeriodicTask* task) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_tasks.push_back(task);
}
void PeriodicTaskRunner::remove(PeriodicTask* task) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (size_t i = 0; i != _tasks.size(); i++) {
if (_tasks[i] == task) {
_tasks[i] = NULL;
@@ -283,7 +283,7 @@ void PeriodicTaskRunner::remove(PeriodicTask* task) {
Status PeriodicTaskRunner::stop(int gracePeriodMillis) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_shutdownRequested = true;
_cond.notify_one();
}
@@ -299,7 +299,7 @@ void PeriodicTaskRunner::run() {
// Use a shorter cycle time in debug mode to help catch race conditions.
const Seconds waitTime(kDebugBuild ? 5 : 60);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (!_shutdownRequested) {
{
MONGO_IDLE_THREAD_BLOCK;
diff --git a/src/mongo/util/background_job_test.cpp b/src/mongo/util/background_job_test.cpp
index efca4fdbfa1..f95090d11a3 100644
--- a/src/mongo/util/background_job_test.cpp
+++ b/src/mongo/util/background_job_test.cpp
@@ -30,7 +30,7 @@
#include "mongo/platform/basic.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/background.h"
@@ -114,7 +114,7 @@ TEST(BackgroundJobLifeCycle, Go) {
virtual void run() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
ASSERT_FALSE(_hasRun);
_hasRun = true;
}
@@ -127,7 +127,7 @@ TEST(BackgroundJobLifeCycle, Go) {
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("Job::_mutex");
bool _hasRun;
Notification<void> _n;
};
diff --git a/src/mongo/util/background_thread_clock_source.h b/src/mongo/util/background_thread_clock_source.h
index 4b2d13324c2..0c7e32db527 100644
--- a/src/mongo/util/background_thread_clock_source.h
+++ b/src/mongo/util/background_thread_clock_source.h
@@ -33,9 +33,9 @@
#include <thread>
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/time_support.h"
@@ -93,7 +93,7 @@ private:
const Milliseconds _granularity;
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
stdx::condition_variable _condition;
bool _inShutdown = false;
bool _started = false;
diff --git a/src/mongo/util/clock_source.h b/src/mongo/util/clock_source.h
index b51776ceaeb..f202f67f439 100644
--- a/src/mongo/util/clock_source.h
+++ b/src/mongo/util/clock_source.h
@@ -39,7 +39,7 @@
namespace mongo {
-class Date_t;
+class Waitable;
/**
* An interface for getting the current wall clock time.
diff --git a/src/mongo/util/clock_source_mock.cpp b/src/mongo/util/clock_source_mock.cpp
index d05eb765722..0319e67b481 100644
--- a/src/mongo/util/clock_source_mock.cpp
+++ b/src/mongo/util/clock_source_mock.cpp
@@ -29,6 +29,7 @@
#include "mongo/platform/basic.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source_mock.h"
#include <algorithm>
diff --git a/src/mongo/util/clock_source_mock.h b/src/mongo/util/clock_source_mock.h
index 24c6851a240..689a03832f7 100644
--- a/src/mongo/util/clock_source_mock.h
+++ b/src/mongo/util/clock_source_mock.h
@@ -69,7 +69,7 @@ private:
using Alarm = std::pair<Date_t, unique_function<void()>>;
void _processAlarms(stdx::unique_lock<stdx::mutex> lk);
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
Date_t _now{Date_t::fromMillisSinceEpoch(1)};
std::vector<Alarm> _alarms;
};
diff --git a/src/mongo/util/concurrency/notification.h b/src/mongo/util/concurrency/notification.h
index 44bc7efc9ac..379b6e10d9a 100644
--- a/src/mongo/util/concurrency/notification.h
+++ b/src/mongo/util/concurrency/notification.h
@@ -32,8 +32,8 @@
#include <boost/optional.hpp>
#include "mongo/db/operation_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/duration.h"
#include "mongo/util/time_support.h"
@@ -59,7 +59,7 @@ public:
* block).
*/
explicit operator bool() const {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return !!_value;
}
@@ -68,7 +68,7 @@ public:
* If the wait is interrupted, throws an exception.
*/
T& get(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
opCtx->waitForConditionOrInterrupt(_condVar, lock, [this]() -> bool { return !!_value; });
return _value.get();
}
@@ -78,7 +78,7 @@ public:
* This variant of get cannot be interrupted.
*/
T& get() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (!_value) {
_condVar.wait(lock);
}
@@ -91,7 +91,7 @@ public:
* call. Must only be called once for the lifetime of the notification.
*/
void set(T value) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(!_value);
_value = std::move(value);
_condVar.notify_all();
@@ -104,13 +104,13 @@ public:
* If the wait is interrupted, throws an exception.
*/
bool waitFor(OperationContext* opCtx, Milliseconds waitTimeout) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return opCtx->waitForConditionOrInterruptFor(
_condVar, lock, waitTimeout, [&]() { return !!_value; });
}
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("Notification::_mutex");
stdx::condition_variable _condVar;
// Protected by mutex and only moves from not-set to set once
diff --git a/src/mongo/util/concurrency/spin_lock.h b/src/mongo/util/concurrency/spin_lock.h
index 7f237dc3175..5c5a17b4b74 100644
--- a/src/mongo/util/concurrency/spin_lock.h
+++ b/src/mongo/util/concurrency/spin_lock.h
@@ -37,7 +37,7 @@
#include "mongo/config.h"
#include "mongo/platform/compiler.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -86,7 +86,7 @@ public:
}
private:
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
};
#else
diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp
index fd8d23377ea..ceaf9fcaf7e 100644
--- a/src/mongo/util/concurrency/thread_pool.cpp
+++ b/src/mongo/util/concurrency/thread_pool.cpp
@@ -79,7 +79,7 @@ ThreadPool::Options cleanUpOptions(ThreadPool::Options&& options) {
ThreadPool::ThreadPool(Options options) : _options(cleanUpOptions(std::move(options))) {}
ThreadPool::~ThreadPool() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shutdown_inlock();
if (shutdownComplete != _state) {
_join_inlock(&lk);
@@ -94,7 +94,7 @@ ThreadPool::~ThreadPool() {
}
void ThreadPool::startup() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state != preStart) {
severe() << "Attempting to start pool " << _options.poolName
<< ", but it has already started";
@@ -110,7 +110,7 @@ void ThreadPool::startup() {
}
void ThreadPool::shutdown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_shutdown_inlock();
}
@@ -130,11 +130,11 @@ void ThreadPool::_shutdown_inlock() {
}
void ThreadPool::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_join_inlock(&lk);
}
-void ThreadPool::_join_inlock(stdx::unique_lock<stdx::mutex>* lk) {
+void ThreadPool::_join_inlock(stdx::unique_lock<Latch>* lk) {
_stateChange.wait(*lk, [this] {
switch (_state) {
case preStart:
@@ -177,7 +177,7 @@ void ThreadPool::_drainPendingTasks() {
<< _options.threadNamePrefix << _nextThreadId++;
setThreadName(threadName);
_options.onCreateThread(threadName);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (!_pendingTasks.empty()) {
_doOneTask(&lock);
}
@@ -186,7 +186,7 @@ void ThreadPool::_drainPendingTasks() {
}
void ThreadPool::schedule(Task task) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
switch (_state) {
case joinRequired:
@@ -221,7 +221,7 @@ void ThreadPool::schedule(Task task) {
}
void ThreadPool::waitForIdle() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// If there are any pending tasks, or non-idle threads, the pool is not idle.
while (!_pendingTasks.empty() || _numIdleThreads < _threads.size()) {
_poolIsIdle.wait(lk);
@@ -229,7 +229,7 @@ void ThreadPool::waitForIdle() {
}
ThreadPool::Stats ThreadPool::getStats() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
Stats result;
result.options = _options;
result.numThreads = _threads.size();
@@ -257,7 +257,7 @@ void ThreadPool::_workerThreadBody(ThreadPool* pool, const std::string& threadNa
}
void ThreadPool::_consumeTasks() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_state == running) {
if (_pendingTasks.empty()) {
if (_threads.size() > _options.minThreads) {
@@ -331,7 +331,7 @@ void ThreadPool::_consumeTasks() {
fassertFailedNoTrace(28703);
}
-void ThreadPool::_doOneTask(stdx::unique_lock<stdx::mutex>* lk) noexcept {
+void ThreadPool::_doOneTask(stdx::unique_lock<Latch>* lk) noexcept {
invariant(!_pendingTasks.empty());
LOG(3) << "Executing a task on behalf of pool " << _options.poolName;
Task task = std::move(_pendingTasks.front());
diff --git a/src/mongo/util/concurrency/thread_pool.h b/src/mongo/util/concurrency/thread_pool.h
index c4873f84dff..ea41daf4e2e 100644
--- a/src/mongo/util/concurrency/thread_pool.h
+++ b/src/mongo/util/concurrency/thread_pool.h
@@ -33,9 +33,9 @@
#include <string>
#include <vector>
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/thread_pool_interface.h"
#include "mongo/util/time_support.h"
@@ -189,7 +189,7 @@ private:
/**
* Implementation of join once _mutex is owned by "lk".
*/
- void _join_inlock(stdx::unique_lock<stdx::mutex>* lk);
+ void _join_inlock(stdx::unique_lock<Latch>* lk);
/**
* Runs the remaining tasks on a new thread as part of the join process, blocking until
@@ -201,7 +201,7 @@ private:
* Executes one task from _pendingTasks. "lk" must own _mutex, and _pendingTasks must have at
* least one entry.
*/
- void _doOneTask(stdx::unique_lock<stdx::mutex>* lk) noexcept;
+ void _doOneTask(stdx::unique_lock<Latch>* lk) noexcept;
/**
* Changes the lifecycle state (_state) of the pool and wakes up any threads waiting for a state
@@ -213,7 +213,7 @@ private:
const Options _options;
// Mutex guarding all non-const member variables.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ThreadPool::_mutex");
// This variable represents the lifecycle state of the pool.
//
diff --git a/src/mongo/util/concurrency/thread_pool_test.cpp b/src/mongo/util/concurrency/thread_pool_test.cpp
index b4a650c54bb..b57554add21 100644
--- a/src/mongo/util/concurrency/thread_pool_test.cpp
+++ b/src/mongo/util/concurrency/thread_pool_test.cpp
@@ -34,8 +34,8 @@
#include <boost/optional.hpp>
#include "mongo/base/init.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/barrier.h"
#include "mongo/unittest/death_test.h"
@@ -70,7 +70,7 @@ protected:
}
void blockingWork() {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
++count1;
cv1.notify_all();
while (!flag2) {
@@ -78,7 +78,7 @@ protected:
}
}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("ThreadPoolTest::mutex");
stdx::condition_variable cv1;
stdx::condition_variable cv2;
size_t count1 = 0U;
@@ -86,7 +86,7 @@ protected:
private:
void tearDown() override {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
flag2 = true;
cv2.notify_all();
lk.unlock();
@@ -103,7 +103,7 @@ TEST_F(ThreadPoolTest, MinPoolSize0) {
auto& pool = makePool(options);
pool.startup();
ASSERT_EQ(0U, pool.getStats().numThreads);
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
pool.schedule([this](auto status) {
ASSERT_OK(status);
blockingWork();
@@ -155,7 +155,7 @@ TEST_F(ThreadPoolTest, MaxPoolSize20MinPoolSize15) {
options.maxIdleThreadAge = Milliseconds(100);
auto& pool = makePool(options);
pool.startup();
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
for (size_t i = 0U; i < 30U; ++i) {
pool.schedule([this, i](auto status) {
ASSERT_OK(status) << i;
@@ -223,7 +223,7 @@ DEATH_TEST(ThreadPoolTest,
// mutex-lock is blocked waiting for the mutex, so the independent thread must be blocked inside
// of join(), until the pool thread finishes. At this point, if we destroy the pool, its
// destructor should trigger a fatal error due to double-join.
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
ThreadPool::Options options;
options.minThreads = 2;
options.poolName = "DoubleJoinPool";
@@ -233,10 +233,10 @@ DEATH_TEST(ThreadPoolTest,
while (pool->getStats().numThreads < 2U) {
sleepmillis(50);
}
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
pool->schedule([&mutex](auto status) {
ASSERT_OK(status);
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
});
stdx::thread t([&pool] {
pool->shutdown();
diff --git a/src/mongo/util/concurrency/thread_pool_test_common.cpp b/src/mongo/util/concurrency/thread_pool_test_common.cpp
index 2c2113bb890..dde7e9051fc 100644
--- a/src/mongo/util/concurrency/thread_pool_test_common.cpp
+++ b/src/mongo/util/concurrency/thread_pool_test_common.cpp
@@ -33,9 +33,9 @@
#include "mongo/util/concurrency/thread_pool_test_common.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/unittest/death_test.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/thread_pool_interface.h"
@@ -202,10 +202,10 @@ COMMON_THREAD_POOL_TEST(RepeatedScheduleDoesntSmashStack) {
auto& pool = getThreadPool();
stdx::function<void()> func;
std::size_t n = 0;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable condvar;
func = [&pool, &n, &func, &condvar, &mutex, depth]() {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
if (n < depth) {
n++;
lk.unlock();
@@ -222,7 +222,7 @@ COMMON_THREAD_POOL_TEST(RepeatedScheduleDoesntSmashStack) {
pool.startup();
pool.join();
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
condvar.wait(lk, [&n, depth] { return n == depth; });
}
diff --git a/src/mongo/util/concurrency/ticketholder.cpp b/src/mongo/util/concurrency/ticketholder.cpp
index e30746807ae..a6abd154b2e 100644
--- a/src/mongo/util/concurrency/ticketholder.cpp
+++ b/src/mongo/util/concurrency/ticketholder.cpp
@@ -128,7 +128,7 @@ void TicketHolder::release() {
}
Status TicketHolder::resize(int newSize) {
- stdx::lock_guard<stdx::mutex> lk(_resizeMutex);
+ stdx::lock_guard<Latch> lk(_resizeMutex);
if (newSize < 5)
return Status(ErrorCodes::BadValue,
@@ -174,12 +174,12 @@ TicketHolder::TicketHolder(int num) : _outof(num), _num(num) {}
TicketHolder::~TicketHolder() = default;
bool TicketHolder::tryAcquire() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _tryAcquire();
}
void TicketHolder::waitForTicket(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (opCtx) {
opCtx->waitForConditionOrInterrupt(_newTicket, lk, [this] { return _tryAcquire(); });
@@ -189,7 +189,7 @@ void TicketHolder::waitForTicket(OperationContext* opCtx) {
}
bool TicketHolder::waitForTicketUntil(OperationContext* opCtx, Date_t until) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (opCtx) {
return opCtx->waitForConditionOrInterruptUntil(
@@ -202,14 +202,14 @@ bool TicketHolder::waitForTicketUntil(OperationContext* opCtx, Date_t until) {
void TicketHolder::release() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_num++;
}
_newTicket.notify_one();
}
Status TicketHolder::resize(int newSize) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
int used = _outof.load() - _num;
if (used > newSize) {
diff --git a/src/mongo/util/concurrency/ticketholder.h b/src/mongo/util/concurrency/ticketholder.h
index 51c232bc5d1..a28096dfb1a 100644
--- a/src/mongo/util/concurrency/ticketholder.h
+++ b/src/mongo/util/concurrency/ticketholder.h
@@ -33,8 +33,8 @@
#endif
#include "mongo/db/operation_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/mutex.h"
#include "mongo/util/time_support.h"
@@ -87,13 +87,13 @@ private:
// You can read _outof without a lock, but have to hold _resizeMutex to change.
AtomicWord<int> _outof;
- stdx::mutex _resizeMutex;
+ Mutex _resizeMutex = MONGO_MAKE_LATCH("TicketHolder::_resizeMutex");
#else
bool _tryAcquire();
AtomicWord<int> _outof;
int _num;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("TicketHolder::_mutex");
stdx::condition_variable _newTicket;
#endif
};
diff --git a/src/mongo/util/concurrency/with_lock.h b/src/mongo/util/concurrency/with_lock.h
index d5c55a16cb3..9d7f24bed8e 100644
--- a/src/mongo/util/concurrency/with_lock.h
+++ b/src/mongo/util/concurrency/with_lock.h
@@ -29,7 +29,7 @@
#pragma once
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include <utility>
@@ -56,7 +56,7 @@ namespace mongo {
*
* A call to such a function looks like this:
*
- * stdx::lock_guard<stdx::mutex> lk(_mutex);
+ * stdx::lock_guard<Latch> lk(_mutex);
* _clobber(lk, opCtx); // instead of _clobber_inlock(opCtx)
*
* Note that the formal argument need not (and should not) be named unless it is needed to pass
@@ -68,11 +68,11 @@ namespace mongo {
*
*/
struct WithLock {
- template <typename Mutex>
- WithLock(stdx::lock_guard<Mutex> const&) noexcept {}
+ template <typename LatchT>
+ WithLock(stdx::lock_guard<LatchT> const&) noexcept {}
- template <typename Mutex>
- WithLock(stdx::unique_lock<Mutex> const& lock) noexcept {
+ template <typename LatchT>
+ WithLock(stdx::unique_lock<LatchT> const& lock) noexcept {
invariant(lock.owns_lock());
}
@@ -88,9 +88,9 @@ struct WithLock {
// No moving a lock_guard<> or unique_lock<> in.
template <typename Mutex>
- WithLock(stdx::lock_guard<Mutex>&&) = delete;
+ WithLock(stdx::lock_guard<Latch>&&) = delete;
template <typename Mutex>
- WithLock(stdx::unique_lock<Mutex>&&) = delete;
+ WithLock(stdx::unique_lock<Latch>&&) = delete;
/*
* Produces a WithLock without benefit of any actual lock, for use in cases where a lock is not
diff --git a/src/mongo/util/concurrency/with_lock_test.cpp b/src/mongo/util/concurrency/with_lock_test.cpp
index 0bfe2b3829e..5724f899471 100644
--- a/src/mongo/util/concurrency/with_lock_test.cpp
+++ b/src/mongo/util/concurrency/with_lock_test.cpp
@@ -31,7 +31,7 @@
#include "mongo/platform/basic.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/log.h"
@@ -46,15 +46,15 @@ struct Beerp {
explicit Beerp(int i) {
_blerp(WithLock::withoutLock(), i);
}
- Beerp(stdx::lock_guard<stdx::mutex> const& lk, int i) {
+ Beerp(stdx::lock_guard<Latch> const& lk, int i) {
_blerp(lk, i);
}
int bleep(char n) {
- stdx::lock_guard<stdx::mutex> lk(_m);
+ stdx::lock_guard<Latch> lk(_m);
return _bloop(lk, n - '0');
}
int bleep(int i) {
- stdx::unique_lock<stdx::mutex> lk(_m);
+ stdx::unique_lock<Latch> lk(_m);
return _bloop(lk, i);
}
@@ -66,7 +66,7 @@ private:
log() << i << " bleep" << (i == 1 ? "\n" : "s\n");
return i;
}
- stdx::mutex _m;
+ Mutex _m = MONGO_MAKE_LATCH("Beerp::_m");
};
TEST(WithLockTest, OverloadSet) {
@@ -74,8 +74,8 @@ TEST(WithLockTest, OverloadSet) {
ASSERT_EQ(1, b.bleep('1'));
ASSERT_EQ(2, b.bleep(2));
- stdx::mutex m;
- stdx::lock_guard<stdx::mutex> lk(m);
+ auto m = MONGO_MAKE_LATCH();
+ stdx::lock_guard<Latch> lk(m);
Beerp(lk, 3);
}
diff --git a/src/mongo/util/exit.cpp b/src/mongo/util/exit.cpp
index 452d64837e0..d93067ffe4a 100644
--- a/src/mongo/util/exit.cpp
+++ b/src/mongo/util/exit.cpp
@@ -36,9 +36,9 @@
#include <boost/optional.hpp>
#include <stack>
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/log.h"
#include "mongo/util/quick_exit.h"
@@ -47,7 +47,7 @@ namespace mongo {
namespace {
-stdx::mutex shutdownMutex;
+Mutex shutdownMutex;
stdx::condition_variable shutdownTasksComplete;
boost::optional<ExitCode> shutdownExitCode;
bool shutdownTasksInProgress = false;
@@ -83,7 +83,7 @@ bool globalInShutdownDeprecated() {
}
ExitCode waitForShutdown() {
- stdx::unique_lock<stdx::mutex> lk(shutdownMutex);
+ stdx::unique_lock<Latch> lk(shutdownMutex);
shutdownTasksComplete.wait(lk, [] {
const auto shutdownStarted = static_cast<bool>(shutdownExitCode);
return shutdownStarted && !shutdownTasksInProgress;
@@ -93,7 +93,7 @@ ExitCode waitForShutdown() {
}
void registerShutdownTask(unique_function<void(const ShutdownTaskArgs&)> task) {
- stdx::lock_guard<stdx::mutex> lock(shutdownMutex);
+ stdx::lock_guard<Latch> lock(shutdownMutex);
invariant(!globalInShutdownDeprecated());
shutdownTasks.emplace(std::move(task));
}
@@ -102,7 +102,7 @@ void shutdown(ExitCode code, const ShutdownTaskArgs& shutdownArgs) {
decltype(shutdownTasks) localTasks;
{
- stdx::unique_lock<stdx::mutex> lock(shutdownMutex);
+ stdx::unique_lock<Latch> lock(shutdownMutex);
if (shutdownTasksInProgress) {
// Someone better have called shutdown in some form already.
@@ -138,7 +138,7 @@ void shutdown(ExitCode code, const ShutdownTaskArgs& shutdownArgs) {
runTasks(std::move(localTasks), shutdownArgs);
{
- stdx::lock_guard<stdx::mutex> lock(shutdownMutex);
+ stdx::lock_guard<Latch> lock(shutdownMutex);
shutdownTasksInProgress = false;
shutdownTasksComplete.notify_all();
@@ -151,7 +151,7 @@ void shutdownNoTerminate(const ShutdownTaskArgs& shutdownArgs) {
decltype(shutdownTasks) localTasks;
{
- stdx::lock_guard<stdx::mutex> lock(shutdownMutex);
+ stdx::lock_guard<Latch> lock(shutdownMutex);
if (globalInShutdownDeprecated())
return;
@@ -166,7 +166,7 @@ void shutdownNoTerminate(const ShutdownTaskArgs& shutdownArgs) {
runTasks(std::move(localTasks), shutdownArgs);
{
- stdx::lock_guard<stdx::mutex> lock(shutdownMutex);
+ stdx::lock_guard<Latch> lock(shutdownMutex);
shutdownTasksInProgress = false;
shutdownExitCode.emplace(EXIT_CLEAN);
}
diff --git a/src/mongo/util/fail_point.h b/src/mongo/util/fail_point.h
index 1e1704b90a8..1364a52c2b2 100644
--- a/src/mongo/util/fail_point.h
+++ b/src/mongo/util/fail_point.h
@@ -33,8 +33,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -197,7 +197,7 @@ private:
BSONObj _data;
// protects _mode, _timesOrPeriod, _data
- mutable stdx::mutex _modMutex;
+ mutable stdx::mutex _modMutex; // NOLINT
/**
* Enables this fail point.
diff --git a/src/mongo/util/fail_point_test.cpp b/src/mongo/util/fail_point_test.cpp
index 8ff5279cd6e..4db136ff2cd 100644
--- a/src/mongo/util/fail_point_test.cpp
+++ b/src/mongo/util/fail_point_test.cpp
@@ -171,7 +171,7 @@ public:
void stopTest() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
_inShutdown = true;
}
for (auto& t : _tasks) {
@@ -195,7 +195,7 @@ private:
}
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
if (_inShutdown)
break;
}
@@ -218,7 +218,7 @@ private:
} catch (const std::logic_error&) {
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
if (_inShutdown)
break;
}
@@ -227,7 +227,7 @@ private:
void simpleTask() {
while (true) {
static_cast<void>(MONGO_FAIL_POINT(_fp));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
if (_inShutdown)
break;
}
@@ -241,7 +241,7 @@ private:
_fp.setMode(FailPoint::alwaysOn, 0, BSON("a" << 44));
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
if (_inShutdown)
break;
}
@@ -249,7 +249,8 @@ private:
FailPoint _fp;
std::vector<stdx::thread> _tasks;
- stdx::mutex _mutex;
+
+ mongo::Mutex _mutex = MONGO_MAKE_LATCH();
bool _inShutdown = false;
};
diff --git a/src/mongo/util/future_impl.h b/src/mongo/util/future_impl.h
index 37f9b129a4f..bb9a467710d 100644
--- a/src/mongo/util/future_impl.h
+++ b/src/mongo/util/future_impl.h
@@ -37,8 +37,8 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/type_traits.h"
#include "mongo/stdx/utility.h"
#include "mongo/util/assert_util.h"
@@ -328,7 +328,7 @@ public:
if (state.load(std::memory_order_acquire) == SSBState::kFinished)
return;
- stdx::unique_lock<stdx::mutex> lk(mx);
+ stdx::unique_lock<Latch> lk(mx);
if (!cv) {
cv.emplace();
@@ -396,7 +396,7 @@ public:
Children localChildren;
- stdx::unique_lock<stdx::mutex> lk(mx);
+ stdx::unique_lock<Latch> lk(mx);
localChildren.swap(children);
if (cv) {
// This must be done inside the lock to correctly synchronize with wait().
@@ -449,8 +449,8 @@ public:
// These are only used to signal completion to blocking waiters. Benchmarks showed that it was
// worth deferring the construction of cv, so it can be avoided when it isn't necessary.
- stdx::mutex mx; // F (not that it matters)
- boost::optional<stdx::condition_variable> cv; // F (but guarded by mutex)
+ Mutex mx = MONGO_MAKE_LATCH("FutureResolution"); // F
+ boost::optional<stdx::condition_variable> cv; // F (but guarded by mutex)
// This holds the children created from a SharedSemiFuture. When this SharedState is completed,
// the result will be copied in to each of the children. This allows their continuations to have
diff --git a/src/mongo/util/heap_profiler.cpp b/src/mongo/util/heap_profiler.cpp
index 3edb8717f79..067ab969166 100644
--- a/src/mongo/util/heap_profiler.cpp
+++ b/src/mongo/util/heap_profiler.cpp
@@ -282,8 +282,10 @@ private:
// >1: sample ever sampleIntervalBytes bytes allocated - less accurate but fast and small
std::atomic_size_t sampleIntervalBytes; // NOLINT
- stdx::mutex hashtable_mutex; // guards updates to both object and stack hash tables
- stdx::mutex stackinfo_mutex; // guards against races updating the StackInfo bson representation
+ // guards updates to both object and stack hash tables
+ stdx::mutex hashtable_mutex; // NOLINT
+ // guards against races updating the StackInfo bson representation
+ stdx::mutex stackinfo_mutex; // NOLINT
// cumulative bytes allocated - determines when samples are taken
std::atomic_size_t bytesAllocated{0}; // NOLINT
diff --git a/src/mongo/util/interruptible.h b/src/mongo/util/interruptible.h
index 446e61849cc..6e182d6bbd7 100644
--- a/src/mongo/util/interruptible.h
+++ b/src/mongo/util/interruptible.h
@@ -331,9 +331,9 @@ public:
* Sleeps until "deadline"; throws an exception if the interruptible is interrupted before then.
*/
void sleepUntil(Date_t deadline) {
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
invariant(!waitForConditionOrInterruptUntil(cv, lk, deadline, [] { return false; }));
}
@@ -342,9 +342,9 @@ public:
* then.
*/
void sleepFor(Milliseconds duration) {
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
invariant(!waitForConditionOrInterruptFor(cv, lk, duration, [] { return false; }));
}
diff --git a/src/mongo/util/invalidating_lru_cache.h b/src/mongo/util/invalidating_lru_cache.h
index 7f899151b48..9a52136ac4a 100644
--- a/src/mongo/util/invalidating_lru_cache.h
+++ b/src/mongo/util/invalidating_lru_cache.h
@@ -34,8 +34,8 @@
#include <boost/optional.hpp>
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -140,7 +140,7 @@ public:
* cache.
*/
boost::optional<std::shared_ptr<Value>> get(const Key& key) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto myGeneration = _generation;
auto cacheIt = _cache.find(key);
@@ -192,7 +192,7 @@ public:
* Returns a vector of info about items in the cache for testing/reporting purposes
*/
std::vector<CachedItemInfo> getCacheInfo() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
std::vector<CachedItemInfo> ret;
ret.reserve(_active.size() + _cache.size());
@@ -255,7 +255,7 @@ private:
private:
InvalidatingLRUCache<Key, Value, Invalidator>* _cache;
- stdx::unique_lock<stdx::mutex> _lk;
+ stdx::unique_lock<Latch> _lk;
std::vector<std::shared_ptr<Value>> _activePtrsToDestroy;
};
@@ -331,7 +331,7 @@ private:
auto _makeDeleterWithLock(const Key& key, uint64_t myGeneration) -> auto {
return [this, key, myGeneration](Value* d) {
std::unique_ptr<Value> owned(d);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto it = _active.find(key);
if (it != _active.end() && it->second.expired()) {
_active.erase(it);
@@ -345,7 +345,7 @@ private:
};
}
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("InvalidatingLRUCache::_mutex");
// The generation count - items will not be returned to the cache if their generation count
// does not match the current generation count
diff --git a/src/mongo/util/net/http_client_curl.cpp b/src/mongo/util/net/http_client_curl.cpp
index 1f3e6534dbe..9542be41b7b 100644
--- a/src/mongo/util/net/http_client_curl.cpp
+++ b/src/mongo/util/net/http_client_curl.cpp
@@ -44,7 +44,7 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/net/http_client.h"
@@ -131,17 +131,17 @@ private:
}
static void _lockShare(CURL*, curl_lock_data, curl_lock_access, void* ctx) {
- reinterpret_cast<stdx::mutex*>(ctx)->lock();
+ reinterpret_cast<Mutex*>(ctx)->lock();
}
static void _unlockShare(CURL*, curl_lock_data, void* ctx) {
- reinterpret_cast<stdx::mutex*>(ctx)->unlock();
+ reinterpret_cast<Mutex*>(ctx)->unlock();
}
private:
bool _initialized = false;
CURLSH* _share = nullptr;
- stdx::mutex _shareMutex;
+ Mutex _shareMutex = MONGO_MAKE_LATCH("CurlLibraryManager::_shareMutex");
} curlLibraryManager;
/**
diff --git a/src/mongo/util/net/ssl_manager_openssl.cpp b/src/mongo/util/net/ssl_manager_openssl.cpp
index f9933800123..2a6683c6963 100644
--- a/src/mongo/util/net/ssl_manager_openssl.cpp
+++ b/src/mongo/util/net/ssl_manager_openssl.cpp
@@ -333,7 +333,7 @@ private:
class ThreadIDManager {
public:
unsigned long reserveID() {
- stdx::unique_lock<stdx::mutex> lock(_idMutex);
+ stdx::unique_lock<Latch> lock(_idMutex);
if (!_idLast.empty()) {
unsigned long ret = _idLast.top();
_idLast.pop();
@@ -343,13 +343,14 @@ private:
}
void releaseID(unsigned long id) {
- stdx::unique_lock<stdx::mutex> lock(_idMutex);
+ stdx::unique_lock<Latch> lock(_idMutex);
_idLast.push(id);
}
private:
// Machinery for producing IDs that are unique for the life of a thread.
- stdx::mutex _idMutex; // Protects _idNext and _idLast.
+ Mutex _idMutex =
+ MONGO_MAKE_LATCH("ThreadIDManager::_idMutex"); // Protects _idNext and _idLast.
unsigned long _idNext = 0; // Stores the next thread ID to use, if none already allocated.
std::stack<unsigned long, std::vector<unsigned long>>
_idLast; // Stores old thread IDs, for reuse.
@@ -463,7 +464,7 @@ private:
/** Either returns a cached password, or prompts the user to enter one. */
StatusWith<StringData> fetchPassword() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_password->size()) {
return StringData(_password->c_str());
}
@@ -488,7 +489,7 @@ private:
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PasswordFetcher::_mutex");
SecureString _password; // Protected by _mutex
std::string _prompt;
diff --git a/src/mongo/util/options_parser/options_parser_test.cpp b/src/mongo/util/options_parser/options_parser_test.cpp
index 65b61b08ab5..7c2b223633e 100644
--- a/src/mongo/util/options_parser/options_parser_test.cpp
+++ b/src/mongo/util/options_parser/options_parser_test.cpp
@@ -3473,7 +3473,6 @@ TEST(Constraints, MutuallyExclusiveConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_NOT_OK(environment.validate());
- ;
environment = moe::Environment();
argv.clear();
@@ -3482,7 +3481,6 @@ TEST(Constraints, MutuallyExclusiveConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_OK(environment.validate());
- ;
ASSERT_OK(environment.get(moe::Key("option1"), &value));
environment = moe::Environment();
@@ -3492,7 +3490,6 @@ TEST(Constraints, MutuallyExclusiveConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_OK(environment.validate());
- ;
ASSERT_OK(environment.get(moe::Key("section.option2"), &value));
}
@@ -3517,7 +3514,6 @@ TEST(Constraints, RequiresOtherConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_NOT_OK(environment.validate());
- ;
environment = moe::Environment();
argv.clear();
@@ -3527,7 +3523,6 @@ TEST(Constraints, RequiresOtherConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_OK(environment.validate());
- ;
ASSERT_OK(environment.get(moe::Key("option1"), &value));
ASSERT_OK(environment.get(moe::Key("section.option2"), &value));
@@ -3538,7 +3533,6 @@ TEST(Constraints, RequiresOtherConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_OK(environment.validate());
- ;
ASSERT_OK(environment.get(moe::Key("section.option2"), &value));
}
diff --git a/src/mongo/util/periodic_runner.h b/src/mongo/util/periodic_runner.h
index e9dcfa67489..210bd3c4ecf 100644
--- a/src/mongo/util/periodic_runner.h
+++ b/src/mongo/util/periodic_runner.h
@@ -35,7 +35,7 @@
#include <boost/optional.hpp>
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
diff --git a/src/mongo/util/periodic_runner_impl.cpp b/src/mongo/util/periodic_runner_impl.cpp
index 98a517cf7d9..dc9f091505d 100644
--- a/src/mongo/util/periodic_runner_impl.cpp
+++ b/src/mongo/util/periodic_runner_impl.cpp
@@ -77,7 +77,7 @@ void PeriodicRunnerImpl::PeriodicJobImpl::_run() {
}
startPromise.emplaceValue();
- stdx::unique_lock lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_execStatus != ExecutionStatus::CANCELED) {
// Wait until it's unpaused or canceled
_condvar.wait(lk, [&] { return _execStatus != ExecutionStatus::PAUSED; });
@@ -120,14 +120,14 @@ void PeriodicRunnerImpl::PeriodicJobImpl::start() {
}
void PeriodicRunnerImpl::PeriodicJobImpl::pause() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_execStatus == PeriodicJobImpl::ExecutionStatus::RUNNING);
_execStatus = PeriodicJobImpl::ExecutionStatus::PAUSED;
}
void PeriodicRunnerImpl::PeriodicJobImpl::resume() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_execStatus == PeriodicJobImpl::ExecutionStatus::PAUSED);
_execStatus = PeriodicJobImpl::ExecutionStatus::RUNNING;
}
@@ -136,7 +136,7 @@ void PeriodicRunnerImpl::PeriodicJobImpl::resume() {
void PeriodicRunnerImpl::PeriodicJobImpl::stop() {
auto lastExecStatus = [&] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return std::exchange(_execStatus, ExecutionStatus::CANCELED);
}();
@@ -158,12 +158,12 @@ void PeriodicRunnerImpl::PeriodicJobImpl::stop() {
}
Milliseconds PeriodicRunnerImpl::PeriodicJobImpl::getPeriod() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _job.interval;
}
void PeriodicRunnerImpl::PeriodicJobImpl::setPeriod(Milliseconds ms) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_job.interval = ms;
if (_execStatus == PeriodicJobImpl::ExecutionStatus::RUNNING) {
diff --git a/src/mongo/util/periodic_runner_impl.h b/src/mongo/util/periodic_runner_impl.h
index a921a66c59f..4a89b4b6a05 100644
--- a/src/mongo/util/periodic_runner_impl.h
+++ b/src/mongo/util/periodic_runner_impl.h
@@ -32,8 +32,8 @@
#include <memory>
#include <vector>
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/future.h"
@@ -82,7 +82,7 @@ private:
stdx::thread _thread;
SharedPromise<void> _stopPromise;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicJobImpl::_mutex");
stdx::condition_variable _condvar;
/**
* The current execution status of the job.
diff --git a/src/mongo/util/periodic_runner_impl_test.cpp b/src/mongo/util/periodic_runner_impl_test.cpp
index 8d74b1f4fe1..9dce8c05c87 100644
--- a/src/mongo/util/periodic_runner_impl_test.cpp
+++ b/src/mongo/util/periodic_runner_impl_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/util/periodic_runner_impl.h"
#include "mongo/db/service_context_test_fixture.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/clock_source_mock.h"
namespace mongo {
@@ -75,14 +75,14 @@ TEST_F(PeriodicRunnerImplTest, OneJobTest) {
int count = 0;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&count, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
count++;
}
cv.notify_all();
@@ -96,7 +96,7 @@ TEST_F(PeriodicRunnerImplTest, OneJobTest) {
for (int i = 0; i < 10; i++) {
clockSource().advance(interval);
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&count, &i] { return count > i; });
}
}
@@ -108,14 +108,14 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobDoesNotRunWithoutStart) {
int count = 0;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&count, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
count++;
}
cv.notify_all();
@@ -133,14 +133,14 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobRunsCorrectlyWithStart) {
int count = 0;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&count, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
count++;
}
cv.notify_all();
@@ -152,7 +152,7 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobRunsCorrectlyWithStart) {
// Fast forward ten times, we should run all ten times.
for (int i = 0; i < 10; i++) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return count == i + 1; });
}
clockSource().advance(interval);
@@ -166,14 +166,14 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobPausesCorrectly) {
bool isPaused = false;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
// This will fail if pause does not work correctly.
ASSERT_FALSE(isPaused);
hasExecuted = true;
@@ -186,12 +186,12 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobPausesCorrectly) {
jobAnchor.start();
// Wait for the first execution.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return hasExecuted; });
}
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
isPaused = true;
jobAnchor.pause();
}
@@ -211,13 +211,13 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobResumesCorrectly) {
int count = 0;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
PeriodicRunner::PeriodicJob job("job",
[&count, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
count++;
}
cv.notify_all();
@@ -228,7 +228,7 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobResumesCorrectly) {
jobAnchor.start();
// Wait for the first execution.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return count == 1; });
}
@@ -242,7 +242,7 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobResumesCorrectly) {
clockSource().advance(interval);
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
// Wait for count to increment due to job execution.
cv.wait(lk, [&] { return count == i + 1; });
}
@@ -264,7 +264,7 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobResumesCorrectly) {
// Wait for count to increase. Test will hang if resume() does not work correctly.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return count > numIterationsBeforePause; });
}
@@ -277,14 +277,14 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsTest) {
Milliseconds intervalA{5};
Milliseconds intervalB{10};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add two jobs, ensure they both run the proper number of times
PeriodicRunner::PeriodicJob jobA("job",
[&countA, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
countA++;
}
cv.notify_all();
@@ -294,7 +294,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsTest) {
PeriodicRunner::PeriodicJob jobB("job",
[&countB, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
countB++;
}
cv.notify_all();
@@ -311,7 +311,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsTest) {
for (int i = 0; i <= 10; i++) {
clockSource().advance(intervalA);
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&countA, &countB, &i] { return (countA > i && countB >= i / 2); });
}
}
@@ -320,7 +320,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsTest) {
}
TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
stdx::condition_variable doneCv;
bool a = false;
@@ -328,7 +328,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
PeriodicRunner::PeriodicJob jobA("job",
[&](Client*) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
a = true;
cv.notify_one();
@@ -339,7 +339,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
PeriodicRunner::PeriodicJob jobB("job",
[&](Client*) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
b = true;
cv.notify_one();
@@ -357,7 +357,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
clockSource().advance(Milliseconds(1));
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
doneCv.wait(lk, [&] { return a && b; });
ASSERT(a);
@@ -370,14 +370,14 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
size_t timesCalled = 0;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
timesCalled++;
}
cv.notify_one();
@@ -388,7 +388,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
jobAnchor.start();
// Wait for the first execution.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return timesCalled; });
}
@@ -397,7 +397,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// if we change the period to a longer duration, that doesn't trigger a run
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQ(timesCalled, 1ul);
}
@@ -405,7 +405,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// We actually changed the period
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQ(timesCalled, 1ul);
}
@@ -413,7 +413,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// Now we hit the new cutoff
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return timesCalled == 2ul; });
}
@@ -421,7 +421,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// Haven't hit it
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQ(timesCalled, 2ul);
}
@@ -430,7 +430,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// shortening triggers the period
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return timesCalled == 3ul; });
}
diff --git a/src/mongo/util/processinfo.h b/src/mongo/util/processinfo.h
index 43cde512599..5040484b46b 100644
--- a/src/mongo/util/processinfo.h
+++ b/src/mongo/util/processinfo.h
@@ -34,8 +34,8 @@
#include <string>
#include "mongo/db/jsobj.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/process_id.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
diff --git a/src/mongo/util/producer_consumer_queue.h b/src/mongo/util/producer_consumer_queue.h
index 05b39eff7db..0836bbb28c5 100644
--- a/src/mongo/util/producer_consumer_queue.h
+++ b/src/mongo/util/producer_consumer_queue.h
@@ -35,8 +35,8 @@
#include <numeric>
#include "mongo/db/operation_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/interruptible.h"
#include "mongo/util/scopeguard.h"
@@ -468,7 +468,7 @@ public:
//
// Leaves T unchanged if an interrupt exception is thrown while waiting for space
void push(T&& t, Interruptible* interruptible = Interruptible::notInterruptible()) {
- _pushRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ _pushRunner([&](stdx::unique_lock<Latch>& lk) {
auto cost = _invokeCostFunc(t, lk);
uassert(ErrorCodes::ProducerConsumerQueueBatchTooLarge,
str::stream() << "cost of item (" << cost
@@ -496,7 +496,7 @@ public:
void pushMany(StartIterator start,
EndIterator last,
Interruptible* interruptible = Interruptible::notInterruptible()) {
- return _pushRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ return _pushRunner([&](stdx::unique_lock<Latch>& lk) {
size_t cost = 0;
for (auto iter = start; iter != last; ++iter) {
cost += _invokeCostFunc(*iter, lk);
@@ -521,12 +521,12 @@ public:
// Leaves T unchanged if it fails
bool tryPush(T&& t) {
return _pushRunner(
- [&](stdx::unique_lock<stdx::mutex>& lk) { return _tryPush(lk, std::move(t)); });
+ [&](stdx::unique_lock<Latch>& lk) { return _tryPush(lk, std::move(t)); });
}
// Pops one T out of the queue
T pop(Interruptible* interruptible = Interruptible::notInterruptible()) {
- return _popRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ return _popRunner([&](stdx::unique_lock<Latch>& lk) {
_waitForNonEmpty(lk, interruptible);
return _pop(lk);
});
@@ -538,7 +538,7 @@ public:
// Returns the popped values, along with the cost value of the items extracted
std::pair<std::deque<T>, size_t> popMany(
Interruptible* interruptible = Interruptible::notInterruptible()) {
- return _popRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ return _popRunner([&](stdx::unique_lock<Latch>& lk) {
_waitForNonEmpty(lk, interruptible);
return std::make_pair(std::exchange(_queue, {}), std::exchange(_current, 0));
});
@@ -554,7 +554,7 @@ public:
//
std::pair<std::deque<T>, size_t> popManyUpTo(
size_t budget, Interruptible* interruptible = Interruptible::notInterruptible()) {
- return _popRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ return _popRunner([&](stdx::unique_lock<Latch>& lk) {
_waitForNonEmpty(lk, interruptible);
if (_current <= budget) {
@@ -584,13 +584,13 @@ public:
// Attempts a non-blocking pop of a value
boost::optional<T> tryPop() {
- return _popRunner([&](stdx::unique_lock<stdx::mutex>& lk) { return _tryPop(lk); });
+ return _popRunner([&](stdx::unique_lock<Latch>& lk) { return _tryPop(lk); });
}
// Closes the producer end. Consumers will continue to consume until the queue is exhausted, at
// which time they will begin to throw with an interruption dbexception
void closeProducerEnd() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_producerEndClosed = true;
@@ -599,7 +599,7 @@ public:
// Closes the consumer end. This causes all callers to throw with an interruption dbexception
void closeConsumerEnd() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_consumerEndClosed = true;
_producerEndClosed = true;
@@ -608,7 +608,7 @@ public:
}
Stats getStats() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
Stats stats;
stats.queueDepth = _current;
stats.waitingConsumers = _consumers;
@@ -804,7 +804,7 @@ private:
template <typename Callback>
auto _pushRunner(Callback&& cb) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_checkProducerClosed(lk);
@@ -815,7 +815,7 @@ private:
template <typename Callback>
auto _popRunner(Callback&& cb) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_checkConsumerClosed(lk);
@@ -866,9 +866,7 @@ private:
return t;
}
- void _waitForSpace(stdx::unique_lock<stdx::mutex>& lk,
- size_t cost,
- Interruptible* interruptible) {
+ void _waitForSpace(stdx::unique_lock<Latch>& lk, size_t cost, Interruptible* interruptible) {
// We do some pre-flight checks to avoid creating a cv if we don't need one
_checkProducerClosed(lk);
@@ -885,7 +883,7 @@ private:
});
}
- void _waitForNonEmpty(stdx::unique_lock<stdx::mutex>& lk, Interruptible* interruptible) {
+ void _waitForNonEmpty(stdx::unique_lock<Latch>& lk, Interruptible* interruptible) {
typename Consumers::Waiter waiter(_consumers);
interruptible->waitForConditionOrInterrupt(_consumers.cv(), lk, [&] {
@@ -894,7 +892,7 @@ private:
});
}
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ProducerConsumerQueue::_mutex");
Options _options;
diff --git a/src/mongo/util/producer_consumer_queue_test.cpp b/src/mongo/util/producer_consumer_queue_test.cpp
index ba39482d0d0..5ba6a4d43de 100644
--- a/src/mongo/util/producer_consumer_queue_test.cpp
+++ b/src/mongo/util/producer_consumer_queue_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/util/producer_consumer_queue.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/assert_util.h"
@@ -622,7 +622,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(popManyUpToPopWithBlockingWithSpecialCost,
PRODUCER_CONSUMER_QUEUE_TEST(singleProducerMultiConsumer, runPermutations<false, true>) {
typename Helper::template ProducerConsumerQueue<MoveOnly> pcq{};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
size_t successes = 0;
size_t failures = 0;
@@ -632,10 +632,10 @@ PRODUCER_CONSUMER_QUEUE_TEST(singleProducerMultiConsumer, runPermutations<false,
{
try {
pcq.pop(opCtx);
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
successes++;
} catch (const ExceptionFor<ErrorCodes::ProducerConsumerQueueConsumed>&) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
failures++;
}
}
@@ -665,7 +665,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerSingleConsumer, runPermutations<true,
pcq.push(MoveOnly(1));
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
size_t success = 0;
size_t failure = 0;
@@ -675,10 +675,10 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerSingleConsumer, runPermutations<true,
{
try {
pcq.push(MoveOnly(1), opCtx);
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
success++;
} catch (const ExceptionFor<ErrorCodes::ProducerConsumerQueueEndClosed>&) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
failure++;
}
}
@@ -688,7 +688,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerSingleConsumer, runPermutations<true,
pcq.pop();
while (true) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
if (success == 1)
break;
stdx::this_thread::yield();
@@ -744,7 +744,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerMiddleWaiterBreaks, runPermutations<tr
pcq.push(MoveOnly(1));
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
bool failed = false;
OperationContext* threadBopCtx = nullptr;
@@ -757,7 +757,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerMiddleWaiterBreaks, runPermutations<tr
auto threadB = helper.runThread("ProducerB", [&](OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
threadBopCtx = opCtx;
}
@@ -773,7 +773,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerMiddleWaiterBreaks, runPermutations<tr
};
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT(threadBopCtx != nullptr);
}
diff --git a/src/mongo/util/queue.h b/src/mongo/util/queue.h
index 26d2e19f092..86a3b22002f 100644
--- a/src/mongo/util/queue.h
+++ b/src/mongo/util/queue.h
@@ -33,10 +33,10 @@
#include <limits>
#include <queue>
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/chrono.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -61,12 +61,12 @@ public:
BlockingQueue(size_t size, GetSizeFn f) : _maxSize(size), _getSize(f) {}
void pushEvenIfFull(T const& t) {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
pushImpl_inlock(t, _getSize(t));
}
void push(T const& t) {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_clearing = false;
size_t tSize = _getSize(t);
_waitForSpace_inlock(tSize, lk);
@@ -89,7 +89,7 @@ public:
return;
}
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
const auto startedEmpty = _queue.empty();
_clearing = false;
@@ -111,12 +111,12 @@ public:
* NOTE: Should only be used in a single producer case.
*/
void waitForSpace(size_t size) {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_waitForSpace_inlock(size, lk);
}
bool empty() const {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
return _queue.empty();
}
@@ -124,7 +124,7 @@ public:
* The size as measured by the size function. Default to counting each item
*/
size_t size() const {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
return _currentSize;
}
@@ -139,12 +139,12 @@ public:
* The number/count of items in the queue ( _queue.size() )
*/
size_t count() const {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
return _queue.size();
}
void clear() {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
_clearing = true;
_queue = std::queue<T>();
_currentSize = 0;
@@ -153,7 +153,7 @@ public:
}
bool tryPop(T& t) {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
if (_queue.empty())
return false;
@@ -166,7 +166,7 @@ public:
}
T blockingPop() {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_clearing = false;
while (_queue.empty() && !_clearing)
_cvNoLongerEmpty.wait(lk);
@@ -191,7 +191,7 @@ public:
bool blockingPop(T& t, int maxSecondsToWait) {
using namespace stdx::chrono;
const auto deadline = system_clock::now() + seconds(maxSecondsToWait);
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_clearing = false;
while (_queue.empty() && !_clearing) {
if (stdx::cv_status::timeout == _cvNoLongerEmpty.wait_until(lk, deadline))
@@ -213,7 +213,7 @@ public:
bool blockingPeek(T& t, int maxSecondsToWait) {
using namespace stdx::chrono;
const auto deadline = system_clock::now() + seconds(maxSecondsToWait);
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_clearing = false;
while (_queue.empty() && !_clearing) {
if (stdx::cv_status::timeout == _cvNoLongerEmpty.wait_until(lk, deadline))
@@ -229,7 +229,7 @@ public:
// Obviously, this should only be used when you have
// only one consumer
bool peek(T& t) {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
if (_queue.empty()) {
return false;
}
@@ -242,7 +242,7 @@ public:
* Returns the item most recently added to the queue or nothing if the queue is empty.
*/
boost::optional<T> lastObjectPushed() const {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
if (_queue.empty()) {
return {};
}
@@ -254,7 +254,7 @@ private:
/**
* Returns when enough space is available.
*/
- void _waitForSpace_inlock(size_t size, stdx::unique_lock<stdx::mutex>& lk) {
+ void _waitForSpace_inlock(size_t size, stdx::unique_lock<Latch>& lk) {
while (_currentSize + size > _maxSize) {
_cvNoLongerFull.wait(lk);
}
@@ -268,7 +268,7 @@ private:
_cvNoLongerEmpty.notify_one();
}
- mutable stdx::mutex _lock;
+ mutable Mutex _lock = MONGO_MAKE_LATCH("BlockingQueue::_lock");
std::queue<T> _queue;
const size_t _maxSize;
size_t _currentSize = 0;
diff --git a/src/mongo/util/signal_handlers_synchronous.cpp b/src/mongo/util/signal_handlers_synchronous.cpp
index 90b758d40fc..1ddbd765617 100644
--- a/src/mongo/util/signal_handlers_synchronous.cpp
+++ b/src/mongo/util/signal_handlers_synchronous.cpp
@@ -156,12 +156,12 @@ public:
}
private:
- static stdx::mutex _streamMutex;
+ static stdx::mutex _streamMutex; // NOLINT
static thread_local int terminateDepth;
stdx::unique_lock<stdx::mutex> _lk;
};
-stdx::mutex MallocFreeOStreamGuard::_streamMutex;
+stdx::mutex MallocFreeOStreamGuard::_streamMutex; // NOLINT
thread_local int MallocFreeOStreamGuard::terminateDepth = 0;
// must hold MallocFreeOStreamGuard to call
diff --git a/src/mongo/util/stacktrace_windows.cpp b/src/mongo/util/stacktrace_windows.cpp
index 6576300f7ba..5b299457884 100644
--- a/src/mongo/util/stacktrace_windows.cpp
+++ b/src/mongo/util/stacktrace_windows.cpp
@@ -122,7 +122,7 @@ public:
private:
boost::optional<HANDLE> _processHandle;
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
DWORD _origOptions;
};
diff --git a/src/mongo/util/synchronized_value.h b/src/mongo/util/synchronized_value.h
index a49585b9426..28033cd7bf8 100644
--- a/src/mongo/util/synchronized_value.h
+++ b/src/mongo/util/synchronized_value.h
@@ -29,7 +29,7 @@
#pragma once
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -46,7 +46,7 @@ public:
/**
* Take lock on construction to guard value.
*/
- explicit update_guard(T& value, stdx::mutex& mtx) : _lock(mtx), _value(value) {}
+ explicit update_guard(T& value, Mutex& mtx) : _lock(mtx), _value(value) {}
~update_guard() = default;
// Only move construction is permitted so that synchronized_value may return update_guard
@@ -81,7 +81,7 @@ public:
private:
// Held lock from synchronized_value
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
// Reference to the value from synchronized_value
T& _value;
@@ -96,7 +96,7 @@ public:
/**
* Take lock on construction to guard value.
*/
- explicit const_update_guard(const T& value, stdx::mutex& mtx) : _lock(mtx), _value(value) {}
+ explicit const_update_guard(const T& value, Mutex& mtx) : _lock(mtx), _value(value) {}
~const_update_guard() = default;
// Only move construction is permitted so that synchronized_value may return const_update_guard
@@ -121,7 +121,7 @@ public:
private:
// Held lock from synchronized_value
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
// Reference to the value from synchronized_value
const T& _value;
@@ -156,7 +156,7 @@ public:
// Support assigning from the contained value
synchronized_value& operator=(const T& value) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_value = value;
}
return *this;
@@ -164,7 +164,7 @@ public:
synchronized_value& operator=(T&& value) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_value = std::move(value);
}
return *this;
@@ -174,7 +174,7 @@ public:
* Return a copy of the protected object.
*/
T get() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _value;
}
@@ -204,26 +204,26 @@ public:
bool operator==(synchronized_value const& rhs) const {
// TODO: C++17 - move from std::lock to std::scoped_lock
std::lock(_mutex, rhs._mutex);
- stdx::lock_guard<stdx::mutex> lk1(_mutex, stdx::adopt_lock);
- stdx::lock_guard<stdx::mutex> lk2(rhs._mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk1(_mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk2(rhs._mutex, stdx::adopt_lock);
return _value == rhs._value;
}
bool operator!=(synchronized_value const& rhs) const {
// TODO: C++17 - move from std::lock to std::scoped_lock
std::lock(_mutex, rhs._mutex);
- stdx::lock_guard<stdx::mutex> lk1(_mutex, stdx::adopt_lock);
- stdx::lock_guard<stdx::mutex> lk2(rhs._mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk1(_mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk2(rhs._mutex, stdx::adopt_lock);
return _value != rhs._value;
}
bool operator==(T const& rhs) const {
- stdx::lock_guard<stdx::mutex> lock1(_mutex);
+ stdx::lock_guard<Latch> lock1(_mutex);
return _value == rhs;
}
bool operator!=(T const& rhs) const {
- stdx::lock_guard<stdx::mutex> lock1(_mutex);
+ stdx::lock_guard<Latch> lock1(_mutex);
return _value != rhs;
}
@@ -250,12 +250,12 @@ private:
T _value;
// Mutex to guard value
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("synchronized_value::_mutex");
};
template <class T>
bool operator==(const synchronized_value<T>& lhs, const T& rhs) {
- stdx::lock_guard<stdx::mutex> lock(lhs._mutex);
+ stdx::lock_guard<Latch> lock(lhs._mutex);
return lhs._value == rhs;
}
@@ -267,7 +267,7 @@ bool operator!=(const synchronized_value<T>& lhs, const T& rhs) {
template <class T>
bool operator==(const T& lhs, const synchronized_value<T>& rhs) {
- stdx::lock_guard<stdx::mutex> lock(rhs._mutex);
+ stdx::lock_guard<Latch> lock(rhs._mutex);
return lhs == rhs._value;
}
@@ -281,8 +281,8 @@ template <class T>
bool operator==(const synchronized_value<T>& lhs, const synchronized_value<T>& rhs) {
// TODO: C++17 - move from std::lock to std::scoped_lock
std::lock(lhs._mutex, rhs._mutex);
- stdx::lock_guard<stdx::mutex> lk1(lhs._mutex, stdx::adopt_lock);
- stdx::lock_guard<stdx::mutex> lk2(rhs._mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk1(lhs._mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk2(rhs._mutex, stdx::adopt_lock);
return lhs._value == rhs._value;
}
diff --git a/src/mongo/util/time_support.h b/src/mongo/util/time_support.h
index 3639d41efd3..9511a735f5c 100644
--- a/src/mongo/util/time_support.h
+++ b/src/mongo/util/time_support.h
@@ -36,8 +36,8 @@
#include "mongo/base/status_with.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/chrono.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/duration.h"
namespace mongo {
diff --git a/src/mongo/util/uuid.cpp b/src/mongo/util/uuid.cpp
index 66835454a0c..d729777cf30 100644
--- a/src/mongo/util/uuid.cpp
+++ b/src/mongo/util/uuid.cpp
@@ -34,15 +34,15 @@
#include "mongo/util/uuid.h"
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/hex.h"
namespace mongo {
namespace {
-stdx::mutex uuidGenMutex;
+Mutex uuidGenMutex;
auto uuidGen = SecureRandom::create();
// Regex to match valid version 4 UUIDs with variant bits set
@@ -100,7 +100,7 @@ UUID UUID::gen() {
int64_t randomWords[2];
{
- stdx::lock_guard<stdx::mutex> lk(uuidGenMutex);
+ stdx::lock_guard<Latch> lk(uuidGenMutex);
// Generate 128 random bits
randomWords[0] = uuidGen->nextInt64();
diff --git a/src/mongo/watchdog/watchdog.cpp b/src/mongo/watchdog/watchdog.cpp
index 809403ceb51..3dff62a1086 100644
--- a/src/mongo/watchdog/watchdog.cpp
+++ b/src/mongo/watchdog/watchdog.cpp
@@ -61,7 +61,7 @@ WatchdogPeriodicThread::WatchdogPeriodicThread(Milliseconds period, StringData t
void WatchdogPeriodicThread::start() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_state = State::kStarted;
@@ -76,7 +76,7 @@ void WatchdogPeriodicThread::shutdown() {
stdx::thread thread;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
bool started = (_state == State::kStarted);
@@ -101,7 +101,7 @@ void WatchdogPeriodicThread::shutdown() {
}
void WatchdogPeriodicThread::setPeriod(Milliseconds period) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
bool wasEnabled = _enabled;
@@ -130,7 +130,7 @@ void WatchdogPeriodicThread::doLoop() {
auto preciseClockSource = client->getServiceContext()->getPreciseClockSource();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Ensure state is starting from a clean slate.
resetState();
@@ -144,7 +144,7 @@ void WatchdogPeriodicThread::doLoop() {
Date_t startTime = preciseClockSource->now();
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
MONGO_IDLE_THREAD_BLOCK;
@@ -257,7 +257,7 @@ void WatchdogMonitor::start() {
_watchdogMonitorThread.start();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_state = State::kStarted;
@@ -266,7 +266,7 @@ void WatchdogMonitor::start() {
void WatchdogMonitor::setPeriod(Milliseconds duration) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (duration > Milliseconds(0)) {
dassert(duration >= Milliseconds(1));
@@ -290,7 +290,7 @@ void WatchdogMonitor::setPeriod(Milliseconds duration) {
void WatchdogMonitor::shutdown() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
bool started = (_state == State::kStarted);
diff --git a/src/mongo/watchdog/watchdog.h b/src/mongo/watchdog/watchdog.h
index fe0060e3534..d8c3bf643f6 100644
--- a/src/mongo/watchdog/watchdog.h
+++ b/src/mongo/watchdog/watchdog.h
@@ -34,9 +34,9 @@
#include <vector>
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/functional.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/duration.h"
@@ -204,7 +204,7 @@ private:
stdx::thread _thread;
// Lock to protect _state and control _thread
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WatchdogPeriodicThread::_mutex");
stdx::condition_variable _condvar;
};
@@ -367,7 +367,7 @@ private:
};
// Lock to protect _state and control _thread
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WatchdogMonitor::_mutex");
// State of watchdog
State _state{State::kNotStarted};
diff --git a/src/mongo/watchdog/watchdog_test.cpp b/src/mongo/watchdog/watchdog_test.cpp
index e8e822407d2..3c0ce54fd07 100644
--- a/src/mongo/watchdog/watchdog_test.cpp
+++ b/src/mongo/watchdog/watchdog_test.cpp
@@ -53,7 +53,7 @@ public:
void run(OperationContext* opCtx) final {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
++_counter;
}
@@ -69,7 +69,7 @@ public:
void waitForCount() {
invariant(_wait != 0);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (_counter < _wait) {
_condvar.wait(lock);
}
@@ -79,7 +79,7 @@ public:
std::uint32_t getCounter() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _counter;
}
}
@@ -87,7 +87,7 @@ public:
private:
std::uint32_t _counter{0};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("TestPeriodicThread::_mutex");
stdx::condition_variable _condvar;
std::uint32_t _wait{0};
};
@@ -197,7 +197,7 @@ class TestCounterCheck : public WatchdogCheck {
public:
void run(OperationContext* opCtx) final {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
++_counter;
}
@@ -217,7 +217,7 @@ public:
void waitForCount() {
invariant(_wait != 0);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (_counter < _wait) {
_condvar.wait(lock);
}
@@ -225,7 +225,7 @@ public:
std::uint32_t getCounter() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _counter;
}
}
@@ -233,7 +233,7 @@ public:
private:
std::uint32_t _counter{0};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("TestCounterCheck::_mutex");
stdx::condition_variable _condvar;
std::uint32_t _wait{0};
};
@@ -273,14 +273,14 @@ TEST_F(WatchdogCheckThreadTest, Basic) {
class ManualResetEvent {
public:
void set() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_set = true;
_condvar.notify_one();
}
void wait() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condvar.wait(lock, [this]() { return _set; });
}
@@ -288,7 +288,7 @@ public:
private:
bool _set{false};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ManualResetEvent::_mutex");
stdx::condition_variable _condvar;
};