author     Ben Caimano <ben.caimano@mongodb.com>   2019-09-17 23:22:19 +0000
committer  evergreen <evergreen@mongodb.com>       2019-09-17 23:22:19 +0000
commit     bc11369435ca51e2ff6897433d00f6b909f6a25f (patch)
tree       251653ec8285d798b41846e343e7e414e80ff277
parent     45aea2495306dd61fab46bd398735bb6aaf7b53a (diff)
download   mongo-bc11369435ca51e2ff6897433d00f6b909f6a25f.tar.gz
SERVER-42165 Replace uses of stdx::mutex with mongo::Mutex
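
The commit applies one mechanical pattern across the tree: the include of "mongo/stdx/mutex.h" becomes "mongo/platform/mutex.h", stdx::mutex members and globals become Mutex latches built with MONGO_MAKE_LATCH, and lock guards are retargeted at the Latch alias. A minimal sketch of the before/after shape (the class, member, and variable names below are illustrative, not taken from the diff):

    #include <string>
    #include <utility>
    #include <vector>

    #include "mongo/platform/mutex.h"  // was "mongo/stdx/mutex.h"

    namespace mongo {

    // Illustrative class, not from the diff.
    class NameRegistry {
    public:
        void add(std::string name) {
            stdx::lock_guard<Latch> lk(_mutex);  // was stdx::lock_guard<stdx::mutex>
            _names.push_back(std::move(name));
        }

    private:
        // was: stdx::mutex _mutex;
        Mutex _mutex = MONGO_MAKE_LATCH("NameRegistry::_mutex");
        std::vector<std::string> _names;
    };

    // File-scope latches follow the same pattern and may stay anonymous:
    // was: static stdx::mutex registryMutex;
    static auto registryMutex = MONGO_MAKE_LATCH();

    }  // namespace mongo

The string passed to MONGO_MAKE_LATCH is the capture name that surfaces in the currentOp waitingForLatch diagnostics exercised by jstests/core/currentop_waiting_for_latch.js; test-local and file-scope latches are often created without one.
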
-rwxr-xr-xbuildscripts/cpplint.py8
-rw-r--r--jstests/core/currentop_waiting_for_latch.js3
-rw-r--r--src/mongo/base/secure_allocator.cpp4
-rw-r--r--src/mongo/client/authenticate.cpp14
-rw-r--r--src/mongo/client/connection_pool.cpp10
-rw-r--r--src/mongo/client/connection_pool.h4
-rw-r--r--src/mongo/client/connection_string.h8
-rw-r--r--src/mongo/client/connection_string_connect.cpp4
-rw-r--r--src/mongo/client/connpool.cpp32
-rw-r--r--src/mongo/client/connpool.h5
-rw-r--r--src/mongo/client/connpool_integration_test.cpp24
-rw-r--r--src/mongo/client/dbclient_base.cpp2
-rw-r--r--src/mongo/client/dbclient_connection.cpp8
-rw-r--r--src/mongo/client/dbclient_connection.h4
-rw-r--r--src/mongo/client/fetcher.cpp20
-rw-r--r--src/mongo/client/fetcher.h6
-rw-r--r--src/mongo/client/mongo_uri.h2
-rw-r--r--src/mongo/client/remote_command_retry_scheduler.cpp16
-rw-r--r--src/mongo/client/remote_command_retry_scheduler.h6
-rw-r--r--src/mongo/client/remote_command_targeter_mock.cpp6
-rw-r--r--src/mongo/client/remote_command_targeter_mock.h2
-rw-r--r--src/mongo/client/replica_set_change_notifier.cpp8
-rw-r--r--src/mongo/client/replica_set_change_notifier.h4
-rw-r--r--src/mongo/client/replica_set_monitor.cpp26
-rw-r--r--src/mongo/client/replica_set_monitor_internal.h7
-rw-r--r--src/mongo/client/replica_set_monitor_manager.cpp16
-rw-r--r--src/mongo/client/replica_set_monitor_manager.h4
-rw-r--r--src/mongo/client/scram_client_cache.h8
-rw-r--r--src/mongo/db/auth/authorization_manager.cpp2
-rw-r--r--src/mongo/db/auth/authorization_manager.h4
-rw-r--r--src/mongo/db/auth/authorization_manager_impl.cpp20
-rw-r--r--src/mongo/db/auth/authorization_manager_impl.h8
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_local.cpp12
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_local.h4
-rw-r--r--src/mongo/db/auth/user_cache_invalidator_job.cpp8
-rw-r--r--src/mongo/db/background.cpp34
-rw-r--r--src/mongo/db/baton.cpp10
-rw-r--r--src/mongo/db/catalog/collection.cpp8
-rw-r--r--src/mongo/db/catalog/collection.h6
-rw-r--r--src/mongo/db/catalog/collection_catalog.cpp46
-rw-r--r--src/mongo/db/catalog/collection_catalog.h6
-rw-r--r--src/mongo/db/catalog/index_builds_manager.cpp10
-rw-r--r--src/mongo/db/catalog/index_builds_manager.h4
-rw-r--r--src/mongo/db/catalog/index_catalog_entry.h2
-rw-r--r--src/mongo/db/catalog/index_catalog_entry_impl.cpp8
-rw-r--r--src/mongo/db/catalog/index_catalog_entry_impl.h5
-rw-r--r--src/mongo/db/catalog/multi_index_block.cpp6
-rw-r--r--src/mongo/db/catalog/multi_index_block.h4
-rw-r--r--src/mongo/db/catalog/util/partitioned.h2
-rw-r--r--src/mongo/db/collection_index_builds_tracker.cpp3
-rw-r--r--src/mongo/db/collection_index_builds_tracker.h4
-rw-r--r--src/mongo/db/commands/dbhash.cpp2
-rw-r--r--src/mongo/db/commands/fsync.cpp30
-rw-r--r--src/mongo/db/commands/mr.cpp2
-rw-r--r--src/mongo/db/commands/parameters.cpp6
-rw-r--r--src/mongo/db/commands/user_management_commands.cpp6
-rw-r--r--src/mongo/db/commands/validate.cpp6
-rw-r--r--src/mongo/db/concurrency/d_concurrency.cpp8
-rw-r--r--src/mongo/db/concurrency/d_concurrency_bm.cpp6
-rw-r--r--src/mongo/db/concurrency/deferred_writer.cpp4
-rw-r--r--src/mongo/db/concurrency/deferred_writer.h4
-rw-r--r--src/mongo/db/concurrency/flow_control_ticketholder.cpp6
-rw-r--r--src/mongo/db/concurrency/flow_control_ticketholder.h6
-rw-r--r--src/mongo/db/concurrency/lock_manager.h4
-rw-r--r--src/mongo/db/concurrency/lock_state.cpp6
-rw-r--r--src/mongo/db/concurrency/lock_state.h2
-rw-r--r--src/mongo/db/curop.cpp5
-rw-r--r--src/mongo/db/database_index_builds_tracker.cpp2
-rw-r--r--src/mongo/db/database_index_builds_tracker.h4
-rw-r--r--src/mongo/db/default_baton.cpp8
-rw-r--r--src/mongo/db/default_baton.h6
-rw-r--r--src/mongo/db/free_mon/free_mon_controller.cpp18
-rw-r--r--src/mongo/db/free_mon/free_mon_controller.h2
-rw-r--r--src/mongo/db/free_mon/free_mon_controller_test.cpp22
-rw-r--r--src/mongo/db/free_mon/free_mon_message.h10
-rw-r--r--src/mongo/db/free_mon/free_mon_processor.h8
-rw-r--r--src/mongo/db/free_mon/free_mon_queue.cpp8
-rw-r--r--src/mongo/db/free_mon/free_mon_queue.h2
-rw-r--r--src/mongo/db/ftdc/controller.cpp34
-rw-r--r--src/mongo/db/ftdc/controller.h6
-rw-r--r--src/mongo/db/ftdc/controller_test.cpp4
-rw-r--r--src/mongo/db/index/index_build_interceptor.cpp4
-rw-r--r--src/mongo/db/index/index_build_interceptor.h3
-rw-r--r--src/mongo/db/index_builds_coordinator.cpp44
-rw-r--r--src/mongo/db/index_builds_coordinator.h6
-rw-r--r--src/mongo/db/index_builds_coordinator_mongod.cpp6
-rw-r--r--src/mongo/db/keys_collection_cache.cpp10
-rw-r--r--src/mongo/db/keys_collection_cache.h4
-rw-r--r--src/mongo/db/keys_collection_manager.cpp18
-rw-r--r--src/mongo/db/keys_collection_manager.h5
-rw-r--r--src/mongo/db/logical_clock.cpp12
-rw-r--r--src/mongo/db/logical_clock.h4
-rw-r--r--src/mongo/db/logical_session_cache_impl.cpp34
-rw-r--r--src/mongo/db/logical_session_cache_impl.h2
-rw-r--r--src/mongo/db/logical_time_validator.cpp22
-rw-r--r--src/mongo/db/logical_time_validator.h7
-rw-r--r--src/mongo/db/operation_context.cpp2
-rw-r--r--src/mongo/db/operation_context.h4
-rw-r--r--src/mongo/db/operation_context_group.cpp10
-rw-r--r--src/mongo/db/operation_context_group.h4
-rw-r--r--src/mongo/db/operation_context_test.cpp54
-rw-r--r--src/mongo/db/operation_time_tracker.cpp6
-rw-r--r--src/mongo/db/operation_time_tracker.h4
-rw-r--r--src/mongo/db/periodic_runner_job_abort_expired_transactions.h4
-rw-r--r--src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h5
-rw-r--r--src/mongo/db/pipeline/document_source_exchange.cpp10
-rw-r--r--src/mongo/db/pipeline/document_source_exchange.h8
-rw-r--r--src/mongo/db/pipeline/document_source_exchange_test.cpp9
-rw-r--r--src/mongo/db/query/plan_cache.cpp20
-rw-r--r--src/mongo/db/query/plan_cache.h4
-rw-r--r--src/mongo/db/query/query_planner_wildcard_index_test.cpp2
-rw-r--r--src/mongo/db/query/query_settings.cpp10
-rw-r--r--src/mongo/db/query/query_settings.h4
-rw-r--r--src/mongo/db/read_concern_mongod.cpp6
-rw-r--r--src/mongo/db/repl/abstract_async_component.cpp18
-rw-r--r--src/mongo/db/repl/abstract_async_component.h10
-rw-r--r--src/mongo/db/repl/abstract_async_component_test.cpp14
-rw-r--r--src/mongo/db/repl/abstract_oplog_fetcher.cpp20
-rw-r--r--src/mongo/db/repl/abstract_oplog_fetcher.h6
-rw-r--r--src/mongo/db/repl/base_cloner_test_fixture.cpp4
-rw-r--r--src/mongo/db/repl/base_cloner_test_fixture.h6
-rw-r--r--src/mongo/db/repl/bgsync.cpp34
-rw-r--r--src/mongo/db/repl/bgsync.h6
-rw-r--r--src/mongo/db/repl/callback_completion_guard.h10
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change_test.cpp6
-rw-r--r--src/mongo/db/repl/collection_cloner.cpp34
-rw-r--r--src/mongo/db/repl/collection_cloner.h8
-rw-r--r--src/mongo/db/repl/collection_cloner_test.cpp18
-rw-r--r--src/mongo/db/repl/database_cloner.cpp8
-rw-r--r--src/mongo/db/repl/database_cloner.h8
-rw-r--r--src/mongo/db/repl/databases_cloner.cpp4
-rw-r--r--src/mongo/db/repl/databases_cloner.h10
-rw-r--r--src/mongo/db/repl/databases_cloner_test.cpp10
-rw-r--r--src/mongo/db/repl/drop_pending_collection_reaper.cpp12
-rw-r--r--src/mongo/db/repl/drop_pending_collection_reaper.h6
-rw-r--r--src/mongo/db/repl/initial_syncer.cpp58
-rw-r--r--src/mongo/db/repl/initial_syncer.h12
-rw-r--r--src/mongo/db/repl/initial_syncer_test.cpp10
-rw-r--r--src/mongo/db/repl/local_oplog_info.cpp4
-rw-r--r--src/mongo/db/repl/local_oplog_info.h2
-rw-r--r--src/mongo/db/repl/multiapplier.cpp14
-rw-r--r--src/mongo/db/repl/multiapplier.h6
-rw-r--r--src/mongo/db/repl/noop_writer.cpp10
-rw-r--r--src/mongo/db/repl/noop_writer.h4
-rw-r--r--src/mongo/db/repl/oplog_applier.cpp4
-rw-r--r--src/mongo/db/repl/oplog_applier.h4
-rw-r--r--src/mongo/db/repl/oplog_buffer_collection.cpp30
-rw-r--r--src/mongo/db/repl/oplog_buffer_collection.h4
-rw-r--r--src/mongo/db/repl/oplog_buffer_proxy.cpp22
-rw-r--r--src/mongo/db/repl/oplog_buffer_proxy.h6
-rw-r--r--src/mongo/db/repl/oplog_test.cpp22
-rw-r--r--src/mongo/db/repl/replication_consistency_markers_mock.cpp26
-rw-r--r--src/mongo/db/repl/replication_consistency_markers_mock.h8
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.cpp18
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.h9
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_mock.cpp4
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_mock.h7
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp237
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.h12
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp10
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp26
-rw-r--r--src/mongo/db/repl/replication_metrics.cpp66
-rw-r--r--src/mongo/db/repl/replication_metrics.h4
-rw-r--r--src/mongo/db/repl/replication_process.cpp8
-rw-r--r--src/mongo/db/repl/replication_process.h4
-rw-r--r--src/mongo/db/repl/replication_recovery_test.cpp18
-rw-r--r--src/mongo/db/repl/reporter.cpp26
-rw-r--r--src/mongo/db/repl/reporter.h6
-rw-r--r--src/mongo/db/repl/rollback_checker.cpp7
-rw-r--r--src/mongo/db/repl/rollback_checker.h5
-rw-r--r--src/mongo/db/repl/rollback_checker_test.cpp6
-rw-r--r--src/mongo/db/repl/rollback_impl.cpp4
-rw-r--r--src/mongo/db/repl/rollback_impl.h2
-rw-r--r--src/mongo/db/repl/rollback_test_fixture.h18
-rw-r--r--src/mongo/db/repl/scatter_gather_runner.cpp2
-rw-r--r--src/mongo/db/repl/scatter_gather_runner.h4
-rw-r--r--src/mongo/db/repl/storage_interface_impl.cpp2
-rw-r--r--src/mongo/db/repl/storage_interface_mock.cpp14
-rw-r--r--src/mongo/db/repl/storage_interface_mock.h4
-rw-r--r--src/mongo/db/repl/sync_source_feedback.cpp12
-rw-r--r--src/mongo/db/repl/sync_source_feedback.h6
-rw-r--r--src/mongo/db/repl/sync_source_resolver.cpp16
-rw-r--r--src/mongo/db/repl/sync_source_resolver.h6
-rw-r--r--src/mongo/db/repl/sync_tail.cpp18
-rw-r--r--src/mongo/db/repl/sync_tail.h4
-rw-r--r--src/mongo/db/repl/sync_tail_test.cpp8
-rw-r--r--src/mongo/db/repl/task_runner.cpp14
-rw-r--r--src/mongo/db/repl/task_runner.h6
-rw-r--r--src/mongo/db/repl/task_runner_test.cpp56
-rw-r--r--src/mongo/db/repl/topology_coordinator_v1_test.cpp1
-rw-r--r--src/mongo/db/repl_index_build_state.h4
-rw-r--r--src/mongo/db/s/active_migrations_registry.cpp12
-rw-r--r--src/mongo/db/s/active_migrations_registry.h4
-rw-r--r--src/mongo/db/s/active_move_primaries_registry.cpp6
-rw-r--r--src/mongo/db/s/active_move_primaries_registry.h2
-rw-r--r--src/mongo/db/s/active_rename_collection_registry.cpp4
-rw-r--r--src/mongo/db/s/active_rename_collection_registry.h2
-rw-r--r--src/mongo/db/s/active_shard_collection_registry.cpp6
-rw-r--r--src/mongo/db/s/active_shard_collection_registry.h4
-rw-r--r--src/mongo/db/s/balancer/balancer.cpp30
-rw-r--r--src/mongo/db/s/balancer/balancer.h6
-rw-r--r--src/mongo/db/s/balancer/migration_manager.cpp22
-rw-r--r--src/mongo/db/s/balancer/migration_manager.h6
-rw-r--r--src/mongo/db/s/chunk_splitter.cpp6
-rw-r--r--src/mongo/db/s/chunk_splitter.h2
-rw-r--r--src/mongo/db/s/collection_range_deleter.cpp10
-rw-r--r--src/mongo/db/s/collection_sharding_state.cpp6
-rw-r--r--src/mongo/db/s/collection_sharding_state_factory_shard.cpp4
-rw-r--r--src/mongo/db/s/config/namespace_serializer.cpp4
-rw-r--r--src/mongo/db/s/config/namespace_serializer.h6
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager.cpp10
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager.h4
-rw-r--r--src/mongo/db/s/database_sharding_state.cpp4
-rw-r--r--src/mongo/db/s/implicit_create_collection.cpp16
-rw-r--r--src/mongo/db/s/metadata_manager.cpp30
-rw-r--r--src/mongo/db/s/metadata_manager.h2
-rw-r--r--src/mongo/db/s/metadata_manager_test.cpp2
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp28
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy.h8
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp28
-rw-r--r--src/mongo/db/s/migration_destination_manager.h6
-rw-r--r--src/mongo/db/s/namespace_metadata_change_notifications.cpp8
-rw-r--r--src/mongo/db/s/namespace_metadata_change_notifications.h4
-rw-r--r--src/mongo/db/s/session_catalog_migration_destination.cpp18
-rw-r--r--src/mongo/db/s/session_catalog_migration_destination.h6
-rw-r--r--src/mongo/db/s/session_catalog_migration_source.cpp22
-rw-r--r--src/mongo/db/s/session_catalog_migration_source.h7
-rw-r--r--src/mongo/db/s/shard_server_catalog_cache_loader.cpp48
-rw-r--r--src/mongo/db/s/shard_server_catalog_cache_loader.h8
-rw-r--r--src/mongo/db/s/sharding_initialization_mongod.cpp2
-rw-r--r--src/mongo/db/s/sharding_initialization_mongod.h3
-rw-r--r--src/mongo/db/s/sharding_state.cpp10
-rw-r--r--src/mongo/db/s/sharding_state.h4
-rw-r--r--src/mongo/db/s/transaction_coordinator.cpp26
-rw-r--r--src/mongo/db/s/transaction_coordinator.h2
-rw-r--r--src/mongo/db/s/transaction_coordinator_catalog.cpp20
-rw-r--r--src/mongo/db/s/transaction_coordinator_catalog.h6
-rw-r--r--src/mongo/db/s/transaction_coordinator_futures_util.cpp16
-rw-r--r--src/mongo/db/s/transaction_coordinator_futures_util.h14
-rw-r--r--src/mongo/db/s/transaction_coordinator_service.cpp8
-rw-r--r--src/mongo/db/s/transaction_coordinator_service.h2
-rw-r--r--src/mongo/db/s/wait_for_majority_service.cpp2
-rw-r--r--src/mongo/db/s/wait_for_majority_service.h4
-rw-r--r--src/mongo/db/s/wait_for_majority_service_test.cpp10
-rw-r--r--src/mongo/db/server_recovery.cpp6
-rw-r--r--src/mongo/db/server_recovery.h4
-rw-r--r--src/mongo/db/service_context.cpp15
-rw-r--r--src/mongo/db/service_context.h10
-rw-r--r--src/mongo/db/service_context_test_fixture.cpp1
-rw-r--r--src/mongo/db/service_liaison_mock.cpp16
-rw-r--r--src/mongo/db/service_liaison_mock.h6
-rw-r--r--src/mongo/db/service_liaison_mongod.cpp2
-rw-r--r--src/mongo/db/service_liaison_mongod.h2
-rw-r--r--src/mongo/db/service_liaison_mongos.cpp2
-rw-r--r--src/mongo/db/service_liaison_mongos.h2
-rw-r--r--src/mongo/db/session_catalog.cpp18
-rw-r--r--src/mongo/db/session_catalog.h6
-rw-r--r--src/mongo/db/session_catalog_test.cpp4
-rw-r--r--src/mongo/db/session_killer.cpp8
-rw-r--r--src/mongo/db/session_killer.h8
-rw-r--r--src/mongo/db/sessions_collection_config_server.cpp2
-rw-r--r--src/mongo/db/sessions_collection_config_server.h4
-rw-r--r--src/mongo/db/sessions_collection_mock.cpp12
-rw-r--r--src/mongo/db/sessions_collection_mock.h4
-rw-r--r--src/mongo/db/sessions_collection_rs.h4
-rw-r--r--src/mongo/db/snapshot_window_util.cpp8
-rw-r--r--src/mongo/db/stats/server_write_concern_metrics.cpp8
-rw-r--r--src/mongo/db/stats/server_write_concern_metrics.h2
-rw-r--r--src/mongo/db/storage/biggie/biggie_kv_engine.cpp2
-rw-r--r--src/mongo/db/storage/biggie/biggie_kv_engine.h4
-rw-r--r--src/mongo/db/storage/biggie/biggie_record_store.cpp8
-rw-r--r--src/mongo/db/storage/biggie/biggie_record_store.h7
-rw-r--r--src/mongo/db/storage/biggie/biggie_visibility_manager.cpp12
-rw-r--r--src/mongo/db/storage/biggie/biggie_visibility_manager.h5
-rw-r--r--src/mongo/db/storage/durable_catalog_impl.cpp20
-rw-r--r--src/mongo/db/storage/durable_catalog_impl.h4
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp16
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h7
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h2
-rw-r--r--src/mongo/db/storage/flow_control.cpp8
-rw-r--r--src/mongo/db/storage/flow_control.h4
-rw-r--r--src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp12
-rw-r--r--src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h4
-rw-r--r--src/mongo/db/storage/kv/kv_prefix.cpp6
-rw-r--r--src/mongo/db/storage/kv/kv_prefix.h4
-rw-r--r--src/mongo/db/storage/kv/storage_engine_test.cpp12
-rw-r--r--src/mongo/db/storage/mobile/mobile_kv_engine.h6
-rw-r--r--src/mongo/db/storage/mobile/mobile_record_store.cpp16
-rw-r--r--src/mongo/db/storage/mobile/mobile_record_store.h4
-rw-r--r--src/mongo/db/storage/mobile/mobile_session_pool.cpp8
-rw-r--r--src/mongo/db/storage/mobile/mobile_session_pool.h6
-rw-r--r--src/mongo/db/storage/storage_engine_impl.cpp10
-rw-r--r--src/mongo/db/storage/storage_engine_impl.h4
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp48
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h14
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp14
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h9
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp36
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h11
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h11
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp26
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h11
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp14
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h7
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp14
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h8
-rw-r--r--src/mongo/db/time_proof_service.cpp4
-rw-r--r--src/mongo/db/time_proof_service.h4
-rw-r--r--src/mongo/db/traffic_recorder.cpp22
-rw-r--r--src/mongo/db/traffic_recorder.h4
-rw-r--r--src/mongo/db/ttl_collection_cache.cpp6
-rw-r--r--src/mongo/db/ttl_collection_cache.h4
-rw-r--r--src/mongo/db/views/view_catalog.cpp18
-rw-r--r--src/mongo/db/views/view_catalog.h4
-rw-r--r--src/mongo/dbtests/documentsourcetests.cpp6
-rw-r--r--src/mongo/dbtests/framework.cpp2
-rw-r--r--src/mongo/dbtests/mock/mock_conn_registry.cpp8
-rw-r--r--src/mongo/dbtests/mock/mock_conn_registry.h2
-rw-r--r--src/mongo/dbtests/threadedtests.cpp6
-rw-r--r--src/mongo/embedded/index_builds_coordinator_embedded.cpp2
-rw-r--r--src/mongo/embedded/periodic_runner_embedded.cpp24
-rw-r--r--src/mongo/embedded/periodic_runner_embedded.h6
-rw-r--r--src/mongo/executor/async_multicaster.cpp12
-rw-r--r--src/mongo/executor/async_multicaster.h2
-rw-r--r--src/mongo/executor/async_timer_mock.cpp12
-rw-r--r--src/mongo/executor/async_timer_mock.h4
-rw-r--r--src/mongo/executor/connection_pool.cpp2
-rw-r--r--src/mongo/executor/connection_pool.h4
-rw-r--r--src/mongo/executor/connection_pool_tl.cpp6
-rw-r--r--src/mongo/executor/connection_pool_tl.h2
-rw-r--r--src/mongo/executor/egress_tag_closer_manager.cpp10
-rw-r--r--src/mongo/executor/egress_tag_closer_manager.h4
-rw-r--r--src/mongo/executor/network_interface_integration_test.cpp8
-rw-r--r--src/mongo/executor/network_interface_mock.cpp58
-rw-r--r--src/mongo/executor/network_interface_mock.h10
-rw-r--r--src/mongo/executor/network_interface_perf_test.cpp6
-rw-r--r--src/mongo/executor/network_interface_thread_pool.cpp18
-rw-r--r--src/mongo/executor/network_interface_thread_pool.h10
-rw-r--r--src/mongo/executor/network_interface_tl.cpp28
-rw-r--r--src/mongo/executor/network_interface_tl.h4
-rw-r--r--src/mongo/executor/scoped_task_executor.cpp4
-rw-r--r--src/mongo/executor/scoped_task_executor.h4
-rw-r--r--src/mongo/executor/task_executor.h2
-rw-r--r--src/mongo/executor/thread_pool_mock.cpp18
-rw-r--r--src/mongo/executor/thread_pool_mock.h10
-rw-r--r--src/mongo/executor/thread_pool_task_executor.cpp50
-rw-r--r--src/mongo/executor/thread_pool_task_executor.h16
-rw-r--r--src/mongo/idl/mutable_observer_registry.h4
-rw-r--r--src/mongo/logger/console.cpp2
-rw-r--r--src/mongo/logger/console.h2
-rw-r--r--src/mongo/logger/log_component_settings.cpp4
-rw-r--r--src/mongo/logger/log_component_settings.h4
-rw-r--r--src/mongo/logger/log_severity_limiter.h6
-rw-r--r--src/mongo/logger/ramlog.cpp4
-rw-r--r--src/mongo/logger/ramlog.h4
-rw-r--r--src/mongo/logger/rotatable_file_writer.h6
-rw-r--r--src/mongo/logv2/console.h2
-rw-r--r--src/mongo/logv2/log_component_settings.cpp4
-rw-r--r--src/mongo/logv2/log_component_settings.h4
-rw-r--r--src/mongo/logv2/logv2_bm.cpp8
-rw-r--r--src/mongo/logv2/ramlog.cpp6
-rw-r--r--src/mongo/logv2/ramlog.h5
-rw-r--r--src/mongo/platform/condition_variable.cpp2
-rw-r--r--src/mongo/platform/condition_variable.h14
-rw-r--r--src/mongo/platform/condition_variable_test.cpp6
-rw-r--r--src/mongo/platform/mutex.cpp18
-rw-r--r--src/mongo/platform/mutex.h66
-rw-r--r--src/mongo/s/balancer_configuration.cpp12
-rw-r--r--src/mongo/s/balancer_configuration.h5
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_mock.cpp40
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_mock.h4
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager.cpp12
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager.h6
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager_test.cpp46
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.cpp4
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.h4
-rw-r--r--src/mongo/s/catalog_cache.cpp30
-rw-r--r--src/mongo/s/catalog_cache.h4
-rw-r--r--src/mongo/s/chunk_writes_tracker.cpp2
-rw-r--r--src/mongo/s/chunk_writes_tracker.h4
-rw-r--r--src/mongo/s/client/rs_local_client.cpp4
-rw-r--r--src/mongo/s/client/rs_local_client.h4
-rw-r--r--src/mongo/s/client/shard_connection.cpp8
-rw-r--r--src/mongo/s/client/shard_registry.cpp30
-rw-r--r--src/mongo/s/client/shard_registry.h8
-rw-r--r--src/mongo/s/client/shard_remote.cpp4
-rw-r--r--src/mongo/s/client/shard_remote.h5
-rw-r--r--src/mongo/s/client/version_manager.cpp10
-rw-r--r--src/mongo/s/cluster_identity_loader.cpp6
-rw-r--r--src/mongo/s/cluster_identity_loader.h6
-rw-r--r--src/mongo/s/cluster_last_error_info.cpp8
-rw-r--r--src/mongo/s/cluster_last_error_info.h6
-rw-r--r--src/mongo/s/config_server_catalog_cache_loader.cpp2
-rw-r--r--src/mongo/s/config_server_catalog_cache_loader.h2
-rw-r--r--src/mongo/s/grid.cpp8
-rw-r--r--src/mongo/s/grid.h4
-rw-r--r--src/mongo/s/query/async_results_merger.cpp26
-rw-r--r--src/mongo/s/query/async_results_merger.h4
-rw-r--r--src/mongo/s/query/blocking_results_merger_test.cpp6
-rw-r--r--src/mongo/s/query/cluster_cursor_manager.cpp30
-rw-r--r--src/mongo/s/query/cluster_cursor_manager.h8
-rw-r--r--src/mongo/s/query/establish_cursors.h2
-rw-r--r--src/mongo/s/router_transactions_metrics.cpp4
-rw-r--r--src/mongo/s/router_transactions_metrics.h2
-rw-r--r--src/mongo/s/sharding_task_executor.h4
-rw-r--r--src/mongo/s/sharding_task_executor_pool_controller.h4
-rw-r--r--src/mongo/scripting/deadline_monitor.h17
-rw-r--r--src/mongo/scripting/deadline_monitor_test.cpp6
-rw-r--r--src/mongo/scripting/engine.cpp8
-rw-r--r--src/mongo/scripting/mozjs/PosixNSPR.cpp16
-rw-r--r--src/mongo/scripting/mozjs/countdownlatch.cpp34
-rw-r--r--src/mongo/scripting/mozjs/engine.cpp8
-rw-r--r--src/mongo/scripting/mozjs/engine.h4
-rw-r--r--src/mongo/scripting/mozjs/implscope.cpp16
-rw-r--r--src/mongo/scripting/mozjs/implscope.h2
-rw-r--r--src/mongo/scripting/mozjs/jsthread.cpp10
-rw-r--r--src/mongo/scripting/mozjs/proxyscope.cpp6
-rw-r--r--src/mongo/scripting/mozjs/proxyscope.h6
-rw-r--r--src/mongo/shell/bench.cpp16
-rw-r--r--src/mongo/shell/bench.h8
-rw-r--r--src/mongo/shell/dbshell.cpp2
-rw-r--r--src/mongo/shell/shell_utils.cpp8
-rw-r--r--src/mongo/shell/shell_utils.h6
-rw-r--r--src/mongo/shell/shell_utils_launcher.cpp10
-rw-r--r--src/mongo/shell/shell_utils_launcher.h2
-rw-r--r--src/mongo/stdx/condition_variable.h2
-rw-r--r--src/mongo/stdx/condition_variable_bm.cpp4
-rw-r--r--src/mongo/tools/bridge.cpp6
-rw-r--r--src/mongo/tools/bridge_commands.cpp16
-rw-r--r--src/mongo/tools/bridge_commands.h6
-rw-r--r--src/mongo/transport/baton_asio_linux.h24
-rw-r--r--src/mongo/transport/service_entry_point_impl.h6
-rw-r--r--src/mongo/transport/service_executor_adaptive.cpp22
-rw-r--r--src/mongo/transport/service_executor_adaptive.h16
-rw-r--r--src/mongo/transport/service_executor_adaptive_test.cpp36
-rw-r--r--src/mongo/transport/service_executor_reserved.cpp10
-rw-r--r--src/mongo/transport/service_executor_reserved.h6
-rw-r--r--src/mongo/transport/service_executor_synchronous.cpp2
-rw-r--r--src/mongo/transport/service_executor_synchronous.h6
-rw-r--r--src/mongo/transport/service_executor_test.cpp6
-rw-r--r--src/mongo/transport/service_state_machine.h2
-rw-r--r--src/mongo/transport/service_state_machine_test.cpp6
-rw-r--r--src/mongo/transport/session_asio.h6
-rw-r--r--src/mongo/transport/transport_layer_asio.cpp12
-rw-r--r--src/mongo/transport/transport_layer_asio.h6
-rw-r--r--src/mongo/transport/transport_layer_asio_test.cpp22
-rw-r--r--src/mongo/transport/transport_layer_manager.cpp4
-rw-r--r--src/mongo/transport/transport_layer_manager.h6
-rw-r--r--src/mongo/unittest/barrier.h6
-rw-r--r--src/mongo/unittest/unittest.cpp4
-rw-r--r--src/mongo/util/alarm.cpp14
-rw-r--r--src/mongo/util/alarm.h6
-rw-r--r--src/mongo/util/alarm_runner_background_thread.cpp8
-rw-r--r--src/mongo/util/alarm_runner_background_thread.h4
-rw-r--r--src/mongo/util/background.cpp28
-rw-r--r--src/mongo/util/background_job_test.cpp6
-rw-r--r--src/mongo/util/background_thread_clock_source.h6
-rw-r--r--src/mongo/util/clock_source.cpp10
-rw-r--r--src/mongo/util/clock_source.h2
-rw-r--r--src/mongo/util/clock_source_mock.cpp1
-rw-r--r--src/mongo/util/clock_source_mock.h2
-rw-r--r--src/mongo/util/concurrency/notification.h16
-rw-r--r--src/mongo/util/concurrency/spin_lock.h4
-rw-r--r--src/mongo/util/concurrency/thread_pool.cpp22
-rw-r--r--src/mongo/util/concurrency/thread_pool.h10
-rw-r--r--src/mongo/util/concurrency/thread_pool_test.cpp20
-rw-r--r--src/mongo/util/concurrency/thread_pool_test_common.cpp10
-rw-r--r--src/mongo/util/concurrency/ticketholder.cpp12
-rw-r--r--src/mongo/util/concurrency/ticketholder.h8
-rw-r--r--src/mongo/util/concurrency/with_lock.h16
-rw-r--r--src/mongo/util/concurrency/with_lock_test.cpp14
-rw-r--r--src/mongo/util/diagnostic_info.cpp118
-rw-r--r--src/mongo/util/diagnostic_info.h52
-rw-r--r--src/mongo/util/diagnostic_info_test.cpp39
-rw-r--r--src/mongo/util/exit.cpp18
-rw-r--r--src/mongo/util/fail_point.cpp4
-rw-r--r--src/mongo/util/fail_point.h4
-rw-r--r--src/mongo/util/fail_point_test.cpp15
-rw-r--r--src/mongo/util/future_impl.h12
-rw-r--r--src/mongo/util/heap_profiler.cpp6
-rw-r--r--src/mongo/util/interruptible.h8
-rw-r--r--src/mongo/util/invalidating_lru_cache.h14
-rw-r--r--src/mongo/util/lockable_adapter_test.cpp6
-rw-r--r--src/mongo/util/net/http_client_curl.cpp8
-rw-r--r--src/mongo/util/net/ssl_manager_openssl.cpp11
-rw-r--r--src/mongo/util/options_parser/options_parser_test.cpp6
-rw-r--r--src/mongo/util/periodic_runner.h2
-rw-r--r--src/mongo/util/periodic_runner_impl.cpp12
-rw-r--r--src/mongo/util/periodic_runner_impl.h6
-rw-r--r--src/mongo/util/periodic_runner_impl_test.cpp70
-rw-r--r--src/mongo/util/processinfo.h2
-rw-r--r--src/mongo/util/producer_consumer_queue.h36
-rw-r--r--src/mongo/util/producer_consumer_queue_test.cpp24
-rw-r--r--src/mongo/util/queue.h36
-rw-r--r--src/mongo/util/signal_handlers_synchronous.cpp4
-rw-r--r--src/mongo/util/stacktrace_windows.cpp2
-rw-r--r--src/mongo/util/synchronized_value.h38
-rw-r--r--src/mongo/util/time_support.h2
-rw-r--r--src/mongo/util/uuid.cpp6
-rw-r--r--src/mongo/watchdog/watchdog.cpp16
-rw-r--r--src/mongo/watchdog/watchdog.h8
-rw-r--r--src/mongo/watchdog/watchdog_test.cpp22
502 files changed, 2818 insertions, 2741 deletions
diff --git a/buildscripts/cpplint.py b/buildscripts/cpplint.py
index ca7598ce9b7..68c2b893db7 100755
--- a/buildscripts/cpplint.py
+++ b/buildscripts/cpplint.py
@@ -1673,6 +1673,13 @@ def CheckForMongoVolatile(filename, clean_lines, linenum, error):
'Illegal use of the volatile storage keyword, use AtomicWord instead '
'from "mongo/platform/atomic_word.h"')
+def CheckForMongoMutex(filename, clean_lines, linenum, error):
+    line = clean_lines.elided[linenum]
+    if re.search('[ ({,]stdx?::mutex[ ({]', line):
+        error(filename, linenum, 'mongodb/stdxmutex', 5,
+              'Illegal use of prohibited stdx::mutex, '
+              'use mongo::Mutex from mongo/platform/mutex.h instead.')
+
def CheckForNonMongoAssert(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
if re.search(r'\bassert\s*\(', line):
@@ -5894,6 +5901,7 @@ def ProcessLine(filename, file_extension, clean_lines, line,
CheckForMongoPolyfill(filename, clean_lines, line, error)
CheckForMongoAtomic(filename, clean_lines, line, error)
CheckForMongoVolatile(filename, clean_lines, line, error)
+ CheckForMongoMutex(filename, clean_lines, line, error)
CheckForNonMongoAssert(filename, clean_lines, line, error)
if nesting_state.InAsmBlock(): return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
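
The new check is a single regex over the elided source line; the optional 'x' means it flags std::mutex as well as stdx::mutex, while template arguments such as stdx::lock_guard<stdx::mutex> fall outside the leading delimiter class. A small self-check of that behavior (the sample lines are illustrative, not taken from this diff):

    import re

    # Same pattern as CheckForMongoMutex above.
    MUTEX_RE = re.compile(r'[ ({,]stdx?::mutex[ ({]')

    # Declarations the linter should flag.
    assert MUTEX_RE.search('    stdx::mutex _mutex;')
    assert MUTEX_RE.search('    std::mutex workingSizeMutex;')

    # A template argument is preceded by '<', which is not in the delimiter
    # class, so lock guard types do not trigger the error on their own.
    assert not MUTEX_RE.search('    stdx::lock_guard<stdx::mutex> lk(_mutex);')

Declarations that legitimately have to stay on stdx::mutex are tagged with a trailing // NOLINT comment to opt out of the check, as the secure_allocator.cpp hunk below does for workingSizeMutex.
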
diff --git a/jstests/core/currentop_waiting_for_latch.js b/jstests/core/currentop_waiting_for_latch.js
index 90b7e0fd478..383ba32a643 100644
--- a/jstests/core/currentop_waiting_for_latch.js
+++ b/jstests/core/currentop_waiting_for_latch.js
@@ -57,6 +57,8 @@ try {
assert(result.hasOwnProperty("waitingForLatch"));
assert(result["waitingForLatch"].hasOwnProperty("timestamp"));
assert(result["waitingForLatch"].hasOwnProperty("captureName"));
+
+ /* Absent until we have efficient enough backtracing
assert(result["waitingForLatch"].hasOwnProperty("backtrace"));
result["waitingForLatch"]["backtrace"].forEach(function(frame) {
assert(frame.hasOwnProperty("addr"));
@@ -64,6 +66,7 @@ try {
assert(frame.hasOwnProperty("path"));
assert(typeof frame["path"] === "string");
});
+ */
} finally {
assert.commandWorked(db.adminCommand(
{"configureFailPoint": 'currentOpSpawnsThreadWaitingForLatch', "mode": 'off'}));
diff --git a/src/mongo/base/secure_allocator.cpp b/src/mongo/base/secure_allocator.cpp
index 676827d1182..d342f0e1bbf 100644
--- a/src/mongo/base/secure_allocator.cpp
+++ b/src/mongo/base/secure_allocator.cpp
@@ -44,7 +44,7 @@
#endif
#include "mongo/base/init.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -113,7 +113,7 @@ void EnablePrivilege(const wchar_t* name) {
* size, and then raising the working set. This is the same reason that "i++" has race conditions
* across multiple threads.
*/
-stdx::mutex workingSizeMutex;
+stdx::mutex workingSizeMutex; // NOLINT
/**
* There is a minimum gap between the minimum working set size and maximum working set size.
diff --git a/src/mongo/client/authenticate.cpp b/src/mongo/client/authenticate.cpp
index f035312e4f7..e76f72035a3 100644
--- a/src/mongo/client/authenticate.cpp
+++ b/src/mongo/client/authenticate.cpp
@@ -42,9 +42,9 @@
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/sasl_command_constants.h"
#include "mongo/db/server_options.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/op_msg_rpc_impls.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/net/ssl_manager.h"
#include "mongo/util/net/ssl_options.h"
@@ -186,13 +186,13 @@ Future<void> authenticateClient(const BSONObj& params,
AuthMongoCRHandler authMongoCR = authMongoCRImpl;
-static stdx::mutex internalAuthKeysMutex;
+static auto internalAuthKeysMutex = MONGO_MAKE_LATCH();
static bool internalAuthSet = false;
static std::vector<std::string> internalAuthKeys;
static BSONObj internalAuthParams;
void setInternalAuthKeys(const std::vector<std::string>& keys) {
- stdx::lock_guard<stdx::mutex> lk(internalAuthKeysMutex);
+ stdx::lock_guard<Latch> lk(internalAuthKeysMutex);
internalAuthKeys = keys;
fassert(50996, internalAuthKeys.size() > 0);
@@ -200,24 +200,24 @@ void setInternalAuthKeys(const std::vector<std::string>& keys) {
}
void setInternalUserAuthParams(BSONObj obj) {
- stdx::lock_guard<stdx::mutex> lk(internalAuthKeysMutex);
+ stdx::lock_guard<Latch> lk(internalAuthKeysMutex);
internalAuthParams = obj.getOwned();
internalAuthKeys.clear();
internalAuthSet = true;
}
bool hasMultipleInternalAuthKeys() {
- stdx::lock_guard<stdx::mutex> lk(internalAuthKeysMutex);
+ stdx::lock_guard<Latch> lk(internalAuthKeysMutex);
return internalAuthSet && internalAuthKeys.size() > 1;
}
bool isInternalAuthSet() {
- stdx::lock_guard<stdx::mutex> lk(internalAuthKeysMutex);
+ stdx::lock_guard<Latch> lk(internalAuthKeysMutex);
return internalAuthSet;
}
BSONObj getInternalAuthParams(size_t idx, const std::string& mechanism) {
- stdx::lock_guard<stdx::mutex> lk(internalAuthKeysMutex);
+ stdx::lock_guard<Latch> lk(internalAuthKeysMutex);
if (!internalAuthSet) {
return BSONObj();
}
diff --git a/src/mongo/client/connection_pool.cpp b/src/mongo/client/connection_pool.cpp
index dfe098e1c9d..1dc6884f1fe 100644
--- a/src/mongo/client/connection_pool.cpp
+++ b/src/mongo/client/connection_pool.cpp
@@ -68,7 +68,7 @@ ConnectionPool::~ConnectionPool() {
}
void ConnectionPool::cleanUpOlderThan(Date_t now) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
HostConnectionMap::iterator hostConns = _connections.begin();
while (hostConns != _connections.end()) {
@@ -102,7 +102,7 @@ bool ConnectionPool::_shouldKeepConnection(Date_t now, const ConnectionInfo& con
}
void ConnectionPool::closeAllInUseConnections() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (ConnectionList::iterator iter = _inUseConnections.begin(); iter != _inUseConnections.end();
++iter) {
iter->conn->shutdownAndDisallowReconnect();
@@ -127,7 +127,7 @@ void ConnectionPool::_cleanUpStaleHosts_inlock(Date_t now) {
ConnectionPool::ConnectionList::iterator ConnectionPool::acquireConnection(
const HostAndPort& target, Date_t now, Milliseconds timeout) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Clean up connections on stale/unused hosts
_cleanUpStaleHosts_inlock(now);
@@ -218,7 +218,7 @@ ConnectionPool::ConnectionList::iterator ConnectionPool::acquireConnection(
}
void ConnectionPool::releaseConnection(ConnectionList::iterator iter, const Date_t now) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_shouldKeepConnection(now, *iter)) {
_destroyConnection_inlock(&_inUseConnections, iter);
return;
@@ -232,7 +232,7 @@ void ConnectionPool::releaseConnection(ConnectionList::iterator iter, const Date
}
void ConnectionPool::destroyConnection(ConnectionList::iterator iter) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_destroyConnection_inlock(&_inUseConnections, iter);
}
diff --git a/src/mongo/client/connection_pool.h b/src/mongo/client/connection_pool.h
index 1bcada49dcd..6436bd9bc20 100644
--- a/src/mongo/client/connection_pool.h
+++ b/src/mongo/client/connection_pool.h
@@ -33,7 +33,7 @@
#include <map>
#include "mongo/client/dbclient_connection.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
@@ -194,7 +194,7 @@ private:
const int _messagingPortTags;
// Mutex guarding members of the connection pool
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ConnectionPool::_mutex");
// Map from HostAndPort to idle connections.
HostConnectionMap _connections;
diff --git a/src/mongo/client/connection_string.h b/src/mongo/client/connection_string.h
index 29b77ecf9ab..493c916528f 100644
--- a/src/mongo/client/connection_string.h
+++ b/src/mongo/client/connection_string.h
@@ -37,7 +37,7 @@
#include "mongo/base/status_with.h"
#include "mongo/base/string_data.h"
#include "mongo/bson/util/builder.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/net/hostandport.h"
@@ -153,12 +153,12 @@ public:
};
static void setConnectionHook(ConnectionHook* hook) {
- stdx::lock_guard<stdx::mutex> lk(_connectHookMutex);
+ stdx::lock_guard<Latch> lk(_connectHookMutex);
_connectHook = hook;
}
static ConnectionHook* getConnectionHook() {
- stdx::lock_guard<stdx::mutex> lk(_connectHookMutex);
+ stdx::lock_guard<Latch> lk(_connectHookMutex);
return _connectHook;
}
@@ -190,7 +190,7 @@ private:
std::string _string;
std::string _setName;
- static stdx::mutex _connectHookMutex;
+ static Mutex _connectHookMutex;
static ConnectionHook* _connectHook;
};
diff --git a/src/mongo/client/connection_string_connect.cpp b/src/mongo/client/connection_string_connect.cpp
index 14b3f8f08ff..c60770e2f3f 100644
--- a/src/mongo/client/connection_string_connect.cpp
+++ b/src/mongo/client/connection_string_connect.cpp
@@ -43,7 +43,7 @@
namespace mongo {
-stdx::mutex ConnectionString::_connectHookMutex;
+Mutex ConnectionString::_connectHookMutex = MONGO_MAKE_LATCH();
ConnectionString::ConnectionHook* ConnectionString::_connectHook = nullptr;
std::unique_ptr<DBClientBase> ConnectionString::connect(StringData applicationName,
@@ -84,7 +84,7 @@ std::unique_ptr<DBClientBase> ConnectionString::connect(StringData applicationNa
case CUSTOM: {
// Lock in case other things are modifying this at the same time
- stdx::lock_guard<stdx::mutex> lk(_connectHookMutex);
+ stdx::lock_guard<Latch> lk(_connectHookMutex);
// Allow the replacement of connections with other connections - useful for testing.
diff --git a/src/mongo/client/connpool.cpp b/src/mongo/client/connpool.cpp
index 5901f695014..776f8bd6dbd 100644
--- a/src/mongo/client/connpool.cpp
+++ b/src/mongo/client/connpool.cpp
@@ -217,7 +217,7 @@ void PoolForHost::initializeHostName(const std::string& hostName) {
}
}
-void PoolForHost::waitForFreeConnection(int timeout, stdx::unique_lock<stdx::mutex>& lk) {
+void PoolForHost::waitForFreeConnection(int timeout, stdx::unique_lock<Latch>& lk) {
auto condition = [&] { return (numInUse() < _maxInUse || _inShutdown.load()); };
if (timeout > 0) {
@@ -263,7 +263,7 @@ public:
// there are too many connections in this pool to make a new one, block until a
// connection is released.
{
- stdx::unique_lock<stdx::mutex> lk(_this->_mutex);
+ stdx::unique_lock<Latch> lk(_this->_mutex);
PoolForHost& p = _this->_pools[PoolKey(host, timeout)];
if (p.openConnections() >= _this->_maxInUse) {
@@ -307,7 +307,7 @@ DBConnectionPool::DBConnectionPool()
void DBConnectionPool::shutdown() {
if (!_inShutdown.swap(true)) {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
for (auto i = _pools.begin(); i != _pools.end(); i++) {
PoolForHost& p = i->second;
p.shutdown();
@@ -319,7 +319,7 @@ DBClientBase* DBConnectionPool::_get(const string& ident, double socketTimeout)
uassert(ErrorCodes::ShutdownInProgress,
"Can't use connection pool during shutdown",
!globalInShutdownDeprecated());
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
PoolForHost& p = _pools[PoolKey(ident, socketTimeout)];
p.setMaxPoolSize(_maxPoolSize);
p.setSocketTimeout(socketTimeout);
@@ -328,7 +328,7 @@ DBClientBase* DBConnectionPool::_get(const string& ident, double socketTimeout)
}
int DBConnectionPool::openConnections(const string& ident, double socketTimeout) {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
PoolForHost& p = _pools[PoolKey(ident, socketTimeout)];
return p.openConnections();
}
@@ -337,7 +337,7 @@ DBClientBase* DBConnectionPool::_finishCreate(const string& ident,
double socketTimeout,
DBClientBase* conn) {
{
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
PoolForHost& p = _pools[PoolKey(ident, socketTimeout)];
p.setMaxPoolSize(_maxPoolSize);
p.initializeHostName(ident);
@@ -400,13 +400,13 @@ DBClientBase* DBConnectionPool::get(const MongoURI& uri, double socketTimeout) {
}
int DBConnectionPool::getNumAvailableConns(const string& host, double socketTimeout) const {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
auto it = _pools.find(PoolKey(host, socketTimeout));
return (it == _pools.end()) ? 0 : it->second.numAvailable();
}
int DBConnectionPool::getNumBadConns(const string& host, double socketTimeout) const {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
auto it = _pools.find(PoolKey(host, socketTimeout));
return (it == _pools.end()) ? 0 : it->second.getNumBadConns();
}
@@ -424,7 +424,7 @@ void DBConnectionPool::onRelease(DBClientBase* conn) {
void DBConnectionPool::release(const string& host, DBClientBase* c) {
onRelease(c);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
PoolForHost& p = _pools[PoolKey(host, c->getSoTimeout())];
p.done(this, c);
@@ -441,7 +441,7 @@ void DBConnectionPool::decrementEgress(const string& host, DBClientBase* c) {
DBConnectionPool::~DBConnectionPool() {
// Do not log in destruction, because global connection pools get
// destroyed after the logging framework.
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
for (PoolMap::iterator i = _pools.begin(); i != _pools.end(); i++) {
PoolForHost& p = i->second;
p._parentDestroyed = true;
@@ -453,7 +453,7 @@ DBConnectionPool::~DBConnectionPool() {
}
void DBConnectionPool::flush() {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
for (PoolMap::iterator i = _pools.begin(); i != _pools.end(); i++) {
PoolForHost& p = i->second;
p.flush();
@@ -461,7 +461,7 @@ void DBConnectionPool::flush() {
}
void DBConnectionPool::clear() {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
LOG(2) << "Removing connections on all pools owned by " << _name << endl;
for (PoolMap::iterator iter = _pools.begin(); iter != _pools.end(); ++iter) {
iter->second.clear();
@@ -469,7 +469,7 @@ void DBConnectionPool::clear() {
}
void DBConnectionPool::removeHost(const string& host) {
- stdx::lock_guard<stdx::mutex> L(_mutex);
+ stdx::lock_guard<Latch> L(_mutex);
LOG(2) << "Removing connections from all pools for host: " << host << endl;
for (PoolMap::iterator i = _pools.begin(); i != _pools.end(); ++i) {
const string& poolHost = i->first.ident;
@@ -513,7 +513,7 @@ void DBConnectionPool::onDestroy(DBClientBase* conn) {
void DBConnectionPool::appendConnectionStats(executor::ConnectionPoolStats* stats) const {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (PoolMap::const_iterator i = _pools.begin(); i != _pools.end(); ++i) {
if (i->second.numCreated() == 0)
continue;
@@ -581,7 +581,7 @@ bool DBConnectionPool::isConnectionGood(const string& hostName, DBClientBase* co
}
{
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
PoolForHost& pool = _pools[PoolKey(hostName, conn->getSoTimeout())];
if (pool.isBadSocketCreationTime(conn->getSockCreationMicroSec())) {
return false;
@@ -597,7 +597,7 @@ void DBConnectionPool::taskDoWork() {
{
// we need to get the connections inside the lock
// but we can actually delete them outside
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (PoolMap::iterator i = _pools.begin(); i != _pools.end(); ++i) {
i->second.getStaleConnections(idleThreshold, toDelete);
}
diff --git a/src/mongo/client/connpool.h b/src/mongo/client/connpool.h
index 9fbf65214db..4fb06c6057a 100644
--- a/src/mongo/client/connpool.h
+++ b/src/mongo/client/connpool.h
@@ -35,6 +35,7 @@
#include "mongo/client/dbclient_base.h"
#include "mongo/client/mongo_uri.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/background.h"
#include "mongo/util/concurrency/mutex.h"
#include "mongo/util/time_support.h"
@@ -179,7 +180,7 @@ public:
* throw if a free connection cannot be acquired within that amount of
* time. Timeout is in seconds.
*/
- void waitForFreeConnection(int timeout, stdx::unique_lock<stdx::mutex>& lk);
+ void waitForFreeConnection(int timeout, stdx::unique_lock<Latch>& lk);
/**
* Notifies any waiters that there are new connections available.
@@ -392,7 +393,7 @@ private:
typedef std::map<PoolKey, PoolForHost, poolKeyCompare> PoolMap; // servername -> pool
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("DBConnectionPool::_mutex");
std::string _name;
// The maximum number of connections we'll save in the pool per-host
diff --git a/src/mongo/client/connpool_integration_test.cpp b/src/mongo/client/connpool_integration_test.cpp
index 2c07b67107d..41323e41584 100644
--- a/src/mongo/client/connpool_integration_test.cpp
+++ b/src/mongo/client/connpool_integration_test.cpp
@@ -31,8 +31,8 @@
#include "mongo/client/connpool.h"
#include "mongo/client/global_conn_pool.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/integration_test.h"
#include "mongo/unittest/unittest.h"
@@ -46,7 +46,7 @@ TEST(ConnectionPoolTest, ConnectionPoolMaxInUseConnectionsTest) {
auto host = fixture.getServers()[0].toString();
stdx::condition_variable cv;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
int counter = 0;
pool.setMaxInUse(2);
@@ -60,7 +60,7 @@ TEST(ConnectionPoolTest, ConnectionPoolMaxInUseConnectionsTest) {
// Try creating a new one, should block until we release one.
stdx::thread t([&] {
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
counter++;
}
@@ -69,7 +69,7 @@ TEST(ConnectionPoolTest, ConnectionPoolMaxInUseConnectionsTest) {
auto conn3 = pool.get(host);
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
counter++;
}
@@ -79,7 +79,7 @@ TEST(ConnectionPoolTest, ConnectionPoolMaxInUseConnectionsTest) {
// First thread should be blocked.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return counter == 1; });
}
@@ -87,7 +87,7 @@ TEST(ConnectionPoolTest, ConnectionPoolMaxInUseConnectionsTest) {
pool.release(host, conn2);
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return counter == 2; });
}
@@ -125,7 +125,7 @@ TEST(ConnectionPoolTest, ConnectionPoolShutdownLogicTest) {
auto host = fixture.getServers()[0].toString();
stdx::condition_variable cv;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
int counter = 0;
pool.setMaxInUse(2);
@@ -139,7 +139,7 @@ TEST(ConnectionPoolTest, ConnectionPoolShutdownLogicTest) {
// Attempt to open a new connection, should block.
stdx::thread t([&] {
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
counter++;
}
@@ -148,7 +148,7 @@ TEST(ConnectionPoolTest, ConnectionPoolShutdownLogicTest) {
ASSERT_THROWS(pool.get(host), AssertionException);
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
counter++;
}
@@ -157,14 +157,14 @@ TEST(ConnectionPoolTest, ConnectionPoolShutdownLogicTest) {
// Wait for new thread to block.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return counter == 1; });
}
// Shut down the pool, this should unblock our waiting connection.
pool.shutdown();
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return counter == 2; });
}
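
The test changes above also show how the new latches interoperate with condition variables: the include moves from "mongo/stdx/condition_variable.h" to "mongo/platform/condition_variable.h", the variable stays a stdx::condition_variable, and waits take a stdx::unique_lock<Latch> over a MONGO_MAKE_LATCH-created mutex. A compressed, self-contained sketch of that pattern (function and variable names are illustrative, not copied from the test):

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"
    #include "mongo/stdx/thread.h"

    namespace mongo {

    void waitForWorker() {
        auto mutex = MONGO_MAKE_LATCH();  // anonymous latch, as in the test above
        stdx::condition_variable cv;
        int counter = 0;

        stdx::thread worker([&] {
            {
                stdx::lock_guard<Latch> lk(mutex);  // was stdx::lock_guard<stdx::mutex>
                ++counter;
            }
            cv.notify_all();
        });

        {
            stdx::unique_lock<Latch> lk(mutex);  // was stdx::unique_lock<stdx::mutex>
            cv.wait(lk, [&] { return counter == 1; });
        }
        worker.join();
    }

    }  // namespace mongo
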
diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp
index 893c4e0fab7..3bc0f1366f6 100644
--- a/src/mongo/client/dbclient_base.cpp
+++ b/src/mongo/client/dbclient_base.cpp
@@ -56,13 +56,13 @@
#include "mongo/db/wire_version.h"
#include "mongo/executor/remote_command_request.h"
#include "mongo/executor/remote_command_response.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/factory.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata.h"
#include "mongo/rpc/metadata/client_metadata.h"
#include "mongo/rpc/reply_interface.h"
#include "mongo/s/stale_exception.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/mutex.h"
#include "mongo/util/debug_util.h"
diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp
index 0d058396c0f..aba715197b8 100644
--- a/src/mongo/client/dbclient_connection.cpp
+++ b/src/mongo/client/dbclient_connection.cpp
@@ -62,10 +62,10 @@
#include "mongo/db/wire_version.h"
#include "mongo/executor/remote_command_request.h"
#include "mongo/executor/remote_command_response.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/client_metadata.h"
#include "mongo/s/stale_exception.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/mutex.h"
#include "mongo/util/debug_util.h"
@@ -331,7 +331,7 @@ Status DBClientConnection::connectSocketOnly(const HostAndPort& serverAddress) {
}
{
- stdx::lock_guard<stdx::mutex> lk(_sessionMutex);
+ stdx::lock_guard<Latch> lk(_sessionMutex);
if (_stayFailed.load()) {
// This object is still in a failed state. The session we just created will be destroyed
// immediately since we aren't holding on to it.
@@ -400,7 +400,7 @@ void DBClientConnection::_markFailed(FailAction action) {
} else if (action == kReleaseSession) {
transport::SessionHandle destroyedOutsideMutex;
- stdx::lock_guard<stdx::mutex> lk(_sessionMutex);
+ stdx::lock_guard<Latch> lk(_sessionMutex);
_session.swap(destroyedOutsideMutex);
}
}
@@ -452,7 +452,7 @@ void DBClientConnection::setTags(transport::Session::TagMask tags) {
}
void DBClientConnection::shutdownAndDisallowReconnect() {
- stdx::lock_guard<stdx::mutex> lk(_sessionMutex);
+ stdx::lock_guard<Latch> lk(_sessionMutex);
_stayFailed.store(true);
_markFailed(kEndSession);
}
diff --git a/src/mongo/client/dbclient_connection.h b/src/mongo/client/dbclient_connection.h
index 5b2976a134f..fa55e65f278 100644
--- a/src/mongo/client/dbclient_connection.h
+++ b/src/mongo/client/dbclient_connection.h
@@ -44,12 +44,12 @@
#include "mongo/db/write_concern_options.h"
#include "mongo/logger/log_severity.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/message.h"
#include "mongo/rpc/metadata.h"
#include "mongo/rpc/op_msg.h"
#include "mongo/rpc/protocol.h"
#include "mongo/rpc/unique_message.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/transport/message_compressor_manager.h"
#include "mongo/transport/session.h"
#include "mongo/transport/transport_layer.h"
@@ -293,7 +293,7 @@ protected:
// rebind the handle from the owning thread. The thread that owns this DBClientConnection is
// allowed to use the _session without locking the mutex. This mutex also guards writes to
// _stayFailed, although reads are allowed outside the mutex.
- stdx::mutex _sessionMutex;
+ Mutex _sessionMutex = MONGO_MAKE_LATCH("DBClientConnection::_sessionMutex");
transport::SessionHandle _session;
boost::optional<Milliseconds> _socketTimeout;
transport::Session::TagMask _tagMask = transport::Session::kEmptyTagMask;
diff --git a/src/mongo/client/fetcher.cpp b/src/mongo/client/fetcher.cpp
index 8843227ded5..df0bfb41077 100644
--- a/src/mongo/client/fetcher.cpp
+++ b/src/mongo/client/fetcher.cpp
@@ -195,7 +195,7 @@ std::string Fetcher::toString() const {
}
std::string Fetcher::getDiagnosticString() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
str::stream output;
output << "Fetcher";
output << " source: " << _source.toString();
@@ -218,7 +218,7 @@ std::string Fetcher::getDiagnosticString() const {
}
bool Fetcher::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isActive_inlock();
}
@@ -227,7 +227,7 @@ bool Fetcher::_isActive_inlock() const {
}
Status Fetcher::schedule() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
_state = State::kRunning;
@@ -250,7 +250,7 @@ Status Fetcher::schedule() {
}
void Fetcher::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -273,17 +273,17 @@ void Fetcher::shutdown() {
}
void Fetcher::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() { return !_isActive_inlock(); });
}
Fetcher::State Fetcher::getState_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
bool Fetcher::_isShuttingDown() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isShuttingDown_inlock();
}
@@ -292,7 +292,7 @@ bool Fetcher::_isShuttingDown_inlock() const {
}
Status Fetcher::_scheduleGetMore(const BSONObj& cmdObj) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_isShuttingDown_inlock()) {
return Status(ErrorCodes::CallbackCanceled,
"fetcher was shut down after previous batch was processed");
@@ -347,7 +347,7 @@ void Fetcher::_callback(const RemoteCommandCallbackArgs& rcbd, const char* batch
batchData.otherFields.metadata = std::move(rcbd.response.data);
batchData.elapsedMillis = rcbd.response.elapsedMillis.value_or(Milliseconds{0});
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
batchData.first = _first;
_first = false;
}
@@ -416,7 +416,7 @@ void Fetcher::_finishCallback() {
// 'tempWork' must be declared before lock guard 'lk' so that it is destroyed outside the lock.
Fetcher::CallbackFn tempWork;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(State::kComplete != _state);
_state = State::kComplete;
_first = false;
diff --git a/src/mongo/client/fetcher.h b/src/mongo/client/fetcher.h
index 78bbdc378b8..0a2abbd12fe 100644
--- a/src/mongo/client/fetcher.h
+++ b/src/mongo/client/fetcher.h
@@ -42,8 +42,8 @@
#include "mongo/db/clientcursor.h"
#include "mongo/db/namespace_string.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
@@ -239,7 +239,7 @@ private:
CallbackFn _work;
// Protects member data of this Fetcher.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("Fetcher::_mutex");
mutable stdx::condition_variable _condition;
diff --git a/src/mongo/client/mongo_uri.h b/src/mongo/client/mongo_uri.h
index 89de663d138..816ac10eda2 100644
--- a/src/mongo/client/mongo_uri.h
+++ b/src/mongo/client/mongo_uri.h
@@ -39,7 +39,7 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/util/builder.h"
#include "mongo/client/connection_string.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/transport/transport_layer.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/net/hostandport.h"
diff --git a/src/mongo/client/remote_command_retry_scheduler.cpp b/src/mongo/client/remote_command_retry_scheduler.cpp
index b8eaf0d951f..936c326367d 100644
--- a/src/mongo/client/remote_command_retry_scheduler.cpp
+++ b/src/mongo/client/remote_command_retry_scheduler.cpp
@@ -78,7 +78,7 @@ RemoteCommandRetryScheduler::~RemoteCommandRetryScheduler() {
}
bool RemoteCommandRetryScheduler::isActive() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isActive_inlock();
}
@@ -87,7 +87,7 @@ bool RemoteCommandRetryScheduler::_isActive_inlock() const {
}
Status RemoteCommandRetryScheduler::startup() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
@@ -113,7 +113,7 @@ Status RemoteCommandRetryScheduler::startup() {
void RemoteCommandRetryScheduler::shutdown() {
executor::TaskExecutor::CallbackHandle remoteCommandCallbackHandle;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -136,12 +136,12 @@ void RemoteCommandRetryScheduler::shutdown() {
}
void RemoteCommandRetryScheduler::join() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condition.wait(lock, [this]() { return !_isActive_inlock(); });
}
std::string RemoteCommandRetryScheduler::toString() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
str::stream output;
output << "RemoteCommandRetryScheduler";
output << " request: " << _request.toString();
@@ -174,7 +174,7 @@ void RemoteCommandRetryScheduler::_remoteCommandCallback(
// Use a lambda to avoid unnecessary lock acquisition when checking conditions for termination.
auto getCurrentAttempt = [this]() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _currentAttempt;
};
@@ -188,7 +188,7 @@ void RemoteCommandRetryScheduler::_remoteCommandCallback(
// TODO(benety): Check cumulative elapsed time of failed responses received against retry
// policy. Requires SERVER-24067.
auto scheduleStatus = [this]() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (State::kShuttingDown == _state) {
return Status(ErrorCodes::CallbackCanceled,
"scheduler was shut down before retrying command");
@@ -213,7 +213,7 @@ void RemoteCommandRetryScheduler::_onComplete(
// RemoteCommandRetryScheduler, we release this function object outside the lock.
_callback = {};
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_isActive_inlock());
_state = State::kComplete;
_condition.notify_all();
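
The scheduler's join()/_onComplete() pair above shows the waiter and notifier halves of the migration: waits move to stdx::unique_lock<Latch>, notifications stay on stdx::condition_variable, and small accessors (the getCurrentAttempt lambda) hold the latch only long enough to copy a value. A hedged sketch of that shape, with Worker as a hypothetical stand-in:

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"

    namespace mongo {

    class Worker {
    public:
        void join() {
            stdx::unique_lock<Latch> lk(_mutex);
            _condition.wait(lk, [this] { return !_active; });
        }

        int currentAttempt() const {
            stdx::lock_guard<Latch> lk(_mutex);  // hold the latch only to copy
            return _attempt;
        }

        void onComplete() {
            stdx::lock_guard<Latch> lk(_mutex);
            _active = false;
            _condition.notify_all();
        }

    private:
        mutable Mutex _mutex = MONGO_MAKE_LATCH("Worker::_mutex");
        stdx::condition_variable _condition;
        bool _active = true;
        int _attempt = 0;
    };

    }  // namespace mongo
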
diff --git a/src/mongo/client/remote_command_retry_scheduler.h b/src/mongo/client/remote_command_retry_scheduler.h
index b4cfe52ef88..c0718b8408f 100644
--- a/src/mongo/client/remote_command_retry_scheduler.h
+++ b/src/mongo/client/remote_command_retry_scheduler.h
@@ -37,8 +37,8 @@
#include "mongo/base/error_codes.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -150,7 +150,7 @@ private:
Milliseconds _currentUsedMillis{0};
// Protects member data of this scheduler declared after mutex.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RemoteCommandRetryScheduler::_mutex");
mutable stdx::condition_variable _condition;
diff --git a/src/mongo/client/remote_command_targeter_mock.cpp b/src/mongo/client/remote_command_targeter_mock.cpp
index 164b1e593b3..2120725e981 100644
--- a/src/mongo/client/remote_command_targeter_mock.cpp
+++ b/src/mongo/client/remote_command_targeter_mock.cpp
@@ -78,12 +78,12 @@ SemiFuture<std::vector<HostAndPort>> RemoteCommandTargeterMock::findHostsWithMax
}
void RemoteCommandTargeterMock::markHostNotMaster(const HostAndPort& host, const Status& status) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_hostsMarkedDown.insert(host);
}
void RemoteCommandTargeterMock::markHostUnreachable(const HostAndPort& host, const Status& status) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_hostsMarkedDown.insert(host);
}
@@ -105,7 +105,7 @@ void RemoteCommandTargeterMock::setFindHostsReturnValue(
}
std::set<HostAndPort> RemoteCommandTargeterMock::getAndClearMarkedDownHosts() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto hostsMarkedDown = _hostsMarkedDown;
_hostsMarkedDown.clear();
return hostsMarkedDown;
diff --git a/src/mongo/client/remote_command_targeter_mock.h b/src/mongo/client/remote_command_targeter_mock.h
index 5bb0a486987..12ae4c90586 100644
--- a/src/mongo/client/remote_command_targeter_mock.h
+++ b/src/mongo/client/remote_command_targeter_mock.h
@@ -99,7 +99,7 @@ private:
StatusWith<std::vector<HostAndPort>> _findHostReturnValue;
// Protects _hostsMarkedDown.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RemoteCommandTargeterMock::_mutex");
// HostAndPorts marked not master or unreachable. Meant to verify a code path updates the
// RemoteCommandTargeterMock.
diff --git a/src/mongo/client/replica_set_change_notifier.cpp b/src/mongo/client/replica_set_change_notifier.cpp
index cf6c5b2d90b..d9333f54122 100644
--- a/src/mongo/client/replica_set_change_notifier.cpp
+++ b/src/mongo/client/replica_set_change_notifier.cpp
@@ -56,7 +56,7 @@ void ReplicaSetChangeNotifier::_removeListener(Listener* listener) {
void ReplicaSetChangeNotifier::onFoundSet(const std::string& name) {
LOG(2) << "Signaling found set " << name;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_replicaSetStates.emplace(name, State{});
@@ -73,7 +73,7 @@ void ReplicaSetChangeNotifier::onPossibleSet(ConnectionString connectionString)
const auto& name = connectionString.getSetName();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto state = [&] {
auto& state = _replicaSetStates[name];
@@ -99,7 +99,7 @@ void ReplicaSetChangeNotifier::onConfirmedSet(ConnectionString connectionString,
LOG(2) << "Signaling confirmed set " << connectionString << " with primary " << primary;
const auto& name = connectionString.getSetName();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto state = [&] {
auto& state = _replicaSetStates[name];
@@ -123,7 +123,7 @@ void ReplicaSetChangeNotifier::onConfirmedSet(ConnectionString connectionString,
void ReplicaSetChangeNotifier::onDroppedSet(const std::string& name) {
LOG(2) << "Signaling dropped set " << name;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// If we never signaled the initial possible set, we should not signal on dropped set
auto it = _replicaSetStates.find(name);
diff --git a/src/mongo/client/replica_set_change_notifier.h b/src/mongo/client/replica_set_change_notifier.h
index de61d5dc504..b79da076c4d 100644
--- a/src/mongo/client/replica_set_change_notifier.h
+++ b/src/mongo/client/replica_set_change_notifier.h
@@ -35,7 +35,7 @@
#include "mongo/client/connection_string.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/functional.h"
@@ -102,7 +102,7 @@ private:
void _addListener(Listener* listener);
void _removeListener(Listener* listener);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ReplicaSetChangeNotifier::_mutex");
std::vector<Listener*> _listeners;
stdx::unordered_map<Key, State> _replicaSetStates;
};
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index fd79717719c..ec7fdc24ddb 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -46,8 +46,8 @@
#include "mongo/db/repl/bson_extract_optime.h"
#include "mongo/db/server_options.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/background.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/exit.h"
@@ -315,7 +315,7 @@ SemiFuture<std::vector<HostAndPort>> ReplicaSetMonitor::getHostsOrRefresh(
Future<std::vector<HostAndPort>> ReplicaSetMonitor::_getHostsOrRefresh(
const ReadPreferenceSetting& criteria, Milliseconds maxWait) {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
if (_state->isDropped) {
return Status(ErrorCodes::ReplicaSetMonitorRemoved,
str::stream() << "ReplicaSetMonitor for set " << getName() << " is removed");
@@ -350,7 +350,7 @@ HostAndPort ReplicaSetMonitor::getMasterOrUassert() {
}
void ReplicaSetMonitor::failedHost(const HostAndPort& host, const Status& status) {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
Node* node = _state->findNode(host);
if (node)
node->markFailed(status);
@@ -359,19 +359,19 @@ void ReplicaSetMonitor::failedHost(const HostAndPort& host, const Status& status
}
bool ReplicaSetMonitor::isPrimary(const HostAndPort& host) const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
Node* node = _state->findNode(host);
return node ? node->isMaster : false;
}
bool ReplicaSetMonitor::isHostUp(const HostAndPort& host) const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
Node* node = _state->findNode(host);
return node ? node->isUp : false;
}
int ReplicaSetMonitor::getMinWireVersion() const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
int minVersion = 0;
for (const auto& host : _state->nodes) {
if (host.isUp) {
@@ -383,7 +383,7 @@ int ReplicaSetMonitor::getMinWireVersion() const {
}
int ReplicaSetMonitor::getMaxWireVersion() const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
int maxVersion = std::numeric_limits<int>::max();
for (const auto& host : _state->nodes) {
if (host.isUp) {
@@ -400,7 +400,7 @@ std::string ReplicaSetMonitor::getName() const {
}
std::string ReplicaSetMonitor::getServerAddress() const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
// We return our setUri until first confirmation
return _state->seedConnStr.isValid() ? _state->seedConnStr.toString()
: _state->setUri.connectionString().toString();
@@ -412,7 +412,7 @@ const MongoURI& ReplicaSetMonitor::getOriginalUri() const {
}
bool ReplicaSetMonitor::contains(const HostAndPort& host) const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
return _state->seedNodes.count(host);
}
@@ -444,7 +444,7 @@ ReplicaSetChangeNotifier& ReplicaSetMonitor::getNotifier() {
// TODO move to correct order with non-statics before pushing
void ReplicaSetMonitor::appendInfo(BSONObjBuilder& bsonObjBuilder, bool forFTDC) const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
BSONObjBuilder monitorInfo(bsonObjBuilder.subobjStart(getName()));
if (forFTDC) {
@@ -489,7 +489,7 @@ void ReplicaSetMonitor::disableRefreshRetries_forTest() {
}
bool ReplicaSetMonitor::isKnownToHaveGoodPrimary() const {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
for (const auto& node : _state->nodes) {
if (node.isMaster) {
@@ -501,7 +501,7 @@ bool ReplicaSetMonitor::isKnownToHaveGoodPrimary() const {
}
void ReplicaSetMonitor::runScanForMockReplicaSet() {
- stdx::lock_guard<stdx::mutex> lk(_state->mutex);
+ stdx::lock_guard<Latch> lk(_state->mutex);
_ensureScanInProgress(_state);
// This function should only be called from tests using MockReplicaSet and they should use the
diff --git a/src/mongo/client/replica_set_monitor_internal.h b/src/mongo/client/replica_set_monitor_internal.h
index 29758633ece..939537de01f 100644
--- a/src/mongo/client/replica_set_monitor_internal.h
+++ b/src/mongo/client/replica_set_monitor_internal.h
@@ -44,9 +44,9 @@
#include "mongo/client/read_preference.h"
#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/jsobj.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
@@ -251,7 +251,8 @@ public:
bool isDropped = false;
- mutable stdx::mutex mutex; // You must hold this to access any member below.
+ // You must hold this to access any member below.
+ mutable Mutex mutex = MONGO_MAKE_LATCH("SetState::mutex");
executor::TaskExecutor::CallbackHandle refresherHandle;
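
The monitor hunks above lock a latch that lives inside a shared state struct (_state->mutex) rather than on the monitor itself. A minimal sketch of that layout under the new types; State and Monitor are hypothetical names, and the "hold this to access any member below" convention is the one documented in the hunk above.

    #include <memory>

    #include "mongo/platform/mutex.h"

    namespace mongo {

    struct State {
        // You must hold this to access any member below.
        mutable Mutex mutex = MONGO_MAKE_LATCH("State::mutex");
        bool isDropped = false;
    };

    class Monitor {
    public:
        bool isDropped() const {
            stdx::lock_guard<Latch> lk(_state->mutex);
            return _state->isDropped;
        }

    private:
        std::shared_ptr<State> _state = std::make_shared<State>();
    };

    }  // namespace mongo
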
diff --git a/src/mongo/client/replica_set_monitor_manager.cpp b/src/mongo/client/replica_set_monitor_manager.cpp
index 7c34fb6fb97..cb6c5094e75 100644
--- a/src/mongo/client/replica_set_monitor_manager.cpp
+++ b/src/mongo/client/replica_set_monitor_manager.cpp
@@ -45,8 +45,8 @@
#include "mongo/executor/task_executor.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/executor/thread_pool_task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/metadata/egress_metadata_hook_list.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/map_util.h"
@@ -70,7 +70,7 @@ ReplicaSetMonitorManager::~ReplicaSetMonitorManager() {
}
shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getMonitor(StringData setName) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (auto monitor = _monitors[setName].lock()) {
return monitor;
@@ -108,7 +108,7 @@ shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getOrCreateMonitor(
shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getOrCreateMonitor(const MongoURI& uri) {
invariant(uri.type() == ConnectionString::SET);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
uassert(ErrorCodes::ShutdownInProgress,
str::stream() << "Unable to get monitor for '" << uri << "' due to shutdown",
!_isShutdown);
@@ -132,7 +132,7 @@ shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getOrCreateMonitor(const
vector<string> ReplicaSetMonitorManager::getAllSetNames() {
vector<string> allNames;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& entry : _monitors) {
allNames.push_back(entry.first);
@@ -142,7 +142,7 @@ vector<string> ReplicaSetMonitorManager::getAllSetNames() {
}
void ReplicaSetMonitorManager::removeMonitor(StringData setName) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ReplicaSetMonitorsMap::const_iterator it = _monitors.find(setName);
if (it != _monitors.end()) {
if (auto monitor = it->second.lock()) {
@@ -161,7 +161,7 @@ void ReplicaSetMonitorManager::shutdown() {
decltype(_monitors) monitors;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (std::exchange(_isShutdown, true)) {
return;
}
@@ -192,7 +192,7 @@ void ReplicaSetMonitorManager::removeAllMonitors() {
shutdown();
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_isShutdown = false;
}
}
@@ -226,7 +226,7 @@ ReplicaSetChangeNotifier& ReplicaSetMonitorManager::getNotifier() {
}
bool ReplicaSetMonitorManager::isShutdown() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isShutdown;
}
diff --git a/src/mongo/client/replica_set_monitor_manager.h b/src/mongo/client/replica_set_monitor_manager.h
index 38a2040ec7c..04ef76b08e4 100644
--- a/src/mongo/client/replica_set_monitor_manager.h
+++ b/src/mongo/client/replica_set_monitor_manager.h
@@ -34,7 +34,7 @@
#include "mongo/client/replica_set_change_notifier.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/string_map.h"
namespace mongo {
@@ -104,7 +104,7 @@ private:
using ReplicaSetMonitorsMap = StringMap<std::weak_ptr<ReplicaSetMonitor>>;
// Protects access to the replica set monitors
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ReplicaSetMonitorManager::_mutex");
// Executor for monitoring replica sets.
std::unique_ptr<executor::TaskExecutor> _taskExecutor;
diff --git a/src/mongo/client/scram_client_cache.h b/src/mongo/client/scram_client_cache.h
index f671cd88fe0..fb43d76b622 100644
--- a/src/mongo/client/scram_client_cache.h
+++ b/src/mongo/client/scram_client_cache.h
@@ -32,7 +32,7 @@
#include <string>
#include "mongo/crypto/mechanism_scram.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/net/hostandport.h"
@@ -76,7 +76,7 @@ public:
*/
scram::Secrets<HashBlock> getCachedSecrets(
const HostAndPort& target, const scram::Presecrets<HashBlock>& presecrets) const {
- const stdx::lock_guard<stdx::mutex> lock(_hostToSecretsMutex);
+ const stdx::lock_guard<Latch> lock(_hostToSecretsMutex);
// Search the cache for a record associated with the host we're trying to connect to.
auto foundSecret = _hostToSecrets.find(target);
@@ -102,7 +102,7 @@ public:
void setCachedSecrets(HostAndPort target,
scram::Presecrets<HashBlock> presecrets,
scram::Secrets<HashBlock> secrets) {
- const stdx::lock_guard<stdx::mutex> lock(_hostToSecretsMutex);
+ const stdx::lock_guard<Latch> lock(_hostToSecretsMutex);
typename HostToSecretsMap::iterator it;
bool insertionSuccessful;
@@ -117,7 +117,7 @@ public:
}
private:
- mutable stdx::mutex _hostToSecretsMutex;
+ mutable Mutex _hostToSecretsMutex = MONGO_MAKE_LATCH("SCRAMClientCache::_hostToSecretsMutex");
HostToSecretsMap _hostToSecrets;
};
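
The SCRAM cache above is a header-only template whose const readers still need the lock, hence the mutable Mutex member. A sketch of that combination, assuming nothing beyond the headers the hunk swaps in; HostCache is illustration-only.

    #include <string>
    #include <utility>

    #include "mongo/platform/mutex.h"
    #include "mongo/stdx/unordered_map.h"

    namespace mongo {

    template <typename Value>
    class HostCache {
    public:
        Value get(const std::string& host) const {
            const stdx::lock_guard<Latch> lk(_mutex);  // const method, mutable latch
            auto it = _map.find(host);
            return it == _map.end() ? Value{} : it->second;
        }

        void put(std::string host, Value value) {
            const stdx::lock_guard<Latch> lk(_mutex);
            _map[std::move(host)] = std::move(value);
        }

    private:
        mutable Mutex _mutex = MONGO_MAKE_LATCH("HostCache::_mutex");
        stdx::unordered_map<std::string, Value> _map;
    };

    }  // namespace mongo
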
diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp
index b40e48b4ece..cfdcf367452 100644
--- a/src/mongo/db/auth/authorization_manager.cpp
+++ b/src/mongo/db/auth/authorization_manager.cpp
@@ -57,7 +57,7 @@
#include "mongo/db/global_settings.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/compiler.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/auth/authorization_manager.h b/src/mongo/db/auth/authorization_manager.h
index 19034dee81b..035d797f48d 100644
--- a/src/mongo/db/auth/authorization_manager.h
+++ b/src/mongo/db/auth/authorization_manager.h
@@ -49,8 +49,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/server_options.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
diff --git a/src/mongo/db/auth/authorization_manager_impl.cpp b/src/mongo/db/auth/authorization_manager_impl.cpp
index 219f07d5cc0..8c94db89c40 100644
--- a/src/mongo/db/auth/authorization_manager_impl.cpp
+++ b/src/mongo/db/auth/authorization_manager_impl.cpp
@@ -62,7 +62,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/mongod_options.h"
#include "mongo/platform/compiler.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -115,7 +115,7 @@ class PinnedUserSetParameter {
public:
void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) const {
BSONArrayBuilder sub(b.subarrayStart(name));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& username : _pinnedUsersList) {
BSONObjBuilder nameObj(sub.subobjStart());
nameObj << AuthorizationManager::USER_NAME_FIELD_NAME << username.getUser()
@@ -138,7 +138,7 @@ public:
return status;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_pinnedUsersList = out;
auto authzManager = _authzManager;
if (!authzManager) {
@@ -171,7 +171,7 @@ public:
return status;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_pinnedUsersList = out;
auto authzManager = _authzManager;
if (!authzManager) {
@@ -183,7 +183,7 @@ public:
}
void setAuthzManager(AuthorizationManager* authzManager) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_authzManager = authzManager;
_authzManager->updatePinnedUsersList(std::move(_pinnedUsersList));
}
@@ -200,7 +200,7 @@ private:
}
AuthorizationManager* _authzManager = nullptr;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("PinnedUserSetParameter::_mutex");
std::vector<UserName> _pinnedUsersList;
} authorizationManagerPinnedUsers;
@@ -339,7 +339,7 @@ private:
bool _isThisGuardInFetchPhase;
AuthorizationManagerImpl* _authzManager;
- stdx::unique_lock<stdx::mutex> _cacheLock;
+ stdx::unique_lock<Latch> _cacheLock;
};
AuthorizationManagerImpl::AuthorizationManagerImpl()
@@ -396,7 +396,7 @@ Status AuthorizationManagerImpl::getAuthorizationVersion(OperationContext* opCtx
}
OID AuthorizationManagerImpl::getCacheGeneration() {
- stdx::lock_guard<stdx::mutex> lk(_cacheWriteMutex);
+ stdx::lock_guard<Latch> lk(_cacheWriteMutex);
return _fetchGeneration;
}
@@ -641,7 +641,7 @@ Status AuthorizationManagerImpl::_fetchUserV2(OperationContext* opCtx,
}
void AuthorizationManagerImpl::updatePinnedUsersList(std::vector<UserName> names) {
- stdx::unique_lock<stdx::mutex> lk(_pinnedUsersMutex);
+ stdx::unique_lock<Latch> lk(_pinnedUsersMutex);
_usersToPin = std::move(names);
bool noUsersToPin = _usersToPin->empty();
_pinnedUsersCond.notify_one();
@@ -664,7 +664,7 @@ void AuthorizationManagerImpl::_pinnedUsersThreadRoutine() noexcept try {
while (true) {
auto opCtx = cc().makeOperationContext();
- stdx::unique_lock<stdx::mutex> lk(_pinnedUsersMutex);
+ stdx::unique_lock<Latch> lk(_pinnedUsersMutex);
const Milliseconds timeout(authorizationManagerPinnedUsersRefreshIntervalMillis.load());
auto waitRes = opCtx->waitForConditionOrInterruptFor(
_pinnedUsersCond, lk, timeout, [&] { return _usersToPin.has_value(); });
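
The pinned-users thread above waits on the latch through the operation context so the wait stays interruptible and bounded. A sketch of that call shape; waitForFlag and its parameters are hypothetical, and treating the return value as "predicate satisfied" is an assumption based on how the overload is used in the hunk above.

    #include "mongo/db/operation_context.h"
    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"
    #include "mongo/util/duration.h"

    namespace mongo {

    bool waitForFlag(OperationContext* opCtx,
                     Mutex& mutex,
                     stdx::condition_variable& cond,
                     const bool& flag) {
        stdx::unique_lock<Latch> lk(mutex);
        // Interruptible, deadline-bounded wait; the predicate runs under 'lk'.
        return opCtx->waitForConditionOrInterruptFor(
            cond, lk, Milliseconds(1000), [&] { return flag; });
    }

    }  // namespace mongo
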
diff --git a/src/mongo/db/auth/authorization_manager_impl.h b/src/mongo/db/auth/authorization_manager_impl.h
index d18b6e7724a..725bb27b7d8 100644
--- a/src/mongo/db/auth/authorization_manager_impl.h
+++ b/src/mongo/db/auth/authorization_manager_impl.h
@@ -50,8 +50,8 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/server_options.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/invalidating_lru_cache.h"
@@ -232,7 +232,7 @@ private:
InvalidatingLRUCache<UserName, User, UserCacheInvalidator> _userCache;
- stdx::mutex _pinnedUsersMutex;
+ Mutex _pinnedUsersMutex = MONGO_MAKE_LATCH("AuthorizationManagerImpl::_pinnedUsersMutex");
stdx::condition_variable _pinnedUsersCond;
std::once_flag _pinnedThreadTrackerStarted;
boost::optional<std::vector<UserName>> _usersToPin;
@@ -241,7 +241,7 @@ private:
* Protects _cacheGeneration, _version and _isFetchPhaseBusy. Manipulated
* via CacheGuard.
*/
- stdx::mutex _cacheWriteMutex;
+ Mutex _cacheWriteMutex = MONGO_MAKE_LATCH("AuthorizationManagerImpl::_cacheWriteMutex");
/**
* Current generation of cached data. Updated every time part of the cache gets
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index 311e68ad533..923acc650fa 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -204,7 +204,7 @@ void AuthzManagerExternalStateLocal::resolveUserRoles(mutablebson::Document* use
bool isRoleGraphConsistent = false;
{
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_roleGraphMutex);
isRoleGraphConsistent = _roleGraphState == roleGraphStateConsistent;
for (const auto& role : directRoles) {
indirectRoles.insert(role);
@@ -306,7 +306,7 @@ Status AuthzManagerExternalStateLocal::getRoleDescription(
*result = resultDoc.getObject();
return Status::OK();
}
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_roleGraphMutex);
return _getRoleDescription_inlock(roleName, showPrivileges, showRestrictions, result);
}
@@ -326,7 +326,7 @@ Status AuthzManagerExternalStateLocal::getRolesDescription(
return Status::OK();
}
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_roleGraphMutex);
BSONArrayBuilder resultBuilder;
for (const RoleName& role : roles) {
BSONObj roleDoc;
@@ -441,7 +441,7 @@ Status AuthzManagerExternalStateLocal::getRoleDescriptionsForDB(
"Cannot get user fragment for all roles in a database");
}
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_roleGraphMutex);
for (RoleNameIterator it = _roleGraph.getRolesForDatabase(dbname); it.more(); it.next()) {
if (!showBuiltinRoles && _roleGraph.isBuiltinRole(it.get())) {
continue;
@@ -476,7 +476,7 @@ void addRoleFromDocumentOrWarn(RoleGraph* roleGraph, const BSONObj& doc) {
} // namespace
Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lkInitialzeRoleGraph(_roleGraphMutex);
+ stdx::lock_guard<Latch> lkInitialzeRoleGraph(_roleGraphMutex);
_roleGraphState = roleGraphStateInitial;
_roleGraph = RoleGraph();
@@ -562,7 +562,7 @@ private:
void _refreshRoleGraph() {
- stdx::lock_guard<stdx::mutex> lk(_externalState->_roleGraphMutex);
+ stdx::lock_guard<Latch> lk(_externalState->_roleGraphMutex);
Status status = _externalState->_roleGraph.handleLogOp(
_opCtx, _op.c_str(), _nss, _o, _o2 ? &*_o2 : nullptr);
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.h b/src/mongo/db/auth/authz_manager_external_state_local.h
index 31b3869f114..a5965abf3c2 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.h
+++ b/src/mongo/db/auth/authz_manager_external_state_local.h
@@ -37,7 +37,7 @@
#include "mongo/db/auth/role_graph.h"
#include "mongo/db/auth/role_name.h"
#include "mongo/db/auth/user_name.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -161,7 +161,7 @@ private:
/**
* Guards _roleGraphState and _roleGraph.
*/
- stdx::mutex _roleGraphMutex;
+ Mutex _roleGraphMutex = MONGO_MAKE_LATCH("AuthzManagerExternalStateLocal::_roleGraphMutex");
};
} // namespace mongo
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index 893cc5ddae7..2b0c89e4f1b 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -43,9 +43,9 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/platform/compiler.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/grid.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/background.h"
#include "mongo/util/concurrency/idle_thread_block.h"
#include "mongo/util/duration.h"
@@ -63,7 +63,7 @@ public:
void setInterval(Seconds interval) {
{
- stdx::lock_guard<stdx::mutex> twiddle(_mutex);
+ stdx::lock_guard<Latch> twiddle(_mutex);
MONGO_LOG(5) << "setInterval: old=" << _interval << ", new=" << interval;
_interval = interval;
}
@@ -75,7 +75,7 @@ public:
}
void wait() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (true) {
Date_t now = Date_t::now();
Date_t expiry = _last + _interval;
@@ -95,7 +95,7 @@ public:
private:
Seconds _interval;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ThreadSleepInterval::_mutex");
stdx::condition_variable _condition;
Date_t _last;
};
diff --git a/src/mongo/db/background.cpp b/src/mongo/db/background.cpp
index 0f7b9cd7eea..62afb3e099b 100644
--- a/src/mongo/db/background.cpp
+++ b/src/mongo/db/background.cpp
@@ -35,8 +35,8 @@
#include <string>
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/map_util.h"
@@ -56,13 +56,13 @@ public:
void recordBegin();
int recordEnd();
- void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk);
+ void awaitNoBgOps(stdx::unique_lock<Latch>& lk);
int getOpsInProgCount() const {
return _opsInProgCount;
}
- void waitForAnOpRemoval(stdx::unique_lock<stdx::mutex>& lk, OperationContext* opCtx);
+ void waitForAnOpRemoval(stdx::unique_lock<Latch>& lk, OperationContext* opCtx);
private:
int _opsInProgCount;
@@ -75,7 +75,7 @@ typedef StringMap<std::shared_ptr<BgInfo>> BgInfoMap;
typedef BgInfoMap::const_iterator BgInfoMapIterator;
// Static data for this file is never destroyed.
-stdx::mutex& m = *(new stdx::mutex());
+Mutex& m = *(new Mutex());
BgInfoMap& dbsInProg = *(new BgInfoMap());
BgInfoMap& nsInProg = *(new BgInfoMap());
@@ -94,12 +94,12 @@ int BgInfo::recordEnd() {
return _opsInProgCount;
}
-void BgInfo::awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk) {
+void BgInfo::awaitNoBgOps(stdx::unique_lock<Latch>& lk) {
while (_opsInProgCount > 0)
_noOpsInProg.wait(lk);
}
-void BgInfo::waitForAnOpRemoval(stdx::unique_lock<stdx::mutex>& lk, OperationContext* opCtx) {
+void BgInfo::waitForAnOpRemoval(stdx::unique_lock<Latch>& lk, OperationContext* opCtx) {
int startOpRemovalsCount = _opRemovalsCount;
// Wait for an index build to finish.
@@ -122,7 +122,7 @@ void recordEndAndRemove(BgInfoMap& bgiMap, StringData key) {
}
}
-void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk, BgInfoMap* bgiMap, StringData key) {
+void awaitNoBgOps(stdx::unique_lock<Latch>& lk, BgInfoMap* bgiMap, StringData key) {
std::shared_ptr<BgInfo> bgInfo = mapFindWithDefault(*bgiMap, key, std::shared_ptr<BgInfo>());
if (!bgInfo)
return;
@@ -132,7 +132,7 @@ void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk, BgInfoMap* bgiMap, StringD
} // namespace
void BackgroundOperation::waitUntilAnIndexBuildFinishes(OperationContext* opCtx, StringData ns) {
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
std::shared_ptr<BgInfo> bgInfo = mapFindWithDefault(nsInProg, ns, std::shared_ptr<BgInfo>());
if (!bgInfo) {
// There are no index builds in progress on the collection, so no need to wait.
@@ -142,12 +142,12 @@ void BackgroundOperation::waitUntilAnIndexBuildFinishes(OperationContext* opCtx,
}
bool BackgroundOperation::inProgForDb(StringData db) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
return dbsInProg.find(db) != dbsInProg.end();
}
int BackgroundOperation::numInProgForDb(StringData db) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
std::shared_ptr<BgInfo> bgInfo = mapFindWithDefault(dbsInProg, db, std::shared_ptr<BgInfo>());
if (!bgInfo)
return 0;
@@ -155,7 +155,7 @@ int BackgroundOperation::numInProgForDb(StringData db) {
}
bool BackgroundOperation::inProgForNs(StringData ns) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
return nsInProg.find(ns) != nsInProg.end();
}
@@ -189,29 +189,29 @@ void BackgroundOperation::assertNoBgOpInProgForNs(StringData ns) {
}
void BackgroundOperation::awaitNoBgOpInProgForDb(StringData db) {
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
awaitNoBgOps(lk, &dbsInProg, db);
}
void BackgroundOperation::awaitNoBgOpInProgForNs(StringData ns) {
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
awaitNoBgOps(lk, &nsInProg, ns);
}
BackgroundOperation::BackgroundOperation(StringData ns) : _ns(ns) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
recordBeginAndInsert(dbsInProg, _ns.db());
recordBeginAndInsert(nsInProg, _ns.ns());
}
BackgroundOperation::~BackgroundOperation() {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
recordEndAndRemove(dbsInProg, _ns.db());
recordEndAndRemove(nsInProg, _ns.ns());
}
void BackgroundOperation::dump(std::ostream& ss) {
- stdx::lock_guard<stdx::mutex> lk(m);
+ stdx::lock_guard<Latch> lk(m);
if (nsInProg.size()) {
ss << "\n<b>Background Jobs in Progress</b>\n";
for (BgInfoMapIterator i = nsInProg.begin(); i != nsInProg.end(); ++i)
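
background.cpp keeps its file-scope mutex deliberately leaked ("static data for this file is never destroyed") and threads the held lock into helpers by reference; only the types change. A sketch of both habits under the new spelling; the gMutex/gCond/gCount names and the helpers are hypothetical.

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"

    namespace mongo {
    namespace {

    // Intentionally leaked so the statics outlive shutdown, as in the hunk above.
    Mutex& gMutex = *(new Mutex());
    stdx::condition_variable& gCond = *(new stdx::condition_variable());
    int gCount = 0;

    // Helpers receive the already-held lock by reference, like BgInfo::awaitNoBgOps.
    void awaitZero(stdx::unique_lock<Latch>& lk) {
        gCond.wait(lk, [] { return gCount == 0; });
    }

    }  // namespace

    void waitUntilIdle() {
        stdx::unique_lock<Latch> lk(gMutex);
        awaitZero(lk);
    }

    }  // namespace mongo
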
diff --git a/src/mongo/db/baton.cpp b/src/mongo/db/baton.cpp
index f648c3e13ed..937a8a61664 100644
--- a/src/mongo/db/baton.cpp
+++ b/src/mongo/db/baton.cpp
@@ -36,7 +36,7 @@
#include "mongo/db/baton.h"
#include "mongo/base/status.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
namespace mongo {
@@ -81,7 +81,7 @@ public:
}
_baton->schedule([this, anchor = shared_from_this()](Status status) {
- _runJobs(stdx::unique_lock(_mutex), status);
+ _runJobs(stdx::unique_lock<Latch>(_mutex), status);
});
}
@@ -114,14 +114,14 @@ public:
}
void detachImpl() noexcept override {
- stdx::unique_lock lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_isDead = true;
_runJobs(std::move(lk), kDetached);
}
private:
- void _runJobs(stdx::unique_lock<stdx::mutex> lk, Status status) {
+ void _runJobs(stdx::unique_lock<Latch> lk, Status status) {
if (status.isOK() && _isDead) {
status = kDetached;
}
@@ -140,7 +140,7 @@ private:
BatonHandle _baton;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SubBaton::_mutex");
bool _isDead = false;
std::vector<Task> _scheduled;
};
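
SubBaton above moves the held unique_lock into its drain routine so that routine controls when the latch is released, and the conversion also makes the template argument explicit (stdx::unique_lock<Latch> rather than relying on deduction). A sketch of that hand-off; Queue and its members are hypothetical.

    #include <functional>
    #include <utility>
    #include <vector>

    #include "mongo/platform/mutex.h"

    namespace mongo {

    class Queue {
    public:
        void shutdown() {
            stdx::unique_lock<Latch> lk(_mutex);  // explicit <Latch>
            _dead = true;
            _drain(std::move(lk));
        }

    private:
        void _drain(stdx::unique_lock<Latch> lk) {
            auto jobs = std::exchange(_jobs, {});
            lk.unlock();  // run queued work outside the latch
            for (auto& job : jobs)
                job();
        }

        Mutex _mutex = MONGO_MAKE_LATCH("Queue::_mutex");
        bool _dead = false;
        std::vector<std::function<void()>> _jobs;
    };

    }  // namespace mongo
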
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 0d86d5c1572..f46d636240f 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -42,13 +42,13 @@ namespace mongo {
//
void CappedInsertNotifier::notifyAll() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
++_version;
_notifier.notify_all();
}
void CappedInsertNotifier::waitUntil(uint64_t prevVersion, Date_t deadline) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (!_dead && prevVersion == _version) {
if (stdx::cv_status::timeout == _notifier.wait_until(lk, deadline.toSystemTimePoint())) {
return;
@@ -57,13 +57,13 @@ void CappedInsertNotifier::waitUntil(uint64_t prevVersion, Date_t deadline) cons
}
void CappedInsertNotifier::kill() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dead = true;
_notifier.notify_all();
}
bool CappedInsertNotifier::isDead() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _dead;
}
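
CappedInsertNotifier above pairs the latch with a version counter and a deadline-bounded wait. A hedged sketch of that pattern; Notifier is a hypothetical stand-in, and the Date_t/toSystemTimePoint usage mirrors the hunk above.

    #include <cstdint>

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"
    #include "mongo/util/time_support.h"

    namespace mongo {

    class Notifier {
    public:
        void notifyAll() {
            stdx::lock_guard<Latch> lk(_mutex);
            ++_version;
            _cond.notify_all();
        }

        // Returns once the version advances past 'prevVersion' or the deadline passes.
        void waitUntil(uint64_t prevVersion, Date_t deadline) const {
            stdx::unique_lock<Latch> lk(_mutex);
            while (prevVersion == _version) {
                if (stdx::cv_status::timeout ==
                    _cond.wait_until(lk, deadline.toSystemTimePoint()))
                    return;
            }
        }

    private:
        mutable Mutex _mutex = MONGO_MAKE_LATCH("Notifier::_mutex");
        mutable stdx::condition_variable _cond;
        uint64_t _version = 0;
    };

    }  // namespace mongo
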
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 4247900f0f4..ee12d90656d 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -51,8 +51,8 @@
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/snapshot.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/decorable.h"
namespace mongo {
@@ -136,7 +136,7 @@ private:
mutable stdx::condition_variable _notifier;
// Mutex used with '_notifier'. Protects access to '_version'.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("CappedInsertNotifier::_mutex");
// A counter, incremented on insertion of new data into the capped collection.
//
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index 0513e37b9cb..4e7dc82f9a5 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -73,7 +73,7 @@ CollectionCatalog::iterator::iterator(StringData dbName,
: _dbName(dbName), _genNum(genNum), _catalog(&catalog) {
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
- stdx::lock_guard<stdx::mutex> lock(_catalog->_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
_mapIter = _catalog->_orderedCollections.lower_bound(std::make_pair(_dbName, minUuid));
if (_mapIter != _catalog->_orderedCollections.end() && _mapIter->first.first == _dbName) {
@@ -86,7 +86,7 @@ CollectionCatalog::iterator::iterator(
: _mapIter(mapIter) {}
const CollectionCatalog::iterator::value_type CollectionCatalog::iterator::operator*() {
- stdx::lock_guard<stdx::mutex> lock(_catalog->_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
_repositionIfNeeded();
if (_exhausted()) {
return _nullCollection;
@@ -100,7 +100,7 @@ boost::optional<CollectionUUID> CollectionCatalog::iterator::uuid() {
}
CollectionCatalog::iterator CollectionCatalog::iterator::operator++() {
- stdx::lock_guard<stdx::mutex> lock(_catalog->_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
if (!_repositionIfNeeded()) {
_mapIter++; // If the position was not updated, increment iterator to next element.
@@ -125,7 +125,7 @@ CollectionCatalog::iterator CollectionCatalog::iterator::operator++(int) {
}
bool CollectionCatalog::iterator::operator==(const iterator& other) {
- stdx::lock_guard<stdx::mutex> lock(_catalog->_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalog->_catalogLock);
if (other._mapIter == _catalog->_orderedCollections.end()) {
return _uuid == boost::none;
@@ -183,7 +183,7 @@ void CollectionCatalog::setCollectionNamespace(OperationContext* opCtx,
// manager locks) are held. The purpose of this function is to ensure that we write to the
// Collection's namespace string under '_catalogLock'.
invariant(coll);
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
coll->setNs(toCollection);
@@ -197,7 +197,7 @@ void CollectionCatalog::setCollectionNamespace(OperationContext* opCtx,
addResource(newRid, toCollection.ns());
opCtx->recoveryUnit()->onRollback([this, coll, fromCollection, toCollection] {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
coll->setNs(std::move(fromCollection));
_collections[fromCollection] = _collections[toCollection];
@@ -219,7 +219,7 @@ void CollectionCatalog::onCloseDatabase(OperationContext* opCtx, std::string dbN
void CollectionCatalog::onCloseCatalog(OperationContext* opCtx) {
invariant(opCtx->lockState()->isW());
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
invariant(!_shadowCatalog);
_shadowCatalog.emplace();
for (auto& entry : _catalog)
@@ -228,13 +228,13 @@ void CollectionCatalog::onCloseCatalog(OperationContext* opCtx) {
void CollectionCatalog::onOpenCatalog(OperationContext* opCtx) {
invariant(opCtx->lockState()->isW());
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
invariant(_shadowCatalog);
_shadowCatalog.reset();
}
Collection* CollectionCatalog::lookupCollectionByUUID(CollectionUUID uuid) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
return _lookupCollectionByUUID(lock, uuid);
}
@@ -244,13 +244,13 @@ Collection* CollectionCatalog::_lookupCollectionByUUID(WithLock, CollectionUUID
}
Collection* CollectionCatalog::lookupCollectionByNamespace(const NamespaceString& nss) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto it = _collections.find(nss);
return it == _collections.end() ? nullptr : it->second;
}
boost::optional<NamespaceString> CollectionCatalog::lookupNSSByUUID(CollectionUUID uuid) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto foundIt = _catalog.find(uuid);
if (foundIt != _catalog.end()) {
NamespaceString ns = foundIt->second->ns();
@@ -271,7 +271,7 @@ boost::optional<NamespaceString> CollectionCatalog::lookupNSSByUUID(CollectionUU
boost::optional<CollectionUUID> CollectionCatalog::lookupUUIDByNSS(
const NamespaceString& nss) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
auto it = _orderedCollections.lower_bound(std::make_pair(nss.db().toString(), minUuid));
@@ -312,7 +312,7 @@ bool CollectionCatalog::checkIfCollectionSatisfiable(CollectionUUID uuid,
CollectionInfoFn predicate) const {
invariant(predicate);
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto collection = _lookupCollectionByUUID(lock, uuid);
if (!collection) {
@@ -324,7 +324,7 @@ bool CollectionCatalog::checkIfCollectionSatisfiable(CollectionUUID uuid,
std::vector<CollectionUUID> CollectionCatalog::getAllCollectionUUIDsFromDb(
StringData dbName) const {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
auto it = _orderedCollections.lower_bound(std::make_pair(dbName.toString(), minUuid));
@@ -340,7 +340,7 @@ std::vector<NamespaceString> CollectionCatalog::getAllCollectionNamesFromDb(
OperationContext* opCtx, StringData dbName) const {
invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_S));
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue();
std::vector<NamespaceString> ret;
@@ -354,7 +354,7 @@ std::vector<NamespaceString> CollectionCatalog::getAllCollectionNamesFromDb(
std::vector<std::string> CollectionCatalog::getAllDbNames() const {
std::vector<std::string> ret;
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
auto maxUuid = UUID::parse("FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF").getValue();
auto iter = _orderedCollections.upper_bound(std::make_pair("", maxUuid));
while (iter != _orderedCollections.end()) {
@@ -366,7 +366,7 @@ std::vector<std::string> CollectionCatalog::getAllDbNames() const {
}
void CollectionCatalog::registerCollection(CollectionUUID uuid, std::unique_ptr<Collection> coll) {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
LOG(1) << "Registering collection " << coll->ns() << " with UUID " << uuid;
@@ -391,7 +391,7 @@ void CollectionCatalog::registerCollection(CollectionUUID uuid, std::unique_ptr<
}
std::unique_ptr<Collection> CollectionCatalog::deregisterCollection(CollectionUUID uuid) {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
invariant(_catalog.find(uuid) != _catalog.end());
@@ -426,7 +426,7 @@ std::unique_ptr<RecoveryUnit::Change> CollectionCatalog::makeFinishDropCollectio
}
void CollectionCatalog::deregisterAllCollections() {
- stdx::lock_guard<stdx::mutex> lock(_catalogLock);
+ stdx::lock_guard<Latch> lock(_catalogLock);
LOG(0) << "Deregistering all the collections";
for (auto& entry : _catalog) {
@@ -444,7 +444,7 @@ void CollectionCatalog::deregisterAllCollections() {
_orderedCollections.clear();
_catalog.clear();
- stdx::lock_guard<stdx::mutex> resourceLock(_resourceLock);
+ stdx::lock_guard<Latch> resourceLock(_resourceLock);
_resourceInformation.clear();
_generationNumber++;
@@ -460,7 +460,7 @@ CollectionCatalog::iterator CollectionCatalog::end() const {
boost::optional<std::string> CollectionCatalog::lookupResourceName(const ResourceId& rid) {
invariant(rid.getType() == RESOURCE_DATABASE || rid.getType() == RESOURCE_COLLECTION);
- stdx::lock_guard<stdx::mutex> lock(_resourceLock);
+ stdx::lock_guard<Latch> lock(_resourceLock);
auto search = _resourceInformation.find(rid);
if (search == _resourceInformation.end()) {
@@ -480,7 +480,7 @@ boost::optional<std::string> CollectionCatalog::lookupResourceName(const Resourc
void CollectionCatalog::removeResource(const ResourceId& rid, const std::string& entry) {
invariant(rid.getType() == RESOURCE_DATABASE || rid.getType() == RESOURCE_COLLECTION);
- stdx::lock_guard<stdx::mutex> lock(_resourceLock);
+ stdx::lock_guard<Latch> lock(_resourceLock);
auto search = _resourceInformation.find(rid);
if (search == _resourceInformation.end()) {
@@ -498,7 +498,7 @@ void CollectionCatalog::removeResource(const ResourceId& rid, const std::string&
void CollectionCatalog::addResource(const ResourceId& rid, const std::string& entry) {
invariant(rid.getType() == RESOURCE_DATABASE || rid.getType() == RESOURCE_COLLECTION);
- stdx::lock_guard<stdx::mutex> lock(_resourceLock);
+ stdx::lock_guard<Latch> lock(_resourceLock);
auto search = _resourceInformation.find(rid);
if (search == _resourceInformation.end()) {
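
CollectionCatalog's lookups above take the latch in the public method and then hand the guard to a private helper that accepts WithLock, so callers must hold a guard to reach the helper. A sketch of that idiom under the new types; Registry and its members are hypothetical, WithLock comes from mongo/util/concurrency/with_lock.h as included by the tracker header further below, and passing a lock_guard<Latch> where WithLock is expected follows the lookupCollectionByUUID hunk above.

    #include <map>
    #include <string>

    #include "mongo/platform/mutex.h"
    #include "mongo/util/concurrency/with_lock.h"

    namespace mongo {

    class Registry {
    public:
        int lookup(const std::string& key) const {
            stdx::lock_guard<Latch> lk(_mutex);
            return _lookup(lk, key);  // the guard converts to WithLock
        }

    private:
        int _lookup(WithLock, const std::string& key) const {
            auto it = _values.find(key);
            return it == _values.end() ? 0 : it->second;
        }

        mutable Mutex _mutex = MONGO_MAKE_LATCH("Registry::_mutex");
        std::map<std::string, int> _values;
    };

    }  // namespace mongo
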
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index e54c6141a13..66a12a92ae5 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -248,8 +248,8 @@ private:
Collection* _lookupCollectionByUUID(WithLock, CollectionUUID uuid) const;
const std::vector<CollectionUUID>& _getOrdering_inlock(const StringData& db,
- const stdx::lock_guard<stdx::mutex>&);
- mutable mongo::stdx::mutex _catalogLock;
+ const stdx::lock_guard<Latch>&);
+ mutable mongo::Mutex _catalogLock;
/**
* When present, indicates that the catalog is in closed state, and contains a map from UUID
@@ -273,7 +273,7 @@ private:
uint64_t _generationNumber;
// Protects _resourceInformation.
- mutable stdx::mutex _resourceLock;
+ mutable Mutex _resourceLock = MONGO_MAKE_LATCH("CollectionCatalog::_resourceLock");
// Mapping from ResourceId to a set of strings that contains collection and database namespaces.
std::map<ResourceId, std::set<std::string>> _resourceInformation;
diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp
index 2720ffd09c7..56493b64610 100644
--- a/src/mongo/db/catalog/index_builds_manager.cpp
+++ b/src/mongo/db/catalog/index_builds_manager.cpp
@@ -253,7 +253,7 @@ Status IndexBuildsManager::commitIndexBuild(OperationContext* opCtx,
}
bool IndexBuildsManager::abortIndexBuild(const UUID& buildUUID, const std::string& reason) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto builderIt = _builders.find(buildUUID);
if (builderIt == _builders.end()) {
@@ -270,7 +270,7 @@ bool IndexBuildsManager::abortIndexBuild(const UUID& buildUUID, const std::strin
bool IndexBuildsManager::interruptIndexBuild(OperationContext* opCtx,
const UUID& buildUUID,
const std::string& reason) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto builderIt = _builders.find(buildUUID);
if (builderIt == _builders.end()) {
@@ -305,14 +305,14 @@ void IndexBuildsManager::verifyNoIndexBuilds_forTestOnly() {
}
void IndexBuildsManager::_registerIndexBuild(UUID buildUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
std::shared_ptr<MultiIndexBlock> mib = std::make_shared<MultiIndexBlock>();
invariant(_builders.insert(std::make_pair(buildUUID, mib)).second);
}
void IndexBuildsManager::_unregisterIndexBuild(const UUID& buildUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto builderIt = _builders.find(buildUUID);
invariant(builderIt != _builders.end());
@@ -320,7 +320,7 @@ void IndexBuildsManager::_unregisterIndexBuild(const UUID& buildUUID) {
}
std::shared_ptr<MultiIndexBlock> IndexBuildsManager::_getBuilder(const UUID& buildUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto builderIt = _builders.find(buildUUID);
invariant(builderIt != _builders.end());
return builderIt->second;
diff --git a/src/mongo/db/catalog/index_builds_manager.h b/src/mongo/db/catalog/index_builds_manager.h
index 21678546061..55f2fe73211 100644
--- a/src/mongo/db/catalog/index_builds_manager.h
+++ b/src/mongo/db/catalog/index_builds_manager.h
@@ -36,7 +36,7 @@
#include "mongo/db/catalog/multi_index_block.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -205,7 +205,7 @@ private:
std::shared_ptr<MultiIndexBlock> _getBuilder(const UUID& buildUUID);
// Protects the map data structures below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("IndexBuildsManager::_mutex");
// Map of index builders by build UUID. Allows access to the builders so that actions can be
// taken on and information passed to and from index builds.
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index f4d55c60880..073b4d1b8dc 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -40,7 +40,7 @@
#include "mongo/db/record_id.h"
#include "mongo/db/storage/kv/kv_prefix.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/debug_util.h"
namespace mongo {
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index 69da31c0ae2..0e4796ca6c4 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -74,7 +74,7 @@ IndexCatalogEntryImpl::IndexCatalogEntryImpl(OperationContext* const opCtx,
_isReady = _catalogIsReady(opCtx);
{
- stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
+ stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
_isMultikey.store(_catalogIsMultikey(opCtx, &_indexMultikeyPaths));
_indexTracksPathLevelMultikeyInfo = !_indexMultikeyPaths.empty();
}
@@ -148,7 +148,7 @@ bool IndexCatalogEntryImpl::isMultikey() const {
}
MultikeyPaths IndexCatalogEntryImpl::getMultikeyPaths(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
+ stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
return _indexMultikeyPaths;
}
@@ -173,7 +173,7 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
}
if (_indexTracksPathLevelMultikeyInfo) {
- stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
+ stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
invariant(multikeyPaths.size() == _indexMultikeyPaths.size());
bool newPathIsMultikey = false;
@@ -241,7 +241,7 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx,
_isMultikey.store(true);
if (_indexTracksPathLevelMultikeyInfo) {
- stdx::lock_guard<stdx::mutex> lk(_indexMultikeyPathsMutex);
+ stdx::lock_guard<Latch> lk(_indexMultikeyPathsMutex);
for (size_t i = 0; i < multikeyPaths.size(); ++i) {
_indexMultikeyPaths[i].insert(multikeyPaths[i].begin(), multikeyPaths[i].end());
}
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.h b/src/mongo/db/catalog/index_catalog_entry_impl.h
index df7f053537f..5ec961f65af 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.h
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.h
@@ -41,7 +41,7 @@
#include "mongo/db/record_id.h"
#include "mongo/db/storage/kv/kv_prefix.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -223,7 +223,8 @@ private:
// Controls concurrent access to '_indexMultikeyPaths'. We acquire this mutex rather than the
// RESOURCE_METADATA lock as a performance optimization so that it is cheaper to detect whether
// there is actually any path-level multikey information to update or not.
- mutable stdx::mutex _indexMultikeyPathsMutex;
+ mutable Mutex _indexMultikeyPathsMutex =
+ MONGO_MAKE_LATCH("IndexCatalogEntryImpl::_indexMultikeyPathsMutex");
// Non-empty only if '_indexTracksPathLevelMultikeyInfo' is true.
//
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index ee75f9ed64e..10c4fe6d485 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -884,18 +884,18 @@ MultiIndexBlock::State MultiIndexBlock::getState_forTest() const {
}
MultiIndexBlock::State MultiIndexBlock::_getState() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _state;
}
void MultiIndexBlock::_setState(State newState) {
invariant(State::kAborted != newState);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_state = newState;
}
void MultiIndexBlock::_setStateToAbortedIfNotCommitted(StringData reason) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (State::kCommitted == _state) {
return;
}
diff --git a/src/mongo/db/catalog/multi_index_block.h b/src/mongo/db/catalog/multi_index_block.h
index 69aa9cd4a34..df940d2121e 100644
--- a/src/mongo/db/catalog/multi_index_block.h
+++ b/src/mongo/db/catalog/multi_index_block.h
@@ -46,7 +46,7 @@
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/record_id.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/fail_point_service.h"
namespace mongo {
@@ -344,7 +344,7 @@ private:
bool _constraintsChecked = false;
// Protects member variables of this class declared below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MultiIndexBlock::_mutex");
State _state = State::kUninitialized;
std::string _abortReason;
diff --git a/src/mongo/db/catalog/util/partitioned.h b/src/mongo/db/catalog/util/partitioned.h
index c449932f653..e6966e30ce3 100644
--- a/src/mongo/db/catalog/util/partitioned.h
+++ b/src/mongo/db/catalog/util/partitioned.h
@@ -39,7 +39,7 @@
#include <boost/align/aligned_allocator.hpp>
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/with_alignment.h"
diff --git a/src/mongo/db/collection_index_builds_tracker.cpp b/src/mongo/db/collection_index_builds_tracker.cpp
index 17f62d1a644..40b4f292c27 100644
--- a/src/mongo/db/collection_index_builds_tracker.cpp
+++ b/src/mongo/db/collection_index_builds_tracker.cpp
@@ -104,8 +104,7 @@ int CollectionIndexBuildsTracker::getNumberOfIndexBuilds(WithLock) const {
return _buildStateByBuildUUID.size();
}
-void CollectionIndexBuildsTracker::waitUntilNoIndexBuildsRemain(
- stdx::unique_lock<stdx::mutex>& lk) {
+void CollectionIndexBuildsTracker::waitUntilNoIndexBuildsRemain(stdx::unique_lock<Latch>& lk) {
_noIndexBuildsRemainCondVar.wait(lk, [&] {
if (_buildStateByBuildUUID.empty()) {
return true;
diff --git a/src/mongo/db/collection_index_builds_tracker.h b/src/mongo/db/collection_index_builds_tracker.h
index 5a4ab1eb5f9..8d43dbd96bc 100644
--- a/src/mongo/db/collection_index_builds_tracker.h
+++ b/src/mongo/db/collection_index_builds_tracker.h
@@ -33,7 +33,7 @@
#include <string>
#include "mongo/db/repl_index_build_state.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/uuid.h"
@@ -96,7 +96,7 @@ public:
/**
* Returns when no index builds remain on this collection.
*/
- void waitUntilNoIndexBuildsRemain(stdx::unique_lock<stdx::mutex>& lk);
+ void waitUntilNoIndexBuildsRemain(stdx::unique_lock<Latch>& lk);
private:
// Maps of index build states on the collection, by build UUID and index name.
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index a8ac6e5c022..751ac181371 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -50,7 +50,7 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/transaction_participant.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/md5.hpp"
#include "mongo/util/net/socket_utils.h"
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 66535156467..45fde032004 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -49,7 +49,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/storage/backup_cursor_hooks.h"
#include "mongo/db/storage/storage_engine.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/background.h"
#include "mongo/util/exit.h"
@@ -95,7 +95,7 @@ public:
virtual ~FSyncCommand() {
// The FSyncLockThread is owned by the FSyncCommand and accesses FsyncCommand state. It must
// be shut down prior to FSyncCommand destruction.
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
if (_lockCount > 0) {
_lockCount = 0;
releaseFsyncLockSyncCV.notify_one();
@@ -166,7 +166,7 @@ public:
Status status = Status::OK();
{
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
threadStatus = Status::OK();
threadStarted = false;
_lockThread = std::make_unique<FSyncLockThread>(allowFsyncFailure);
@@ -199,13 +199,13 @@ public:
// Returns whether we are currently fsyncLocked. For use by callers not holding lockStateMutex.
bool fsyncLocked() {
- stdx::unique_lock<stdx::mutex> lkFsyncLocked(_fsyncLockedMutex);
+ stdx::unique_lock<Latch> lkFsyncLocked(_fsyncLockedMutex);
return _fsyncLocked;
}
// For callers not already holding 'lockStateMutex'.
int64_t getLockCount() {
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
return getLockCount_inLock();
}
@@ -215,17 +215,17 @@ public:
}
void releaseLock() {
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
releaseLock_inLock(lk);
}
- void releaseLock_inLock(stdx::unique_lock<stdx::mutex>& lk) {
+ void releaseLock_inLock(stdx::unique_lock<Latch>& lk) {
invariant(_lockCount >= 1);
_lockCount--;
if (_lockCount == 0) {
{
- stdx::unique_lock<stdx::mutex> lkFsyncLocked(_fsyncLockedMutex);
+ stdx::unique_lock<Latch> lkFsyncLocked(_fsyncLockedMutex);
_fsyncLocked = false;
}
releaseFsyncLockSyncCV.notify_one();
@@ -237,7 +237,7 @@ public:
// Allows for control of lock state change between the fsyncLock and fsyncUnlock commands and
// the FSyncLockThread that maintains the global read lock.
- stdx::mutex lockStateMutex;
+ Mutex lockStateMutex = MONGO_MAKE_LATCH("FSyncCommand::lockStateMutex");
stdx::condition_variable acquireFsyncLockSyncCV;
stdx::condition_variable releaseFsyncLockSyncCV;
@@ -248,11 +248,11 @@ public:
private:
void acquireLock() {
- stdx::unique_lock<stdx::mutex> lk(lockStateMutex);
+ stdx::unique_lock<Latch> lk(lockStateMutex);
_lockCount++;
if (_lockCount == 1) {
- stdx::unique_lock<stdx::mutex> lkFsyncLocked(_fsyncLockedMutex);
+ stdx::unique_lock<Latch> lkFsyncLocked(_fsyncLockedMutex);
_fsyncLocked = true;
}
}
@@ -263,7 +263,7 @@ private:
// number is decremented to 0. May only be accessed while 'lockStateMutex' is held.
int64_t _lockCount = 0;
- stdx::mutex _fsyncLockedMutex;
+ Mutex _fsyncLockedMutex = MONGO_MAKE_LATCH("FSyncCommand::_fsyncLockedMutex");
bool _fsyncLocked = false;
} fsyncCmd;
@@ -302,7 +302,7 @@ public:
Lock::ExclusiveLock lk(opCtx->lockState(), commandMutex);
- stdx::unique_lock<stdx::mutex> stateLock(fsyncCmd.lockStateMutex);
+ stdx::unique_lock<Latch> stateLock(fsyncCmd.lockStateMutex);
auto lockCount = fsyncCmd.getLockCount_inLock();
if (lockCount == 0) {
@@ -340,7 +340,7 @@ bool FSyncLockThread::_shutdownTaskRegistered = false;
void FSyncLockThread::run() {
ThreadClient tc("fsyncLockWorker", getGlobalServiceContext());
stdx::lock_guard<SimpleMutex> lkf(filesLockedFsync);
- stdx::unique_lock<stdx::mutex> lk(fsyncCmd.lockStateMutex);
+ stdx::unique_lock<Latch> lk(fsyncCmd.lockStateMutex);
invariant(fsyncCmd.getLockCount_inLock() == 1);
@@ -357,7 +357,7 @@ void FSyncLockThread::run() {
if (!_shutdownTaskRegistered) {
_shutdownTaskRegistered = true;
registerShutdownTask([&] {
- stdx::unique_lock<stdx::mutex> stateLock(fsyncCmd.lockStateMutex);
+ stdx::unique_lock<Latch> stateLock(fsyncCmd.lockStateMutex);
if (fsyncCmd.getLockCount_inLock() > 0) {
warning() << "Interrupting fsync because the server is shutting down.";
while (fsyncCmd.getLockCount_inLock()) {
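A note on the pattern applied throughout this commit, visible in the fsync hunks above: each stdx::mutex member becomes a Mutex initialized with MONGO_MAKE_LATCH("<owner>::<member>"), lock guards name Latch as their template argument, and the stdx::condition_variable calls are unchanged. The following is a minimal hypothetical sketch of that shape; the Counter class and its members are invented for illustration, and only the Mutex/Latch/MONGO_MAKE_LATCH usage mirrors the changes above.

#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"

namespace mongo {

class Counter {
public:
    void increment() {
        stdx::lock_guard<Latch> lk(_mutex);  // was stdx::lock_guard<stdx::mutex>
        ++_value;
        _cv.notify_one();
    }

    void waitUntilAtLeast(int target) {
        stdx::unique_lock<Latch> lk(_mutex);  // was stdx::unique_lock<stdx::mutex>
        _cv.wait(lk, [&] { return _value >= target; });
    }

private:
    // A named latch replaces the anonymous stdx::mutex member.
    Mutex _mutex = MONGO_MAKE_LATCH("Counter::_mutex");
    stdx::condition_variable _cv;
    int _value = 0;
};

}  // namespace mongo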
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 3319caa2fcf..80045f62d90 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -64,6 +64,7 @@
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/durable_catalog.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/parallel.h"
#include "mongo/s/client/shard_connection.h"
@@ -72,7 +73,6 @@
#include "mongo/s/shard_key_pattern.h"
#include "mongo/s/stale_exception.h"
#include "mongo/scripting/engine.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index 4f2306ad1b2..625b3aaee76 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -185,7 +185,7 @@ Status setLogComponentVerbosity(const BSONObj& bsonSettings) {
}
// for automationServiceDescription
-stdx::mutex autoServiceDescriptorMutex;
+Mutex autoServiceDescriptorMutex;
std::string autoServiceDescriptorValue;
} // namespace
@@ -436,7 +436,7 @@ Status LogComponentVerbosityServerParameter::setFromString(const std::string& st
void AutomationServiceDescriptorServerParameter::append(OperationContext*,
BSONObjBuilder& builder,
const std::string& name) {
- const stdx::lock_guard<stdx::mutex> lock(autoServiceDescriptorMutex);
+ const stdx::lock_guard<Latch> lock(autoServiceDescriptorMutex);
if (!autoServiceDescriptorValue.empty()) {
builder.append(name, autoServiceDescriptorValue);
}
@@ -458,7 +458,7 @@ Status AutomationServiceDescriptorServerParameter::setFromString(const std::stri
<< " must be no more than " << kMaxSize << " bytes"};
{
- const stdx::lock_guard<stdx::mutex> lock(autoServiceDescriptorMutex);
+ const stdx::lock_guard<Latch> lock(autoServiceDescriptorMutex);
autoServiceDescriptorValue = str;
}
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index ae199964060..90bec146f42 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -67,9 +67,9 @@
#include "mongo/db/ops/write_ops.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/write_ops/batched_command_response.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/util/icu.h"
#include "mongo/util/log.h"
@@ -558,7 +558,7 @@ Status writeAuthSchemaVersionIfNeeded(OperationContext* opCtx,
return status;
}
-auto getUMCMutex = ServiceContext::declareDecoration<stdx::mutex>();
+auto getUMCMutex = ServiceContext::declareDecoration<Mutex>();
class AuthzLockGuard {
AuthzLockGuard(AuthzLockGuard&) = delete;
@@ -590,7 +590,7 @@ public:
private:
OperationContext* _opCtx;
AuthorizationManager* _authzManager;
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
InvalidationMode _mode;
OID _cacheGeneration;
};
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index f90a3ace4fe..482baff8b74 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -50,7 +50,7 @@ MONGO_FAIL_POINT_DEFINE(validateCmdCollectionNotValid);
namespace {
// Protects the state below.
-stdx::mutex _validationMutex;
+Mutex _validationMutex;
// Holds the set of full `databaseName.collectionName` namespace strings in progress. Validation
// commands register themselves in this data structure so that subsequent commands on the same
@@ -140,7 +140,7 @@ public:
// Only one validation per collection can be in progress at a time; the rest wait.
{
- stdx::unique_lock<stdx::mutex> lock(_validationMutex);
+ stdx::unique_lock<Latch> lock(_validationMutex);
try {
while (_validationsInProgress.find(nss.ns()) != _validationsInProgress.end()) {
opCtx->waitForConditionOrInterrupt(_validationNotifier, lock);
@@ -157,7 +157,7 @@ public:
}
ON_BLOCK_EXIT([&] {
- stdx::lock_guard<stdx::mutex> lock(_validationMutex);
+ stdx::lock_guard<Latch> lock(_validationMutex);
_validationsInProgress.erase(nss.ns());
_validationNotifier.notify_all();
});
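Where a wait must be interruptible, as in the validate command above, the condition variable and the unique_lock<Latch> are handed to OperationContext::waitForConditionOrInterrupt rather than waited on directly. A hypothetical reduced sketch of that shape (the DoneSignal class is invented; the OperationContext call is the one used above):

#include "mongo/db/operation_context.h"
#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"

namespace mongo {

class DoneSignal {
public:
    void markDone() {
        stdx::lock_guard<Latch> lk(_mutex);
        _done = true;
        _cv.notify_all();
    }

    // Throws if the operation is interrupted or killed while waiting.
    void waitUntilDone(OperationContext* opCtx) {
        stdx::unique_lock<Latch> lk(_mutex);
        while (!_done) {
            opCtx->waitForConditionOrInterrupt(_cv, lk);
        }
    }

private:
    Mutex _mutex = MONGO_MAKE_LATCH("DoneSignal::_mutex");
    stdx::condition_variable _cv;
    bool _done = false;
};

}  // namespace mongo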
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 5daed970104..28d5017f6ae 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -41,7 +41,7 @@
#include "mongo/db/concurrency/flow_control_ticketholder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/stacktrace.h"
@@ -77,7 +77,7 @@ public:
}
static std::string nameForId(ResourceId resourceId) {
- stdx::lock_guard<stdx::mutex> lk(resourceIdFactory->labelsMutex);
+ stdx::lock_guard<Latch> lk(resourceIdFactory->labelsMutex);
return resourceIdFactory->labels.at(resourceId.getHashId());
}
@@ -93,7 +93,7 @@ public:
private:
ResourceId _newResourceIdForMutex(std::string resourceLabel) {
- stdx::lock_guard<stdx::mutex> lk(labelsMutex);
+ stdx::lock_guard<Latch> lk(labelsMutex);
invariant(nextId == labels.size());
labels.push_back(std::move(resourceLabel));
@@ -104,7 +104,7 @@ private:
std::uint64_t nextId = 0;
std::vector<std::string> labels;
- stdx::mutex labelsMutex;
+ Mutex labelsMutex = MONGO_MAKE_LATCH("ResourceIdFactory::labelsMutex");
};
ResourceIdFactory* ResourceIdFactory::resourceIdFactory;
diff --git a/src/mongo/db/concurrency/d_concurrency_bm.cpp b/src/mongo/db/concurrency/d_concurrency_bm.cpp
index 95c6771badf..a13df7a3ea4 100644
--- a/src/mongo/db/concurrency/d_concurrency_bm.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_bm.cpp
@@ -34,7 +34,7 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/lock_manager_test_help.h"
#include "mongo/db/storage/recovery_unit_noop.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -67,10 +67,10 @@ protected:
};
BENCHMARK_DEFINE_F(DConcurrencyTest, BM_StdMutex)(benchmark::State& state) {
- static stdx::mutex mtx;
+ static auto mtx = MONGO_MAKE_LATCH();
for (auto keepRunning : state) {
- stdx::unique_lock<stdx::mutex> lk(mtx);
+ stdx::unique_lock<Latch> lk(mtx);
}
}
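For local or static mutexes, as in the benchmark above, the macro's no-argument form is used and the result is held by auto. A hypothetical sketch under the same assumptions (the countCalls function is invented):

#include "mongo/platform/mutex.h"

namespace mongo {

int countCalls() {
    static auto mtx = MONGO_MAKE_LATCH();  // unnamed latch for a function-local mutex
    static int calls = 0;
    stdx::unique_lock<Latch> lk(mtx);
    return ++calls;
}

}  // namespace mongo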
diff --git a/src/mongo/db/concurrency/deferred_writer.cpp b/src/mongo/db/concurrency/deferred_writer.cpp
index 6f7c7df6ea5..4bedbe1995c 100644
--- a/src/mongo/db/concurrency/deferred_writer.cpp
+++ b/src/mongo/db/concurrency/deferred_writer.cpp
@@ -118,7 +118,7 @@ void DeferredWriter::_worker(InsertStatement stmt) {
return Status::OK();
});
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_numBytes -= stmt.doc.objsize();
@@ -166,7 +166,7 @@ bool DeferredWriter::insertDocument(BSONObj obj) {
// We can't insert documents if we haven't been started up.
invariant(_pool);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Check if we're allowed to insert this object.
if (_numBytes + obj.objsize() >= _maxNumBytes) {
diff --git a/src/mongo/db/concurrency/deferred_writer.h b/src/mongo/db/concurrency/deferred_writer.h
index d573f497851..0ac8238fa8d 100644
--- a/src/mongo/db/concurrency/deferred_writer.h
+++ b/src/mongo/db/concurrency/deferred_writer.h
@@ -32,7 +32,7 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -158,7 +158,7 @@ private:
/**
* Guards all non-const, non-thread-safe members.
*/
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DeferredWriter::_mutex");
/**
* The number of bytes currently in the in-memory buffer.
diff --git a/src/mongo/db/concurrency/flow_control_ticketholder.cpp b/src/mongo/db/concurrency/flow_control_ticketholder.cpp
index 8055a7597bd..6bb95797502 100644
--- a/src/mongo/db/concurrency/flow_control_ticketholder.cpp
+++ b/src/mongo/db/concurrency/flow_control_ticketholder.cpp
@@ -80,7 +80,7 @@ void FlowControlTicketholder::set(ServiceContext* service,
void FlowControlTicketholder::refreshTo(int numTickets) {
invariant(numTickets >= 0);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
LOG(4) << "Refreshing tickets. Before: " << _tickets << " Now: " << numTickets;
_tickets = numTickets;
_cv.notify_all();
@@ -88,7 +88,7 @@ void FlowControlTicketholder::refreshTo(int numTickets) {
void FlowControlTicketholder::getTicket(OperationContext* opCtx,
FlowControlTicketholder::CurOp* stats) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown) {
return;
}
@@ -135,7 +135,7 @@ void FlowControlTicketholder::getTicket(OperationContext* opCtx,
// Should only be called once, during shutdown.
void FlowControlTicketholder::setInShutdown() {
LOG(0) << "Stopping further Flow Control ticket acquisitions.";
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
_cv.notify_all();
}
diff --git a/src/mongo/db/concurrency/flow_control_ticketholder.h b/src/mongo/db/concurrency/flow_control_ticketholder.h
index 599779ddd15..39413477937 100644
--- a/src/mongo/db/concurrency/flow_control_ticketholder.h
+++ b/src/mongo/db/concurrency/flow_control_ticketholder.h
@@ -31,8 +31,8 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -93,7 +93,7 @@ private:
// Use an int64_t as this is serialized to bson which does not support unsigned 64-bit numbers.
AtomicWord<std::int64_t> _totalTimeAcquiringMicros;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FlowControlTicketHolder::_mutex");
stdx::condition_variable _cv;
int _tickets;
diff --git a/src/mongo/db/concurrency/lock_manager.h b/src/mongo/db/concurrency/lock_manager.h
index 50b2116d953..e8cbfd39054 100644
--- a/src/mongo/db/concurrency/lock_manager.h
+++ b/src/mongo/db/concurrency/lock_manager.h
@@ -40,8 +40,8 @@
#include "mongo/db/concurrency/lock_request_list.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/platform/compiler.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/mutex.h"
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index fd840437c89..5f5171e6129 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -220,7 +220,7 @@ void CondVarLockGrantNotification::clear() {
}
LockResult CondVarLockGrantNotification::wait(Milliseconds timeout) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return _cond.wait_for(
lock, timeout.toSystemDuration(), [this] { return _result != LOCK_INVALID; })
? _result
@@ -229,7 +229,7 @@ LockResult CondVarLockGrantNotification::wait(Milliseconds timeout) {
LockResult CondVarLockGrantNotification::wait(OperationContext* opCtx, Milliseconds timeout) {
invariant(opCtx);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (opCtx->waitForConditionOrInterruptFor(
_cond, lock, timeout, [this] { return _result != LOCK_INVALID; })) {
// Because waitForConditionOrInterruptFor evaluates the predicate before checking for
@@ -243,7 +243,7 @@ LockResult CondVarLockGrantNotification::wait(OperationContext* opCtx, Milliseco
}
void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
invariant(_result == LOCK_INVALID);
_result = result;
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index 9371ba0ae13..9994e25f7be 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -75,7 +75,7 @@ private:
virtual void notify(ResourceId resId, LockResult result);
// These two go together to implement the conditional variable pattern.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CondVarLockGrantNotification::_mutex");
stdx::condition_variable _cond;
// Result from the last call to notify
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index cef1fcd7449..945dbfc9bdb 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -305,12 +305,11 @@ void CurOp::reportCurrentOpForClient(OperationContext* opCtx,
CurOp::get(clientOpCtx)->reportState(infoBuilder, truncateOps);
}
- std::shared_ptr<DiagnosticInfo> diagnostic = DiagnosticInfo::Diagnostic::get(client);
- if (diagnostic && backtraceMode) {
+ if (auto diagnostic = DiagnosticInfo::get(*client)) {
BSONObjBuilder waitingForLatchBuilder(infoBuilder->subobjStart("waitingForLatch"));
waitingForLatchBuilder.append("timestamp", diagnostic->getTimestamp());
waitingForLatchBuilder.append("captureName", diagnostic->getCaptureName());
- {
+ if (backtraceMode) {
BSONArrayBuilder backtraceBuilder(waitingForLatchBuilder.subarrayStart("backtrace"));
for (const auto& frame : diagnostic->makeStackTrace().frames) {
BSONObjBuilder backtraceObj(backtraceBuilder.subobjStart());
diff --git a/src/mongo/db/database_index_builds_tracker.cpp b/src/mongo/db/database_index_builds_tracker.cpp
index 03097a4844a..4f7bb13a472 100644
--- a/src/mongo/db/database_index_builds_tracker.cpp
+++ b/src/mongo/db/database_index_builds_tracker.cpp
@@ -74,7 +74,7 @@ int DatabaseIndexBuildsTracker::getNumberOfIndexBuilds(WithLock) const {
return _allIndexBuilds.size();
}
-void DatabaseIndexBuildsTracker::waitUntilNoIndexBuildsRemain(stdx::unique_lock<stdx::mutex>& lk) {
+void DatabaseIndexBuildsTracker::waitUntilNoIndexBuildsRemain(stdx::unique_lock<Latch>& lk) {
_noIndexBuildsRemainCondVar.wait(lk, [&] {
if (_allIndexBuilds.empty()) {
return true;
diff --git a/src/mongo/db/database_index_builds_tracker.h b/src/mongo/db/database_index_builds_tracker.h
index b91ab826527..372f5f08210 100644
--- a/src/mongo/db/database_index_builds_tracker.h
+++ b/src/mongo/db/database_index_builds_tracker.h
@@ -33,7 +33,7 @@
#include <string>
#include "mongo/db/repl_index_build_state.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/uuid.h"
@@ -87,7 +87,7 @@ public:
/**
* Returns when no index builds remain on this database.
*/
- void waitUntilNoIndexBuildsRemain(stdx::unique_lock<stdx::mutex>& lk);
+ void waitUntilNoIndexBuildsRemain(stdx::unique_lock<Latch>& lk);
private:
// Map of index build states on the database, by build UUID.
diff --git a/src/mongo/db/default_baton.cpp b/src/mongo/db/default_baton.cpp
index 8ae455226cf..cd9332cb92e 100644
--- a/src/mongo/db/default_baton.cpp
+++ b/src/mongo/db/default_baton.cpp
@@ -61,7 +61,7 @@ void DefaultBaton::detachImpl() noexcept {
decltype(_scheduled) scheduled;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_opCtx->getBaton().get() == this);
_opCtx->setBaton(nullptr);
@@ -79,7 +79,7 @@ void DefaultBaton::detachImpl() noexcept {
}
void DefaultBaton::schedule(Task func) noexcept {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_opCtx) {
lk.unlock();
@@ -97,14 +97,14 @@ void DefaultBaton::schedule(Task func) noexcept {
}
void DefaultBaton::notify() noexcept {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_notified = true;
_cv.notify_one();
}
Waitable::TimeoutState DefaultBaton::run_until(ClockSource* clkSource,
Date_t oldDeadline) noexcept {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// We'll fulfill promises and run jobs on the way out, ensuring we don't hold any locks
const auto guard = makeGuard([&] {
diff --git a/src/mongo/db/default_baton.h b/src/mongo/db/default_baton.h
index 063b12edd07..c406741e332 100644
--- a/src/mongo/db/default_baton.h
+++ b/src/mongo/db/default_baton.h
@@ -32,8 +32,8 @@
#include <vector>
#include "mongo/db/baton.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/functional.h"
namespace mongo {
@@ -62,7 +62,7 @@ public:
private:
void detachImpl() noexcept override;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DefaultBaton::_mutex");
stdx::condition_variable _cv;
bool _notified = false;
bool _sleeping = false;
diff --git a/src/mongo/db/free_mon/free_mon_controller.cpp b/src/mongo/db/free_mon/free_mon_controller.cpp
index 057ae5ecd7a..a9de7ca4c49 100644
--- a/src/mongo/db/free_mon/free_mon_controller.cpp
+++ b/src/mongo/db/free_mon/free_mon_controller.cpp
@@ -61,7 +61,7 @@ FreeMonNetworkInterface::~FreeMonNetworkInterface() = default;
void FreeMonController::addRegistrationCollector(
std::unique_ptr<FreeMonCollectorInterface> collector) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_registrationCollectors.add(std::move(collector));
@@ -70,7 +70,7 @@ void FreeMonController::addRegistrationCollector(
void FreeMonController::addMetricsCollector(std::unique_ptr<FreeMonCollectorInterface> collector) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_metricCollectors.add(std::move(collector));
@@ -128,7 +128,7 @@ void FreeMonController::notifyOnRollback() {
void FreeMonController::_enqueue(std::shared_ptr<FreeMonMessage> msg) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kStarted);
}
@@ -139,7 +139,7 @@ void FreeMonController::start(RegistrationType registrationType,
std::vector<std::string>& tags,
Seconds gatherMetricsInterval) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
}
@@ -154,7 +154,7 @@ void FreeMonController::start(RegistrationType registrationType,
_thread = stdx::thread([this] { _processor->run(); });
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_state = State::kStarted;
@@ -170,7 +170,7 @@ void FreeMonController::stop() {
log() << "Shutting down free monitoring";
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
bool started = (_state == State::kStarted);
@@ -194,7 +194,7 @@ void FreeMonController::stop() {
void FreeMonController::turnCrankForTest(size_t countMessagesToIgnore) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kStarted);
}
@@ -205,7 +205,7 @@ void FreeMonController::turnCrankForTest(size_t countMessagesToIgnore) {
void FreeMonController::getStatus(OperationContext* opCtx, BSONObjBuilder* status) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kStarted) {
status->append("state", "disabled");
@@ -218,7 +218,7 @@ void FreeMonController::getStatus(OperationContext* opCtx, BSONObjBuilder* statu
void FreeMonController::getServerStatus(OperationContext* opCtx, BSONObjBuilder* status) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kStarted) {
status->append("state", "disabled");
diff --git a/src/mongo/db/free_mon/free_mon_controller.h b/src/mongo/db/free_mon/free_mon_controller.h
index 9307ab7570c..5c74a8a4b5f 100644
--- a/src/mongo/db/free_mon/free_mon_controller.h
+++ b/src/mongo/db/free_mon/free_mon_controller.h
@@ -191,7 +191,7 @@ private:
State _state{State::kNotStarted};
// Mutex to protect internal state
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FreeMonController::_mutex");
// Set of registration collectors
FreeMonCollectorCollection _registrationCollectors;
diff --git a/src/mongo/db/free_mon/free_mon_controller_test.cpp b/src/mongo/db/free_mon/free_mon_controller_test.cpp
index 090eed90cc9..4db1bfa3c44 100644
--- a/src/mongo/db/free_mon/free_mon_controller_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_controller_test.cpp
@@ -86,7 +86,7 @@ public:
builder.append("mock", "some data");
{
- stdx::lock_guard<stdx::mutex> lck(_mutex);
+ stdx::lock_guard<Latch> lck(_mutex);
++_counter;
@@ -105,12 +105,12 @@ public:
}
std::uint32_t count() {
- stdx::lock_guard<stdx::mutex> lck(_mutex);
+ stdx::lock_guard<Latch> lck(_mutex);
return _counter;
}
void wait() {
- stdx::unique_lock<stdx::mutex> lck(_mutex);
+ stdx::unique_lock<Latch> lck(_mutex);
while (_counter < _wait) {
_condvar.wait(lck);
}
@@ -130,7 +130,7 @@ private:
std::uint32_t _counter{0};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FreeMonMetricsCollectorMock::_mutex");
stdx::condition_variable _condvar;
std::uint32_t _wait{0};
};
@@ -158,7 +158,7 @@ public:
* Set the count of events to wait for.
*/
void reset(uint32_t count) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
ASSERT_EQ(_count, 0UL);
ASSERT_GT(count, 0UL);
@@ -170,7 +170,7 @@ public:
* Set the payload and signal waiter.
*/
void set(T payload) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_count > 0) {
--_count;
@@ -187,7 +187,7 @@ public:
* Returns boost::none on timeout.
*/
boost::optional<T> wait_for(Milliseconds duration) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (!_condvar.wait_for(
lock, duration.toSystemDuration(), [this]() { return _count == 0; })) {
@@ -202,7 +202,7 @@ private:
stdx::condition_variable _condvar;
// Lock for condition variable and to protect state
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CountdownLatchResult::_mutex");
// Count of events to wait for
uint32_t _count;
@@ -309,7 +309,7 @@ public:
auto cdr = req.getMetrics();
{
- stdx::lock_guard<stdx::mutex> lock(_metricsLock);
+ stdx::lock_guard<Latch> lock(_metricsLock);
auto metrics = decompressMetrics(cdr);
_lastMetrics = metrics;
_countdownMetrics.set(metrics);
@@ -354,7 +354,7 @@ public:
}
BSONArray getLastMetrics() {
- stdx::lock_guard<stdx::mutex> lock(_metricsLock);
+ stdx::lock_guard<Latch> lock(_metricsLock);
return _lastMetrics;
}
@@ -365,7 +365,7 @@ private:
executor::ThreadPoolTaskExecutor* _threadPool;
- stdx::mutex _metricsLock;
+ Mutex _metricsLock = MONGO_MAKE_LATCH("FreeMonNetworkInterfaceMock::_metricsLock");
BSONArray _lastMetrics;
Options _options;
diff --git a/src/mongo/db/free_mon/free_mon_message.h b/src/mongo/db/free_mon/free_mon_message.h
index 71a34dd84b4..11fab7c8501 100644
--- a/src/mongo/db/free_mon/free_mon_message.h
+++ b/src/mongo/db/free_mon/free_mon_message.h
@@ -33,8 +33,8 @@
#include <vector>
#include "mongo/db/free_mon/free_mon_protocol_gen.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/duration.h"
#include "mongo/util/time_support.h"
@@ -292,7 +292,7 @@ public:
* Set Status and signal waiter.
*/
void set(Status status) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(!_set);
if (!_set) {
@@ -308,7 +308,7 @@ public:
* Returns boost::none on timeout.
*/
boost::optional<Status> wait_for(Milliseconds duration) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (!_condvar.wait_for(lock, duration.toSystemDuration(), [this]() { return _set; })) {
return {};
@@ -322,7 +322,7 @@ private:
stdx::condition_variable _condvar;
// Lock for condition variable and to protect state
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WaitableResult::_mutex");
// Indicates whether _status has been set
bool _set{false};
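The WaitableResult above shows the timed-wait form: wait_for takes the unique_lock<Latch>, a system duration, and a predicate, and returns false on timeout. A hypothetical reduced sketch (the Flag class is invented; the calls mirror those above):

#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"
#include "mongo/util/duration.h"

namespace mongo {

class Flag {
public:
    void set() {
        stdx::lock_guard<Latch> lk(_mutex);
        _set = true;
        _cv.notify_one();
    }

    // Returns false on timeout, true once set() has been called.
    bool waitFor(Milliseconds duration) {
        stdx::unique_lock<Latch> lk(_mutex);
        return _cv.wait_for(lk, duration.toSystemDuration(), [this] { return _set; });
    }

private:
    Mutex _mutex = MONGO_MAKE_LATCH("Flag::_mutex");
    stdx::condition_variable _cv;
    bool _set = false;
};

}  // namespace mongo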
diff --git a/src/mongo/db/free_mon/free_mon_processor.h b/src/mongo/db/free_mon/free_mon_processor.h
index 11584595147..ab519bfb84d 100644
--- a/src/mongo/db/free_mon/free_mon_processor.h
+++ b/src/mongo/db/free_mon/free_mon_processor.h
@@ -235,7 +235,7 @@ public:
* Reset countdown latch wait for N events.
*/
void reset(uint32_t count) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
dassert(_count == 0);
dassert(count > 0);
_count = count;
@@ -245,7 +245,7 @@ public:
* Count down an event.
*/
void countDown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_count > 0) {
--_count;
@@ -259,13 +259,13 @@ public:
* Wait until the N events specified in reset have occurred.
*/
void wait() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condvar.wait(lock, [&] { return _count == 0; });
}
private:
// mutex to protect the count and cond var
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FreeMonCountdownLatch::_mutex");
// cond var to signal and wait on
stdx::condition_variable _condvar;
diff --git a/src/mongo/db/free_mon/free_mon_queue.cpp b/src/mongo/db/free_mon/free_mon_queue.cpp
index d2cc1115ef5..56b01eade93 100644
--- a/src/mongo/db/free_mon/free_mon_queue.cpp
+++ b/src/mongo/db/free_mon/free_mon_queue.cpp
@@ -74,7 +74,7 @@ FreeMonMessage::~FreeMonMessage() {}
void FreeMonMessageQueue::enqueue(std::shared_ptr<FreeMonMessage> msg) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// If we were stopped, drop messages
if (_stop) {
@@ -98,7 +98,7 @@ void FreeMonMessageQueue::enqueue(std::shared_ptr<FreeMonMessage> msg) {
boost::optional<std::shared_ptr<FreeMonMessage>> FreeMonMessageQueue::dequeue(
ClockSource* clockSource) {
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_stop) {
return {};
}
@@ -188,7 +188,7 @@ boost::optional<std::shared_ptr<FreeMonMessage>> FreeMonMessageQueue::dequeue(
void FreeMonMessageQueue::stop() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// We can be stopped twice in some situations:
// 1. Stop on unexpected error
@@ -204,7 +204,7 @@ void FreeMonMessageQueue::turnCrankForTest(size_t countMessagesToIgnore) {
invariant(_useCrank);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_waitable = std::make_unique<WaitableResult>();
diff --git a/src/mongo/db/free_mon/free_mon_queue.h b/src/mongo/db/free_mon/free_mon_queue.h
index 18be1b7a330..6e7bb85dcbf 100644
--- a/src/mongo/db/free_mon/free_mon_queue.h
+++ b/src/mongo/db/free_mon/free_mon_queue.h
@@ -133,7 +133,7 @@ private:
stdx::condition_variable _condvar;
// Lock for condition variable and to protect state
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FreeMonMessageQueue::_mutex");
// Indicates whether queue has been stopped.
bool _stop{false};
diff --git a/src/mongo/db/ftdc/controller.cpp b/src/mongo/db/ftdc/controller.cpp
index db944bae91d..11f80503acd 100644
--- a/src/mongo/db/ftdc/controller.cpp
+++ b/src/mongo/db/ftdc/controller.cpp
@@ -39,8 +39,8 @@
#include "mongo/db/ftdc/collector.h"
#include "mongo/db/ftdc/util.h"
#include "mongo/db/jsobj.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/idle_thread_block.h"
#include "mongo/util/exit.h"
@@ -50,7 +50,7 @@
namespace mongo {
Status FTDCController::setEnabled(bool enabled) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_path.empty()) {
return Status(ErrorCodes::FTDCPathNotSet,
@@ -65,37 +65,37 @@ Status FTDCController::setEnabled(bool enabled) {
}
void FTDCController::setPeriod(Milliseconds millis) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.period = millis;
_condvar.notify_one();
}
void FTDCController::setMaxDirectorySizeBytes(std::uint64_t size) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.maxDirectorySizeBytes = size;
_condvar.notify_one();
}
void FTDCController::setMaxFileSizeBytes(std::uint64_t size) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.maxFileSizeBytes = size;
_condvar.notify_one();
}
void FTDCController::setMaxSamplesPerArchiveMetricChunk(size_t size) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.maxSamplesPerArchiveMetricChunk = size;
_condvar.notify_one();
}
void FTDCController::setMaxSamplesPerInterimMetricChunk(size_t size) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_configTemp.maxSamplesPerInterimMetricChunk = size;
_condvar.notify_one();
}
Status FTDCController::setDirectory(const boost::filesystem::path& path) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (!_path.empty()) {
return Status(ErrorCodes::FTDCPathAlreadySet,
@@ -113,7 +113,7 @@ Status FTDCController::setDirectory(const boost::filesystem::path& path) {
void FTDCController::addPeriodicCollector(std::unique_ptr<FTDCCollectorInterface> collector) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_periodicCollectors.add(std::move(collector));
@@ -122,7 +122,7 @@ void FTDCController::addPeriodicCollector(std::unique_ptr<FTDCCollectorInterface
void FTDCController::addOnRotateCollector(std::unique_ptr<FTDCCollectorInterface> collector) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_rotateCollectors.add(std::move(collector));
@@ -131,7 +131,7 @@ void FTDCController::addOnRotateCollector(std::unique_ptr<FTDCCollectorInterface
BSONObj FTDCController::getMostRecentPeriodicDocument() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _mostRecentPeriodicDocument.getOwned();
}
}
@@ -144,7 +144,7 @@ void FTDCController::start() {
_thread = stdx::thread([this] { doLoop(); });
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_state = State::kStarted;
@@ -155,7 +155,7 @@ void FTDCController::stop() {
log() << "Shutting down full-time diagnostic data capture";
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
bool started = (_state == State::kStarted);
@@ -189,7 +189,7 @@ void FTDCController::doLoop() {
try {
// Update config
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_config = _configTemp;
}
@@ -206,7 +206,7 @@ void FTDCController::doLoop() {
// Wait for the next run or signal to shutdown
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
MONGO_IDLE_THREAD_BLOCK;
// We ignore spurious wakeups by just doing an iteration of the loop
@@ -252,7 +252,7 @@ void FTDCController::doLoop() {
// Store a reference to the most recent document from the periodic collectors
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_mostRecentPeriodicDocument = std::get<0>(collectSample);
}
}
diff --git a/src/mongo/db/ftdc/controller.h b/src/mongo/db/ftdc/controller.h
index 5d1f2f5487a..949117cbc79 100644
--- a/src/mongo/db/ftdc/controller.h
+++ b/src/mongo/db/ftdc/controller.h
@@ -37,8 +37,8 @@
#include "mongo/db/ftdc/config.h"
#include "mongo/db/ftdc/file_manager.h"
#include "mongo/db/jsobj.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
namespace mongo {
@@ -187,7 +187,7 @@ private:
boost::filesystem::path _path;
// Mutex to protect the condvar, configuration changes, and most recent periodic document.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FTDCController::_mutex");
stdx::condition_variable _condvar;
// Config settings that are used by controller, file manager, and all other classes.
diff --git a/src/mongo/db/ftdc/controller_test.cpp b/src/mongo/db/ftdc/controller_test.cpp
index 43872f1ccd5..6788f66881b 100644
--- a/src/mongo/db/ftdc/controller_test.cpp
+++ b/src/mongo/db/ftdc/controller_test.cpp
@@ -107,7 +107,7 @@ public:
}
void wait() {
- stdx::unique_lock<stdx::mutex> lck(_mutex);
+ stdx::unique_lock<Latch> lck(_mutex);
while (_counter < _wait) {
_condvar.wait(lck);
}
@@ -133,7 +133,7 @@ private:
std::vector<BSONObj> _docs;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FTDCMetricsCollectorMockTee::_mutex");
stdx::condition_variable _condvar;
std::uint32_t _wait{0};
};
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index 048163d6c6e..e8b88e91d1f 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -388,7 +388,7 @@ bool IndexBuildInterceptor::areAllWritesApplied(OperationContext* opCtx) const {
}
boost::optional<MultikeyPaths> IndexBuildInterceptor::getMultikeyPaths() const {
- stdx::unique_lock<stdx::mutex> lk(_multikeyPathMutex);
+ stdx::unique_lock<Latch> lk(_multikeyPathMutex);
return _multikeyPaths;
}
@@ -414,7 +414,7 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
// SERVER-39705: It's worth noting that a document may not generate any keys, but be
// described as being multikey. This step must be done to maintain parity with `validate`s
// expectations.
- stdx::unique_lock<stdx::mutex> lk(_multikeyPathMutex);
+ stdx::unique_lock<Latch> lk(_multikeyPathMutex);
if (_multikeyPaths) {
MultikeyPathTracker::mergeMultikeyPaths(&_multikeyPaths.get(), multikeyPaths);
} else {
diff --git a/src/mongo/db/index/index_build_interceptor.h b/src/mongo/db/index/index_build_interceptor.h
index 97dca244576..01c9ba40326 100644
--- a/src/mongo/db/index/index_build_interceptor.h
+++ b/src/mongo/db/index/index_build_interceptor.h
@@ -167,7 +167,8 @@ private:
// shared resource.
std::shared_ptr<AtomicWord<long long>> _sideWritesCounter;
- mutable stdx::mutex _multikeyPathMutex;
+ mutable Mutex _multikeyPathMutex =
+ MONGO_MAKE_LATCH("IndexBuildInterceptor::_multikeyPathMutex");
boost::optional<MultikeyPaths> _multikeyPaths;
};
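When the guarded accessor is const, as with getMultikeyPaths above, the member stays mutable so a latch can still be taken inside the const method. A hypothetical sketch of that variant (the Stats class and its members are invented):

#include "mongo/platform/mutex.h"

namespace mongo {

class Stats {
public:
    long long count() const {
        stdx::unique_lock<Latch> lk(_mutex);  // locking inside a const method
        return _count;
    }

    void bump() {
        stdx::unique_lock<Latch> lk(_mutex);
        ++_count;
    }

private:
    mutable Mutex _mutex = MONGO_MAKE_LATCH("Stats::_mutex");
    long long _count = 0;
};

}  // namespace mongo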
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index d4e6da3ba2b..92127b4253a 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -240,7 +240,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::startIndexRe
/*commitQuorum=*/boost::none);
Status status = [&]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _registerIndexBuild(lk, replIndexBuildState);
}();
if (!status.isOK()) {
@@ -276,7 +276,7 @@ Future<void> IndexBuildsCoordinator::joinIndexBuilds(const NamespaceString& nss,
}
void IndexBuildsCoordinator::waitForAllIndexBuildsToStopForShutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// All index builds should have been signaled to stop via the ServiceContext.
@@ -291,7 +291,7 @@ void IndexBuildsCoordinator::waitForAllIndexBuildsToStopForShutdown() {
void IndexBuildsCoordinator::abortCollectionIndexBuilds(const UUID& collectionUUID,
const std::string& reason) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Ensure the caller correctly stopped any new index builds on the collection.
auto it = _disallowedCollections.find(collectionUUID);
@@ -311,7 +311,7 @@ void IndexBuildsCoordinator::abortCollectionIndexBuilds(const UUID& collectionUU
}
void IndexBuildsCoordinator::abortDatabaseIndexBuilds(StringData db, const std::string& reason) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Ensure the caller correctly stopped any new index builds on the database.
auto it = _disallowedDbs.find(db);
@@ -343,7 +343,7 @@ void IndexBuildsCoordinator::recoverIndexBuilds() {
}
int IndexBuildsCoordinator::numInProgForDb(StringData db) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto dbIndexBuildsIt = _databaseIndexBuilds.find(db);
if (dbIndexBuildsIt == _databaseIndexBuilds.end()) {
@@ -353,7 +353,7 @@ int IndexBuildsCoordinator::numInProgForDb(StringData db) const {
}
void IndexBuildsCoordinator::dump(std::ostream& ss) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_collectionIndexBuilds.size()) {
ss << "\n<b>Background Jobs in Progress</b>\n";
@@ -370,17 +370,17 @@ void IndexBuildsCoordinator::dump(std::ostream& ss) const {
}
bool IndexBuildsCoordinator::inProgForCollection(const UUID& collectionUUID) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _collectionIndexBuilds.find(collectionUUID) != _collectionIndexBuilds.end();
}
bool IndexBuildsCoordinator::inProgForDb(StringData db) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _databaseIndexBuilds.find(db) != _databaseIndexBuilds.end();
}
void IndexBuildsCoordinator::assertNoIndexBuildInProgress() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
uassert(ErrorCodes::BackgroundOperationInProgressForDatabase,
str::stream() << "cannot perform operation: there are currently "
<< _allIndexBuilds.size() << " index builds running.",
@@ -406,7 +406,7 @@ void IndexBuildsCoordinator::assertNoBgOpInProgForDb(StringData db) const {
void IndexBuildsCoordinator::awaitNoIndexBuildInProgressForCollection(
const UUID& collectionUUID) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto collIndexBuildsIt = _collectionIndexBuilds.find(collectionUUID);
if (collIndexBuildsIt == _collectionIndexBuilds.end()) {
@@ -420,7 +420,7 @@ void IndexBuildsCoordinator::awaitNoIndexBuildInProgressForCollection(
}
void IndexBuildsCoordinator::awaitNoBgOpInProgForDb(StringData db) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto dbIndexBuildsIt = _databaseIndexBuilds.find(db);
if (dbIndexBuildsIt == _databaseIndexBuilds.end()) {
@@ -438,7 +438,7 @@ void IndexBuildsCoordinator::onReplicaSetReconfig() {
}
void IndexBuildsCoordinator::sleepIndexBuilds_forTestOnly(bool sleep) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sleepForTest = sleep;
}
@@ -576,7 +576,7 @@ IndexBuildsCoordinator::_registerAndSetUpIndexBuild(
// Lock from when we ascertain what indexes to build through to when the build is registered
// on the Coordinator and persistedly set up in the catalog. This serializes setting up an
// index build so that no attempts are made to register the same build twice.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
std::vector<BSONObj> filteredSpecs;
try {
@@ -694,7 +694,7 @@ void IndexBuildsCoordinator::_runIndexBuild(OperationContext* opCtx,
const UUID& buildUUID,
const IndexBuildOptions& indexBuildOptions) noexcept {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_sleepForTest) {
lk.unlock();
sleepmillis(100);
@@ -703,7 +703,7 @@ void IndexBuildsCoordinator::_runIndexBuild(OperationContext* opCtx,
}
auto replState = [&] {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _allIndexBuilds.find(buildUUID);
invariant(it != _allIndexBuilds.end());
return it->second;
@@ -735,7 +735,7 @@ void IndexBuildsCoordinator::_runIndexBuild(OperationContext* opCtx,
// Ensure the index build is unregistered from the Coordinator and the Promise is set with
// the build's result so that callers are notified of the outcome.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_unregisterIndexBuild(lk, replState);
@@ -1040,7 +1040,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
invariant(opCtx->lockState()->isW());
auto replState = [&] {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _allIndexBuilds.find(buildUUID);
invariant(it != _allIndexBuilds.end());
return it->second;
@@ -1102,7 +1102,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
invariant(indexCatalogStats.numIndexesBefore == indexCatalogStats.numIndexesAfter);
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_unregisterIndexBuild(lk, replState);
}
@@ -1113,7 +1113,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::_runIndexReb
}
void IndexBuildsCoordinator::_stopIndexBuildsOnDatabase(StringData dbName) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _disallowedDbs.find(dbName);
if (it != _disallowedDbs.end()) {
@@ -1124,7 +1124,7 @@ void IndexBuildsCoordinator::_stopIndexBuildsOnDatabase(StringData dbName) {
}
void IndexBuildsCoordinator::_stopIndexBuildsOnCollection(const UUID& collectionUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _disallowedCollections.find(collectionUUID);
if (it != _disallowedCollections.end()) {
@@ -1135,7 +1135,7 @@ void IndexBuildsCoordinator::_stopIndexBuildsOnCollection(const UUID& collection
}
void IndexBuildsCoordinator::_allowIndexBuildsOnDatabase(StringData dbName) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _disallowedDbs.find(dbName);
invariant(it != _disallowedDbs.end());
@@ -1146,7 +1146,7 @@ void IndexBuildsCoordinator::_allowIndexBuildsOnDatabase(StringData dbName) {
}
void IndexBuildsCoordinator::_allowIndexBuildsOnCollection(const UUID& collectionUUID) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _disallowedCollections.find(collectionUUID);
invariant(it != _disallowedCollections.end());
diff --git a/src/mongo/db/index_builds_coordinator.h b/src/mongo/db/index_builds_coordinator.h
index 7ab40ef1e85..19bf083689a 100644
--- a/src/mongo/db/index_builds_coordinator.h
+++ b/src/mongo/db/index_builds_coordinator.h
@@ -43,8 +43,8 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl_index_build_state.h"
#include "mongo/db/storage/durable_catalog.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/future.h"
@@ -414,7 +414,7 @@ protected:
const UUID& buildUUID) noexcept;
// Protects the below state.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("IndexBuildsCoordinator::_mutex");
// New index builds are not allowed on a collection or database if the collection or database is
// in either of these maps. These are used when concurrent operations need to abort index builds
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 31a9859b38c..5a2f3686c81 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -111,7 +111,7 @@ IndexBuildsCoordinatorMongod::startIndexBuild(OperationContext* opCtx,
}
auto replState = [&]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _allIndexBuilds.find(buildUUID);
invariant(it != _allIndexBuilds.end());
return it->second;
@@ -172,7 +172,7 @@ IndexBuildsCoordinatorMongod::startIndexBuild(OperationContext* opCtx,
](auto status) noexcept {
// Clean up the index build if we failed to schedule it.
if (!status.isOK()) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Unregister the index build before setting the promises,
// so callers do not see the build again.
@@ -249,7 +249,7 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
UUID collectionUUID = collection->uuid();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto collectionIt = _collectionIndexBuilds.find(collectionUUID);
if (collectionIt == _collectionIndexBuilds.end()) {
return Status(ErrorCodes::IndexNotFound,
diff --git a/src/mongo/db/keys_collection_cache.cpp b/src/mongo/db/keys_collection_cache.cpp
index c97697aea41..0e57d6b091a 100644
--- a/src/mongo/db/keys_collection_cache.cpp
+++ b/src/mongo/db/keys_collection_cache.cpp
@@ -47,7 +47,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::refresh(OperationContext
decltype(_cache)::size_type originalSize = 0;
{
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
auto iter = _cache.crbegin();
if (iter != _cache.crend()) {
newerThanThis = iter->second.getExpiresAt();
@@ -73,7 +73,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::refresh(OperationContext
auto& newKeys = refreshStatus.getValue();
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
if (originalSize > _cache.size()) {
// _cache was cleared while we were getting the new keys; just return the newest key without
// touching the _cache so the next refresh will populate it properly.
@@ -96,7 +96,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::refresh(OperationContext
StatusWith<KeysCollectionDocument> KeysCollectionCache::getKeyById(long long keyId,
const LogicalTime& forThisTime) {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
for (auto iter = _cache.lower_bound(forThisTime); iter != _cache.cend(); ++iter) {
if (iter->second.getKeyId() == keyId) {
@@ -111,7 +111,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::getKeyById(long long key
}
StatusWith<KeysCollectionDocument> KeysCollectionCache::getKey(const LogicalTime& forThisTime) {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
auto iter = _cache.upper_bound(forThisTime);
@@ -126,7 +126,7 @@ StatusWith<KeysCollectionDocument> KeysCollectionCache::getKey(const LogicalTime
void KeysCollectionCache::resetCache() {
// Keys read with a non-majority readConcern level can be rolled back.
if (!_client->supportsMajorityReads()) {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
_cache.clear();
}
}
diff --git a/src/mongo/db/keys_collection_cache.h b/src/mongo/db/keys_collection_cache.h
index 28d72892277..61989d6ae5b 100644
--- a/src/mongo/db/keys_collection_cache.h
+++ b/src/mongo/db/keys_collection_cache.h
@@ -34,7 +34,7 @@
#include "mongo/base/status_with.h"
#include "mongo/db/keys_collection_document.h"
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -68,7 +68,7 @@ private:
const std::string _purpose;
KeysCollectionClient* const _client;
- stdx::mutex _cacheMutex;
+ Mutex _cacheMutex = MONGO_MAKE_LATCH("KeysCollectionCache::_cacheMutex");
std::map<LogicalTime, KeysCollectionDocument> _cache; // expiresAt -> KeysDocument
};
diff --git a/src/mongo/db/keys_collection_manager.cpp b/src/mongo/db/keys_collection_manager.cpp
index bbad4f450c6..155c52b163c 100644
--- a/src/mongo/db/keys_collection_manager.cpp
+++ b/src/mongo/db/keys_collection_manager.cpp
@@ -193,7 +193,7 @@ void KeysCollectionManager::clearCache() {
void KeysCollectionManager::PeriodicRunner::refreshNow(OperationContext* opCtx) {
auto refreshRequest = [this]() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_inShutdown) {
uasserted(ErrorCodes::ShutdownInProgress,
@@ -227,7 +227,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
unsigned errorCount = 0;
std::shared_ptr<RefreshFunc> doRefresh;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_inShutdown) {
break;
@@ -250,7 +250,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
auto currentTime = LogicalClock::get(service)->getClusterTime();
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_hasSeenKeys = true;
}
@@ -269,7 +269,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
nextWakeup = std::min(nextWakeup, Milliseconds(data["overrideMS"].numberInt()));
});
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_refreshRequest) {
if (!hasRefreshRequestInitially) {
@@ -297,7 +297,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
}
}
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_refreshRequest) {
_refreshRequest->set();
_refreshRequest.reset();
@@ -305,7 +305,7 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s
}
void KeysCollectionManager::PeriodicRunner::setFunc(RefreshFunc newRefreshStrategy) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_doRefresh = std::make_shared<RefreshFunc>(std::move(newRefreshStrategy));
_refreshNeededCV.notify_all();
}
@@ -318,7 +318,7 @@ void KeysCollectionManager::PeriodicRunner::switchFunc(OperationContext* opCtx,
void KeysCollectionManager::PeriodicRunner::start(ServiceContext* service,
const std::string& threadName,
Milliseconds refreshInterval) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(!_backgroundThread.joinable());
invariant(!_inShutdown);
@@ -329,7 +329,7 @@ void KeysCollectionManager::PeriodicRunner::start(ServiceContext* service,
void KeysCollectionManager::PeriodicRunner::stop() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (!_backgroundThread.joinable()) {
return;
}
@@ -343,7 +343,7 @@ void KeysCollectionManager::PeriodicRunner::stop() {
}
bool KeysCollectionManager::PeriodicRunner::hasSeenKeys() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _hasSeenKeys;
}
diff --git a/src/mongo/db/keys_collection_manager.h b/src/mongo/db/keys_collection_manager.h
index c220e1ba990..1131b7c3612 100644
--- a/src/mongo/db/keys_collection_manager.h
+++ b/src/mongo/db/keys_collection_manager.h
@@ -37,7 +37,7 @@
#include "mongo/db/keys_collection_cache.h"
#include "mongo/db/keys_collection_document.h"
#include "mongo/db/keys_collection_manager_gen.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/duration.h"
@@ -169,7 +169,8 @@ private:
std::string threadName,
Milliseconds refreshInterval);
- stdx::mutex _mutex; // protects all the member variables below.
+ // protects all the member variables below.
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicRunner::_mutex");
std::shared_ptr<Notification<void>> _refreshRequest;
stdx::condition_variable _refreshNeededCV;
diff --git a/src/mongo/db/logical_clock.cpp b/src/mongo/db/logical_clock.cpp
index 415566094d2..5cc982465d9 100644
--- a/src/mongo/db/logical_clock.cpp
+++ b/src/mongo/db/logical_clock.cpp
@@ -76,12 +76,12 @@ void LogicalClock::set(ServiceContext* service, std::unique_ptr<LogicalClock> cl
LogicalClock::LogicalClock(ServiceContext* service) : _service(service) {}
LogicalTime LogicalClock::getClusterTime() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _clusterTime;
}
Status LogicalClock::advanceClusterTime(const LogicalTime newTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto rateLimitStatus = _passesRateLimiter_inlock(newTime);
if (!rateLimitStatus.isOK()) {
@@ -99,7 +99,7 @@ LogicalTime LogicalClock::reserveTicks(uint64_t nTicks) {
invariant(nTicks > 0 && nTicks <= kMaxSignedInt);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
LogicalTime clusterTime = _clusterTime;
@@ -142,7 +142,7 @@ LogicalTime LogicalClock::reserveTicks(uint64_t nTicks) {
}
void LogicalClock::setClusterTimeFromTrustedSource(LogicalTime newTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Rate limit checks are skipped here so a server with no activity for longer than
// maxAcceptableLogicalClockDriftSecs seconds can still have its cluster time initialized.
@@ -177,12 +177,12 @@ Status LogicalClock::_passesRateLimiter_inlock(LogicalTime newTime) {
}
bool LogicalClock::isEnabled() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isEnabled;
}
void LogicalClock::disable() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_isEnabled = false;
}
diff --git a/src/mongo/db/logical_clock.h b/src/mongo/db/logical_clock.h
index c6cebe983d8..28191be87f6 100644
--- a/src/mongo/db/logical_clock.h
+++ b/src/mongo/db/logical_clock.h
@@ -30,7 +30,7 @@
#pragma once
#include "mongo/db/logical_time.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
class ServiceContext;
@@ -107,7 +107,7 @@ private:
ServiceContext* const _service;
// The mutex protects _clusterTime and _isEnabled.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("LogicalClock::_mutex");
LogicalTime _clusterTime;
bool _isEnabled{true};
};
diff --git a/src/mongo/db/logical_session_cache_impl.cpp b/src/mongo/db/logical_session_cache_impl.cpp
index 8afd9f6889f..17b136f566a 100644
--- a/src/mongo/db/logical_session_cache_impl.cpp
+++ b/src/mongo/db/logical_session_cache_impl.cpp
@@ -116,7 +116,7 @@ Status LogicalSessionCacheImpl::reapNow(Client* client) {
}
size_t LogicalSessionCacheImpl::size() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _activeSessions.size();
}
@@ -140,7 +140,7 @@ void LogicalSessionCacheImpl::_periodicReap(Client* client) {
Status LogicalSessionCacheImpl::_reap(Client* client) {
// Take the lock to update some stats.
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Clear the last set of stats for our new run.
_stats.setLastTransactionReaperJobDurationMillis(0);
@@ -187,7 +187,7 @@ Status LogicalSessionCacheImpl::_reap(Client* client) {
Minutes(gTransactionRecordMinimumLifetimeMinutes));
} catch (const DBException& ex) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto millis = _service->now() - _stats.getLastTransactionReaperJobTimestamp();
_stats.setLastTransactionReaperJobDurationMillis(millis.count());
}
@@ -196,7 +196,7 @@ Status LogicalSessionCacheImpl::_reap(Client* client) {
}
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto millis = _service->now() - _stats.getLastTransactionReaperJobTimestamp();
_stats.setLastTransactionReaperJobDurationMillis(millis.count());
_stats.setLastTransactionReaperJobEntriesCleanedUp(numReaped);
@@ -208,7 +208,7 @@ Status LogicalSessionCacheImpl::_reap(Client* client) {
void LogicalSessionCacheImpl::_refresh(Client* client) {
// Stats for serverStatus:
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Clear the refresh-related stats with the beginning of our run.
_stats.setLastSessionsCollectionJobDurationMillis(0);
@@ -223,7 +223,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
// This will finish timing _refresh for our stats no matter when we return.
const auto timeRefreshJob = makeGuard([this] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto millis = _service->now() - _stats.getLastSessionsCollectionJobTimestamp();
_stats.setLastSessionsCollectionJobDurationMillis(millis.count());
});
@@ -255,7 +255,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
{
using std::swap;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
swap(explicitlyEndingSessions, _endingSessions);
swap(activeSessions, _activeSessions);
}
@@ -264,7 +264,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
// swapped out of LogicalSessionCache, and merges in any records that had been added since we
// swapped them out.
auto backSwap = [this](auto& member, auto& temp) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
using std::swap;
swap(member, temp);
for (const auto& it : temp) {
@@ -300,7 +300,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
uassertStatusOK(_sessionsColl->refreshSessions(opCtx, activeSessionRecords));
activeSessionsBackSwapper.dismiss();
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stats.setLastSessionsCollectionJobEntriesRefreshed(activeSessionRecords.size());
}
@@ -308,7 +308,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
uassertStatusOK(_sessionsColl->removeRecords(opCtx, explicitlyEndingSessions));
explicitlyEndingBackSwaper.dismiss();
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stats.setLastSessionsCollectionJobEntriesEnded(explicitlyEndingSessions.size());
}
@@ -321,7 +321,7 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
// Exclude sessions added to _activeSessions from the openCursorSession to avoid race between
// killing cursors on the removed sessions and creating sessions.
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& it : _activeSessions) {
auto newSessionIt = openCursorSessions.find(it.first);
@@ -351,18 +351,18 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
SessionKiller::Matcher matcher(std::move(patterns));
auto killRes = _service->killCursorsWithMatchingSessions(opCtx, std::move(matcher));
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stats.setLastSessionsCollectionJobCursorsClosed(killRes.second);
}
}
void LogicalSessionCacheImpl::endSessions(const LogicalSessionIdSet& sessions) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_endingSessions.insert(begin(sessions), end(sessions));
}
LogicalSessionCacheStats LogicalSessionCacheImpl::getStats() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stats.setActiveSessionsCount(_activeSessions.size());
return _stats;
}
@@ -380,7 +380,7 @@ Status LogicalSessionCacheImpl::_addToCache(WithLock, LogicalSessionRecord recor
}
std::vector<LogicalSessionId> LogicalSessionCacheImpl::listIds() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
std::vector<LogicalSessionId> ret;
ret.reserve(_activeSessions.size());
for (const auto& id : _activeSessions) {
@@ -391,7 +391,7 @@ std::vector<LogicalSessionId> LogicalSessionCacheImpl::listIds() const {
std::vector<LogicalSessionId> LogicalSessionCacheImpl::listIds(
const std::vector<SHA256Block>& userDigests) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
std::vector<LogicalSessionId> ret;
for (const auto& it : _activeSessions) {
if (std::find(userDigests.cbegin(), userDigests.cend(), it.first.getUid()) !=
@@ -404,7 +404,7 @@ std::vector<LogicalSessionId> LogicalSessionCacheImpl::listIds(
boost::optional<LogicalSessionRecord> LogicalSessionCacheImpl::peekCached(
const LogicalSessionId& id) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const auto it = _activeSessions.find(id);
if (it == _activeSessions.end()) {
return boost::none;
diff --git a/src/mongo/db/logical_session_cache_impl.h b/src/mongo/db/logical_session_cache_impl.h
index dcc827a98ef..c92e45fee4a 100644
--- a/src/mongo/db/logical_session_cache_impl.h
+++ b/src/mongo/db/logical_session_cache_impl.h
@@ -109,7 +109,7 @@ private:
const std::shared_ptr<SessionsCollection> _sessionsColl;
const ReapSessionsOlderThanFn _reapSessionsOlderThanFn;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("LogicalSessionCacheImpl::_mutex");
LogicalSessionIdMap<LogicalSessionRecord> _activeSessions;
diff --git a/src/mongo/db/logical_time_validator.cpp b/src/mongo/db/logical_time_validator.cpp
index 66136950343..df814663ff6 100644
--- a/src/mongo/db/logical_time_validator.cpp
+++ b/src/mongo/db/logical_time_validator.cpp
@@ -51,7 +51,7 @@ namespace {
const auto getLogicalClockValidator =
ServiceContext::declareDecoration<std::unique_ptr<LogicalTimeValidator>>();
-stdx::mutex validatorMutex; // protects access to decoration instance of LogicalTimeValidator.
+Mutex validatorMutex; // protects access to decoration instance of LogicalTimeValidator.
std::vector<Privilege> advanceClusterTimePrivilege;
@@ -67,7 +67,7 @@ Milliseconds kRefreshIntervalIfErrored(200);
} // unnamed namespace
LogicalTimeValidator* LogicalTimeValidator::get(ServiceContext* service) {
- stdx::lock_guard<stdx::mutex> lk(validatorMutex);
+ stdx::lock_guard<Latch> lk(validatorMutex);
return getLogicalClockValidator(service).get();
}
@@ -77,7 +77,7 @@ LogicalTimeValidator* LogicalTimeValidator::get(OperationContext* ctx) {
void LogicalTimeValidator::set(ServiceContext* service,
std::unique_ptr<LogicalTimeValidator> newValidator) {
- stdx::lock_guard<stdx::mutex> lk(validatorMutex);
+ stdx::lock_guard<Latch> lk(validatorMutex);
auto& validator = getLogicalClockValidator(service);
validator = std::move(newValidator);
}
@@ -91,7 +91,7 @@ SignedLogicalTime LogicalTimeValidator::_getProof(const KeysCollectionDocument&
// Compare and calculate HMAC inside mutex to prevent multiple threads computing HMAC for the
// same cluster time.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Note: _lastSeenValidTime will initially not have a proof set.
if (newTime == _lastSeenValidTime.getTime() && _lastSeenValidTime.getProof()) {
return _lastSeenValidTime;
@@ -143,7 +143,7 @@ SignedLogicalTime LogicalTimeValidator::signLogicalTime(OperationContext* opCtx,
Status LogicalTimeValidator::validate(OperationContext* opCtx, const SignedLogicalTime& newTime) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (newTime.getTime() <= _lastSeenValidTime.getTime()) {
return Status::OK();
}
@@ -173,7 +173,7 @@ void LogicalTimeValidator::init(ServiceContext* service) {
}
void LogicalTimeValidator::shutDown() {
- stdx::lock_guard<stdx::mutex> lk(_mutexKeyManager);
+ stdx::lock_guard<Latch> lk(_mutexKeyManager);
if (_keyManager) {
_keyManager->stopMonitoring();
}
@@ -198,23 +198,23 @@ bool LogicalTimeValidator::shouldGossipLogicalTime() {
void LogicalTimeValidator::resetKeyManagerCache() {
log() << "Resetting key manager cache";
{
- stdx::lock_guard<stdx::mutex> keyManagerLock(_mutexKeyManager);
+ stdx::lock_guard<Latch> keyManagerLock(_mutexKeyManager);
invariant(_keyManager);
_keyManager->clearCache();
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lastSeenValidTime = SignedLogicalTime();
_timeProofService.resetCache();
}
void LogicalTimeValidator::stopKeyManager() {
- stdx::lock_guard<stdx::mutex> keyManagerLock(_mutexKeyManager);
+ stdx::lock_guard<Latch> keyManagerLock(_mutexKeyManager);
if (_keyManager) {
log() << "Stopping key manager";
_keyManager->stopMonitoring();
_keyManager->clearCache();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lastSeenValidTime = SignedLogicalTime();
_timeProofService.resetCache();
} else {
@@ -223,7 +223,7 @@ void LogicalTimeValidator::stopKeyManager() {
}
std::shared_ptr<KeysCollectionManager> LogicalTimeValidator::_getKeyManagerCopy() {
- stdx::lock_guard<stdx::mutex> lk(_mutexKeyManager);
+ stdx::lock_guard<Latch> lk(_mutexKeyManager);
invariant(_keyManager);
return _keyManager;
}
diff --git a/src/mongo/db/logical_time_validator.h b/src/mongo/db/logical_time_validator.h
index b87ff47436e..e639b4435d8 100644
--- a/src/mongo/db/logical_time_validator.h
+++ b/src/mongo/db/logical_time_validator.h
@@ -33,7 +33,7 @@
#include "mongo/db/signed_logical_time.h"
#include "mongo/db/time_proof_service.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -122,8 +122,9 @@ private:
SignedLogicalTime _getProof(const KeysCollectionDocument& keyDoc, LogicalTime newTime);
- stdx::mutex _mutex; // protects _lastSeenValidTime
- stdx::mutex _mutexKeyManager; // protects _keyManager
+ Mutex _mutex = MONGO_MAKE_LATCH("LogicalTimeValidator::_mutex"); // protects _lastSeenValidTime
+ Mutex _mutexKeyManager =
+ MONGO_MAKE_LATCH("LogicalTimevalidator::_mutexKeyManager"); // protects _keyManager
SignedLogicalTime _lastSeenValidTime;
TimeProofService _timeProofService;
std::shared_ptr<KeysCollectionManager> _keyManager;
diff --git a/src/mongo/db/operation_context.cpp b/src/mongo/db/operation_context.cpp
index 8a2fbca0e7b..c471c870fe1 100644
--- a/src/mongo/db/operation_context.cpp
+++ b/src/mongo/db/operation_context.cpp
@@ -35,8 +35,8 @@
#include "mongo/db/client.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/transport/baton.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/clock_source.h"
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index 025e8f7c1f3..a834f89ae1d 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -41,8 +41,8 @@
#include "mongo/db/storage/write_unit_of_work.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/transport/session.h"
#include "mongo/util/decorable.h"
#include "mongo/util/interruptible.h"
diff --git a/src/mongo/db/operation_context_group.cpp b/src/mongo/db/operation_context_group.cpp
index c3f46ea9f9f..bb215d21095 100644
--- a/src/mongo/db/operation_context_group.cpp
+++ b/src/mongo/db/operation_context_group.cpp
@@ -61,7 +61,7 @@ OperationContextGroup::Context::Context(OperationContext& ctx, OperationContextG
void OperationContextGroup::Context::discard() {
if (!_movedFrom) {
- stdx::lock_guard<stdx::mutex> lk(_ctxGroup._lock);
+ stdx::lock_guard<Latch> lk(_ctxGroup._lock);
auto it = find(_ctxGroup._contexts, &_opCtx);
_ctxGroup._contexts.erase(it);
_movedFrom = true;
@@ -77,7 +77,7 @@ auto OperationContextGroup::makeOperationContext(Client& client) -> Context {
auto OperationContextGroup::adopt(UniqueOperationContext opCtx) -> Context {
auto cp = opCtx.get();
invariant(cp);
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
_contexts.emplace_back(std::move(opCtx));
return Context(*cp, *this);
}
@@ -87,7 +87,7 @@ auto OperationContextGroup::take(Context ctx) -> Context {
return ctx;
}
{
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
auto it = find(ctx._ctxGroup._contexts, &ctx._opCtx);
_contexts.emplace_back(std::move(*it));
ctx._ctxGroup._contexts.erase(it);
@@ -98,14 +98,14 @@ auto OperationContextGroup::take(Context ctx) -> Context {
void OperationContextGroup::interrupt(ErrorCodes::Error code) {
invariant(code);
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
for (auto&& uniqueOperationContext : _contexts) {
interruptOne(uniqueOperationContext.get(), code);
}
}
bool OperationContextGroup::isEmpty() {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
return _contexts.empty();
}
diff --git a/src/mongo/db/operation_context_group.h b/src/mongo/db/operation_context_group.h
index 189069cdb17..0de0792e269 100644
--- a/src/mongo/db/operation_context_group.h
+++ b/src/mongo/db/operation_context_group.h
@@ -32,7 +32,7 @@
#include "mongo/db/client.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -96,7 +96,7 @@ public:
private:
friend class Context;
- stdx::mutex _lock;
+ Mutex _lock = MONGO_MAKE_LATCH("OperationContextGroup::_lock");
std::vector<UniqueOperationContext> _contexts;
}; // class OperationContextGroup
diff --git a/src/mongo/db/operation_context_test.cpp b/src/mongo/db/operation_context_test.cpp
index 43b4a51df5d..d805541218c 100644
--- a/src/mongo/db/operation_context_test.cpp
+++ b/src/mongo/db/operation_context_test.cpp
@@ -254,9 +254,9 @@ public:
}
void checkForInterruptForTimeout(OperationContext* opCtx) {
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
opCtx->waitForConditionOrInterrupt(cv, lk);
}
@@ -334,18 +334,18 @@ TEST_F(OperationDeadlineTests, VeryLargeRelativeDeadlinesNanoseconds) {
TEST_F(OperationDeadlineTests, WaitForMaxTimeExpiredCV) {
auto opCtx = client->makeOperationContext();
opCtx->setDeadlineByDate(mockClock->now(), ErrorCodes::ExceededTimeLimit);
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT_EQ(ErrorCodes::ExceededTimeLimit, opCtx->waitForConditionOrInterruptNoAssert(cv, lk));
}
TEST_F(OperationDeadlineTests, WaitForMaxTimeExpiredCVWithWaitUntilSet) {
auto opCtx = client->makeOperationContext();
opCtx->setDeadlineByDate(mockClock->now(), ErrorCodes::ExceededTimeLimit);
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT_EQ(
ErrorCodes::ExceededTimeLimit,
opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now() + Seconds{10})
@@ -598,17 +598,17 @@ TEST_F(OperationDeadlineTests, DeadlineAfterRunWithoutInterruptDoesntSeeUnviolat
TEST_F(OperationDeadlineTests, WaitForKilledOpCV) {
auto opCtx = client->makeOperationContext();
opCtx->markKilled();
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT_EQ(ErrorCodes::Interrupted, opCtx->waitForConditionOrInterruptNoAssert(cv, lk));
}
TEST_F(OperationDeadlineTests, WaitForUntilExpiredCV) {
auto opCtx = client->makeOperationContext();
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT(stdx::cv_status::timeout ==
unittest::assertGet(
opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now())));
@@ -617,9 +617,9 @@ TEST_F(OperationDeadlineTests, WaitForUntilExpiredCV) {
TEST_F(OperationDeadlineTests, WaitForUntilExpiredCVWithMaxTimeSet) {
auto opCtx = client->makeOperationContext();
opCtx->setDeadlineByDate(mockClock->now() + Seconds{10}, ErrorCodes::ExceededTimeLimit);
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT(stdx::cv_status::timeout ==
unittest::assertGet(
opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now())));
@@ -627,9 +627,9 @@ TEST_F(OperationDeadlineTests, WaitForUntilExpiredCVWithMaxTimeSet) {
TEST_F(OperationDeadlineTests, WaitForDurationExpired) {
auto opCtx = client->makeOperationContext();
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT_FALSE(opCtx->waitForConditionOrInterruptFor(
cv, lk, Milliseconds(-1000), []() -> bool { return false; }));
}
@@ -637,9 +637,9 @@ TEST_F(OperationDeadlineTests, WaitForDurationExpired) {
TEST_F(OperationDeadlineTests, DuringWaitMaxTimeExpirationDominatesUntilExpiration) {
auto opCtx = client->makeOperationContext();
opCtx->setDeadlineByDate(mockClock->now(), ErrorCodes::ExceededTimeLimit);
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
ASSERT(ErrorCodes::ExceededTimeLimit ==
opCtx->waitForConditionOrInterruptNoAssertUntil(cv, lk, mockClock->now()));
}
@@ -648,17 +648,17 @@ class ThreadedOperationDeadlineTests : public OperationDeadlineTests {
public:
using CvPred = std::function<bool()>;
using WaitFn = std::function<bool(
- OperationContext*, stdx::condition_variable&, stdx::unique_lock<stdx::mutex>&, CvPred)>;
+ OperationContext*, stdx::condition_variable&, stdx::unique_lock<Latch>&, CvPred)>;
struct WaitTestState {
void signal() {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
invariant(!isSignaled);
isSignaled = true;
cv.notify_all();
}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("WaitTestState::mutex");
stdx::condition_variable cv;
bool isSignaled = false;
};
@@ -674,7 +674,7 @@ public:
opCtx->setDeadlineByDate(maxTime, ErrorCodes::ExceededTimeLimit);
}
auto predicate = [state] { return state->isSignaled; };
- stdx::unique_lock<stdx::mutex> lk(state->mutex);
+ stdx::unique_lock<Latch> lk(state->mutex);
barrier->countDownAndWait();
return waitFn(opCtx, state->cv, lk, predicate);
});
@@ -684,7 +684,7 @@ public:
// Now we know that the waiter task must own the mutex, because it does not signal the
// barrier until it does.
- stdx::lock_guard<stdx::mutex> lk(state->mutex);
+ stdx::lock_guard<Latch> lk(state->mutex);
// Assuming that opCtx has not already been interrupted and that maxTime and until are
// unexpired, we know that the waiter must be blocked in the condition variable, because it
@@ -699,7 +699,7 @@ public:
Date_t maxTime) {
const auto waitFn = [until](OperationContext* opCtx,
stdx::condition_variable& cv,
- stdx::unique_lock<stdx::mutex>& lk,
+ stdx::unique_lock<Latch>& lk,
CvPred predicate) {
if (until < Date_t::max()) {
return opCtx->waitForConditionOrInterruptUntil(cv, lk, until, predicate);
@@ -718,7 +718,7 @@ public:
Date_t maxTime) {
const auto waitFn = [duration](OperationContext* opCtx,
stdx::condition_variable& cv,
- stdx::unique_lock<stdx::mutex>& lk,
+ stdx::unique_lock<Latch>& lk,
CvPred predicate) {
return opCtx->waitForConditionOrInterruptFor(cv, lk, duration, predicate);
};
@@ -735,7 +735,7 @@ public:
Date_t maxTime) {
auto waitFn = [sleepUntil](OperationContext* opCtx,
stdx::condition_variable& cv,
- stdx::unique_lock<stdx::mutex>& lk,
+ stdx::unique_lock<Latch>& lk,
CvPred predicate) {
lk.unlock();
opCtx->sleepUntil(sleepUntil);
@@ -752,7 +752,7 @@ public:
Date_t maxTime) {
auto waitFn = [sleepFor](OperationContext* opCtx,
stdx::condition_variable& cv,
- stdx::unique_lock<stdx::mutex>& lk,
+ stdx::unique_lock<Latch>& lk,
CvPred predicate) {
lk.unlock();
opCtx->sleepFor(sleepFor);
@@ -956,9 +956,9 @@ TEST(OperationContextTest, TestWaitForConditionOrInterruptNoAssertUntilAPI) {
auto client = serviceCtx->makeClient("OperationContextTest");
auto opCtx = client->makeOperationContext();
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
// Case (2). Expect a Status::OK with a cv_status::timeout.
Date_t deadline = Date_t::now() + Milliseconds(500);
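The test changes above also use the no-argument form, auto m = MONGO_MAKE_LATCH();, for short-lived local latches paired with a condition variable. The following is a stand-alone approximation of that shape, with the same placeholder Latch alias and dummy macro as the earlier sketch and a hypothetical waitBriefly helper; the real no-argument macro derives a latch name from the source location, which the placeholder omits.

#include <chrono>
#include <condition_variable>
#include <mutex>

using Latch = std::mutex;                   // stand-in for the mongo Latch interface
#define MONGO_MAKE_LATCH(...) std::mutex{}  // placeholder; ignores any supplied name

// Mirrors the updated tests: a local unnamed latch, a condition variable, and a
// wait through std::unique_lock<Latch>. Nothing signals cv, so the wait times out.
bool waitBriefly() {
    auto m = MONGO_MAKE_LATCH();            // valid via C++17 guaranteed elision
    std::condition_variable cv;
    std::unique_lock<Latch> lk(m);
    return cv.wait_for(lk, std::chrono::milliseconds(5)) == std::cv_status::timeout;
}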
diff --git a/src/mongo/db/operation_time_tracker.cpp b/src/mongo/db/operation_time_tracker.cpp
index 27832209b69..2d45b49747c 100644
--- a/src/mongo/db/operation_time_tracker.cpp
+++ b/src/mongo/db/operation_time_tracker.cpp
@@ -30,7 +30,7 @@
#include "mongo/platform/basic.h"
#include "mongo/db/operation_time_tracker.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace {
@@ -51,12 +51,12 @@ std::shared_ptr<OperationTimeTracker> OperationTimeTracker::get(OperationContext
}
LogicalTime OperationTimeTracker::getMaxOperationTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _maxOperationTime;
}
void OperationTimeTracker::updateOperationTime(LogicalTime newTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (newTime > _maxOperationTime) {
_maxOperationTime = std::move(newTime);
}
diff --git a/src/mongo/db/operation_time_tracker.h b/src/mongo/db/operation_time_tracker.h
index 45b06ccac6e..a259ee22d37 100644
--- a/src/mongo/db/operation_time_tracker.h
+++ b/src/mongo/db/operation_time_tracker.h
@@ -31,7 +31,7 @@
#include "mongo/db/logical_time.h"
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -57,7 +57,7 @@ public:
private:
// protects _maxOperationTime
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("OperationTimeTracker::_mutex");
LogicalTime _maxOperationTime;
};
diff --git a/src/mongo/db/periodic_runner_job_abort_expired_transactions.h b/src/mongo/db/periodic_runner_job_abort_expired_transactions.h
index 88bf08d7ee5..f372db87226 100644
--- a/src/mongo/db/periodic_runner_job_abort_expired_transactions.h
+++ b/src/mongo/db/periodic_runner_job_abort_expired_transactions.h
@@ -32,7 +32,7 @@
#include <memory>
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
namespace mongo {
@@ -55,7 +55,7 @@ private:
inline static const auto _serviceDecoration =
ServiceContext::declareDecoration<PeriodicThreadToAbortExpiredTransactions>();
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("PeriodicThreadToAbortExpiredTransactions::_mutex");
std::shared_ptr<PeriodicJobAnchor> _anchor;
};
diff --git a/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h b/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h
index 81c46260f34..1705d2d01d1 100644
--- a/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h
+++ b/src/mongo/db/periodic_runner_job_decrease_snapshot_cache_pressure.h
@@ -32,7 +32,7 @@
#include <memory>
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
namespace mongo {
@@ -59,7 +59,8 @@ private:
inline static const auto _serviceDecoration =
ServiceContext::declareDecoration<PeriodicThreadToDecreaseSnapshotHistoryCachePressure>();
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex =
+ MONGO_MAKE_LATCH("PeriodicThreadToDecreaseSnapshotHistoryCachePressure::_mutex");
std::shared_ptr<PeriodicJobAnchor> _anchor;
};
diff --git a/src/mongo/db/pipeline/document_source_exchange.cpp b/src/mongo/db/pipeline/document_source_exchange.cpp
index 76ca6839e96..9e8e37e97cd 100644
--- a/src/mongo/db/pipeline/document_source_exchange.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange.cpp
@@ -48,13 +48,13 @@ MONGO_FAIL_POINT_DEFINE(exchangeFailLoadNextBatch);
class MutexAndResourceLock {
OperationContext* _opCtx;
ResourceYielder* _resourceYielder;
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
public:
// Must be constructed with the mutex held. 'yielder' may be null if there are no resources
// which need to be yielded while waiting.
MutexAndResourceLock(OperationContext* opCtx,
- stdx::unique_lock<stdx::mutex> m,
+ stdx::unique_lock<Latch> m,
ResourceYielder* yielder)
: _opCtx(opCtx), _resourceYielder(yielder), _lock(std::move(m)) {
invariant(_lock.owns_lock());
@@ -78,7 +78,7 @@ public:
* Releases ownership of the lock to the caller. May only be called when the mutex is held
* (after a call to unlock(), for example).
*/
- stdx::unique_lock<stdx::mutex> releaseLockOwnership() {
+ stdx::unique_lock<Latch> releaseLockOwnership() {
invariant(_lock.owns_lock());
return std::move(_lock);
}
@@ -280,7 +280,7 @@ DocumentSource::GetNextResult Exchange::getNext(OperationContext* opCtx,
size_t consumerId,
ResourceYielder* resourceYielder) {
// Grab a lock.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
for (;;) {
// Guard against some of the trickiness we do with moving the lock to/from the
@@ -438,7 +438,7 @@ size_t Exchange::getTargetConsumer(const Document& input) {
}
void Exchange::dispose(OperationContext* opCtx, size_t consumerId) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_disposeRunDown < getConsumers());
diff --git a/src/mongo/db/pipeline/document_source_exchange.h b/src/mongo/db/pipeline/document_source_exchange.h
index e30d66698d1..df423ff28bb 100644
--- a/src/mongo/db/pipeline/document_source_exchange.h
+++ b/src/mongo/db/pipeline/document_source_exchange.h
@@ -36,8 +36,8 @@
#include "mongo/db/pipeline/document_source.h"
#include "mongo/db/pipeline/exchange_spec_gen.h"
#include "mongo/db/pipeline/field_path.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -170,8 +170,8 @@ private:
std::unique_ptr<Pipeline, PipelineDeleter> _pipeline;
// Synchronization.
- stdx::mutex _mutex;
- stdx::condition_variable_any _haveBufferSpace;
+ Mutex _mutex = MONGO_MAKE_LATCH("Exchange::_mutex");
+ stdx::condition_variable _haveBufferSpace;
// A thread that is currently loading the exchange buffers.
size_t _loadingThreadId{kInvalidThreadId};
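Besides swapping the mutex type, the header above narrows stdx::condition_variable_any to stdx::condition_variable, which suffices once every waiter holds a stdx::unique_lock<Latch>. The sketch below illustrates that distinction in standard C++ terms only: Latch again stands in for the real type and the Buffer class is made up for the example. std::condition_variable accepts only unique_lock over the mutex type, while condition_variable_any accepts any lockable at extra cost.

#include <condition_variable>
#include <mutex>

using Latch = std::mutex;  // with this stand-in, unique_lock<Latch> is unique_lock<std::mutex>

struct Buffer {
    Latch mtx;
    std::condition_variable haveSpace;  // sufficient here; condition_variable_any not needed
    int used = 0;

    void release() {
        {
            std::lock_guard<Latch> lk(mtx);
            --used;
        }
        haveSpace.notify_one();
    }

    void acquire(int capacity) {
        std::unique_lock<Latch> lk(mtx);
        haveSpace.wait(lk, [&] { return used < capacity; });
        ++used;
    }
};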
diff --git a/src/mongo/db/pipeline/document_source_exchange_test.cpp b/src/mongo/db/pipeline/document_source_exchange_test.cpp
index c6a5c4945fd..38145832b22 100644
--- a/src/mongo/db/pipeline/document_source_exchange_test.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange_test.cpp
@@ -65,7 +65,7 @@ namespace {
*/
class MutexYielder : public ResourceYielder {
public:
- MutexYielder(stdx::mutex* mutex) : _lock(*mutex, stdx::defer_lock) {}
+ MutexYielder(Mutex* mutex) : _lock(*mutex, stdx::defer_lock) {}
void yield(OperationContext* opCtx) override {
_lock.unlock();
@@ -75,12 +75,12 @@ public:
_lock.lock();
}
- stdx::unique_lock<stdx::mutex>& getLock() {
+ stdx::unique_lock<Latch>& getLock() {
return _lock;
}
private:
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
};
/**
@@ -523,11 +523,10 @@ TEST_F(DocumentSourceExchangeTest, RandomExchangeNConsumerResourceYielding) {
// thread holds this while it calls getNext(). This is to simulate the case where a thread may
// hold some "real" resources which need to be yielded while waiting, such as the Session, or
// the locks held in a transaction.
- stdx::mutex artificalGlobalMutex;
+ auto artificalGlobalMutex = MONGO_MAKE_LATCH();
boost::intrusive_ptr<Exchange> ex =
new Exchange(std::move(spec), unittest::assertGet(Pipeline::create({source}, getExpCtx())));
-
std::vector<ThreadInfo> threads;
for (size_t idx = 0; idx < nConsumers; ++idx) {
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index 5dc3778ecc6..9d03e0b1012 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -553,7 +553,7 @@ Status PlanCache::set(const CanonicalQuery& query,
const auto key = computeKey(query);
const size_t newWorks = why->stats[0]->common.works;
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
bool isNewEntryActive = false;
uint32_t queryHash;
uint32_t planCacheKey;
@@ -608,7 +608,7 @@ void PlanCache::deactivate(const CanonicalQuery& query) {
}
PlanCacheKey key = computeKey(query);
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
PlanCacheEntry* entry = nullptr;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
@@ -625,7 +625,7 @@ PlanCache::GetResult PlanCache::get(const CanonicalQuery& query) const {
}
PlanCache::GetResult PlanCache::get(const PlanCacheKey& key) const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
PlanCacheEntry* entry = nullptr;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
@@ -642,7 +642,7 @@ PlanCache::GetResult PlanCache::get(const PlanCacheKey& key) const {
Status PlanCache::feedback(const CanonicalQuery& cq, double score) {
PlanCacheKey ck = computeKey(cq);
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
PlanCacheEntry* entry;
Status cacheStatus = _cache.get(ck, &entry);
if (!cacheStatus.isOK()) {
@@ -659,12 +659,12 @@ Status PlanCache::feedback(const CanonicalQuery& cq, double score) {
}
Status PlanCache::remove(const CanonicalQuery& canonicalQuery) {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
return _cache.remove(computeKey(canonicalQuery));
}
void PlanCache::clear() {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
_cache.clear();
}
@@ -679,7 +679,7 @@ PlanCacheKey PlanCache::computeKey(const CanonicalQuery& cq) const {
StatusWith<std::unique_ptr<PlanCacheEntry>> PlanCache::getEntry(const CanonicalQuery& query) const {
PlanCacheKey key = computeKey(query);
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
PlanCacheEntry* entry;
Status cacheStatus = _cache.get(key, &entry);
if (!cacheStatus.isOK()) {
@@ -691,7 +691,7 @@ StatusWith<std::unique_ptr<PlanCacheEntry>> PlanCache::getEntry(const CanonicalQ
}
std::vector<std::unique_ptr<PlanCacheEntry>> PlanCache::getAllEntries() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
std::vector<std::unique_ptr<PlanCacheEntry>> entries;
for (auto&& cacheEntry : _cache) {
@@ -703,7 +703,7 @@ std::vector<std::unique_ptr<PlanCacheEntry>> PlanCache::getAllEntries() const {
}
size_t PlanCache::size() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
return _cache.size();
}
@@ -715,7 +715,7 @@ std::vector<BSONObj> PlanCache::getMatchingStats(
const std::function<BSONObj(const PlanCacheEntry&)>& serializationFunc,
const std::function<bool(const BSONObj&)>& filterFunc) const {
std::vector<BSONObj> results;
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ stdx::lock_guard<Latch> cacheLock(_cacheMutex);
for (auto&& cacheEntry : _cache) {
const auto entry = cacheEntry.second;
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 8fc9b6bf3fe..06e648be653 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -39,7 +39,7 @@
#include "mongo/db/query/plan_cache_indexability.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/container_size_helper.h"
namespace mongo {
@@ -605,7 +605,7 @@ private:
LRUKeyValue<PlanCacheKey, PlanCacheEntry, PlanCacheKeyHasher> _cache;
// Protects _cache.
- mutable stdx::mutex _cacheMutex;
+ mutable Mutex _cacheMutex = MONGO_MAKE_LATCH("PlanCache::_cacheMutex");
// Holds computed information about the collection's indexes. Used for generating plan
// cache keys.
diff --git a/src/mongo/db/query/query_planner_wildcard_index_test.cpp b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
index 7a5fa236f75..794a959fa1f 100644
--- a/src/mongo/db/query/query_planner_wildcard_index_test.cpp
+++ b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
@@ -557,7 +557,6 @@ TEST_F(QueryPlannerWildcardTest, OrEqualityWithTwoPredicatesUsesTwoPaths) {
"bounds: {'$_path': [['a','a',true,true]], a: [[5,5,true,true]]}}}, "
"{ixscan: {filter: null, pattern: {'$_path': 1, b: 1},"
"bounds: {'$_path': [['b','b',true,true]], b: [[10,10,true,true]]}}}]}}}}");
- ;
}
TEST_F(QueryPlannerWildcardTest, OrWithOneRegularAndOneWildcardIndexPathUsesTwoIndexes) {
@@ -572,7 +571,6 @@ TEST_F(QueryPlannerWildcardTest, OrWithOneRegularAndOneWildcardIndexPathUsesTwoI
"bounds: {'$_path': [['a','a',true,true]], a: [[5,5,true,true]]}}}, "
"{ixscan: {filter: null, pattern: {b: 1},"
"bounds: {b: [[10,10,true,true]]}}}]}}}}");
- ;
}
TEST_F(QueryPlannerWildcardTest, BasicSkip) {
diff --git a/src/mongo/db/query/query_settings.cpp b/src/mongo/db/query/query_settings.cpp
index da477a862e1..5060d6d9ac8 100644
--- a/src/mongo/db/query/query_settings.cpp
+++ b/src/mongo/db/query/query_settings.cpp
@@ -78,7 +78,7 @@ AllowedIndexEntry::AllowedIndexEntry(const BSONObj& query,
boost::optional<AllowedIndicesFilter> QuerySettings::getAllowedIndicesFilter(
const CanonicalQuery::QueryShapeString& key) const {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
AllowedIndexEntryMap::const_iterator cacheIter = _allowedIndexEntryMap.find(key);
// Nothing to do if key does not exist in query settings.
@@ -90,7 +90,7 @@ boost::optional<AllowedIndicesFilter> QuerySettings::getAllowedIndicesFilter(
}
std::vector<AllowedIndexEntry> QuerySettings::getAllAllowedIndices() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
std::vector<AllowedIndexEntry> entries;
for (const auto& entryPair : _allowedIndexEntryMap) {
entries.push_back(entryPair.second);
@@ -109,7 +109,7 @@ void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery,
const BSONObj collation =
canonicalQuery.getCollator() ? canonicalQuery.getCollator()->getSpec().toBSON() : BSONObj();
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
_allowedIndexEntryMap.erase(key);
_allowedIndexEntryMap.emplace(
std::piecewise_construct,
@@ -118,7 +118,7 @@ void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery,
}
void QuerySettings::removeAllowedIndices(const CanonicalQuery::QueryShapeString& key) {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
// Nothing to do if key does not exist in query settings.
@@ -130,7 +130,7 @@ void QuerySettings::removeAllowedIndices(const CanonicalQuery::QueryShapeString&
}
void QuerySettings::clearAllowedIndices() {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ stdx::lock_guard<Latch> cacheLock(_mutex);
_allowedIndexEntryMap.clear();
}
diff --git a/src/mongo/db/query/query_settings.h b/src/mongo/db/query/query_settings.h
index 4fac8e39161..f317a2780c2 100644
--- a/src/mongo/db/query/query_settings.h
+++ b/src/mongo/db/query/query_settings.h
@@ -37,7 +37,7 @@
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/index_entry.h"
#include "mongo/db/query/plan_cache.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
@@ -152,7 +152,7 @@ private:
/**
* Protects data in query settings.
*/
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("QuerySettings::_mutex");
};
} // namespace mongo
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index 8207e58fed4..5cd75844f95 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -73,7 +73,7 @@ public:
*/
std::tuple<bool, std::shared_ptr<Notification<Status>>> getOrCreateWriteRequest(
LogicalTime clusterTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto lastEl = _writeRequests.rbegin();
if (lastEl != _writeRequests.rend() && lastEl->first >= clusterTime.asTimestamp()) {
return std::make_tuple(false, lastEl->second);
@@ -88,7 +88,7 @@ public:
* Erases writeRequest that happened at clusterTime
*/
void deleteWriteRequest(LogicalTime clusterTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto el = _writeRequests.find(clusterTime.asTimestamp());
invariant(el != _writeRequests.end());
invariant(el->second);
@@ -97,7 +97,7 @@ public:
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WriteRequestSynchronizer::_mutex");
std::map<Timestamp, std::shared_ptr<Notification<Status>>> _writeRequests;
};
diff --git a/src/mongo/db/repl/abstract_async_component.cpp b/src/mongo/db/repl/abstract_async_component.cpp
index 1b99507fc5c..77b086af97e 100644
--- a/src/mongo/db/repl/abstract_async_component.cpp
+++ b/src/mongo/db/repl/abstract_async_component.cpp
@@ -52,7 +52,7 @@ std::string AbstractAsyncComponent::_getComponentName() const {
}
bool AbstractAsyncComponent::isActive() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
return _isActive_inlock();
}
@@ -61,7 +61,7 @@ bool AbstractAsyncComponent::_isActive_inlock() noexcept {
}
bool AbstractAsyncComponent::_isShuttingDown() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
return _isShuttingDown_inlock();
}
@@ -70,7 +70,7 @@ bool AbstractAsyncComponent::_isShuttingDown_inlock() noexcept {
}
Status AbstractAsyncComponent::startup() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
switch (_state) {
case State::kPreStart:
_state = State::kRunning;
@@ -97,7 +97,7 @@ Status AbstractAsyncComponent::startup() noexcept {
}
void AbstractAsyncComponent::shutdown() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -116,17 +116,17 @@ void AbstractAsyncComponent::shutdown() noexcept {
}
void AbstractAsyncComponent::join() noexcept {
- stdx::unique_lock<stdx::mutex> lk(*_getMutex());
+ stdx::unique_lock<Latch> lk(*_getMutex());
_stateCondition.wait(lk, [this]() { return !_isActive_inlock(); });
}
AbstractAsyncComponent::State AbstractAsyncComponent::getState_forTest() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
return _state;
}
void AbstractAsyncComponent::_transitionToComplete() noexcept {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
_transitionToComplete_inlock();
}
@@ -138,13 +138,13 @@ void AbstractAsyncComponent::_transitionToComplete_inlock() noexcept {
Status AbstractAsyncComponent::_checkForShutdownAndConvertStatus(
const executor::TaskExecutor::CallbackArgs& callbackArgs, const std::string& message) {
- stdx::unique_lock<stdx::mutex> lk(*_getMutex());
+ stdx::unique_lock<Latch> lk(*_getMutex());
return _checkForShutdownAndConvertStatus_inlock(callbackArgs, message);
}
Status AbstractAsyncComponent::_checkForShutdownAndConvertStatus(const Status& status,
const std::string& message) {
- stdx::unique_lock<stdx::mutex> lk(*_getMutex());
+ stdx::unique_lock<Latch> lk(*_getMutex());
return _checkForShutdownAndConvertStatus_inlock(status, message);
}
diff --git a/src/mongo/db/repl/abstract_async_component.h b/src/mongo/db/repl/abstract_async_component.h
index 64d88ad41e8..c5ce2da5afa 100644
--- a/src/mongo/db/repl/abstract_async_component.h
+++ b/src/mongo/db/repl/abstract_async_component.h
@@ -37,8 +37,8 @@
#include "mongo/base/static_assert.h"
#include "mongo/base/status.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -207,7 +207,7 @@ private:
/**
* Returns mutex to guard this component's state variable.
*/
- virtual stdx::mutex* _getMutex() noexcept = 0;
+ virtual Mutex* _getMutex() noexcept = 0;
private:
// All member variables are labeled with one of the following codes indicating the
@@ -259,7 +259,7 @@ Status AbstractAsyncComponent::_startupComponent_inlock(std::unique_ptr<T>& comp
template <typename T>
Status AbstractAsyncComponent::_startupComponent(std::unique_ptr<T>& component) {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
return _startupComponent_inlock(component);
}
@@ -275,7 +275,7 @@ void AbstractAsyncComponent::_shutdownComponent_inlock(const std::unique_ptr<T>&
template <typename T>
void AbstractAsyncComponent::_shutdownComponent(const std::unique_ptr<T>& component) {
- stdx::lock_guard<stdx::mutex> lock(*_getMutex());
+ stdx::lock_guard<Latch> lock(*_getMutex());
_shutdownComponent_inlock(component);
}
diff --git a/src/mongo/db/repl/abstract_async_component_test.cpp b/src/mongo/db/repl/abstract_async_component_test.cpp
index c6f3703b960..cdf892e4885 100644
--- a/src/mongo/db/repl/abstract_async_component_test.cpp
+++ b/src/mongo/db/repl/abstract_async_component_test.cpp
@@ -35,7 +35,7 @@
#include "mongo/db/repl/abstract_async_component.h"
#include "mongo/db/repl/task_executor_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
@@ -95,10 +95,10 @@ public:
private:
Status _doStartup_inlock() noexcept override;
void _doShutdown_inlock() noexcept override;
- stdx::mutex* _getMutex() noexcept override;
+ Mutex* _getMutex() noexcept override;
// Used by AbstractAsyncComponent to guard start changes.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MockAsyncComponent::_mutex");
public:
// Returned by _doStartup_inlock(). Override for testing.
@@ -125,7 +125,7 @@ Status MockAsyncComponent::scheduleWorkAndSaveHandle_forTest(
executor::TaskExecutor::CallbackFn work,
executor::TaskExecutor::CallbackHandle* handle,
const std::string& name) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _scheduleWorkAndSaveHandle_inlock(std::move(work), handle, name);
}
@@ -134,12 +134,12 @@ Status MockAsyncComponent::scheduleWorkAtAndSaveHandle_forTest(
executor::TaskExecutor::CallbackFn work,
executor::TaskExecutor::CallbackHandle* handle,
const std::string& name) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _scheduleWorkAtAndSaveHandle_inlock(when, std::move(work), handle, name);
}
void MockAsyncComponent::cancelHandle_forTest(executor::TaskExecutor::CallbackHandle handle) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_cancelHandle_inlock(handle);
}
@@ -160,7 +160,7 @@ Status MockAsyncComponent::_doStartup_inlock() noexcept {
void MockAsyncComponent::_doShutdown_inlock() noexcept {}
-stdx::mutex* MockAsyncComponent::_getMutex() noexcept {
+Mutex* MockAsyncComponent::_getMutex() noexcept {
return &_mutex;
}
diff --git a/src/mongo/db/repl/abstract_oplog_fetcher.cpp b/src/mongo/db/repl/abstract_oplog_fetcher.cpp
index 4bb2762219a..0cdf534cfe7 100644
--- a/src/mongo/db/repl/abstract_oplog_fetcher.cpp
+++ b/src/mongo/db/repl/abstract_oplog_fetcher.cpp
@@ -40,7 +40,7 @@
#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/repl_server_parameters_gen.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -93,7 +93,7 @@ Milliseconds AbstractOplogFetcher::_getGetMoreMaxTime() const {
}
std::string AbstractOplogFetcher::toString() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
str::stream msg;
msg << _getComponentName() << " -"
<< " last optime fetched: " << _lastFetched.toString();
@@ -118,7 +118,7 @@ void AbstractOplogFetcher::_makeAndScheduleFetcherCallback(
Status scheduleStatus = Status::OK();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_fetcher = _makeFetcher(findCommandObj, metadataObj, _getInitialFindMaxTime());
scheduleStatus = _scheduleFetcher_inlock();
}
@@ -144,7 +144,7 @@ void AbstractOplogFetcher::_doShutdown_inlock() noexcept {
}
}
-stdx::mutex* AbstractOplogFetcher::_getMutex() noexcept {
+Mutex* AbstractOplogFetcher::_getMutex() noexcept {
return &_mutex;
}
@@ -158,12 +158,12 @@ OpTime AbstractOplogFetcher::getLastOpTimeFetched_forTest() const {
}
OpTime AbstractOplogFetcher::_getLastOpTimeFetched() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _lastFetched;
}
BSONObj AbstractOplogFetcher::getCommandObject_forTest() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _fetcher->getCommandObject();
}
@@ -198,7 +198,7 @@ void AbstractOplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
_makeFindCommandObject(_nss, _getLastOpTimeFetched(), _getRetriedFindMaxTime());
BSONObj metadataObj = _makeMetadataObject();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_fetcherRestarts == _maxFetcherRestarts) {
log() << "Error returned from oplog query (no more query restarts left): "
<< redact(responseStatus);
@@ -230,7 +230,7 @@ void AbstractOplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
// Reset fetcher restart counter on successful response.
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_isActive_inlock());
_fetcherRestarts = 0;
}
@@ -275,7 +275,7 @@ void AbstractOplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
LOG(3) << _getComponentName()
<< " setting last fetched optime ahead after batch: " << lastDoc;
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_lastFetched = lastDoc;
}
@@ -296,7 +296,7 @@ void AbstractOplogFetcher::_finishCallback(Status status) {
_onShutdownCallbackFn(status);
decltype(_onShutdownCallbackFn) onShutdownCallbackFn;
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_transitionToComplete_inlock();
// Release any resources that might be held by the '_onShutdownCallbackFn' function object.
diff --git a/src/mongo/db/repl/abstract_oplog_fetcher.h b/src/mongo/db/repl/abstract_oplog_fetcher.h
index 81497bf1258..19f9873a6e3 100644
--- a/src/mongo/db/repl/abstract_oplog_fetcher.h
+++ b/src/mongo/db/repl/abstract_oplog_fetcher.h
@@ -36,7 +36,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/abstract_async_component.h"
#include "mongo/db/repl/optime_with.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -148,7 +148,7 @@ protected:
virtual void _doShutdown_inlock() noexcept override;
private:
- stdx::mutex* _getMutex() noexcept override;
+ Mutex* _getMutex() noexcept override;
/**
* This function must be overriden by subclass oplog fetchers to specify what `find` command
@@ -214,7 +214,7 @@ private:
const std::size_t _maxFetcherRestarts;
// Protects member data of this AbstractOplogFetcher.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("AbstractOplogFetcher::_mutex");
// Function to call when the oplog fetcher shuts down.
OnShutdownCallbackFn _onShutdownCallbackFn;
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.cpp b/src/mongo/db/repl/base_cloner_test_fixture.cpp
index 24841605f83..b3e5870aa2c 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.cpp
+++ b/src/mongo/db/repl/base_cloner_test_fixture.cpp
@@ -148,13 +148,13 @@ void BaseClonerTest::clear() {
}
void BaseClonerTest::setStatus(const Status& status) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_status = status;
_setStatusCondition.notify_all();
}
const Status& BaseClonerTest::getStatus() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _status;
}
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.h b/src/mongo/db/repl/base_cloner_test_fixture.h
index c4d56c00397..d0e5a9b9289 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.h
+++ b/src/mongo/db/repl/base_cloner_test_fixture.h
@@ -41,8 +41,8 @@
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
@@ -135,7 +135,7 @@ protected:
private:
// Protects member data of this base cloner fixture.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("BaseCloner::_mutex");
stdx::condition_variable _setStatusCondition;
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 870f9d54e45..3a0cb62de62 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -134,7 +134,7 @@ void BackgroundSync::startup(OperationContext* opCtx) {
}
void BackgroundSync::shutdown(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_state = ProducerState::Stopped;
@@ -158,7 +158,7 @@ void BackgroundSync::join(OperationContext* opCtx) {
}
bool BackgroundSync::inShutdown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _inShutdown_inlock();
}
@@ -241,7 +241,7 @@ void BackgroundSync::_produce() {
HostAndPort source;
SyncSourceResolverResponse syncSourceResp;
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_lastOpTimeFetched.isNull()) {
// then we're initial syncing and we're still waiting for this to be set
lock.unlock();
@@ -264,7 +264,7 @@ void BackgroundSync::_produce() {
auto opCtx = cc().makeOperationContext();
minValidSaved = _replicationProcess->getConsistencyMarkers()->getMinValid(opCtx.get());
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return;
}
@@ -294,7 +294,7 @@ void BackgroundSync::_produce() {
fassert(40349, status);
_syncSourceResolver->join();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_syncSourceResolver.reset();
}
@@ -340,7 +340,7 @@ void BackgroundSync::_produce() {
return;
} else if (syncSourceResp.isOK() && !syncSourceResp.getSyncSource().empty()) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_syncSourceHost = syncSourceResp.getSyncSource();
source = _syncSourceHost;
}
@@ -378,7 +378,7 @@ void BackgroundSync::_produce() {
}
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return;
}
@@ -426,7 +426,7 @@ void BackgroundSync::_produce() {
},
onOplogFetcherShutdownCallbackFn,
bgSyncOplogFetcherBatchSize);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return;
}
@@ -502,7 +502,7 @@ Status BackgroundSync::_enqueueDocuments(Fetcher::Documents::const_iterator begi
// are done to prevent going into shutdown. This avoids a race where shutdown() clears the
// buffer between the time we check _inShutdown and the point where we finish writing to the
// buffer.
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return Status::OK();
}
@@ -554,7 +554,7 @@ void BackgroundSync::_runRollback(OperationContext* opCtx,
OpTime lastOpTimeFetched;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
lastOpTimeFetched = _lastOpTimeFetched;
}
@@ -631,7 +631,7 @@ void BackgroundSync::_runRollbackViaRecoverToCheckpoint(
rollbackRemoteOplogQueryBatchSize.load());
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != ProducerState::Running) {
return;
}
@@ -668,18 +668,18 @@ void BackgroundSync::_fallBackOnRollbackViaRefetch(
}
HostAndPort BackgroundSync::getSyncTarget() const {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return _syncSourceHost;
}
void BackgroundSync::clearSyncTarget() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
log() << "Resetting sync source to empty, which was " << _syncSourceHost;
_syncSourceHost = HostAndPort();
}
void BackgroundSync::stop(bool resetLastFetchedOptime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_state = ProducerState::Stopped;
log() << "Stopping replication producer";
@@ -709,7 +709,7 @@ void BackgroundSync::start(OperationContext* opCtx) {
do {
lastAppliedOpTime = _readLastAppliedOpTime(opCtx);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Double check the state after acquiring the mutex.
if (_state != ProducerState::Starting) {
return;
@@ -779,12 +779,12 @@ bool BackgroundSync::shouldStopFetching() const {
}
BackgroundSync::ProducerState BackgroundSync::getState() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _state;
}
void BackgroundSync::startProducerIfStopped() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Let producer run if it's already running.
if (_state == ProducerState::Stopped) {
_state = ProducerState::Starting;
diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h
index e44427a656d..0bacdc71d29 100644
--- a/src/mongo/db/repl/bgsync.h
+++ b/src/mongo/db/repl/bgsync.h
@@ -43,8 +43,8 @@
#include "mongo/db/repl/rollback_impl.h"
#include "mongo/db/repl/sync_source_resolver.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/net/hostandport.h"
@@ -236,7 +236,7 @@ private:
// Protects member data of BackgroundSync.
// Never hold the BackgroundSync mutex when trying to acquire the ReplicationCoordinator mutex.
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("BackgroundSync::_mutex"); // (S)
OpTime _lastOpTimeFetched; // (M)
diff --git a/src/mongo/db/repl/callback_completion_guard.h b/src/mongo/db/repl/callback_completion_guard.h
index 4effb49c1f2..4ed13f55dff 100644
--- a/src/mongo/db/repl/callback_completion_guard.h
+++ b/src/mongo/db/repl/callback_completion_guard.h
@@ -33,7 +33,7 @@
#include <boost/optional.hpp>
#include <functional>
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
namespace mongo {
@@ -83,9 +83,9 @@ public:
* Requires either a unique_lock or lock_guard to be passed in to ensure that we call
* _cancelRemainingWork_inlock()) while we have a lock on the callers's mutex.
*/
- void setResultAndCancelRemainingWork_inlock(const stdx::lock_guard<stdx::mutex>& lock,
+ void setResultAndCancelRemainingWork_inlock(const stdx::lock_guard<Latch>& lock,
const Result& result);
- void setResultAndCancelRemainingWork_inlock(const stdx::unique_lock<stdx::mutex>& lock,
+ void setResultAndCancelRemainingWork_inlock(const stdx::unique_lock<Latch>& lock,
const Result& result);
private:
@@ -124,13 +124,13 @@ CallbackCompletionGuard<Result>::~CallbackCompletionGuard() {
template <typename Result>
void CallbackCompletionGuard<Result>::setResultAndCancelRemainingWork_inlock(
- const stdx::lock_guard<stdx::mutex>& lock, const Result& result) {
+ const stdx::lock_guard<Latch>& lock, const Result& result) {
_setResultAndCancelRemainingWork_inlock(result);
}
template <typename Result>
void CallbackCompletionGuard<Result>::setResultAndCancelRemainingWork_inlock(
- const stdx::unique_lock<stdx::mutex>& lock, const Result& result) {
+ const stdx::unique_lock<Latch>& lock, const Result& result) {
invariant(lock.owns_lock());
_setResultAndCancelRemainingWork_inlock(result);
}
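The two overloads above encode the _inlock convention: accepting a lock guard by const reference forces the caller to have constructed a guard, and therefore to hold a mutex, before the call, while the unique_lock overload additionally asserts ownership because a unique_lock can exist in an unlocked state. A short sketch of the same idea under the same mongo-tree assumptions; ExampleGuarded and touch are illustrative names:

    #include "mongo/platform/mutex.h"
    #include "mongo/util/assert_util.h"

    namespace mongo {

    class ExampleGuarded {
    public:
        // A lock_guard cannot exist without a locked mutex, so taking one by
        // reference is the lightweight proof that the caller holds the lock.
        void touch_inlock(const stdx::lock_guard<Latch>&) {
            ++_counter;
        }

        // A unique_lock may have been unlocked, so ownership is checked explicitly,
        // mirroring the invariant in the hunk above.
        void touch_inlock(const stdx::unique_lock<Latch>& lk) {
            invariant(lk.owns_lock());
            ++_counter;
        }

        void touch() {
            stdx::lock_guard<Latch> lk(_mutex);
            touch_inlock(lk);
        }

    private:
        Mutex _mutex = MONGO_MAKE_LATCH("ExampleGuarded::_mutex");
        int _counter = 0;  // guarded by _mutex
    };

    }  // namespace mongo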
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index 5bb4fefbc08..d592d3fbfba 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -88,7 +88,7 @@ private:
std::unique_ptr<stdx::thread> _quorumCheckThread;
Status _quorumCheckStatus;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CheckQuorumTest::_mutex");
bool _isQuorumCheckDone;
};
@@ -109,13 +109,13 @@ Status CheckQuorumTest::waitForQuorumCheck() {
}
bool CheckQuorumTest::isQuorumCheckDone() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isQuorumCheckDone;
}
void CheckQuorumTest::_runQuorumCheck(const ReplSetConfig& config, int myIndex) {
_quorumCheckStatus = _runQuorumCheckImpl(config, myIndex);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_isQuorumCheckDone = true;
}
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index c5270c5370d..62f601affd9 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -57,8 +57,8 @@ namespace mongo {
namespace repl {
namespace {
-using LockGuard = stdx::lock_guard<stdx::mutex>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
+using UniqueLock = stdx::unique_lock<Latch>;
using executor::RemoteCommandRequest;
constexpr auto kCountResponseDocumentCountFieldName = "n"_sd;
@@ -199,7 +199,7 @@ bool CollectionCloner::_isActive_inlock() const {
}
bool CollectionCloner::_isShuttingDown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return State::kShuttingDown == _state;
}
@@ -230,7 +230,7 @@ Status CollectionCloner::startup() noexcept {
}
void CollectionCloner::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -263,12 +263,12 @@ void CollectionCloner::_cancelRemainingWork_inlock() {
}
CollectionCloner::Stats CollectionCloner::getStats() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _stats;
}
void CollectionCloner::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() {
return (_queryState == QueryState::kNotStarted || _queryState == QueryState::kFinished) &&
!_isActive_inlock();
@@ -288,7 +288,7 @@ void CollectionCloner::setScheduleDbWorkFn_forTest(ScheduleDbWorkFn scheduleDbWo
}
void CollectionCloner::setCreateClientFn_forTest(const CreateClientFn& createClientFn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_createClientFn = createClientFn;
}
@@ -474,7 +474,7 @@ void CollectionCloner::_beginCollectionCallback(const executor::TaskExecutor::Ca
auto cancelRemainingWorkInLock = [this]() { _cancelRemainingWork_inlock(); };
auto finishCallbackFn = [this](const Status& status) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_queryState = QueryState::kFinished;
_clientConnection.reset();
}
@@ -494,13 +494,13 @@ void CollectionCloner::_beginCollectionCallback(const executor::TaskExecutor::Ca
void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& callbackData,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
if (!callbackData.status.isOK()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, callbackData.status);
return;
}
bool queryStateOK = false;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
queryStateOK = _queryState == QueryState::kNotStarted;
if (queryStateOK) {
_queryState = QueryState::kRunning;
@@ -525,12 +525,12 @@ void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& cal
Status clientConnectionStatus = _clientConnection->connect(_source, StringData());
if (!clientConnectionStatus.isOK()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, clientConnectionStatus);
return;
}
if (!replAuthenticate(_clientConnection.get())) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(
lock,
{ErrorCodes::AuthenticationFailed,
@@ -552,7 +552,7 @@ void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& cal
} catch (const DBException& e) {
auto queryStatus = e.toStatus().withContext(str::stream() << "Error querying collection '"
<< _sourceNss.ns());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (queryStatus.code() == ErrorCodes::OperationFailed ||
queryStatus.code() == ErrorCodes::CursorNotFound ||
queryStatus.code() == ErrorCodes::QueryPlanKilled) {
@@ -572,7 +572,7 @@ void CollectionCloner::_runQuery(const executor::TaskExecutor::CallbackArgs& cal
}
}
waitForDbWorker();
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, Status::OK());
}
@@ -580,7 +580,7 @@ void CollectionCloner::_handleNextBatch(std::shared_ptr<OnCompletionGuard> onCom
DBClientCursorBatchIterator& iter) {
_stats.receivedBatches++;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
uassert(ErrorCodes::CallbackCanceled,
"Collection cloning cancelled.",
_queryState != QueryState::kCanceling);
@@ -621,7 +621,7 @@ void CollectionCloner::_handleNextBatch(std::shared_ptr<OnCompletionGuard> onCom
}
void CollectionCloner::_verifyCollectionWasDropped(
- const stdx::unique_lock<stdx::mutex>& lk,
+ const stdx::unique_lock<Latch>& lk,
Status batchStatus,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
// If we already have a _verifyCollectionDroppedScheduler, just return; the existing
@@ -684,7 +684,7 @@ void CollectionCloner::_insertDocumentsCallback(
const executor::TaskExecutor::CallbackArgs& cbd,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
if (!cbd.status.isOK()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, cbd.status);
return;
}
diff --git a/src/mongo/db/repl/collection_cloner.h b/src/mongo/db/repl/collection_cloner.h
index 817925ed765..ba8139dd98f 100644
--- a/src/mongo/db/repl/collection_cloner.h
+++ b/src/mongo/db/repl/collection_cloner.h
@@ -48,8 +48,8 @@
#include "mongo/db/repl/storage_interface.h"
#include "mongo/db/repl/task_runner.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/progress_meter.h"
@@ -239,7 +239,7 @@ private:
* Verifies that an error from the query was the result of a collection drop. If
* so, cloning is stopped with no error. Otherwise it is stopped with the given error.
*/
- void _verifyCollectionWasDropped(const stdx::unique_lock<stdx::mutex>& lk,
+ void _verifyCollectionWasDropped(const stdx::unique_lock<Latch>& lk,
Status batchStatus,
std::shared_ptr<OnCompletionGuard> onCompletionGuard);
@@ -259,7 +259,7 @@ private:
// (S) Self-synchronizing; access in any way from any context.
// (RT) Read-only in concurrent operation; synchronized externally by tests
//
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("CollectionCloner::_mutex");
mutable stdx::condition_variable _condition; // (M)
executor::TaskExecutor* _executor; // (R) Not owned by us.
ThreadPool* _dbWorkThreadPool; // (R) Not owned by us.
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index aabb5619894..949d419c746 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -65,7 +65,7 @@ public:
: MockDBClientConnection(remote), _net(net) {}
virtual ~FailableMockDBClientConnection() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_paused = false;
_cond.notify_all();
_cond.wait(lk, [this] { return !_resuming; });
@@ -86,13 +86,13 @@ public:
int batchSize) override {
ON_BLOCK_EXIT([this]() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_queryCount++;
}
_cond.notify_all();
});
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_waiting = _paused;
_cond.notify_all();
while (_paused) {
@@ -118,14 +118,14 @@ public:
void pause() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_paused = true;
}
_cond.notify_all();
}
void resume() {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_resuming = true;
_resume(&lk);
_resuming = false;
@@ -135,13 +135,13 @@ public:
// Waits for the next query after pause() is called to start.
void waitForPausedQuery() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cond.wait(lk, [this] { return _waiting; });
}
// Resumes, then waits for the next query to run after resume() is called to complete.
void resumeAndWaitForResumedQuery() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_resuming = true;
_resume(&lk);
_cond.notify_all(); // This is to wake up the paused thread.
@@ -152,7 +152,7 @@ public:
private:
executor::NetworkInterfaceMock* _net;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("FailableMockDBClientConnection::_mutex");
stdx::condition_variable _cond;
bool _paused = false;
bool _waiting = false;
@@ -162,7 +162,7 @@ private:
Status _failureForConnect = Status::OK();
Status _failureForQuery = Status::OK();
- void _resume(stdx::unique_lock<stdx::mutex>* lk) {
+ void _resume(stdx::unique_lock<Latch>* lk) {
invariant(lk->owns_lock());
_paused = false;
_resumedQueryCount = _queryCount;
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index 63a00583854..55eab0a0aa4 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -57,8 +57,8 @@ MONGO_FAIL_POINT_DEFINE(initialSyncHangBeforeListCollections);
namespace {
-using LockGuard = stdx::lock_guard<stdx::mutex>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
+using UniqueLock = stdx::unique_lock<Latch>;
using executor::RemoteCommandRequest;
const char* kNameFieldName = "name";
@@ -208,7 +208,7 @@ Status DatabaseCloner::startup() noexcept {
}
void DatabaseCloner::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -256,7 +256,7 @@ void DatabaseCloner::setStartCollectionClonerFn(
}
DatabaseCloner::State DatabaseCloner::getState_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
diff --git a/src/mongo/db/repl/database_cloner.h b/src/mongo/db/repl/database_cloner.h
index e2790956089..666f23610fb 100644
--- a/src/mongo/db/repl/database_cloner.h
+++ b/src/mongo/db/repl/database_cloner.h
@@ -41,8 +41,8 @@
#include "mongo/db/repl/base_cloner.h"
#include "mongo/db/repl/collection_cloner.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
@@ -201,7 +201,7 @@ private:
/**
* Calls the above method after unlocking.
*/
- void _finishCallback_inlock(stdx::unique_lock<stdx::mutex>& lk, const Status& status);
+ void _finishCallback_inlock(stdx::unique_lock<Latch>& lk, const Status& status);
//
// All member variables are labeled with one of the following codes indicating the
@@ -212,7 +212,7 @@ private:
// (S) Self-synchronizing; access in any way from any context.
// (RT) Read-only in concurrent operation; synchronized externally by tests
//
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("DatabaseCloner::_mutex");
mutable stdx::condition_variable _condition; // (M)
executor::TaskExecutor* _executor; // (R)
ThreadPool* _dbWorkThreadPool; // (R)
diff --git a/src/mongo/db/repl/databases_cloner.cpp b/src/mongo/db/repl/databases_cloner.cpp
index 554acab9651..f0d746cec52 100644
--- a/src/mongo/db/repl/databases_cloner.cpp
+++ b/src/mongo/db/repl/databases_cloner.cpp
@@ -56,8 +56,8 @@ namespace {
using Request = executor::RemoteCommandRequest;
using Response = executor::RemoteCommandResponse;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
+using UniqueLock = stdx::unique_lock<Latch>;
} // namespace
diff --git a/src/mongo/db/repl/databases_cloner.h b/src/mongo/db/repl/databases_cloner.h
index 890c6c2a7e1..e5ea692d8bc 100644
--- a/src/mongo/db/repl/databases_cloner.h
+++ b/src/mongo/db/repl/databases_cloner.h
@@ -42,8 +42,8 @@
#include "mongo/db/repl/collection_cloner.h"
#include "mongo/db/repl/database_cloner.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/net/hostandport.h"
@@ -138,10 +138,10 @@ private:
void _setStatus_inlock(Status s);
/** Will fail the cloner, call the completion function, and become inactive. */
- void _fail_inlock(stdx::unique_lock<stdx::mutex>* lk, Status s);
+ void _fail_inlock(stdx::unique_lock<Latch>* lk, Status s);
/** Will call the completion function, and become inactive. */
- void _succeed_inlock(stdx::unique_lock<stdx::mutex>* lk);
+ void _succeed_inlock(stdx::unique_lock<Latch>* lk);
/** Called each time a database clone is finished */
void _onEachDBCloneFinish(const Status& status, const std::string& name);
@@ -175,7 +175,7 @@ private:
// (M) Reads and writes guarded by _mutex
// (S) Self-synchronizing; access in any way from any context.
//
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("DatabasesCloner::_mutex"); // (S)
Status _status{ErrorCodes::NotYetInitialized, ""}; // (M) If it is not OK, we stop everything.
executor::TaskExecutor* _exec; // (R) executor to schedule things with
ThreadPool* _dbWorkThreadPool; // (R) db worker thread pool for collection cloning.
diff --git a/src/mongo/db/repl/databases_cloner_test.cpp b/src/mongo/db/repl/databases_cloner_test.cpp
index ba75f96c6e4..fa386d4b2c7 100644
--- a/src/mongo/db/repl/databases_cloner_test.cpp
+++ b/src/mongo/db/repl/databases_cloner_test.cpp
@@ -43,7 +43,7 @@
#include "mongo/dbtests/mock/mock_dbclient_connection.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/task_executor_proxy.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/concurrency/thread_name.h"
@@ -57,9 +57,9 @@ using namespace mongo::repl;
using executor::NetworkInterfaceMock;
using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
-using mutex = stdx::mutex;
+using LockGuard = stdx::lock_guard<Latch>;
+using UniqueLock = stdx::unique_lock<Latch>;
+using mutex = Mutex;
using NetworkGuard = executor::NetworkInterfaceMock::InNetworkGuard;
using namespace unittest;
using Responses = std::vector<std::pair<std::string, BSONObj>>;
@@ -288,7 +288,7 @@ protected:
void runCompleteClone(Responses responses) {
Status result{Status::OK()};
bool done = false;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cvDone;
DatabasesCloner cloner{&getStorage(),
&getExecutor(),
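The test above uses the unnamed form of the macro, auto mutex = MONGO_MAKE_LATCH();, for a purely local latch paired with a condition variable that waits for the cloner to report completion. A sketch of that wait-for-completion idiom; runAndWaitExample and startAsyncWork are illustrative names, not part of the patch, and startAsyncWork is assumed to invoke its callback on some other thread:

    #include <functional>

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"

    namespace mongo {

    void runAndWaitExample(std::function<void(std::function<void()>)> startAsyncWork) {
        auto mutex = MONGO_MAKE_LATCH();  // local, unnamed latch
        stdx::condition_variable cvDone;
        bool done = false;

        // The completion callback flips the flag under the latch and wakes the waiter.
        startAsyncWork([&] {
            stdx::lock_guard<Latch> lk(mutex);
            done = true;
            cvDone.notify_all();
        });

        // Block until the asynchronous work signals completion.
        stdx::unique_lock<Latch> lk(mutex);
        cvDone.wait(lk, [&] { return done; });
    }

    }  // namespace mongo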
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper.cpp b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
index cb4c85c5cd4..4c380e4c8c5 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
@@ -80,7 +80,7 @@ void DropPendingCollectionReaper::addDropPendingNamespace(
const OpTime& dropOpTime,
const NamespaceString& dropPendingNamespace) {
invariant(dropPendingNamespace.isDropPendingNamespace());
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
const auto equalRange = _dropPendingNamespaces.equal_range(dropOpTime);
const auto& lowerBound = equalRange.first;
const auto& upperBound = equalRange.second;
@@ -97,7 +97,7 @@ void DropPendingCollectionReaper::addDropPendingNamespace(
_dropPendingNamespaces.insert(std::make_pair(dropOpTime, dropPendingNamespace));
if (opCtx->lockState()->inAWriteUnitOfWork()) {
opCtx->recoveryUnit()->onRollback([this, dropPendingNamespace, dropOpTime]() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
const auto equalRange = _dropPendingNamespaces.equal_range(dropOpTime);
const auto& lowerBound = equalRange.first;
@@ -114,7 +114,7 @@ void DropPendingCollectionReaper::addDropPendingNamespace(
}
boost::optional<OpTime> DropPendingCollectionReaper::getEarliestDropOpTime() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto it = _dropPendingNamespaces.cbegin();
if (it == _dropPendingNamespaces.cend()) {
return boost::none;
@@ -129,7 +129,7 @@ bool DropPendingCollectionReaper::rollBackDropPendingCollection(
const auto pendingNss = collectionNamespace.makeDropPendingNamespace(opTime);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
const auto equalRange = _dropPendingNamespaces.equal_range(opTime);
const auto& lowerBound = equalRange.first;
const auto& upperBound = equalRange.second;
@@ -154,7 +154,7 @@ void DropPendingCollectionReaper::dropCollectionsOlderThan(OperationContext* opC
const OpTime& opTime) {
DropPendingNamespaces toDrop;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (auto it = _dropPendingNamespaces.cbegin();
it != _dropPendingNamespaces.cend() && it->first <= opTime;
++it) {
@@ -194,7 +194,7 @@ void DropPendingCollectionReaper::dropCollectionsOlderThan(OperationContext* opC
{
// Entries must be removed AFTER drops are completed, so that getEarliestDropOpTime()
// returns appropriate results.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto it = _dropPendingNamespaces.cbegin();
while (it != _dropPendingNamespaces.cend() && it->first <= opTime) {
if (toDrop.find(it->first) != toDrop.cend()) {
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper.h b/src/mongo/db/repl/drop_pending_collection_reaper.h
index 48795159066..18c359ada2a 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper.h
+++ b/src/mongo/db/repl/drop_pending_collection_reaper.h
@@ -36,7 +36,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/optime.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -101,7 +101,7 @@ public:
void dropCollectionsOlderThan(OperationContext* opCtx, const OpTime& opTime);
void clearDropPendingState() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_dropPendingNamespaces.clear();
}
@@ -127,7 +127,7 @@ private:
// (M) Reads and writes guarded by _mutex.
// Guards access to member variables.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DropPendingCollectionReaper::_mutex");
// Used to access the storage layer.
StorageInterface* const _storageInterface; // (R)
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index d3f03d1276c..8f19951e265 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -116,8 +116,8 @@ using Event = executor::TaskExecutor::EventHandle;
using Handle = executor::TaskExecutor::CallbackHandle;
using Operations = MultiApplier::Operations;
using QueryResponseStatus = StatusWith<Fetcher::QueryResponse>;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
+using LockGuard = stdx::lock_guard<Latch>;
// Used to reset the oldest timestamp during initial sync to a non-null timestamp.
const Timestamp kTimestampOne(0, 1);
@@ -197,7 +197,7 @@ InitialSyncer::~InitialSyncer() {
}
bool InitialSyncer::isActive() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isActive_inlock();
}
@@ -210,7 +210,7 @@ Status InitialSyncer::startup(OperationContext* opCtx,
invariant(opCtx);
invariant(initialSyncMaxAttempts >= 1U);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
_state = State::kRunning;
@@ -243,7 +243,7 @@ Status InitialSyncer::startup(OperationContext* opCtx,
}
Status InitialSyncer::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -281,22 +281,22 @@ void InitialSyncer::_cancelRemainingWork_inlock() {
}
void InitialSyncer::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_stateCondition.wait(lk, [this]() { return !_isActive_inlock(); });
}
InitialSyncer::State InitialSyncer::getState_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
Date_t InitialSyncer::getWallClockTime_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastApplied.wallTime;
}
bool InitialSyncer::_isShuttingDown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isShuttingDown_inlock();
}
@@ -468,7 +468,7 @@ void InitialSyncer::_startInitialSyncAttemptCallback(
// Lock guard must be declared after completion guard because completion guard destructor
// has to run outside lock.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_oplogApplier = {};
@@ -522,7 +522,7 @@ void InitialSyncer::_chooseSyncSourceCallback(
std::uint32_t chooseSyncSourceAttempt,
std::uint32_t chooseSyncSourceMaxAttempts,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
// Cancellation should be treated the same as other errors. In this case, the most likely cause
// of a failed _chooseSyncSourceCallback() task is a cancellation triggered by
// InitialSyncer::shutdown() or the task executor shutting down.
@@ -678,7 +678,7 @@ Status InitialSyncer::_scheduleGetBeginFetchingOpTime_inlock(
void InitialSyncer::_rollbackCheckerResetCallback(
const RollbackChecker::Result& result, std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(result.getStatus(),
"error while getting base rollback ID");
if (!status.isOK()) {
@@ -696,7 +696,7 @@ void InitialSyncer::_rollbackCheckerResetCallback(
void InitialSyncer::_getBeginFetchingOpTimeCallback(
const StatusWith<Fetcher::QueryResponse>& result,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(),
"error while getting oldest active transaction timestamp for begin fetching timestamp");
@@ -746,7 +746,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForBeginApplyingTimestamp(
const StatusWith<Fetcher::QueryResponse>& result,
std::shared_ptr<OnCompletionGuard> onCompletionGuard,
OpTime& beginFetchingOpTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(), "error while getting last oplog entry for begin timestamp");
if (!status.isOK()) {
@@ -803,7 +803,7 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
std::shared_ptr<OnCompletionGuard> onCompletionGuard,
const OpTime& lastOpTime,
OpTime& beginFetchingOpTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(), "error while getting the remote feature compatibility version");
if (!status.isOK()) {
@@ -983,7 +983,7 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
void InitialSyncer::_oplogFetcherCallback(const Status& oplogFetcherFinishStatus,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
log() << "Finished fetching oplog during initial sync: " << redact(oplogFetcherFinishStatus)
<< ". Last fetched optime: " << _lastFetched.toString();
@@ -1030,7 +1030,7 @@ void InitialSyncer::_databasesClonerCallback(const Status& databaseClonerFinishS
}
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(databaseClonerFinishStatus,
"error cloning databases");
if (!status.isOK()) {
@@ -1055,7 +1055,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
OpTimeAndWallTime resultOpTimeAndWallTime = {OpTime(), Date_t()};
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(
result.getStatus(), "error fetching last oplog entry for stop timestamp");
if (!status.isOK()) {
@@ -1102,7 +1102,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
TimestampedBSONObj{oplogSeedDoc, resultOpTimeAndWallTime.opTime.getTimestamp()},
resultOpTimeAndWallTime.opTime.getTerm());
if (!status.isOK()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, status);
return;
}
@@ -1111,7 +1111,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
opCtx.get(), resultOpTimeAndWallTime.opTime.getTimestamp(), orderedCommit);
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_lastApplied = resultOpTimeAndWallTime;
log() << "No need to apply operations. (currently at "
<< _initialSyncState->stopTimestamp.toBSON() << ")";
@@ -1123,7 +1123,7 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
void InitialSyncer::_getNextApplierBatchCallback(
const executor::TaskExecutor::CallbackArgs& callbackArgs,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status =
_checkForShutdownAndConvertStatus_inlock(callbackArgs, "error getting next applier batch");
if (!status.isOK()) {
@@ -1223,7 +1223,7 @@ void InitialSyncer::_multiApplierCallback(const Status& multiApplierStatus,
OpTimeAndWallTime lastApplied,
std::uint32_t numApplied,
std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status =
_checkForShutdownAndConvertStatus_inlock(multiApplierStatus, "error applying batch");
@@ -1260,7 +1260,7 @@ void InitialSyncer::_multiApplierCallback(const Status& multiApplierStatus,
void InitialSyncer::_rollbackCheckerCheckForRollbackCallback(
const RollbackChecker::Result& result, std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _checkForShutdownAndConvertStatus_inlock(result.getStatus(),
"error while getting last rollback ID");
if (!status.isOK()) {
@@ -1311,7 +1311,7 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
log() << "Initial sync attempt finishing up.";
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
log() << "Initial Sync Attempt Statistics: " << redact(_getInitialSyncProgress_inlock());
auto runTime = _initialSyncState ? _initialSyncState->timer.millis() : 0;
@@ -1384,7 +1384,7 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
// before we transition the state to Complete.
decltype(_onCompletion) onCompletion;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto opCtx = makeOpCtx();
_tearDown_inlock(opCtx.get(), lastApplied);
@@ -1414,7 +1414,7 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
// before InitialSyncer::join() returns.
onCompletion = {};
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state != State::kComplete);
_state = State::kComplete;
_stateCondition.notify_all();
@@ -1450,8 +1450,7 @@ Status InitialSyncer::_scheduleLastOplogEntryFetcher_inlock(Fetcher::CallbackFn
}
void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
- const stdx::lock_guard<stdx::mutex>& lock,
- std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
+ const stdx::lock_guard<Latch>& lock, std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
// We should check our current state because shutdown() could have been called before
// we re-acquired the lock.
if (_isShuttingDown_inlock()) {
@@ -1506,8 +1505,7 @@ void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
}
void InitialSyncer::_scheduleRollbackCheckerCheckForRollback_inlock(
- const stdx::lock_guard<stdx::mutex>& lock,
- std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
+ const stdx::lock_guard<Latch>& lock, std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
// We should check our current state because shutdown() could have been called before
// we re-acquired the lock.
if (_isShuttingDown_inlock()) {
diff --git a/src/mongo/db/repl/initial_syncer.h b/src/mongo/db/repl/initial_syncer.h
index 6ad23526d85..c83f4c134df 100644
--- a/src/mongo/db/repl/initial_syncer.h
+++ b/src/mongo/db/repl/initial_syncer.h
@@ -52,8 +52,8 @@
#include "mongo/db/repl/rollback_checker.h"
#include "mongo/db/repl/sync_source_selector.h"
#include "mongo/dbtests/mock/mock_dbclient_connection.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/net/hostandport.h"
@@ -510,8 +510,7 @@ private:
* Passes 'lock' through to completion guard.
*/
void _checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
- const stdx::lock_guard<stdx::mutex>& lock,
- std::shared_ptr<OnCompletionGuard> onCompletionGuard);
+ const stdx::lock_guard<Latch>& lock, std::shared_ptr<OnCompletionGuard> onCompletionGuard);
/**
* Schedules a rollback checker to get the rollback ID after data cloning or applying. This
@@ -521,8 +520,7 @@ private:
* Passes 'lock' through to completion guard.
*/
void _scheduleRollbackCheckerCheckForRollback_inlock(
- const stdx::lock_guard<stdx::mutex>& lock,
- std::shared_ptr<OnCompletionGuard> onCompletionGuard);
+ const stdx::lock_guard<Latch>& lock, std::shared_ptr<OnCompletionGuard> onCompletionGuard);
/**
* Checks the given status (or embedded status inside the callback args) and current data
@@ -582,7 +580,7 @@ private:
// (MX) Must hold _mutex and be in a callback in _exec to write; must either hold
// _mutex or be in a callback in _exec to read.
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("InitialSyncer::_mutex"); // (S)
const InitialSyncerOptions _opts; // (R)
std::unique_ptr<DataReplicatorExternalState> _dataReplicatorExternalState; // (R)
executor::TaskExecutor* _exec; // (R)
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index 9abdc1f9b7d..4a6a9176ac7 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -60,7 +60,7 @@
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_name.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/fail_point_service.h"
@@ -104,9 +104,9 @@ using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
using unittest::log;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
using NetworkGuard = executor::NetworkInterfaceMock::InNetworkGuard;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
struct CollectionCloneInfo {
std::shared_ptr<CollectionMockStats> stats = std::make_shared<CollectionMockStats>();
@@ -244,7 +244,9 @@ protected:
int documentsInsertedCount = 0;
};
- stdx::mutex _storageInterfaceWorkDoneMutex; // protects _storageInterfaceWorkDone.
+ // protects _storageInterfaceWorkDone.
+ Mutex _storageInterfaceWorkDoneMutex =
+ MONGO_MAKE_LATCH("InitialSyncerTest::_storageInterfaceWorkDoneMutex");
StorageInterfaceResults _storageInterfaceWorkDone;
void setUp() override {
diff --git a/src/mongo/db/repl/local_oplog_info.cpp b/src/mongo/db/repl/local_oplog_info.cpp
index 069c199def1..b17da6d88c5 100644
--- a/src/mongo/db/repl/local_oplog_info.cpp
+++ b/src/mongo/db/repl/local_oplog_info.cpp
@@ -95,7 +95,7 @@ void LocalOplogInfo::resetCollection() {
}
void LocalOplogInfo::setNewTimestamp(ServiceContext* service, const Timestamp& newTime) {
- stdx::lock_guard<stdx::mutex> lk(_newOpMutex);
+ stdx::lock_guard<Latch> lk(_newOpMutex);
LogicalClock::get(service)->setClusterTimeFromTrustedSource(LogicalTime(newTime));
}
@@ -120,7 +120,7 @@ std::vector<OplogSlot> LocalOplogInfo::getNextOpTimes(OperationContext* opCtx, s
// Allow the storage engine to start the transaction outside the critical section.
opCtx->recoveryUnit()->preallocateSnapshot();
- stdx::lock_guard<stdx::mutex> lk(_newOpMutex);
+ stdx::lock_guard<Latch> lk(_newOpMutex);
ts = LogicalClock::get(opCtx)->reserveTicks(count).asTimestamp();
const bool orderedCommit = false;
diff --git a/src/mongo/db/repl/local_oplog_info.h b/src/mongo/db/repl/local_oplog_info.h
index 67ab7e0560d..96cdb259f36 100644
--- a/src/mongo/db/repl/local_oplog_info.h
+++ b/src/mongo/db/repl/local_oplog_info.h
@@ -92,7 +92,7 @@ private:
// Synchronizes the section where a new Timestamp is generated and when it is registered in the
// storage engine.
- mutable stdx::mutex _newOpMutex;
+ mutable Mutex _newOpMutex = MONGO_MAKE_LATCH("LocalOplogInfo::_newOpMutex");
};
} // namespace repl
diff --git a/src/mongo/db/repl/multiapplier.cpp b/src/mongo/db/repl/multiapplier.cpp
index 99f09fa2484..02c993a0e67 100644
--- a/src/mongo/db/repl/multiapplier.cpp
+++ b/src/mongo/db/repl/multiapplier.cpp
@@ -60,7 +60,7 @@ MultiApplier::~MultiApplier() {
}
bool MultiApplier::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isActive_inlock();
}
@@ -69,7 +69,7 @@ bool MultiApplier::_isActive_inlock() const {
}
Status MultiApplier::startup() noexcept {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (_state) {
case State::kPreStart:
@@ -96,7 +96,7 @@ Status MultiApplier::startup() noexcept {
}
void MultiApplier::shutdown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (_state) {
case State::kPreStart:
// Transition directly from PreStart to Complete if not started yet.
@@ -117,12 +117,12 @@ void MultiApplier::shutdown() {
}
void MultiApplier::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() { return !_isActive_inlock(); });
}
MultiApplier::State MultiApplier::getState_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
@@ -153,14 +153,14 @@ void MultiApplier::_finishCallback(const Status& result) {
// destroyed outside the lock.
decltype(_onCompletion) onCompletion;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_onCompletion);
std::swap(_onCompletion, onCompletion);
}
onCompletion(result);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(State::kComplete != _state);
_state = State::kComplete;
_condition.notify_all();
diff --git a/src/mongo/db/repl/multiapplier.h b/src/mongo/db/repl/multiapplier.h
index 119cd58bc89..406888746b1 100644
--- a/src/mongo/db/repl/multiapplier.h
+++ b/src/mongo/db/repl/multiapplier.h
@@ -43,8 +43,8 @@
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/service_context.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -149,7 +149,7 @@ private:
CallbackFn _onCompletion;
// Protects member data of this MultiApplier.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MultiApplier::_mutex");
stdx::condition_variable _condition;
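MultiApplier::_finishCallback above shows a shutdown idiom that recurs in these files: the completion callback is swapped out while holding the latch, invoked after the lock is released so user code never runs under the component's mutex, and only then is the state flipped to Complete and join()ers notified. A condensed sketch of that idiom; FinishExample and its members are illustrative names:

    #include <functional>
    #include <utility>

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"

    namespace mongo {

    class FinishExample {
    public:
        explicit FinishExample(std::function<void(int)> onCompletion)
            : _onCompletion(std::move(onCompletion)) {}

        void finish(int result) {
            decltype(_onCompletion) onCompletion;
            {
                stdx::lock_guard<Latch> lk(_mutex);
                std::swap(_onCompletion, onCompletion);  // detach under the latch
            }
            if (onCompletion) {
                onCompletion(result);  // runs outside the lock
            }

            stdx::lock_guard<Latch> lk(_mutex);
            _complete = true;
            _condition.notify_all();  // wake anyone blocked in join()
        }

        void join() {
            stdx::unique_lock<Latch> lk(_mutex);
            _condition.wait(lk, [this] { return _complete; });
        }

    private:
        std::function<void(int)> _onCompletion;  // guarded by _mutex until finish()
        mutable Mutex _mutex = MONGO_MAKE_LATCH("FinishExample::_mutex");
        stdx::condition_variable _condition;
        bool _complete = false;  // guarded by _mutex
    };

    }  // namespace mongo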
diff --git a/src/mongo/db/repl/noop_writer.cpp b/src/mongo/db/repl/noop_writer.cpp
index d1bc540ab2c..29ee8017bfb 100644
--- a/src/mongo/db/repl/noop_writer.cpp
+++ b/src/mongo/db/repl/noop_writer.cpp
@@ -71,7 +71,7 @@ public:
: _thread([this, noopWrite, waitTime] { run(waitTime, std::move(noopWrite)); }) {}
~PeriodicNoopRunner() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_inShutdown = true;
_cv.notify_all();
lk.unlock();
@@ -85,7 +85,7 @@ private:
const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
OperationContext& opCtx = *opCtxPtr;
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
MONGO_IDLE_THREAD_BLOCK;
_cv.wait_for(lk, waitTime.toSystemDuration(), [&] { return _inShutdown; });
@@ -104,7 +104,7 @@ private:
/**
* Mutex for the CV
*/
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicNoopRunner::_mutex");
/**
* CV to wait for.
@@ -127,7 +127,7 @@ NoopWriter::~NoopWriter() {
}
Status NoopWriter::startWritingPeriodicNoops(OpTime lastKnownOpTime) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lastKnownOpTime = lastKnownOpTime;
invariant(!_noopRunner);
@@ -140,7 +140,7 @@ Status NoopWriter::startWritingPeriodicNoops(OpTime lastKnownOpTime) {
}
void NoopWriter::stopWritingPeriodicNoops() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_noopRunner.reset();
}
diff --git a/src/mongo/db/repl/noop_writer.h b/src/mongo/db/repl/noop_writer.h
index c9cc5712386..999bc889a1d 100644
--- a/src/mongo/db/repl/noop_writer.h
+++ b/src/mongo/db/repl/noop_writer.h
@@ -32,7 +32,7 @@
#include <functional>
#include "mongo/db/repl/optime.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -75,7 +75,7 @@ private:
* Protects member data of this class during start and stop. There is no need to synchronize
 * access once it is running because it is run by only one thread.
*/
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("NoopWriter::_mutex");
std::unique_ptr<PeriodicNoopRunner> _noopRunner;
};
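PeriodicNoopRunner above illustrates the interruptible-sleep pattern: the worker waits on the condition variable with a timeout, and the destructor sets the shutdown flag under the latch, notifies, and joins. A compact sketch under the same mongo-tree assumptions; PeriodicExampleRunner and work are illustrative names:

    #include <functional>
    #include <utility>

    #include "mongo/platform/condition_variable.h"
    #include "mongo/platform/mutex.h"
    #include "mongo/stdx/thread.h"
    #include "mongo/util/duration.h"

    namespace mongo {

    class PeriodicExampleRunner {
    public:
        PeriodicExampleRunner(Seconds waitTime, std::function<void()> work)
            : _thread([this, waitTime, work = std::move(work)] {
                  while (true) {
                      {
                          stdx::unique_lock<Latch> lk(_mutex);
                          // wait_for returns true only when the predicate (shutdown) holds.
                          if (_cv.wait_for(lk, waitTime.toSystemDuration(), [&] { return _inShutdown; })) {
                              return;
                          }
                      }
                      work();  // timed out without shutdown: run one round outside the latch
                  }
              }) {}

        ~PeriodicExampleRunner() {
            {
                stdx::lock_guard<Latch> lk(_mutex);
                _inShutdown = true;
            }
            _cv.notify_all();
            _thread.join();
        }

    private:
        Mutex _mutex = MONGO_MAKE_LATCH("PeriodicExampleRunner::_mutex");
        stdx::condition_variable _cv;
        bool _inShutdown = false;  // guarded by _mutex
        stdx::thread _thread;      // declared last so the members above exist before the thread starts
    };

    }  // namespace mongo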
diff --git a/src/mongo/db/repl/oplog_applier.cpp b/src/mongo/db/repl/oplog_applier.cpp
index 1e1d020d6ad..5242a9c917b 100644
--- a/src/mongo/db/repl/oplog_applier.cpp
+++ b/src/mongo/db/repl/oplog_applier.cpp
@@ -75,12 +75,12 @@ Future<void> OplogApplier::startup() {
void OplogApplier::shutdown() {
_shutdown();
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_inShutdown = true;
}
bool OplogApplier::inShutdown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _inShutdown;
}
diff --git a/src/mongo/db/repl/oplog_applier.h b/src/mongo/db/repl/oplog_applier.h
index a75f18f96f3..f6049c71943 100644
--- a/src/mongo/db/repl/oplog_applier.h
+++ b/src/mongo/db/repl/oplog_applier.h
@@ -40,7 +40,7 @@
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/functional.h"
#include "mongo/util/future.h"
@@ -219,7 +219,7 @@ private:
Observer* const _observer;
// Protects member data of OplogApplier.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("OplogApplier::_mutex");
// Set to true if shutdown() has been called.
bool _inShutdown = false;
diff --git a/src/mongo/db/repl/oplog_buffer_collection.cpp b/src/mongo/db/repl/oplog_buffer_collection.cpp
index 642a1db0078..69e25926631 100644
--- a/src/mongo/db/repl/oplog_buffer_collection.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection.cpp
@@ -106,7 +106,7 @@ void OplogBufferCollection::startup(OperationContext* opCtx) {
return;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If we are starting from an existing collection, we must populate the in memory state of the
// buffer.
auto sizeResult = _storageInterface->getCollectionSize(opCtx, _nss);
@@ -148,7 +148,7 @@ void OplogBufferCollection::startup(OperationContext* opCtx) {
void OplogBufferCollection::shutdown(OperationContext* opCtx) {
if (_options.dropCollectionAtShutdown) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dropCollection(opCtx);
_size = 0;
_count = 0;
@@ -167,7 +167,7 @@ void OplogBufferCollection::push(OperationContext* opCtx,
}
size_t numDocs = std::distance(begin, end);
std::vector<InsertStatement> docsToInsert(numDocs);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto ts = _lastPushedTimestamp;
auto sentinelCount = _sentinelCount;
std::transform(begin, end, docsToInsert.begin(), [&sentinelCount, &ts](const Value& value) {
@@ -193,7 +193,7 @@ void OplogBufferCollection::push(OperationContext* opCtx,
void OplogBufferCollection::waitForSpace(OperationContext* opCtx, std::size_t size) {}
bool OplogBufferCollection::isEmpty() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _count == 0;
}
@@ -202,17 +202,17 @@ std::size_t OplogBufferCollection::getMaxSize() const {
}
std::size_t OplogBufferCollection::getSize() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _size;
}
std::size_t OplogBufferCollection::getCount() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _count;
}
void OplogBufferCollection::clear(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dropCollection(opCtx);
_createCollection(opCtx);
_size = 0;
@@ -224,7 +224,7 @@ void OplogBufferCollection::clear(OperationContext* opCtx) {
}
bool OplogBufferCollection::tryPop(OperationContext* opCtx, Value* value) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_count == 0) {
return false;
}
@@ -232,7 +232,7 @@ bool OplogBufferCollection::tryPop(OperationContext* opCtx, Value* value) {
}
bool OplogBufferCollection::waitForData(Seconds waitDuration) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_cvNoLongerEmpty.wait_for(
lk, waitDuration.toSystemDuration(), [&]() { return _count != 0; })) {
return false;
@@ -241,7 +241,7 @@ bool OplogBufferCollection::waitForData(Seconds waitDuration) {
}
bool OplogBufferCollection::peek(OperationContext* opCtx, Value* value) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_count == 0) {
return false;
}
@@ -251,7 +251,7 @@ bool OplogBufferCollection::peek(OperationContext* opCtx, Value* value) {
boost::optional<OplogBuffer::Value> OplogBufferCollection::lastObjectPushed(
OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto lastDocumentPushed = _lastDocumentPushed_inlock(opCtx);
if (lastDocumentPushed) {
BSONObj entryObj = extractEmbeddedOplogDocument(*lastDocumentPushed);
@@ -356,23 +356,23 @@ void OplogBufferCollection::_dropCollection(OperationContext* opCtx) {
}
std::size_t OplogBufferCollection::getSentinelCount_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _sentinelCount;
}
Timestamp OplogBufferCollection::getLastPushedTimestamp_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastPushedTimestamp;
}
Timestamp OplogBufferCollection::getLastPoppedTimestamp_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastPoppedKey.isEmpty() ? Timestamp()
: _lastPoppedKey[""].Obj()[kTimestampFieldName].timestamp();
}
std::queue<BSONObj> OplogBufferCollection::getPeekCache_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _peekCache;
}
diff --git a/src/mongo/db/repl/oplog_buffer_collection.h b/src/mongo/db/repl/oplog_buffer_collection.h
index 112f7dd71a6..40356c834be 100644
--- a/src/mongo/db/repl/oplog_buffer_collection.h
+++ b/src/mongo/db/repl/oplog_buffer_collection.h
@@ -34,7 +34,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/oplog_buffer.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/queue.h"
namespace mongo {
@@ -177,7 +177,7 @@ private:
stdx::condition_variable _cvNoLongerEmpty;
// Protects member data below and synchronizes it with the underlying collection.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("OplogBufferCollection::_mutex");
// Number of documents in buffer.
std::size_t _count = 0;
diff --git a/src/mongo/db/repl/oplog_buffer_proxy.cpp b/src/mongo/db/repl/oplog_buffer_proxy.cpp
index 45b6803abcf..3e2705511bb 100644
--- a/src/mongo/db/repl/oplog_buffer_proxy.cpp
+++ b/src/mongo/db/repl/oplog_buffer_proxy.cpp
@@ -51,8 +51,8 @@ void OplogBufferProxy::startup(OperationContext* opCtx) {
void OplogBufferProxy::shutdown(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex);
- stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex);
+ stdx::lock_guard<Latch> backLock(_lastPushedMutex);
+ stdx::lock_guard<Latch> frontLock(_lastPeekedMutex);
_lastPushed.reset();
_lastPeeked.reset();
}
@@ -65,7 +65,7 @@ void OplogBufferProxy::push(OperationContext* opCtx,
if (begin == end) {
return;
}
- stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex);
+ stdx::lock_guard<Latch> lk(_lastPushedMutex);
_lastPushed = *(end - 1);
_target->push(opCtx, begin, end);
}
@@ -91,16 +91,16 @@ std::size_t OplogBufferProxy::getCount() const {
}
void OplogBufferProxy::clear(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex);
- stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex);
+ stdx::lock_guard<Latch> backLock(_lastPushedMutex);
+ stdx::lock_guard<Latch> frontLock(_lastPeekedMutex);
_lastPushed.reset();
_lastPeeked.reset();
_target->clear(opCtx);
}
bool OplogBufferProxy::tryPop(OperationContext* opCtx, Value* value) {
- stdx::lock_guard<stdx::mutex> backLock(_lastPushedMutex);
- stdx::lock_guard<stdx::mutex> frontLock(_lastPeekedMutex);
+ stdx::lock_guard<Latch> backLock(_lastPushedMutex);
+ stdx::lock_guard<Latch> frontLock(_lastPeekedMutex);
if (!_target->tryPop(opCtx, value)) {
return false;
}
@@ -114,7 +114,7 @@ bool OplogBufferProxy::tryPop(OperationContext* opCtx, Value* value) {
bool OplogBufferProxy::waitForData(Seconds waitDuration) {
{
- stdx::unique_lock<stdx::mutex> lk(_lastPushedMutex);
+ stdx::unique_lock<Latch> lk(_lastPushedMutex);
if (_lastPushed) {
return true;
}
@@ -123,7 +123,7 @@ bool OplogBufferProxy::waitForData(Seconds waitDuration) {
}
bool OplogBufferProxy::peek(OperationContext* opCtx, Value* value) {
- stdx::lock_guard<stdx::mutex> lk(_lastPeekedMutex);
+ stdx::lock_guard<Latch> lk(_lastPeekedMutex);
if (_lastPeeked) {
*value = *_lastPeeked;
return true;
@@ -137,7 +137,7 @@ bool OplogBufferProxy::peek(OperationContext* opCtx, Value* value) {
boost::optional<OplogBuffer::Value> OplogBufferProxy::lastObjectPushed(
OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lk(_lastPushedMutex);
+ stdx::lock_guard<Latch> lk(_lastPushedMutex);
if (!_lastPushed) {
return boost::none;
}
@@ -145,7 +145,7 @@ boost::optional<OplogBuffer::Value> OplogBufferProxy::lastObjectPushed(
}
boost::optional<OplogBuffer::Value> OplogBufferProxy::getLastPeeked_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_lastPeekedMutex);
+ stdx::lock_guard<Latch> lk(_lastPeekedMutex);
return _lastPeeked;
}
diff --git a/src/mongo/db/repl/oplog_buffer_proxy.h b/src/mongo/db/repl/oplog_buffer_proxy.h
index 3fdcec8a27b..5effffd815c 100644
--- a/src/mongo/db/repl/oplog_buffer_proxy.h
+++ b/src/mongo/db/repl/oplog_buffer_proxy.h
@@ -33,7 +33,7 @@
#include <memory>
#include "mongo/db/repl/oplog_buffer.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -80,10 +80,10 @@ private:
std::unique_ptr<OplogBuffer> _target;
// If both mutexes have to be acquired, acquire _lastPushedMutex first.
- mutable stdx::mutex _lastPushedMutex;
+ mutable Mutex _lastPushedMutex = MONGO_MAKE_LATCH("OplogBufferProxy::_lastPushedMutex");
boost::optional<Value> _lastPushed;
- mutable stdx::mutex _lastPeekedMutex;
+ mutable Mutex _lastPeekedMutex = MONGO_MAKE_LATCH("OplogBufferProxy::_lastPeekedMutex");
boost::optional<Value> _lastPeeked;
};
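OplogBufferProxy keeps two latches and documents a fixed acquisition order (_lastPushedMutex before _lastPeekedMutex), which is what keeps shutdown(), clear() and tryPop() above from deadlocking against one another. A sketch of that ordering discipline; TwoCacheExample is an illustrative name:

    #include <boost/optional.hpp>

    #include "mongo/platform/mutex.h"

    namespace mongo {

    class TwoCacheExample {
    public:
        void clearBoth() {
            // Whenever both latches are needed, take them in the same order everywhere.
            stdx::lock_guard<Latch> backLock(_lastPushedMutex);   // always first
            stdx::lock_guard<Latch> frontLock(_lastPeekedMutex);  // always second
            _lastPushed.reset();
            _lastPeeked.reset();
        }

        boost::optional<int> lastPushed() const {
            stdx::lock_guard<Latch> lk(_lastPushedMutex);
            return _lastPushed;
        }

    private:
        mutable Mutex _lastPushedMutex = MONGO_MAKE_LATCH("TwoCacheExample::_lastPushedMutex");
        boost::optional<int> _lastPushed;

        mutable Mutex _lastPeekedMutex = MONGO_MAKE_LATCH("TwoCacheExample::_lastPeekedMutex");
        boost::optional<int> _lastPeeked;
    };

    }  // namespace mongo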
diff --git a/src/mongo/db/repl/oplog_test.cpp b/src/mongo/db/repl/oplog_test.cpp
index a39208720ce..d6e23d6c3ac 100644
--- a/src/mongo/db/repl/oplog_test.cpp
+++ b/src/mongo/db/repl/oplog_test.cpp
@@ -45,7 +45,7 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/barrier.h"
#include "mongo/util/concurrency/thread_pool.h"
@@ -165,7 +165,7 @@ void _testConcurrentLogOp(const F& makeTaskFunction,
// Run 2 concurrent logOp() requests using the thread pool.
// Use a barrier with a thread count of 3 to ensure both logOp() tasks are complete before this
// test thread can proceed with shutting the thread pool down.
- stdx::mutex mtx;
+ auto mtx = MONGO_MAKE_LATCH();
unittest::Barrier barrier(3U);
const NamespaceString nss1("test1.coll");
const NamespaceString nss2("test2.coll");
@@ -200,7 +200,7 @@ void _testConcurrentLogOp(const F& makeTaskFunction,
std::reverse(oplogEntries->begin(), oplogEntries->end());
// Look up namespaces and their respective optimes (returned by logOp()) in the map.
- stdx::lock_guard<stdx::mutex> lock(mtx);
+ stdx::lock_guard<Latch> lock(mtx);
ASSERT_EQUALS(2U, opTimeNssMap->size());
}
@@ -210,10 +210,10 @@ void _testConcurrentLogOp(const F& makeTaskFunction,
* Returns optime of generated oplog entry.
*/
OpTime _logOpNoopWithMsg(OperationContext* opCtx,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lock(*mtx);
+ stdx::lock_guard<Latch> lock(*mtx);
// logOp() must be called while holding lock because ephemeralForTest storage engine does not
// support concurrent updates to its internal state.
@@ -239,7 +239,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithoutDocLockingSupport) {
_testConcurrentLogOp(
[](const NamespaceString& nss,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
unittest::Barrier* barrier) {
return [=] {
@@ -272,7 +272,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupport) {
ForceSupportsDocLocking support(true);
_testConcurrentLogOp(
[](const NamespaceString& nss,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
unittest::Barrier* barrier) {
return [=] {
@@ -304,7 +304,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertFirstOplogEntry) {
ForceSupportsDocLocking support(true);
_testConcurrentLogOp(
[](const NamespaceString& nss,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
unittest::Barrier* barrier) {
return [=] {
@@ -322,7 +322,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertFirstOplogEntry) {
// Revert the first logOp() call and confirm that there are no holes in the
// oplog after committing the oplog entry with the more recent optime.
{
- stdx::lock_guard<stdx::mutex> lock(*mtx);
+ stdx::lock_guard<Latch> lock(*mtx);
auto firstOpTimeAndNss = *(opTimeNssMap->cbegin());
if (opTime == firstOpTimeAndNss.first) {
ASSERT_EQUALS(nss, firstOpTimeAndNss.second)
@@ -351,7 +351,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertLastOplogEntry) {
ForceSupportsDocLocking support(true);
_testConcurrentLogOp(
[](const NamespaceString& nss,
- stdx::mutex* mtx,
+ Mutex* mtx,
OpTimeNamespaceStringMap* opTimeNssMap,
unittest::Barrier* barrier) {
return [=] {
@@ -369,7 +369,7 @@ TEST_F(OplogTest, ConcurrentLogOpWithDocLockingSupportRevertLastOplogEntry) {
// Revert the last logOp() call and confirm that there are no holes in the
// oplog after committing the oplog entry with the earlier optime.
{
- stdx::lock_guard<stdx::mutex> lock(*mtx);
+ stdx::lock_guard<Latch> lock(*mtx);
auto lastOpTimeAndNss = *(opTimeNssMap->crbegin());
if (opTime == lastOpTimeAndNss.first) {
ASSERT_EQUALS(nss, lastOpTimeAndNss.second)
diff --git a/src/mongo/db/repl/replication_consistency_markers_mock.cpp b/src/mongo/db/repl/replication_consistency_markers_mock.cpp
index 61f46bf0bef..5c698190445 100644
--- a/src/mongo/db/repl/replication_consistency_markers_mock.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_mock.cpp
@@ -36,12 +36,12 @@ namespace repl {
void ReplicationConsistencyMarkersMock::initializeMinValidDocument(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
+ stdx::lock_guard<Latch> lock(_initialSyncFlagMutex);
_initialSyncFlag = false;
}
{
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_minValid = {};
_oplogTruncateAfterPoint = {};
_appliedThrough = {};
@@ -49,64 +49,64 @@ void ReplicationConsistencyMarkersMock::initializeMinValidDocument(OperationCont
}
bool ReplicationConsistencyMarkersMock::getInitialSyncFlag(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
+ stdx::lock_guard<Latch> lock(_initialSyncFlagMutex);
return _initialSyncFlag;
}
void ReplicationConsistencyMarkersMock::setInitialSyncFlag(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
+ stdx::lock_guard<Latch> lock(_initialSyncFlagMutex);
_initialSyncFlag = true;
}
void ReplicationConsistencyMarkersMock::clearInitialSyncFlag(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_initialSyncFlagMutex);
+ stdx::lock_guard<Latch> lock(_initialSyncFlagMutex);
_initialSyncFlag = false;
}
OpTime ReplicationConsistencyMarkersMock::getMinValid(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
return _minValid;
}
void ReplicationConsistencyMarkersMock::setMinValid(OperationContext* opCtx,
const OpTime& minValid) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_minValid = minValid;
}
void ReplicationConsistencyMarkersMock::setMinValidToAtLeast(OperationContext* opCtx,
const OpTime& minValid) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_minValid = std::max(_minValid, minValid);
}
void ReplicationConsistencyMarkersMock::setOplogTruncateAfterPoint(OperationContext* opCtx,
const Timestamp& timestamp) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_oplogTruncateAfterPoint = timestamp;
}
Timestamp ReplicationConsistencyMarkersMock::getOplogTruncateAfterPoint(
OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
return _oplogTruncateAfterPoint;
}
void ReplicationConsistencyMarkersMock::setAppliedThrough(OperationContext* opCtx,
const OpTime& optime,
bool setTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_appliedThrough = optime;
}
void ReplicationConsistencyMarkersMock::clearAppliedThrough(OperationContext* opCtx,
const Timestamp& writeTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
_appliedThrough = {};
}
OpTime ReplicationConsistencyMarkersMock::getAppliedThrough(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_minValidBoundariesMutex);
+ stdx::lock_guard<Latch> lock(_minValidBoundariesMutex);
return _appliedThrough;
}
diff --git a/src/mongo/db/repl/replication_consistency_markers_mock.h b/src/mongo/db/repl/replication_consistency_markers_mock.h
index 3215264110f..3fe3c2670f5 100644
--- a/src/mongo/db/repl/replication_consistency_markers_mock.h
+++ b/src/mongo/db/repl/replication_consistency_markers_mock.h
@@ -31,7 +31,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_consistency_markers.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -73,10 +73,12 @@ public:
Status createInternalCollections(OperationContext* opCtx) override;
private:
- mutable stdx::mutex _initialSyncFlagMutex;
+ mutable Mutex _initialSyncFlagMutex =
+ MONGO_MAKE_LATCH("ReplicationConsistencyMarkersMock::_initialSyncFlagMutex");
bool _initialSyncFlag = false;
- mutable stdx::mutex _minValidBoundariesMutex;
+ mutable Mutex _minValidBoundariesMutex =
+ MONGO_MAKE_LATCH("ReplicationConsistencyMarkersMock::_minValidBoundariesMutex");
OpTime _appliedThrough;
OpTime _minValid;
Timestamp _oplogTruncateAfterPoint;
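
The header above shows the conversion pattern repeated throughout this patch: each stdx::mutex member becomes a mongo::Mutex initialized with MONGO_MAKE_LATCH and a diagnostic name, and every guard is retyped to lock the Latch alias. A minimal sketch of that pattern, assuming only the mongo/platform/mutex.h API this change introduces; the class and member names below are hypothetical and not part of the patch:

#include "mongo/platform/mutex.h"

// Assumes the mongo namespace, where Mutex and Latch are declared.
class ExampleCounter {
public:
    void increment() {
        // Guards now name Latch instead of stdx::mutex.
        stdx::lock_guard<Latch> lk(_mutex);
        ++_value;
    }

    long long get() const {
        stdx::lock_guard<Latch> lk(_mutex);
        return _value;
    }

private:
    // The string names the latch for lock diagnostics; by convention it is "Class::_member".
    mutable Mutex _mutex = MONGO_MAKE_LATCH("ExampleCounter::_mutex");
    long long _value = 0;
};
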
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 5cf10287b04..d1975168f77 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -198,7 +198,7 @@ bool ReplicationCoordinatorExternalStateImpl::isInitialSyncFlagSet(OperationCont
void ReplicationCoordinatorExternalStateImpl::startSteadyStateReplication(
OperationContext* opCtx, ReplicationCoordinator* replCoord) {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
// We've shut down the external state, don't start again.
if (_inShutdown)
@@ -248,12 +248,12 @@ void ReplicationCoordinatorExternalStateImpl::startSteadyStateReplication(
}
void ReplicationCoordinatorExternalStateImpl::stopDataReplication(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_threadMutex);
+ stdx::unique_lock<Latch> lk(_threadMutex);
_stopDataReplication_inlock(opCtx, lk);
}
void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(
- OperationContext* opCtx, stdx::unique_lock<stdx::mutex>& lock) {
+ OperationContext* opCtx, stdx::unique_lock<Latch>& lock) {
// Make sure no other _stopDataReplication calls are in progress.
_dataReplicationStopped.wait(lock, [this]() { return !_stoppingDataReplication; });
_stoppingDataReplication = true;
@@ -308,7 +308,7 @@ void ReplicationCoordinatorExternalStateImpl::_stopDataReplication_inlock(
void ReplicationCoordinatorExternalStateImpl::startThreads(const ReplSettings& settings) {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_startedThreads) {
return;
}
@@ -331,7 +331,7 @@ void ReplicationCoordinatorExternalStateImpl::startThreads(const ReplSettings& s
}
void ReplicationCoordinatorExternalStateImpl::shutdown(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_threadMutex);
+ stdx::unique_lock<Latch> lk(_threadMutex);
_inShutdown = true;
if (!_startedThreads) {
return;
@@ -772,28 +772,28 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
}
void ReplicationCoordinatorExternalStateImpl::signalApplierToChooseNewSyncSource() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_bgSync) {
_bgSync->clearSyncTarget();
}
}
void ReplicationCoordinatorExternalStateImpl::stopProducer() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_bgSync) {
_bgSync->stop(false);
}
}
void ReplicationCoordinatorExternalStateImpl::startProducerIfStopped() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_bgSync) {
_bgSync->startProducerIfStopped();
}
}
bool ReplicationCoordinatorExternalStateImpl::tooStale() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ stdx::lock_guard<Latch> lk(_threadMutex);
if (_bgSync) {
return _bgSync->tooStale();
}
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index 645ac39e28b..1469635a97f 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -39,7 +39,7 @@
#include "mongo/db/repl/task_runner.h"
#include "mongo/db/storage/journal_listener.h"
#include "mongo/db/storage/snapshot_manager.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
namespace mongo {
@@ -121,7 +121,7 @@ private:
/**
* Stops data replication and returns with 'lock' locked.
*/
- void _stopDataReplication_inlock(OperationContext* opCtx, stdx::unique_lock<stdx::mutex>& lock);
+ void _stopDataReplication_inlock(OperationContext* opCtx, stdx::unique_lock<Latch>& lock);
/**
* Called when the instance transitions to primary in order to notify a potentially sharded host
@@ -142,7 +142,7 @@ private:
ServiceContext* _service;
// Guards starting threads and setting _startedThreads
- stdx::mutex _threadMutex;
+ Mutex _threadMutex = MONGO_MAKE_LATCH("ReplicationCoordinatorExternalStateImpl::_threadMutex");
// Flag for guarding against concurrent data replication stopping.
bool _stoppingDataReplication = false;
@@ -188,7 +188,8 @@ private:
Future<void> _oplogApplierShutdownFuture;
// Mutex guarding the _nextThreadId value to prevent concurrent incrementing.
- stdx::mutex _nextThreadIdMutex;
+ Mutex _nextThreadIdMutex =
+ MONGO_MAKE_LATCH("ReplicationCoordinatorExternalStateImpl::_nextThreadIdMutex");
// Number used to uniquely name threads.
long long _nextThreadId = 0;
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
index c6167b82e1d..75bdac91439 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
@@ -146,7 +146,7 @@ StatusWith<LastVote> ReplicationCoordinatorExternalStateMock::loadLocalLastVoteD
Status ReplicationCoordinatorExternalStateMock::storeLocalLastVoteDocument(
OperationContext* opCtx, const LastVote& lastVote) {
{
- stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
+ stdx::unique_lock<Latch> lock(_shouldHangLastVoteMutex);
while (_storeLocalLastVoteDocumentShouldHang) {
_shouldHangLastVoteCondVar.wait(lock);
}
@@ -211,7 +211,7 @@ void ReplicationCoordinatorExternalStateMock::setStoreLocalLastVoteDocumentStatu
}
void ReplicationCoordinatorExternalStateMock::setStoreLocalLastVoteDocumentToHang(bool hang) {
- stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
+ stdx::unique_lock<Latch> lock(_shouldHangLastVoteMutex);
_storeLocalLastVoteDocumentShouldHang = hang;
if (!hang) {
_shouldHangLastVoteCondVar.notify_all();
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.h b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
index 0ce548743b0..5cebab1e820 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
@@ -37,8 +37,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/last_vote.h"
#include "mongo/db/repl/replication_coordinator_external_state.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/net/hostandport.h"
@@ -197,7 +197,8 @@ private:
Status _storeLocalConfigDocumentStatus;
Status _storeLocalLastVoteDocumentStatus;
// mutex and cond var for controlling storeLocalLastVoteDocument()'s hanging
- stdx::mutex _shouldHangLastVoteMutex;
+ Mutex _shouldHangLastVoteMutex =
+ MONGO_MAKE_LATCH("ReplicationCoordinatorExternalStateMock::_shouldHangLastVoteMutex");
stdx::condition_variable _shouldHangLastVoteCondVar;
bool _storeLocalLastVoteDocumentShouldHang;
bool _connectionsClosed;
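
As the mock above illustrates, condition variables keep working unchanged: stdx::condition_variable now waits on a stdx::unique_lock<Latch>, and the include moves from mongo/stdx/condition_variable.h to mongo/platform/condition_variable.h. A small self-contained sketch of that pairing, with purely illustrative names that are not taken from the patch:

#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"

class HangGate {  // hypothetical helper, for illustration only
public:
    void waitUntilOpen() {
        stdx::unique_lock<Latch> lk(_mutex);
        // The predicate is re-evaluated under the latch on every wakeup.
        _cv.wait(lk, [this] { return !_shouldHang; });
    }

    void open() {
        stdx::unique_lock<Latch> lk(_mutex);
        _shouldHang = false;
        _cv.notify_all();
    }

private:
    Mutex _mutex = MONGO_MAKE_LATCH("HangGate::_mutex");
    stdx::condition_variable _cv;
    bool _shouldHang = true;
};
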
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index ba1739049fb..8587b3a56a0 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -82,9 +82,9 @@
#include "mongo/db/write_concern_options.h"
#include "mongo/executor/connection_pool_stats.h"
#include "mongo/executor/network_interface.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/metadata/oplog_query_metadata.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
@@ -154,7 +154,7 @@ private:
const bool _initialState;
};
-void lockAndCall(stdx::unique_lock<stdx::mutex>* lk, const std::function<void()>& fn) {
+void lockAndCall(stdx::unique_lock<Latch>* lk, const std::function<void()>& fn) {
if (!lk->owns_lock()) {
lk->lock();
}
@@ -233,7 +233,7 @@ public:
* _list is guarded by ReplicationCoordinatorImpl::_mutex, thus it is illegal to construct one
* of these without holding _mutex
*/
- WaiterGuard(const stdx::unique_lock<stdx::mutex>& lock, WaiterList* list, Waiter* waiter)
+ WaiterGuard(const stdx::unique_lock<Latch>& lock, WaiterList* list, Waiter* waiter)
: _lock(lock), _list(list), _waiter(waiter) {
invariant(_lock.owns_lock());
list->add_inlock(_waiter);
@@ -245,7 +245,7 @@ public:
}
private:
- const stdx::unique_lock<stdx::mutex>& _lock;
+ const stdx::unique_lock<Latch>& _lock;
WaiterList* _list;
Waiter* _waiter;
};
@@ -374,7 +374,7 @@ void ReplicationCoordinatorImpl::waitForStartUpComplete_forTest() {
void ReplicationCoordinatorImpl::_waitForStartUpComplete() {
CallbackHandle handle;
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
_rsConfigStateChange.wait(lk);
}
@@ -386,12 +386,12 @@ void ReplicationCoordinatorImpl::_waitForStartUpComplete() {
}
ReplSetConfig ReplicationCoordinatorImpl::getReplicaSetConfig_forTest() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _rsConfig;
}
Date_t ReplicationCoordinatorImpl::getElectionTimeout_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_handleElectionTimeoutCbh.isValid()) {
return Date_t();
}
@@ -399,12 +399,12 @@ Date_t ReplicationCoordinatorImpl::getElectionTimeout_forTest() const {
}
Milliseconds ReplicationCoordinatorImpl::getRandomizedElectionOffset_forTest() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _getRandomizedElectionOffset_inlock();
}
boost::optional<Date_t> ReplicationCoordinatorImpl::getPriorityTakeover_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_priorityTakeoverCbh.isValid()) {
return boost::none;
}
@@ -412,7 +412,7 @@ boost::optional<Date_t> ReplicationCoordinatorImpl::getPriorityTakeover_forTest(
}
boost::optional<Date_t> ReplicationCoordinatorImpl::getCatchupTakeover_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_catchupTakeoverCbh.isValid()) {
return boost::none;
}
@@ -425,12 +425,12 @@ executor::TaskExecutor::CallbackHandle ReplicationCoordinatorImpl::getCatchupTak
}
OpTime ReplicationCoordinatorImpl::getCurrentCommittedSnapshotOpTime() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _getCurrentCommittedSnapshotOpTime_inlock();
}
OpTimeAndWallTime ReplicationCoordinatorImpl::getCurrentCommittedSnapshotOpTimeAndWallTime() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _getCurrentCommittedSnapshotOpTimeAndWallTime_inlock();
}
@@ -481,7 +481,7 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
log() << "Did not find local initialized voted for document at startup.";
}
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_topCoord->loadLastVote(lastVote.getValue());
}
@@ -542,7 +542,7 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
handle = CallbackHandle{};
}
fassert(40446, handle);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_finishLoadLocalConfigCbh = std::move(handle.getValue());
return false;
@@ -644,7 +644,7 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
// applied optime is never greater than the latest cluster time in the logical clock.
_externalState->setGlobalTimestamp(getServiceContext(), lastOpTime.getTimestamp());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
invariant(_rsConfigState == kConfigStartingUp);
const PostMemberStateUpdateAction action =
_setCurrentRSConfig(lock, opCtx.get(), localConfig, myIndex.getValue());
@@ -661,7 +661,7 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
}
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Step down is impossible, so we don't need to wait for the returned event.
_updateTerm_inlock(term);
}
@@ -677,7 +677,7 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
void ReplicationCoordinatorImpl::_stopDataReplication(OperationContext* opCtx) {
std::shared_ptr<InitialSyncer> initialSyncerCopy;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_initialSyncer.swap(initialSyncerCopy);
}
if (initialSyncerCopy) {
@@ -719,7 +719,7 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx,
auto onCompletion = [this, startCompleted](const StatusWith<OpTimeAndWallTime>& opTimeStatus) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (opTimeStatus == ErrorCodes::CallbackCanceled) {
log() << "Initial Sync has been cancelled: " << opTimeStatus.getStatus();
return;
@@ -760,11 +760,7 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx,
try {
{
// Must take the lock to set _initialSyncer, but not call it.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- if (_inShutdown) {
- log() << "Initial Sync not starting because replication is shutting down.";
- return;
- }
+ stdx::lock_guard<Latch> lock(_mutex);
initialSyncerCopy = std::make_shared<InitialSyncer>(
createInitialSyncerOptions(this, _externalState.get()),
std::make_unique<DataReplicatorExternalStateInitialSync>(this,
@@ -817,7 +813,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* opCtx) {
storageGlobalParams.readOnly = true;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_setConfigState_inlock(kConfigReplicationDisabled);
return;
}
@@ -828,7 +824,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* opCtx) {
_storage->initializeStorageControlsForReplication(opCtx->getServiceContext());
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
fassert(18822, !_inShutdown);
_setConfigState_inlock(kConfigStartingUp);
_topCoord->setStorageEngineSupportsReadCommitted(
@@ -844,7 +840,7 @@ void ReplicationCoordinatorImpl::startup(OperationContext* opCtx) {
if (doneLoadingConfig) {
// If we're not done loading the config, then the config state will be set by
// _finishLoadLocalConfig.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_rsConfig.isInitialized());
_setConfigState_inlock(kConfigUninitialized);
}
@@ -870,7 +866,7 @@ void ReplicationCoordinatorImpl::shutdown(OperationContext* opCtx) {
// Used to shut down outside of the lock.
std::shared_ptr<InitialSyncer> initialSyncerCopy;
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
fassert(28533, !_inShutdown);
_inShutdown = true;
if (_rsConfigState == kConfigPreStart) {
@@ -918,12 +914,12 @@ ReplicationCoordinator::Mode ReplicationCoordinatorImpl::getReplicationMode() co
}
MemberState ReplicationCoordinatorImpl::getMemberState() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _getMemberState_inlock();
}
std::vector<MemberData> ReplicationCoordinatorImpl::getMemberData() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _topCoord->getMemberData();
}
@@ -937,7 +933,7 @@ Status ReplicationCoordinatorImpl::waitForMemberState(MemberState expectedState,
return Status(ErrorCodes::BadValue, "Timeout duration cannot be negative");
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto pred = [this, expectedState]() { return _memberState == expectedState; };
if (!_memberStateChange.wait_for(lk, timeout.toSystemDuration(), pred)) {
return Status(ErrorCodes::ExceededTimeLimit,
@@ -949,7 +945,7 @@ Status ReplicationCoordinatorImpl::waitForMemberState(MemberState expectedState,
}
Seconds ReplicationCoordinatorImpl::getSlaveDelaySecs() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_rsConfig.isInitialized());
if (_selfIndex == -1) {
// We aren't currently in the set. Return 0 seconds so we can clear out the applier's
@@ -960,7 +956,7 @@ Seconds ReplicationCoordinatorImpl::getSlaveDelaySecs() const {
}
void ReplicationCoordinatorImpl::clearSyncSourceBlacklist() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_topCoord->clearSyncSourceBlacklist();
}
@@ -977,7 +973,7 @@ Status ReplicationCoordinatorImpl::setFollowerMode(const MemberState& newState)
Status ReplicationCoordinatorImpl::_setFollowerMode(OperationContext* opCtx,
const MemberState& newState) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (newState == _topCoord->getMemberState()) {
return Status::OK();
}
@@ -1008,7 +1004,7 @@ Status ReplicationCoordinatorImpl::_setFollowerMode(OperationContext* opCtx,
}
ReplicationCoordinator::ApplierState ReplicationCoordinatorImpl::getApplierState() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _applierState;
}
@@ -1040,7 +1036,7 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx,
// When we go to drop all temp collections, we must replicate the drops.
invariant(opCtx->writesAreReplicated());
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_applierState != ApplierState::Draining) {
return;
}
@@ -1101,7 +1097,7 @@ Status ReplicationCoordinatorImpl::waitForDrainFinish(Milliseconds timeout) {
return Status(ErrorCodes::BadValue, "Timeout duration cannot be negative");
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto pred = [this]() { return _applierState != ApplierState::Draining; };
if (!_drainFinishedCond.wait_for(lk, timeout.toSystemDuration(), pred)) {
return Status(ErrorCodes::ExceededTimeLimit,
@@ -1116,7 +1112,7 @@ void ReplicationCoordinatorImpl::signalUpstreamUpdater() {
}
void ReplicationCoordinatorImpl::setMyHeartbeatMessage(const std::string& msg) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_topCoord->setMyHeartbeatMessage(_replExecutor->now(), msg);
}
@@ -1127,7 +1123,7 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTimeForward(
const auto opTime = opTimeAndWallTime.opTime;
_externalState->setGlobalTimestamp(getServiceContext(), opTime.getTimestamp());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto myLastAppliedOpTime = _getMyLastAppliedOpTime_inlock();
if (opTime > myLastAppliedOpTime) {
_setMyLastAppliedOpTimeAndWallTime(lock, opTimeAndWallTime, false, consistency);
@@ -1153,7 +1149,7 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTimeForward(
void ReplicationCoordinatorImpl::setMyLastDurableOpTimeAndWallTimeForward(
const OpTimeAndWallTime& opTimeAndWallTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (opTimeAndWallTime.opTime > _getMyLastDurableOpTime_inlock()) {
_setMyLastDurableOpTimeAndWallTime(lock, opTimeAndWallTime, false);
_reportUpstream_inlock(std::move(lock));
@@ -1167,7 +1163,7 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTime(
// applied optime is never greater than the latest cluster time in the logical clock.
_externalState->setGlobalTimestamp(getServiceContext(), opTime.getTimestamp());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
// The optime passed to this function is required to represent a consistent database state.
_setMyLastAppliedOpTimeAndWallTime(lock, opTimeAndWallTime, false, DataConsistency::Consistent);
_reportUpstream_inlock(std::move(lock));
@@ -1175,13 +1171,13 @@ void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTime(
void ReplicationCoordinatorImpl::setMyLastDurableOpTimeAndWallTime(
const OpTimeAndWallTime& opTimeAndWallTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_setMyLastDurableOpTimeAndWallTime(lock, opTimeAndWallTime, false);
_reportUpstream_inlock(std::move(lock));
}
void ReplicationCoordinatorImpl::resetMyLastOpTimes() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_resetMyLastOpTimes(lock);
_reportUpstream_inlock(std::move(lock));
}
@@ -1196,7 +1192,7 @@ void ReplicationCoordinatorImpl::_resetMyLastOpTimes(WithLock lk) {
_stableOpTimeCandidates.clear();
}
-void ReplicationCoordinatorImpl::_reportUpstream_inlock(stdx::unique_lock<stdx::mutex> lock) {
+void ReplicationCoordinatorImpl::_reportUpstream_inlock(stdx::unique_lock<Latch> lock) {
invariant(lock.owns_lock());
if (getReplicationMode() != modeReplSet) {
@@ -1283,22 +1279,22 @@ void ReplicationCoordinatorImpl::_setMyLastDurableOpTimeAndWallTime(
}
OpTime ReplicationCoordinatorImpl::getMyLastAppliedOpTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyLastAppliedOpTime_inlock();
}
OpTimeAndWallTime ReplicationCoordinatorImpl::getMyLastAppliedOpTimeAndWallTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyLastAppliedOpTimeAndWallTime_inlock();
}
OpTimeAndWallTime ReplicationCoordinatorImpl::getMyLastDurableOpTimeAndWallTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyLastDurableOpTimeAndWallTime_inlock();
}
OpTime ReplicationCoordinatorImpl::getMyLastDurableOpTime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyLastDurableOpTime_inlock();
}
@@ -1405,7 +1401,7 @@ Status ReplicationCoordinatorImpl::_waitUntilOpTime(OperationContext* opCtx,
}
}
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (isMajorityCommittedRead && !_externalState->snapshotsEnabled()) {
return {ErrorCodes::CommandNotSupported,
@@ -1572,7 +1568,7 @@ Status ReplicationCoordinatorImpl::setLastDurableOptime_forTest(long long cfgVer
long long memberId,
const OpTime& opTime,
Date_t wallTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(getReplicationMode() == modeReplSet);
if (wallTime == Date_t()) {
@@ -1591,7 +1587,7 @@ Status ReplicationCoordinatorImpl::setLastAppliedOptime_forTest(long long cfgVer
long long memberId,
const OpTime& opTime,
Date_t wallTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(getReplicationMode() == modeReplSet);
if (wallTime == Date_t()) {
@@ -1691,7 +1687,7 @@ ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitRepli
OperationContext* opCtx, const OpTime& opTime, const WriteConcernOptions& writeConcern) {
Timer timer;
WriteConcernOptions fixedWriteConcern = populateUnsetWriteConcernOptionsSyncMode(writeConcern);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto status = _awaitReplication_inlock(&lock, opCtx, opTime, fixedWriteConcern);
return {std::move(status), duration_cast<Milliseconds>(timer.elapsed())};
}
@@ -1714,7 +1710,7 @@ BSONObj ReplicationCoordinatorImpl::_getReplicationProgress(WithLock wl) const {
return progress.obj();
}
Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
- stdx::unique_lock<stdx::mutex>* lock,
+ stdx::unique_lock<Latch>* lock,
OperationContext* opCtx,
const OpTime& opTime,
const WriteConcernOptions& writeConcern) {
@@ -1834,7 +1830,7 @@ Status ReplicationCoordinatorImpl::_awaitReplication_inlock(
void ReplicationCoordinatorImpl::waitForStepDownAttempt_forTest() {
auto isSteppingDown = [&]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// If true, we know that a stepdown is underway.
return (_topCoord->isSteppingDown());
};
@@ -1933,7 +1929,7 @@ void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::_killOpThreadFn()
// X mode for the first time. This ensures that no writing operations will continue
// after the node's term change.
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_stopKillingOps.wait_for(
lock, Milliseconds(10).toSystemDuration(), [this] { return _killSignaled; })) {
log() << "Stopped killing user operations";
@@ -1949,7 +1945,7 @@ void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::_stopAndWaitForKi
return;
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_killSignaled = true;
_stopKillingOps.notify_all();
}
@@ -2009,7 +2005,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
auto deadline = force ? stepDownUntil : waitUntil;
AutoGetRstlForStepUpStepDown arsd(this, opCtx, deadline);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
opCtx->checkForInterrupt();
@@ -2043,7 +2039,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
stepdownHangBeforePerformingPostMemberStateUpdateActions.shouldFail())) {
mongo::sleepsecs(1);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_inShutdown) {
break;
}
@@ -2149,7 +2145,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx,
}
void ReplicationCoordinatorImpl::_performElectionHandoff() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto candidateIndex = _topCoord->chooseElectionHandoffCandidate();
if (candidateIndex < 0) {
@@ -2198,7 +2194,7 @@ bool ReplicationCoordinatorImpl::isMasterForReportingPurposes() {
return true;
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(getReplicationMode() == modeReplSet);
return _getMemberState_inlock().primary();
}
@@ -2227,7 +2223,7 @@ bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationCont
}
bool ReplicationCoordinatorImpl::canAcceptNonLocalWrites() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _readWriteAbility->canAcceptNonLocalWrites(lk);
}
@@ -2259,7 +2255,7 @@ bool ReplicationCoordinatorImpl::canAcceptWritesFor_UNSAFE(OperationContext* opC
return true;
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_memberState.rollback()) {
return false;
}
@@ -2287,7 +2283,7 @@ Status ReplicationCoordinatorImpl::checkCanServeReadsFor_UNSAFE(OperationContext
// Oplog reads are not allowed during STARTUP state, but we make an exception for internal
// reads. Internal reads are required for cleaning up unfinished apply batches.
if (!isPrimaryOrSecondary && getReplicationMode() == modeReplSet && ns.isOplog()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if ((_memberState.startup() && client->isFromUserConnection()) || _memberState.startup2() ||
_memberState.rollback()) {
return Status{ErrorCodes::NotMasterOrSecondary,
@@ -2331,17 +2327,17 @@ bool ReplicationCoordinatorImpl::shouldRelaxIndexConstraints(OperationContext* o
}
OID ReplicationCoordinatorImpl::getElectionId() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _electionId;
}
int ReplicationCoordinatorImpl::getMyId() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _getMyId_inlock();
}
HostAndPort ReplicationCoordinatorImpl::getMyHostAndPort() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _rsConfig.getMemberAt(_selfIndex).getHostAndPort();
}
@@ -2358,7 +2354,7 @@ Status ReplicationCoordinatorImpl::resyncData(OperationContext* opCtx, bool wait
f = [&finishedEvent, this]() { _replExecutor->signalEvent(finishedEvent); };
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_resetMyLastOpTimes(lk);
}
// unlock before calling _startDataReplication().
@@ -2370,7 +2366,7 @@ Status ReplicationCoordinatorImpl::resyncData(OperationContext* opCtx, bool wait
}
StatusWith<BSONObj> ReplicationCoordinatorImpl::prepareReplSetUpdatePositionCommand() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _topCoord->prepareReplSetUpdatePositionCommand(
_getCurrentCommittedSnapshotOpTime_inlock());
}
@@ -2382,7 +2378,7 @@ Status ReplicationCoordinatorImpl::processReplSetGetStatus(
if (responseStyle == ReplSetGetStatusResponseStyle::kInitialSync) {
std::shared_ptr<InitialSyncer> initialSyncerCopy;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
initialSyncerCopy = _initialSyncer;
}
@@ -2397,7 +2393,7 @@ Status ReplicationCoordinatorImpl::processReplSetGetStatus(
BSONObj electionCandidateMetrics =
ReplicationMetrics::get(getServiceContext()).getElectionCandidateMetricsBSON();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
Status result(ErrorCodes::InternalError, "didn't set status in prepareStatusResponse");
_topCoord->prepareStatusResponse(
TopologyCoordinator::ReplSetStatusArgs{
@@ -2417,7 +2413,7 @@ void ReplicationCoordinatorImpl::fillIsMasterForReplSet(
IsMasterResponse* response, const SplitHorizon::Parameters& horizonParams) {
invariant(getSettings().usingReplSets());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_topCoord->fillIsMasterForReplSet(response, horizonParams);
OpTime lastOpTime = _getMyLastAppliedOpTime_inlock();
@@ -2440,17 +2436,17 @@ void ReplicationCoordinatorImpl::fillIsMasterForReplSet(
}
void ReplicationCoordinatorImpl::appendSlaveInfoData(BSONObjBuilder* result) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_topCoord->fillMemberData(result);
}
ReplSetConfig ReplicationCoordinatorImpl::getConfig() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _rsConfig;
}
void ReplicationCoordinatorImpl::processReplSetGetConfig(BSONObjBuilder* result) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
result->append("config", _rsConfig.toBSON());
}
@@ -2458,7 +2454,7 @@ void ReplicationCoordinatorImpl::processReplSetMetadata(const rpc::ReplSetMetada
EventHandle evh;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
evh = _processReplSetMetadata_inlock(replMetadata);
}
@@ -2468,7 +2464,7 @@ void ReplicationCoordinatorImpl::processReplSetMetadata(const rpc::ReplSetMetada
}
void ReplicationCoordinatorImpl::cancelAndRescheduleElectionTimeout() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_cancelAndRescheduleElectionTimeout_inlock();
}
@@ -2481,7 +2477,7 @@ EventHandle ReplicationCoordinatorImpl::_processReplSetMetadata_inlock(
}
bool ReplicationCoordinatorImpl::getMaintenanceMode() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _topCoord->getMaintenanceCount() > 0;
}
@@ -2491,7 +2487,7 @@ Status ReplicationCoordinatorImpl::setMaintenanceMode(bool activate) {
"can only set maintenance mode on replica set members");
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_topCoord->getRole() == TopologyCoordinator::Role::kCandidate) {
return Status(ErrorCodes::NotSecondary, "currently running for election");
}
@@ -2530,7 +2526,7 @@ Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* opCt
Status result(ErrorCodes::InternalError, "didn't set status in prepareSyncFromResponse");
auto doResync = false;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_topCoord->prepareSyncFromResponse(target, resultObj, &result);
// If we are in the middle of an initial sync, do a resync.
doResync = result.isOK() && _initialSyncer && _initialSyncer->isActive();
@@ -2545,7 +2541,7 @@ Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* opCt
Status ReplicationCoordinatorImpl::processReplSetFreeze(int secs, BSONObjBuilder* resultObj) {
auto result = [=]() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _topCoord->prepareFreezeResponse(_replExecutor->now(), secs, resultObj);
}();
if (!result.isOK()) {
@@ -2569,7 +2565,7 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCt
log() << "replSetReconfig admin command received from client; new config: "
<< args.newConfigObj;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
_rsConfigStateChange.wait(lk);
@@ -2625,7 +2621,6 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCt
if (!status.isOK()) {
error() << "replSetReconfig got " << status << " while parsing " << newConfigObj;
return Status(ErrorCodes::InvalidReplicaSetConfig, status.reason());
- ;
}
if (newConfig.getReplSetName() != _settings.ourSetName()) {
str::stream errmsg;
@@ -2674,7 +2669,7 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(OperationContext* opCtx,
// Do not conduct an election during a reconfig, as the node may not be electable post-reconfig.
executor::TaskExecutor::EventHandle electionFinishedEvent;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
electionFinishedEvent = _cancelElectionIfNeeded_inlock();
}
@@ -2689,7 +2684,7 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(OperationContext* opCtx,
}
boost::optional<AutoGetRstlForStepUpStepDown> arsd;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (isForceReconfig && _shouldStepDownOnReconfig(lk, newConfig, myIndex)) {
_topCoord->prepareForUnconditionalStepDown();
lk.unlock();
@@ -2748,7 +2743,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
log() << "replSetInitiate admin command received from client";
const auto replEnabled = _settings.usingReplSets();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!replEnabled) {
return Status(ErrorCodes::NoReplicationEnabled, "server is not running with --replSet");
}
@@ -2837,7 +2832,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
void ReplicationCoordinatorImpl::_finishReplSetInitiate(OperationContext* opCtx,
const ReplSetConfig& newConfig,
int myIndex) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_rsConfigState == kConfigInitiating);
invariant(!_rsConfig.isInitialized());
auto action = _setCurrentRSConfig(lk, opCtx, newConfig, myIndex);
@@ -3065,7 +3060,7 @@ void ReplicationCoordinatorImpl::CatchupState::start_inlock() {
if (!cbData.status.isOK()) {
return;
}
- stdx::lock_guard<stdx::mutex> lk(*mutex);
+ stdx::lock_guard<Latch> lk(*mutex);
// Check whether the callback has been cancelled while holding mutex.
if (cbData.myHandle.isCanceled()) {
return;
@@ -3177,7 +3172,7 @@ void ReplicationCoordinatorImpl::CatchupState::incrementNumCatchUpOps_inlock(int
}
Status ReplicationCoordinatorImpl::abortCatchupIfNeeded(PrimaryCatchUpConclusionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_catchupState) {
_catchupState->abort_inlock(reason);
return Status::OK();
@@ -3186,14 +3181,14 @@ Status ReplicationCoordinatorImpl::abortCatchupIfNeeded(PrimaryCatchUpConclusion
}
void ReplicationCoordinatorImpl::incrementNumCatchUpOpsIfCatchingUp(int numOps) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_catchupState) {
_catchupState->incrementNumCatchUpOps_inlock(numOps);
}
}
void ReplicationCoordinatorImpl::signalDropPendingCollectionsRemovedFromStorage() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_wakeReadyWaiters(lock);
}
@@ -3310,7 +3305,7 @@ void ReplicationCoordinatorImpl::_wakeReadyWaiters(WithLock lk) {
Status ReplicationCoordinatorImpl::processReplSetUpdatePosition(const UpdatePositionArgs& updates,
long long* configVersion) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
Status status = Status::OK();
bool somethingChanged = false;
for (UpdatePositionArgs::UpdateIterator update = updates.updatesBegin();
@@ -3332,7 +3327,7 @@ Status ReplicationCoordinatorImpl::processReplSetUpdatePosition(const UpdatePosi
}
bool ReplicationCoordinatorImpl::buildsIndexes() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_selfIndex == -1) {
return true;
}
@@ -3342,12 +3337,12 @@ bool ReplicationCoordinatorImpl::buildsIndexes() {
std::vector<HostAndPort> ReplicationCoordinatorImpl::getHostsWrittenTo(const OpTime& op,
bool durablyWritten) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _topCoord->getHostsWrittenTo(op, durablyWritten);
}
std::vector<HostAndPort> ReplicationCoordinatorImpl::getOtherNodesInReplSet() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_settings.usingReplSets());
std::vector<HostAndPort> nodes;
@@ -3366,7 +3361,7 @@ std::vector<HostAndPort> ReplicationCoordinatorImpl::getOtherNodesInReplSet() co
Status ReplicationCoordinatorImpl::checkIfWriteConcernCanBeSatisfied(
const WriteConcernOptions& writeConcern) const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
}
@@ -3383,7 +3378,7 @@ Status ReplicationCoordinatorImpl::_checkIfWriteConcernCanBeSatisfied_inlock(
Status ReplicationCoordinatorImpl::checkIfCommitQuorumCanBeSatisfied(
const CommitQuorumOptions& commitQuorum) const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _checkIfCommitQuorumCanBeSatisfied(lock, commitQuorum);
}
@@ -3416,7 +3411,7 @@ StatusWith<bool> ReplicationCoordinatorImpl::checkIfCommitQuorumIsSatisfied(
// If the 'commitQuorum' cannot be satisfied with all the members of this replica set, we
// need to inform the caller to avoid hanging while waiting for satisfiability of the
// 'commitQuorum' with 'commitReadyMembers' due to replica set reconfigurations.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
Status status = _checkIfCommitQuorumCanBeSatisfied(lock, commitQuorum);
if (!status.isOK()) {
return status;
@@ -3427,7 +3422,7 @@ StatusWith<bool> ReplicationCoordinatorImpl::checkIfCommitQuorumIsSatisfied(
}
WriteConcernOptions ReplicationCoordinatorImpl::getGetLastErrorDefault() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_rsConfig.isInitialized()) {
return _rsConfig.getDefaultWriteConcern();
}
@@ -3455,7 +3450,7 @@ bool ReplicationCoordinatorImpl::isReplEnabled() const {
}
HostAndPort ReplicationCoordinatorImpl::chooseNewSyncSource(const OpTime& lastOpTimeFetched) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
HostAndPort oldSyncSource = _topCoord->getSyncSourceAddress();
// Always allow chaining while in catchup and drain mode.
@@ -3480,12 +3475,12 @@ void ReplicationCoordinatorImpl::_unblacklistSyncSource(
if (cbData.status == ErrorCodes::CallbackCanceled)
return;
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_topCoord->unblacklistSyncSource(host, _replExecutor->now());
}
void ReplicationCoordinatorImpl::blacklistSyncSource(const HostAndPort& host, Date_t until) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_topCoord->blacklistSyncSource(host, until);
_scheduleWorkAt(until, [=](const executor::TaskExecutor::CallbackArgs& cbData) {
_unblacklistSyncSource(cbData, host);
@@ -3509,7 +3504,7 @@ void ReplicationCoordinatorImpl::resetLastOpTimesFromOplog(OperationContext* opC
_externalState->setGlobalTimestamp(opCtx->getServiceContext(),
lastOpTimeAndWallTime.opTime.getTimestamp());
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
bool isRollbackAllowed = true;
_setMyLastAppliedOpTimeAndWallTime(lock, lastOpTimeAndWallTime, isRollbackAllowed, consistency);
_setMyLastDurableOpTimeAndWallTime(lock, lastOpTimeAndWallTime, isRollbackAllowed);
@@ -3520,7 +3515,7 @@ bool ReplicationCoordinatorImpl::shouldChangeSyncSource(
const HostAndPort& currentSource,
const rpc::ReplSetMetadata& replMetadata,
boost::optional<rpc::OplogQueryMetadata> oqMetadata) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _topCoord->shouldChangeSyncSource(
currentSource, replMetadata, oqMetadata, _replExecutor->now());
}
@@ -3615,7 +3610,7 @@ void ReplicationCoordinatorImpl::_cleanupStableOpTimeCandidates(
boost::optional<OpTimeAndWallTime>
ReplicationCoordinatorImpl::chooseStableOpTimeFromCandidates_forTest(
const std::set<OpTimeAndWallTime>& candidates, const OpTimeAndWallTime& maximumStableOpTime) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _chooseStableOpTimeFromCandidates(lk, candidates, maximumStableOpTime);
}
void ReplicationCoordinatorImpl::cleanupStableOpTimeCandidates_forTest(
@@ -3624,12 +3619,12 @@ void ReplicationCoordinatorImpl::cleanupStableOpTimeCandidates_forTest(
}
std::set<OpTimeAndWallTime> ReplicationCoordinatorImpl::getStableOpTimeCandidates_forTest() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _stableOpTimeCandidates;
}
void ReplicationCoordinatorImpl::attemptToAdvanceStableTimestamp() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_setStableTimestampForStorage(lk);
}
@@ -3757,7 +3752,7 @@ void ReplicationCoordinatorImpl::finishRecoveryIfEligible(OperationContext* opCt
void ReplicationCoordinatorImpl::advanceCommitPoint(
const OpTimeAndWallTime& committedOpTimeAndWallTime, bool fromSyncSource) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_advanceCommitPoint(lk, committedOpTimeAndWallTime, fromSyncSource);
}
@@ -3779,12 +3774,12 @@ void ReplicationCoordinatorImpl::_advanceCommitPoint(
}
OpTime ReplicationCoordinatorImpl::getLastCommittedOpTime() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _topCoord->getLastCommittedOpTime();
}
OpTimeAndWallTime ReplicationCoordinatorImpl::getLastCommittedOpTimeAndWallTime() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _topCoord->getLastCommittedOpTimeAndWallTime();
}
@@ -3798,7 +3793,7 @@ Status ReplicationCoordinatorImpl::processReplSetRequestVotes(
return termStatus;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// We should only enter terminal shutdown from global terminal exit. In that case, rather
// than voting in a term we don't plan to stay alive in, refuse to vote.
@@ -3839,7 +3834,7 @@ void ReplicationCoordinatorImpl::prepareReplMetadata(const BSONObj& metadataRequ
invariant(-1 != rbid);
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (hasReplSetMetadata) {
_prepareReplSetMetadata_inlock(lastOpTimeFromClient, builder);
@@ -3874,7 +3869,7 @@ bool ReplicationCoordinatorImpl::getWriteConcernMajorityShouldJournal_inlock() c
Status ReplicationCoordinatorImpl::processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
ReplSetHeartbeatResponse* response) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
return Status(ErrorCodes::NotYetInitialized,
"Received heartbeat while still initializing replication system");
@@ -3882,7 +3877,7 @@ Status ReplicationCoordinatorImpl::processHeartbeatV1(const ReplSetHeartbeatArgs
}
Status result(ErrorCodes::InternalError, "didn't set status in prepareHeartbeatResponse");
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto senderHost(args.getSenderHost());
const Date_t now = _replExecutor->now();
@@ -3915,7 +3910,7 @@ long long ReplicationCoordinatorImpl::getTerm() {
EventHandle ReplicationCoordinatorImpl::updateTerm_forTest(
long long term, TopologyCoordinator::UpdateTermResult* updateResult) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
EventHandle finishEvh;
finishEvh = _updateTerm_inlock(term, updateResult);
@@ -3934,7 +3929,7 @@ Status ReplicationCoordinatorImpl::updateTerm(OperationContext* opCtx, long long
EventHandle finishEvh;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
finishEvh = _updateTerm_inlock(term, &updateTermResult);
}
@@ -3983,7 +3978,7 @@ EventHandle ReplicationCoordinatorImpl::_updateTerm_inlock(
void ReplicationCoordinatorImpl::waitUntilSnapshotCommitted(OperationContext* opCtx,
const Timestamp& untilSnapshot) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
uassert(ErrorCodes::NotYetInitialized,
"Cannot use snapshots until replica set is finished initializing.",
@@ -3999,7 +3994,7 @@ size_t ReplicationCoordinatorImpl::getNumUncommittedSnapshots() {
}
void ReplicationCoordinatorImpl::createWMajorityWriteAvailabilityDateWaiter(OpTime opTime) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto opTimeCB = [this, opTime]() {
ReplicationMetrics::get(getServiceContext())
.setWMajorityWriteAvailabilityDate(_replExecutor->now());
@@ -4045,7 +4040,7 @@ bool ReplicationCoordinatorImpl::_updateCommittedSnapshot(
}
void ReplicationCoordinatorImpl::dropAllSnapshots() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_dropAllSnapshots_inlock();
}
@@ -4091,7 +4086,7 @@ EventHandle ReplicationCoordinatorImpl::_makeEvent() {
WriteConcernOptions ReplicationCoordinatorImpl::populateUnsetWriteConcernOptionsSyncMode(
WriteConcernOptions wc) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _populateUnsetWriteConcernOptionsSyncMode(lock, wc);
}
@@ -4127,7 +4122,7 @@ Status ReplicationCoordinatorImpl::stepUpIfEligible(bool skipDryRun) {
EventHandle finishEvent;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
finishEvent = _electionFinishedEvent;
}
if (finishEvent.isValid()) {
@@ -4137,7 +4132,7 @@ Status ReplicationCoordinatorImpl::stepUpIfEligible(bool skipDryRun) {
// Step up is considered successful only if we are currently a primary and we are not in the
// process of stepping down. If we know we are going to step down, we should fail the
// replSetStepUp command so the caller can retry if necessary.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_getMemberState_inlock().primary())
return Status(ErrorCodes::CommandFailed, "Election failed.");
else if (_topCoord->isSteppingDown())
@@ -4160,7 +4155,7 @@ int64_t ReplicationCoordinatorImpl::_nextRandomInt64_inlock(int64_t limit) {
}
bool ReplicationCoordinatorImpl::setContainsArbiter() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _rsConfig.containsArbiter();
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 8a19e09562a..6cfb21a22df 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -571,7 +571,7 @@ private:
// Tracks number of operations left running on step down.
size_t _userOpsRunning = 0;
// Protects killSignaled and stopKillingOps cond. variable.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AutoGetRstlForStepUpStepDown::_mutex");
// Signals thread about the change of killSignaled value.
stdx::condition_variable _stopKillingOps;
// Once this is set to true, the killOpThreadFn method will terminate.
@@ -802,7 +802,7 @@ private:
* Helper method for _awaitReplication that takes an already locked unique_lock, but leaves
* operation timing to the caller.
*/
- Status _awaitReplication_inlock(stdx::unique_lock<stdx::mutex>* lock,
+ Status _awaitReplication_inlock(stdx::unique_lock<Latch>* lock,
OperationContext* opCtx,
const OpTime& opTime,
const WriteConcernOptions& writeConcern);
@@ -854,7 +854,7 @@ private:
*
* Lock will be released after this method finishes.
*/
- void _reportUpstream_inlock(stdx::unique_lock<stdx::mutex> lock);
+ void _reportUpstream_inlock(stdx::unique_lock<Latch> lock);
/**
* Helpers to set the last applied and durable OpTime.
@@ -1141,10 +1141,10 @@ private:
*
* Requires "lock" to own _mutex, and returns the same unique_lock.
*/
- stdx::unique_lock<stdx::mutex> _handleHeartbeatResponseAction_inlock(
+ stdx::unique_lock<Latch> _handleHeartbeatResponseAction_inlock(
const HeartbeatResponseAction& action,
const StatusWith<ReplSetHeartbeatResponse>& responseStatus,
- stdx::unique_lock<stdx::mutex> lock);
+ stdx::unique_lock<Latch> lock);
/**
* Updates the last committed OpTime to be 'committedOpTime' if it is more recent than the
@@ -1366,7 +1366,7 @@ private:
// (I) Independently synchronized, see member variable comment.
// Protects member data of this ReplicationCoordinator.
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ReplicationCoordinatorImpl::_mutex"); // (S)
// Handles to actively queued heartbeats.
HeartbeatHandles _heartbeatHandles; // (M)
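
Several helpers in this file now pass the lock object itself; for example, lockAndCall and _reportUpstream_inlock take a stdx::unique_lock<Latch> so the callee can check or assume ownership. A minimal sketch of that convention under the same assumptions as the earlier examples (the names here are made up for illustration):

#include <functional>

#include "mongo/platform/mutex.h"

namespace {
Mutex exampleMutex = MONGO_MAKE_LATCH("example::exampleMutex");  // hypothetical latch

// Accepts the caller's lock and acquires it only if the caller has not already done so,
// mirroring the shape of lockAndCall in this file.
void runLocked(stdx::unique_lock<Latch>* lk, const std::function<void()>& fn) {
    if (!lk->owns_lock()) {
        lk->lock();
    }
    fn();
}
}  // namespace

// Usage sketch:
//     stdx::unique_lock<Latch> lk(exampleMutex);
//     runLocked(&lk, [] { /* critical section runs under exampleMutex */ });
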
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index f49ecec21be..8330b7b30ed 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/repl/replication_metrics.h"
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/repl/vote_requester.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -95,7 +95,7 @@ public:
void ReplicationCoordinatorImpl::_startElectSelfV1(
TopologyCoordinator::StartElectionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_startElectSelfV1_inlock(reason);
}
@@ -187,7 +187,7 @@ void ReplicationCoordinatorImpl::_startElectSelfV1_inlock(
void ReplicationCoordinatorImpl::_processDryRunResult(
long long originalTerm, TopologyCoordinator::StartElectionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
LoseElectionDryRunGuardV1 lossGuard(this);
invariant(_voteRequester);
@@ -269,7 +269,7 @@ void ReplicationCoordinatorImpl::_writeLastVoteForMyElection(
return _externalState->storeLocalLastVoteDocument(opCtx.get(), lastVote);
}();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
LoseElectionDryRunGuardV1 lossGuard(this);
if (status == ErrorCodes::CallbackCanceled) {
return;
@@ -315,7 +315,7 @@ MONGO_FAIL_POINT_DEFINE(electionHangsBeforeUpdateMemberState);
void ReplicationCoordinatorImpl::_onVoteRequestComplete(
long long newTerm, TopologyCoordinator::StartElectionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
LoseElectionGuardV1 lossGuard(this);
invariant(_voteRequester);
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 6097df4f6e0..4be3daac838 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -54,9 +54,9 @@
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/repl/vote_requester.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
@@ -91,7 +91,7 @@ Milliseconds ReplicationCoordinatorImpl::_getRandomizedElectionOffset_inlock() {
void ReplicationCoordinatorImpl::_doMemberHeartbeat(executor::TaskExecutor::CallbackArgs cbData,
const HostAndPort& target,
int targetIndex) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_untrackHeartbeatHandle_inlock(cbData.myHandle);
if (cbData.status == ErrorCodes::CallbackCanceled) {
@@ -131,7 +131,7 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatToTarget_inlock(const HostAnd
void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData, int targetIndex) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// remove handle from queued heartbeats
_untrackHeartbeatHandle_inlock(cbData.myHandle);
@@ -246,10 +246,10 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
_handleHeartbeatResponseAction_inlock(action, hbStatusResponse, std::move(lk));
}
-stdx::unique_lock<stdx::mutex> ReplicationCoordinatorImpl::_handleHeartbeatResponseAction_inlock(
+stdx::unique_lock<Latch> ReplicationCoordinatorImpl::_handleHeartbeatResponseAction_inlock(
const HeartbeatResponseAction& action,
const StatusWith<ReplSetHeartbeatResponse>& responseStatus,
- stdx::unique_lock<stdx::mutex> lock) {
+ stdx::unique_lock<Latch> lock) {
invariant(lock.owns_lock());
switch (action.getAction()) {
case HeartbeatResponseAction::NoAction:
@@ -376,7 +376,7 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
"Blocking until fail point is disabled.";
auto inShutdown = [&] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _inShutdown;
};
@@ -391,7 +391,7 @@ void ReplicationCoordinatorImpl::_stepDownFinish(
// have taken global lock in S mode and operations blocked on prepare conflict will be killed to
// avoid 3-way deadlock between read, prepared transaction and step down thread.
AutoGetRstlForStepUpStepDown arsd(this, opCtx.get());
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// This node has already stepped down due to reconfig. So, signal anyone who is waiting on the
// step down event.
@@ -497,7 +497,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
_externalState.get(), newConfig, getGlobalServiceContext());
if (myIndex.getStatus() == ErrorCodes::NodeNotFound) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If this node is absent in newConfig, and this node was not previously initialized,
// return to kConfigUninitialized immediately, rather than storing the config and
// transitioning into the RS_REMOVED state. See SERVER-15740.
@@ -523,7 +523,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
auto status = _externalState->storeLocalConfigDocument(opCtx.get(), newConfig.toBSON());
bool isFirstConfig;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
isFirstConfig = !_rsConfig.isInitialized();
if (!status.isOK()) {
error() << "Ignoring new configuration in heartbeat response because we failed to"
@@ -594,7 +594,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
// we have already set our ReplicationCoordinatorImpl::_rsConfigState state to
// "kConfigReconfiguring" which prevents new elections from happening.
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (auto electionFinishedEvent = _cancelElectionIfNeeded_inlock()) {
LOG_FOR_HEARTBEATS(0)
<< "Waiting for election to complete before finishing reconfig to version "
@@ -613,7 +613,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
auto opCtx = cc().makeOperationContext();
boost::optional<AutoGetRstlForStepUpStepDown> arsd;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_shouldStepDownOnReconfig(lk, newConfig, myIndex)) {
_topCoord->prepareForUnconditionalStepDown();
lk.unlock();
@@ -740,7 +740,7 @@ void ReplicationCoordinatorImpl::_startHeartbeats_inlock() {
void ReplicationCoordinatorImpl::_handleLivenessTimeout(
const executor::TaskExecutor::CallbackArgs& cbData) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Only reset the callback handle if it matches, otherwise more will be coming through
if (cbData.myHandle == _handleLivenessTimeoutCbh) {
_handleLivenessTimeoutCbh = CallbackHandle();
@@ -864,7 +864,7 @@ void ReplicationCoordinatorImpl::_cancelAndRescheduleElectionTimeout_inlock() {
void ReplicationCoordinatorImpl::_startElectSelfIfEligibleV1(
TopologyCoordinator::StartElectionReason reason) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// If it is not a single node replica set, no need to start an election after stepdown timeout.
if (reason == TopologyCoordinator::StartElectionReason::kSingleNodePromptElection &&
_rsConfig.getNumMembers() != 1) {
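[Illustrative aside, not part of the patch] The hunks above all apply the commit's core substitution: a bare stdx::mutex member becomes a mongo::Mutex built with MONGO_MAKE_LATCH and a diagnostic name, and lock acquisitions switch from stdx::lock_guard<stdx::mutex> to stdx::lock_guard<Latch>. A minimal sketch of the resulting shape, assuming only the facilities visible in these hunks (mongo/platform/mutex.h providing Mutex, Latch, and MONGO_MAKE_LATCH); the class and member names below are hypothetical:

// Sketch only -- not part of this patch. Hypothetical class showing the
// member-latch pattern used throughout this commit.
#include "mongo/platform/mutex.h"

namespace mongo {

class WidgetRegistry {
public:
    void setCount(int count) {
        stdx::lock_guard<Latch> lk(_mutex);  // was stdx::lock_guard<stdx::mutex>
        _count = count;
    }

    int getCount() const {
        stdx::lock_guard<Latch> lk(_mutex);
        return _count;
    }

private:
    // The string names the latch for diagnostics (compare the (S)/(M) annotations above).
    mutable Mutex _mutex = MONGO_MAKE_LATCH("WidgetRegistry::_mutex");
    int _count = 0;  // (M) guarded by _mutex
};

}  // namespace mongo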
diff --git a/src/mongo/db/repl/replication_metrics.cpp b/src/mongo/db/repl/replication_metrics.cpp
index 55508674562..14f01452775 100644
--- a/src/mongo/db/repl/replication_metrics.cpp
+++ b/src/mongo/db/repl/replication_metrics.cpp
@@ -58,7 +58,7 @@ ReplicationMetrics::~ReplicationMetrics() {}
void ReplicationMetrics::incrementNumElectionsCalledForReason(
TopologyCoordinator::StartElectionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (reason) {
case TopologyCoordinator::StartElectionReason::kStepUpRequest:
case TopologyCoordinator::StartElectionReason::kStepUpRequestSkipDryRun: {
@@ -91,7 +91,7 @@ void ReplicationMetrics::incrementNumElectionsCalledForReason(
void ReplicationMetrics::incrementNumElectionsSuccessfulForReason(
TopologyCoordinator::StartElectionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (reason) {
case TopologyCoordinator::StartElectionReason::kStepUpRequest:
case TopologyCoordinator::StartElectionReason::kStepUpRequestSkipDryRun: {
@@ -123,20 +123,20 @@ void ReplicationMetrics::incrementNumElectionsSuccessfulForReason(
}
void ReplicationMetrics::incrementNumStepDownsCausedByHigherTerm() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionMetrics.setNumStepDownsCausedByHigherTerm(
_electionMetrics.getNumStepDownsCausedByHigherTerm() + 1);
}
void ReplicationMetrics::incrementNumCatchUps() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionMetrics.setNumCatchUps(_electionMetrics.getNumCatchUps() + 1);
_updateAverageCatchUpOps(lk);
}
void ReplicationMetrics::incrementNumCatchUpsConcludedForReason(
ReplicationCoordinator::PrimaryCatchUpConclusionReason reason) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
switch (reason) {
case ReplicationCoordinator::PrimaryCatchUpConclusionReason::kSucceeded:
_electionMetrics.setNumCatchUpsSucceeded(_electionMetrics.getNumCatchUpsSucceeded() +
@@ -169,140 +169,140 @@ void ReplicationMetrics::incrementNumCatchUpsConcludedForReason(
}
long ReplicationMetrics::getNumStepUpCmdsCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getStepUpCmd().getCalled();
}
long ReplicationMetrics::getNumPriorityTakeoversCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getPriorityTakeover().getCalled();
}
long ReplicationMetrics::getNumCatchUpTakeoversCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getCatchUpTakeover().getCalled();
}
long ReplicationMetrics::getNumElectionTimeoutsCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getElectionTimeout().getCalled();
}
long ReplicationMetrics::getNumFreezeTimeoutsCalled_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getFreezeTimeout().getCalled();
}
long ReplicationMetrics::getNumStepUpCmdsSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getStepUpCmd().getSuccessful();
}
long ReplicationMetrics::getNumPriorityTakeoversSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getPriorityTakeover().getSuccessful();
}
long ReplicationMetrics::getNumCatchUpTakeoversSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getCatchUpTakeover().getSuccessful();
}
long ReplicationMetrics::getNumElectionTimeoutsSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getElectionTimeout().getSuccessful();
}
long ReplicationMetrics::getNumFreezeTimeoutsSuccessful_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getFreezeTimeout().getSuccessful();
}
long ReplicationMetrics::getNumStepDownsCausedByHigherTerm_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumStepDownsCausedByHigherTerm();
}
long ReplicationMetrics::getNumCatchUps_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUps();
}
long ReplicationMetrics::getNumCatchUpsSucceeded_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsSucceeded();
}
long ReplicationMetrics::getNumCatchUpsAlreadyCaughtUp_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsAlreadyCaughtUp();
}
long ReplicationMetrics::getNumCatchUpsSkipped_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsSkipped();
}
long ReplicationMetrics::getNumCatchUpsTimedOut_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsTimedOut();
}
long ReplicationMetrics::getNumCatchUpsFailedWithError_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsFailedWithError();
}
long ReplicationMetrics::getNumCatchUpsFailedWithNewTerm_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsFailedWithNewTerm();
}
long ReplicationMetrics::getNumCatchUpsFailedWithReplSetAbortPrimaryCatchUpCmd_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.getNumCatchUpsFailedWithReplSetAbortPrimaryCatchUpCmd();
}
void ReplicationMetrics::setElectionCandidateMetrics(Date_t lastElectionDate) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setLastElectionDate(lastElectionDate);
_nodeIsCandidateOrPrimary = true;
}
void ReplicationMetrics::setTargetCatchupOpTime(OpTime opTime) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setTargetCatchupOpTime(opTime);
}
void ReplicationMetrics::setNumCatchUpOps(int numCatchUpOps) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setNumCatchUpOps(numCatchUpOps);
_totalNumCatchUpOps += numCatchUpOps;
_updateAverageCatchUpOps(lk);
}
void ReplicationMetrics::setNewTermStartDate(Date_t newTermStartDate) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setNewTermStartDate(newTermStartDate);
}
void ReplicationMetrics::setWMajorityWriteAvailabilityDate(Date_t wMajorityWriteAvailabilityDate) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setWMajorityWriteAvailabilityDate(wMajorityWriteAvailabilityDate);
}
boost::optional<OpTime> ReplicationMetrics::getTargetCatchupOpTime_forTesting() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionCandidateMetrics.getTargetCatchupOpTime();
}
BSONObj ReplicationMetrics::getElectionMetricsBSON() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _electionMetrics.toBSON();
}
BSONObj ReplicationMetrics::getElectionCandidateMetricsBSON() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_nodeIsCandidateOrPrimary) {
return _electionCandidateMetrics.toBSON();
}
@@ -310,7 +310,7 @@ BSONObj ReplicationMetrics::getElectionCandidateMetricsBSON() {
}
void ReplicationMetrics::clearElectionCandidateMetrics() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_electionCandidateMetrics.setTargetCatchupOpTime(boost::none);
_electionCandidateMetrics.setNumCatchUpOps(boost::none);
_electionCandidateMetrics.setNewTermStartDate(boost::none);
diff --git a/src/mongo/db/repl/replication_metrics.h b/src/mongo/db/repl/replication_metrics.h
index 816b1fc39bc..a07f84e48cf 100644
--- a/src/mongo/db/repl/replication_metrics.h
+++ b/src/mongo/db/repl/replication_metrics.h
@@ -32,7 +32,7 @@
#include "mongo/db/repl/replication_metrics_gen.h"
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -98,7 +98,7 @@ private:
void _updateAverageCatchUpOps(WithLock lk);
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ReplicationMetrics::_mutex");
ElectionMetrics _electionMetrics;
ElectionCandidateMetrics _electionCandidateMetrics;
ElectionParticipantMetrics _electionParticipantMetrics;
diff --git a/src/mongo/db/repl/replication_process.cpp b/src/mongo/db/repl/replication_process.cpp
index d3e77314cd3..117972289af 100644
--- a/src/mongo/db/repl/replication_process.cpp
+++ b/src/mongo/db/repl/replication_process.cpp
@@ -84,7 +84,7 @@ ReplicationProcess::ReplicationProcess(
_rbid(kUninitializedRollbackId) {}
Status ReplicationProcess::refreshRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto rbidResult = _storageInterface->getRollbackID(opCtx);
if (!rbidResult.isOK()) {
@@ -102,7 +102,7 @@ Status ReplicationProcess::refreshRollbackID(OperationContext* opCtx) {
}
int ReplicationProcess::getRollbackID() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (kUninitializedRollbackId == _rbid) {
// This may happen when serverStatus is called by an internal client before we have a chance
// to read the rollback ID from storage.
@@ -112,7 +112,7 @@ int ReplicationProcess::getRollbackID() const {
}
Status ReplicationProcess::initializeRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(kUninitializedRollbackId == _rbid);
@@ -132,7 +132,7 @@ Status ReplicationProcess::initializeRollbackID(OperationContext* opCtx) {
}
Status ReplicationProcess::incrementRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto status = _storageInterface->incrementRollbackID(opCtx);
diff --git a/src/mongo/db/repl/replication_process.h b/src/mongo/db/repl/replication_process.h
index 849ac7df8c4..82c298d363d 100644
--- a/src/mongo/db/repl/replication_process.h
+++ b/src/mongo/db/repl/replication_process.h
@@ -38,7 +38,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_consistency_markers.h"
#include "mongo/db/repl/replication_recovery.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -103,7 +103,7 @@ private:
// (M) Reads and writes guarded by _mutex.
// Guards access to member variables.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ReplicationProcess::_mutex");
// Used to access the storage layer.
StorageInterface* const _storageInterface; // (R)
diff --git a/src/mongo/db/repl/replication_recovery_test.cpp b/src/mongo/db/repl/replication_recovery_test.cpp
index ca678ebce17..3e97d2a56c4 100644
--- a/src/mongo/db/repl/replication_recovery_test.cpp
+++ b/src/mongo/db/repl/replication_recovery_test.cpp
@@ -64,47 +64,47 @@ const NamespaceString testNs("a.a");
class StorageInterfaceRecovery : public StorageInterfaceImpl {
public:
boost::optional<Timestamp> getRecoveryTimestamp(ServiceContext* serviceCtx) const override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _recoveryTimestamp;
}
void setRecoveryTimestamp(Timestamp recoveryTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_recoveryTimestamp = recoveryTimestamp;
}
bool supportsRecoverToStableTimestamp(ServiceContext* serviceCtx) const override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _supportsRecoverToStableTimestamp;
}
void setSupportsRecoverToStableTimestamp(bool supports) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_supportsRecoverToStableTimestamp = supports;
}
bool supportsRecoveryTimestamp(ServiceContext* serviceCtx) const override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _supportsRecoveryTimestamp;
}
void setSupportsRecoveryTimestamp(bool supports) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_supportsRecoveryTimestamp = supports;
}
void setPointInTimeReadTimestamp(Timestamp pointInTimeReadTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_pointInTimeReadTimestamp = pointInTimeReadTimestamp;
}
Timestamp getPointInTimeReadTimestamp(OperationContext* opCtx) const override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _pointInTimeReadTimestamp;
}
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("StorageInterfaceRecovery::_mutex");
Timestamp _initialDataTimestamp = Timestamp::min();
boost::optional<Timestamp> _recoveryTimestamp = boost::none;
Timestamp _pointInTimeReadTimestamp = {};
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index 5e7c852d211..d659ee83965 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -118,17 +118,17 @@ std::string Reporter::toString() const {
}
HostAndPort Reporter::getTarget() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _target;
}
Milliseconds Reporter::getKeepAliveInterval() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _keepAliveInterval;
}
void Reporter::shutdown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_status = Status(ErrorCodes::CallbackCanceled, "Reporter no longer valid");
@@ -152,13 +152,13 @@ void Reporter::shutdown() {
}
Status Reporter::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() { return !_isActive_inlock(); });
return _status;
}
Status Reporter::trigger() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If there was a previous error then the reporter is dead and we return that error.
if (!_status.isOK()) {
@@ -196,7 +196,7 @@ Status Reporter::trigger() {
StatusWith<BSONObj> Reporter::_prepareCommand() {
auto prepareResult = _prepareReplSetUpdatePositionCommandFn();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Reporter could have been canceled while preparing the command.
if (!_status.isOK()) {
@@ -239,7 +239,7 @@ void Reporter::_sendCommand_inlock(BSONObj commandRequest, Milliseconds netTimeo
void Reporter::_processResponseCallback(
const executor::TaskExecutor::RemoteCommandCallbackArgs& rcbd) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If the reporter was shut down before this callback is invoked,
// return the canceled "_status".
@@ -299,7 +299,7 @@ void Reporter::_processResponseCallback(
// Must call without holding the lock.
auto prepareResult = _prepareCommand();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_status.isOK()) {
_onShutdown_inlock();
return;
@@ -318,7 +318,7 @@ void Reporter::_processResponseCallback(
void Reporter::_prepareAndSendCommandCallback(const executor::TaskExecutor::CallbackArgs& args,
bool fromTrigger) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_status.isOK()) {
_onShutdown_inlock();
return;
@@ -341,7 +341,7 @@ void Reporter::_prepareAndSendCommandCallback(const executor::TaskExecutor::Call
// Must call without holding the lock.
auto prepareResult = _prepareCommand();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_status.isOK()) {
_onShutdown_inlock();
return;
@@ -367,7 +367,7 @@ void Reporter::_onShutdown_inlock() {
}
bool Reporter::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isActive_inlock();
}
@@ -376,12 +376,12 @@ bool Reporter::_isActive_inlock() const {
}
bool Reporter::isWaitingToSendReport() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isWaitingToSendReporter;
}
Date_t Reporter::getKeepAliveTimeoutWhen_forTest() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _keepAliveTimeoutWhen;
}
diff --git a/src/mongo/db/repl/reporter.h b/src/mongo/db/repl/reporter.h
index f6cc0ea8cea..caa67aaa528 100644
--- a/src/mongo/db/repl/reporter.h
+++ b/src/mongo/db/repl/reporter.h
@@ -36,8 +36,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -188,7 +188,7 @@ private:
const Milliseconds _updatePositionTimeout;
// Protects member data of this Reporter declared below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("Reporter::_mutex");
mutable stdx::condition_variable _condition;
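[Illustrative aside, not part of the patch] Where a condition variable shares the guarded state, as in Reporter above, the include moves from mongo/stdx/condition_variable.h to mongo/platform/condition_variable.h and waits take a stdx::unique_lock<Latch>. A hedged sketch of that wait/notify shape, mirroring Reporter::join(); names below are hypothetical:

// Sketch only -- not part of this patch. Hypothetical pairing of the new Mutex
// with stdx::condition_variable under the latch-templated lock types.
#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"

namespace mongo {

class Worker {
public:
    void markDone() {
        stdx::lock_guard<Latch> lk(_mutex);
        _done = true;
        _condition.notify_all();
    }

    void join() {
        stdx::unique_lock<Latch> lk(_mutex);            // was stdx::unique_lock<stdx::mutex>
        _condition.wait(lk, [this] { return _done; });  // predicate re-checked under the latch
    }

private:
    mutable Mutex _mutex = MONGO_MAKE_LATCH("Worker::_mutex");
    stdx::condition_variable _condition;
    bool _done = false;  // guarded by _mutex
};

}  // namespace mongo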
diff --git a/src/mongo/db/repl/rollback_checker.cpp b/src/mongo/db/repl/rollback_checker.cpp
index cb5e57f6ae9..9089163aae5 100644
--- a/src/mongo/db/repl/rollback_checker.cpp
+++ b/src/mongo/db/repl/rollback_checker.cpp
@@ -33,14 +33,13 @@
#include "mongo/db/repl/rollback_checker.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
namespace mongo {
namespace repl {
using RemoteCommandCallbackArgs = executor::TaskExecutor::RemoteCommandCallbackArgs;
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
RollbackChecker::RollbackChecker(executor::TaskExecutor* executor, HostAndPort syncSource)
: _executor(executor), _syncSource(syncSource), _baseRBID(-1), _lastRBID(-1) {
@@ -121,12 +120,12 @@ Status RollbackChecker::reset_sync() {
}
int RollbackChecker::getBaseRBID() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _baseRBID;
}
int RollbackChecker::getLastRBID_forTest() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastRBID;
}
diff --git a/src/mongo/db/repl/rollback_checker.h b/src/mongo/db/repl/rollback_checker.h
index 768dd47bf63..75a948af1cb 100644
--- a/src/mongo/db/repl/rollback_checker.h
+++ b/src/mongo/db/repl/rollback_checker.h
@@ -31,12 +31,11 @@
#include "mongo/base/status_with.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
-class Mutex;
-
/**
* The RollbackChecker maintains a sync source and its baseline rollback ID (rbid). It
* contains methods to check if a rollback occurred by checking if the rbid has changed since
@@ -119,7 +118,7 @@ private:
executor::TaskExecutor* const _executor;
// Protects member data of this RollbackChecker.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RollbackChecker::_mutex");
// The sync source to check for rollbacks against.
HostAndPort _syncSource;
diff --git a/src/mongo/db/repl/rollback_checker_test.cpp b/src/mongo/db/repl/rollback_checker_test.cpp
index 46d366645cb..3c4bbdd0941 100644
--- a/src/mongo/db/repl/rollback_checker_test.cpp
+++ b/src/mongo/db/repl/rollback_checker_test.cpp
@@ -46,7 +46,7 @@ using namespace mongo::repl;
using executor::NetworkInterfaceMock;
using executor::RemoteCommandResponse;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
class RollbackCheckerTest : public executor::ThreadPoolExecutorTest {
public:
@@ -58,7 +58,7 @@ protected:
std::unique_ptr<RollbackChecker> _rollbackChecker;
RollbackChecker::Result _hasRolledBackResult = {ErrorCodes::NotYetInitialized, ""};
bool _hasCalledCallback;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RollbackCheckerTest::_mutex");
};
void RollbackCheckerTest::setUp() {
@@ -66,7 +66,7 @@ void RollbackCheckerTest::setUp() {
launchExecutorThread();
getNet()->enterNetwork();
_rollbackChecker = std::make_unique<RollbackChecker>(&getExecutor(), HostAndPort());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_hasRolledBackResult = {ErrorCodes::NotYetInitialized, ""};
_hasCalledCallback = false;
}
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index ecb73b66573..4c670a82a2b 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -258,12 +258,12 @@ Status RollbackImpl::runRollback(OperationContext* opCtx) {
}
void RollbackImpl::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_inShutdown = true;
}
bool RollbackImpl::_isInShutdown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _inShutdown;
}
diff --git a/src/mongo/db/repl/rollback_impl.h b/src/mongo/db/repl/rollback_impl.h
index 5b32d6abb32..69dbb520161 100644
--- a/src/mongo/db/repl/rollback_impl.h
+++ b/src/mongo/db/repl/rollback_impl.h
@@ -449,7 +449,7 @@ private:
void _resetDropPendingState(OperationContext* opCtx);
// Guards access to member variables.
- mutable stdx::mutex _mutex; // (S)
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("RollbackImpl::_mutex"); // (S)
// Set to true when RollbackImpl should shut down.
bool _inShutdown = false; // (M)
diff --git a/src/mongo/db/repl/rollback_test_fixture.h b/src/mongo/db/repl/rollback_test_fixture.h
index f4c4ce5a13a..10f4b51d566 100644
--- a/src/mongo/db/repl/rollback_test_fixture.h
+++ b/src/mongo/db/repl/rollback_test_fixture.h
@@ -119,7 +119,7 @@ protected:
class RollbackTest::StorageInterfaceRollback : public StorageInterfaceImpl {
public:
void setStableTimestamp(ServiceContext* serviceCtx, Timestamp snapshotName) override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_stableTimestamp = snapshotName;
}
@@ -129,7 +129,7 @@ public:
* of '_currTimestamp'.
*/
StatusWith<Timestamp> recoverToStableTimestamp(OperationContext* opCtx) override {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_recoverToTimestampStatus) {
return _recoverToTimestampStatus.get();
} else {
@@ -152,17 +152,17 @@ public:
}
void setRecoverToTimestampStatus(Status status) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_recoverToTimestampStatus = status;
}
void setCurrentTimestamp(Timestamp ts) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_currTimestamp = ts;
}
Timestamp getCurrentTimestamp() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _currTimestamp;
}
@@ -172,7 +172,7 @@ public:
Status setCollectionCount(OperationContext* opCtx,
const NamespaceStringOrUUID& nsOrUUID,
long long newCount) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_setCollectionCountStatus && _setCollectionCountStatusUUID &&
nsOrUUID.uuid() == _setCollectionCountStatusUUID) {
return *_setCollectionCountStatus;
@@ -182,18 +182,18 @@ public:
}
void setSetCollectionCountStatus(UUID uuid, Status status) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_setCollectionCountStatus = status;
_setCollectionCountStatusUUID = uuid;
}
long long getFinalCollectionCount(const UUID& uuid) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _newCounts[uuid];
}
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("StorageInterfaceRollback::_mutex");
Timestamp _stableTimestamp;
diff --git a/src/mongo/db/repl/scatter_gather_runner.cpp b/src/mongo/db/repl/scatter_gather_runner.cpp
index 20d392acf44..18e3bc761b9 100644
--- a/src/mongo/db/repl/scatter_gather_runner.cpp
+++ b/src/mongo/db/repl/scatter_gather_runner.cpp
@@ -46,7 +46,7 @@ namespace mongo {
namespace repl {
using executor::RemoteCommandRequest;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using LockGuard = stdx::lock_guard<Latch>;
using CallbackHandle = executor::TaskExecutor::CallbackHandle;
using EventHandle = executor::TaskExecutor::EventHandle;
using RemoteCommandCallbackArgs = executor::TaskExecutor::RemoteCommandCallbackArgs;
diff --git a/src/mongo/db/repl/scatter_gather_runner.h b/src/mongo/db/repl/scatter_gather_runner.h
index d38bdc4862b..90f20bc20b5 100644
--- a/src/mongo/db/repl/scatter_gather_runner.h
+++ b/src/mongo/db/repl/scatter_gather_runner.h
@@ -33,7 +33,7 @@
#include <vector>
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -134,7 +134,7 @@ private:
executor::TaskExecutor::EventHandle _sufficientResponsesReceived;
std::vector<executor::TaskExecutor::CallbackHandle> _callbacks;
bool _started = false;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("RunnerImpl::_mutex");
};
executor::TaskExecutor* _executor; // Not owned here.
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 88f4e2f36c7..dde8249541d 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -89,7 +89,7 @@ const char StorageInterfaceImpl::kRollbackIdFieldName[] = "rollbackId";
const char StorageInterfaceImpl::kRollbackIdDocumentId[] = "rollbackId";
namespace {
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
const auto kIdIndexName = "_id_"_sd;
diff --git a/src/mongo/db/repl/storage_interface_mock.cpp b/src/mongo/db/repl/storage_interface_mock.cpp
index 77936b4453d..e9fa17504be 100644
--- a/src/mongo/db/repl/storage_interface_mock.cpp
+++ b/src/mongo/db/repl/storage_interface_mock.cpp
@@ -41,7 +41,7 @@ namespace mongo {
namespace repl {
StatusWith<int> StorageInterfaceMock::getRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (!_rbidInitialized) {
return Status(ErrorCodes::NamespaceNotFound, "Rollback ID not initialized");
}
@@ -49,7 +49,7 @@ StatusWith<int> StorageInterfaceMock::getRollbackID(OperationContext* opCtx) {
}
StatusWith<int> StorageInterfaceMock::initializeRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_rbidInitialized) {
return Status(ErrorCodes::NamespaceExists, "Rollback ID already initialized");
}
@@ -61,7 +61,7 @@ StatusWith<int> StorageInterfaceMock::initializeRollbackID(OperationContext* opC
}
StatusWith<int> StorageInterfaceMock::incrementRollbackID(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (!_rbidInitialized) {
return Status(ErrorCodes::NamespaceNotFound, "Rollback ID not initialized");
}
@@ -70,23 +70,23 @@ StatusWith<int> StorageInterfaceMock::incrementRollbackID(OperationContext* opCt
}
void StorageInterfaceMock::setStableTimestamp(ServiceContext* serviceCtx, Timestamp snapshotName) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_stableTimestamp = snapshotName;
}
void StorageInterfaceMock::setInitialDataTimestamp(ServiceContext* serviceCtx,
Timestamp snapshotName) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_initialDataTimestamp = snapshotName;
}
Timestamp StorageInterfaceMock::getStableTimestamp() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _stableTimestamp;
}
Timestamp StorageInterfaceMock::getInitialDataTimestamp() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _initialDataTimestamp;
}
diff --git a/src/mongo/db/repl/storage_interface_mock.h b/src/mongo/db/repl/storage_interface_mock.h
index ec32c6dc059..19bd3c69186 100644
--- a/src/mongo/db/repl/storage_interface_mock.h
+++ b/src/mongo/db/repl/storage_interface_mock.h
@@ -43,7 +43,7 @@
#include "mongo/bson/timestamp.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/storage_interface.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace repl {
@@ -408,7 +408,7 @@ public:
Timestamp oldestOpenReadTimestamp = Timestamp::min();
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("StorageInterfaceMock::_mutex");
int _rbid;
bool _rbidInitialized = false;
Timestamp _stableTimestamp = Timestamp::min();
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 012bad86797..03b5af98376 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -98,7 +98,7 @@ Reporter::PrepareReplSetUpdatePositionCommandFn makePrepareReplSetUpdatePosition
void SyncSourceFeedback::forwardSlaveProgress() {
{
- stdx::unique_lock<stdx::mutex> lock(_mtx);
+ stdx::unique_lock<Latch> lock(_mtx);
_positionChanged = true;
_cond.notify_all();
if (_reporter) {
@@ -133,7 +133,7 @@ Status SyncSourceFeedback::_updateUpstream(Reporter* reporter) {
}
void SyncSourceFeedback::shutdown() {
- stdx::unique_lock<stdx::mutex> lock(_mtx);
+ stdx::unique_lock<Latch> lock(_mtx);
if (_reporter) {
_reporter->shutdown();
}
@@ -161,7 +161,7 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor,
// Take SyncSourceFeedback lock before calling into ReplicationCoordinator
// to avoid deadlock because ReplicationCoordinator could conceivably call back into
// this class.
- stdx::unique_lock<stdx::mutex> lock(_mtx);
+ stdx::unique_lock<Latch> lock(_mtx);
while (!_positionChanged && !_shutdownSignaled) {
{
MONGO_IDLE_THREAD_BLOCK;
@@ -184,7 +184,7 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor,
}
{
- stdx::lock_guard<stdx::mutex> lock(_mtx);
+ stdx::lock_guard<Latch> lock(_mtx);
MemberState state = replCoord->getMemberState();
if (state.primary() || state.startup()) {
continue;
@@ -220,14 +220,14 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor,
keepAliveInterval,
syncSourceFeedbackNetworkTimeoutSecs);
{
- stdx::lock_guard<stdx::mutex> lock(_mtx);
+ stdx::lock_guard<Latch> lock(_mtx);
if (_shutdownSignaled) {
break;
}
_reporter = &reporter;
}
ON_BLOCK_EXIT([this]() {
- stdx::lock_guard<stdx::mutex> lock(_mtx);
+ stdx::lock_guard<Latch> lock(_mtx);
_reporter = nullptr;
});
diff --git a/src/mongo/db/repl/sync_source_feedback.h b/src/mongo/db/repl/sync_source_feedback.h
index a75cb23ad64..3688de9a0ed 100644
--- a/src/mongo/db/repl/sync_source_feedback.h
+++ b/src/mongo/db/repl/sync_source_feedback.h
@@ -32,8 +32,8 @@
#include "mongo/base/status.h"
#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
struct HostAndPort;
@@ -79,7 +79,7 @@ private:
Status _updateUpstream(Reporter* reporter);
// protects cond, _shutdownSignaled, _keepAliveInterval, and _positionChanged.
- stdx::mutex _mtx;
+ Mutex _mtx = MONGO_MAKE_LATCH("SyncSourceFeedback::_mtx");
// used to alert our thread of changes which need to be passed up the chain
stdx::condition_variable _cond;
// used to indicate a position change which has not yet been pushed along
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index 45364e05bf7..0b371f9359d 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -85,7 +85,7 @@ SyncSourceResolver::~SyncSourceResolver() {
}
bool SyncSourceResolver::isActive() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _isActive_inlock();
}
@@ -95,7 +95,7 @@ bool SyncSourceResolver::_isActive_inlock() const {
Status SyncSourceResolver::startup() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
switch (_state) {
case State::kPreStart:
_state = State::kRunning;
@@ -113,7 +113,7 @@ Status SyncSourceResolver::startup() {
}
void SyncSourceResolver::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Transition directly from PreStart to Complete if not started yet.
if (State::kPreStart == _state) {
_state = State::kComplete;
@@ -137,12 +137,12 @@ void SyncSourceResolver::shutdown() {
}
void SyncSourceResolver::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_condition.wait(lk, [this]() { return !_isActive_inlock(); });
}
bool SyncSourceResolver::_isShuttingDown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return State::kShuttingDown == _state;
}
@@ -206,7 +206,7 @@ std::unique_ptr<Fetcher> SyncSourceResolver::_makeRequiredOpTimeFetcher(HostAndP
}
Status SyncSourceResolver::_scheduleFetcher(std::unique_ptr<Fetcher> fetcher) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// TODO SERVER-27499 need to check if _state is kShuttingDown inside the mutex.
// Must schedule fetcher inside lock in case fetcher's callback gets invoked immediately by task
// executor.
@@ -341,7 +341,7 @@ Status SyncSourceResolver::_scheduleRBIDRequest(HostAndPort candidate, OpTime ea
// Once work is scheduled, nothing prevents it from finishing. We need the mutex to protect the
// access of member variables after scheduling, because otherwise the scheduled callback could
// finish and allow the destructor to fire before we access the member variables.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state == State::kShuttingDown) {
return Status(
ErrorCodes::CallbackCanceled,
@@ -530,7 +530,7 @@ Status SyncSourceResolver::_finishCallback(const SyncSourceResolverResponse& res
<< exceptionToStatus();
}
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state != State::kComplete);
_state = State::kComplete;
_condition.notify_all();
diff --git a/src/mongo/db/repl/sync_source_resolver.h b/src/mongo/db/repl/sync_source_resolver.h
index abe6396e650..2b2734d2c70 100644
--- a/src/mongo/db/repl/sync_source_resolver.h
+++ b/src/mongo/db/repl/sync_source_resolver.h
@@ -38,8 +38,8 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/optime.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
@@ -234,7 +234,7 @@ private:
const OnCompletionFn _onCompletion;
// Protects members of this sync source resolver defined below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("SyncSourceResolverResponse::_mutex");
mutable stdx::condition_variable _condition;
// State transitions:
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 755ab860a07..c8de5ed6e80 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -151,7 +151,7 @@ private:
void _run();
// Protects _cond, _shutdownSignaled, and _latestOpTime.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ApplyBatchFinalizerForJournal::_mutex");
// Used to alert our thread of a new OpTime.
stdx::condition_variable _cond;
// The next OpTime to set as the ReplicationCoordinator's lastOpTime after flushing.
@@ -163,7 +163,7 @@ private:
};
ApplyBatchFinalizerForJournal::~ApplyBatchFinalizerForJournal() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_shutdownSignaled = true;
_cond.notify_all();
lock.unlock();
@@ -175,7 +175,7 @@ void ApplyBatchFinalizerForJournal::record(const OpTimeAndWallTime& newOpTimeAnd
ReplicationCoordinator::DataConsistency consistency) {
_recordApplied(newOpTimeAndWallTime, consistency);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_latestOpTimeAndWallTime = newOpTimeAndWallTime;
_cond.notify_all();
}
@@ -187,7 +187,7 @@ void ApplyBatchFinalizerForJournal::_run() {
OpTimeAndWallTime latestOpTimeAndWallTime = {OpTime(), Date_t()};
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (_latestOpTimeAndWallTime.opTime.isNull() && !_shutdownSignaled) {
_cond.wait(lock);
}
@@ -491,7 +491,7 @@ public:
}
OpQueue getNextBatch(Seconds maxWaitTime) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// _ops can indicate the following cases:
// 1. A new batch is ready to consume.
// 2. Shutdown.
@@ -604,7 +604,7 @@ private:
}
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Block until the previous batch has been taken.
_cv.wait(lk, [&] { return _ops.empty() && !_ops.termWhenExhausted(); });
_ops = std::move(ops);
@@ -621,7 +621,7 @@ private:
OplogBuffer* const _oplogBuffer;
OplogApplier::GetNextApplierBatchFn const _getNextApplierBatchFn;
- stdx::mutex _mutex; // Guards _ops.
+ Mutex _mutex = MONGO_MAKE_LATCH("OpQueueBatcher::_mutex"); // Guards _ops.
stdx::condition_variable _cv;
OpQueue _ops;
@@ -756,12 +756,12 @@ void SyncTail::runLoop(OplogBuffer* oplogBuffer,
}
void SyncTail::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_inShutdown = true;
}
bool SyncTail::inShutdown() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _inShutdown;
}
diff --git a/src/mongo/db/repl/sync_tail.h b/src/mongo/db/repl/sync_tail.h
index 770663e9dab..364df797cad 100644
--- a/src/mongo/db/repl/sync_tail.h
+++ b/src/mongo/db/repl/sync_tail.h
@@ -43,7 +43,7 @@
#include "mongo/db/repl/replication_consistency_markers.h"
#include "mongo/db/repl/session_update_tracker.h"
#include "mongo/db/repl/storage_interface.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
namespace mongo {
@@ -239,7 +239,7 @@ private:
const OplogApplier::Options _options;
// Protects member data of SyncTail.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("SyncTail::_mutex");
// Set to true if shutdown() has been called.
bool _inShutdown = false;
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index 4c8f44ad8d8..c2a18035ea7 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -65,7 +65,7 @@
#include "mongo/db/session_txn_record_gen.h"
#include "mongo/db/stats/counters.h"
#include "mongo/db/transaction_participant_gen.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
@@ -435,7 +435,7 @@ protected:
_insertOp2->getOpTime());
_opObserver->onInsertsFn =
[&](OperationContext*, const NamespaceString& nss, const std::vector<BSONObj>& docs) {
- stdx::lock_guard<stdx::mutex> lock(_insertMutex);
+ stdx::lock_guard<Latch> lock(_insertMutex);
if (nss.isOplog() || nss == _nss1 || nss == _nss2 ||
nss == NamespaceString::kSessionTransactionsTableNamespace) {
_insertedDocs[nss].insert(_insertedDocs[nss].end(), docs.begin(), docs.end());
@@ -482,7 +482,7 @@ protected:
std::unique_ptr<ThreadPool> _writerPool;
private:
- stdx::mutex _insertMutex;
+ Mutex _insertMutex = MONGO_MAKE_LATCH("MultiOplogEntrySyncTailTest::_insertMutex");
};
TEST_F(MultiOplogEntrySyncTailTest, MultiApplyUnpreparedTransactionSeparate) {
@@ -816,7 +816,7 @@ protected:
_abortSinglePrepareApplyOp;
private:
- stdx::mutex _insertMutex;
+ Mutex _insertMutex = MONGO_MAKE_LATCH("MultiOplogEntryPreparedTransactionTest::_insertMutex");
};
TEST_F(MultiOplogEntryPreparedTransactionTest, MultiApplyPreparedTransactionSteadyState) {
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index 4c53b558aa1..86edc6da9c5 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -50,8 +50,8 @@ namespace mongo {
namespace repl {
namespace {
-using UniqueLock = stdx::unique_lock<stdx::mutex>;
-using LockGuard = stdx::lock_guard<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<Latch>;
+using LockGuard = stdx::lock_guard<Latch>;
/**
@@ -87,7 +87,7 @@ TaskRunner::~TaskRunner() {
}
std::string TaskRunner::getDiagnosticString() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
str::stream output;
output << "TaskRunner";
output << " scheduled tasks: " << _tasks.size();
@@ -97,14 +97,14 @@ std::string TaskRunner::getDiagnosticString() const {
}
bool TaskRunner::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _active;
}
void TaskRunner::schedule(Task task) {
invariant(task);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_tasks.push_back(std::move(task));
_condition.notify_all();
@@ -123,7 +123,7 @@ void TaskRunner::schedule(Task task) {
}
void TaskRunner::cancel() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_cancelRequested = true;
_condition.notify_all();
}
@@ -159,7 +159,7 @@ void TaskRunner::_runTasks() {
// Release thread back to pool after disposing if no scheduled tasks in queue.
if (nextAction == NextAction::kDisposeOperationContext ||
nextAction == NextAction::kInvalid) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_tasks.empty()) {
_finishRunTasks_inlock();
return;
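[Illustrative aside, not part of the patch] Files that alias their lock types, such as task_runner.cpp above, only retarget the alias at Latch, leaving every call site untouched. A sketch under the same assumptions (class and member names hypothetical):

// Sketch only -- not part of this patch. Retargeting local lock aliases keeps
// call sites unchanged while moving them onto the latch-aware Mutex.
#include <vector>

#include "mongo/platform/mutex.h"

namespace mongo {
namespace {

using LockGuard = stdx::lock_guard<Latch>;    // was stdx::lock_guard<stdx::mutex>
using UniqueLock = stdx::unique_lock<Latch>;  // was stdx::unique_lock<stdx::mutex>

class TaskList {
public:
    void push(int task) {
        LockGuard lk(_mutex);  // the alias change is invisible at call sites
        _tasks.push_back(task);
    }

    bool empty() const {
        LockGuard lk(_mutex);
        return _tasks.empty();
    }

private:
    mutable Mutex _mutex = MONGO_MAKE_LATCH("TaskList::_mutex");
    std::vector<int> _tasks;  // guarded by _mutex
};

}  // namespace
}  // namespace mongo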
diff --git a/src/mongo/db/repl/task_runner.h b/src/mongo/db/repl/task_runner.h
index 9b15ed3d629..c1db72bdba5 100644
--- a/src/mongo/db/repl/task_runner.h
+++ b/src/mongo/db/repl/task_runner.h
@@ -33,8 +33,8 @@
#include <list>
#include "mongo/db/service_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/functional.h"
@@ -151,7 +151,7 @@ private:
ThreadPool* _threadPool;
// Protects member data of this TaskRunner.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TaskRunner::_mutex");
stdx::condition_variable _condition;
diff --git a/src/mongo/db/repl/task_runner_test.cpp b/src/mongo/db/repl/task_runner_test.cpp
index 6953f4900ec..96ad44916aa 100644
--- a/src/mongo/db/repl/task_runner_test.cpp
+++ b/src/mongo/db/repl/task_runner_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/repl/task_runner.h"
#include "mongo/db/repl/task_runner_test_fixture.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/barrier.h"
#include "mongo/util/concurrency/thread_pool.h"
@@ -57,12 +57,12 @@ TEST_F(TaskRunnerTest, GetDiagnosticString) {
}
TEST_F(TaskRunnerTest, CallbackValues) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
bool called = false;
OperationContext* opCtx = nullptr;
Status status = getDetectableErrorStatus();
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
called = true;
opCtx = theTxn;
status = theStatus;
@@ -72,7 +72,7 @@ TEST_F(TaskRunnerTest, CallbackValues) {
getThreadPool().waitForIdle();
ASSERT_FALSE(getTaskRunner().isActive());
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_TRUE(called);
ASSERT(opCtx);
ASSERT_OK(status);
@@ -84,11 +84,11 @@ OpIdVector _testRunTaskTwice(TaskRunnerTest& test,
TaskRunner::NextAction nextAction,
unique_function<void(Task task)> schedule) {
unittest::Barrier barrier(2U);
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
std::vector<OperationContext*> txns;
OpIdVector txnIds;
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
if (txns.size() >= 2U) {
return TaskRunner::NextAction::kInvalid;
}
@@ -111,7 +111,7 @@ OpIdVector _testRunTaskTwice(TaskRunnerTest& test,
test.getThreadPool().waitForIdle();
ASSERT_FALSE(test.getTaskRunner().isActive());
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQUALS(2U, txns.size());
ASSERT(txns[0]);
ASSERT(txns[1]);
@@ -148,14 +148,14 @@ TEST_F(TaskRunnerTest, RunTaskTwiceKeepOperationContext) {
}
TEST_F(TaskRunnerTest, SkipSecondTask) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
int i = 0;
OperationContext* opCtx[2] = {nullptr, nullptr};
Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()};
stdx::condition_variable condition;
bool schedulingDone = false;
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
int j = i++;
if (j >= 2) {
return TaskRunner::NextAction::kCancel;
@@ -174,14 +174,14 @@ TEST_F(TaskRunnerTest, SkipSecondTask) {
ASSERT_TRUE(getTaskRunner().isActive());
getTaskRunner().schedule(task);
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
schedulingDone = true;
condition.notify_all();
}
getThreadPool().waitForIdle();
ASSERT_FALSE(getTaskRunner().isActive());
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQUALS(2, i);
ASSERT(opCtx[0]);
ASSERT_OK(status[0]);
@@ -190,14 +190,14 @@ TEST_F(TaskRunnerTest, SkipSecondTask) {
}
TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
int i = 0;
OperationContext* opCtx[2] = {nullptr, nullptr};
Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()};
stdx::condition_variable condition;
bool schedulingDone = false;
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
int j = i++;
if (j >= 2) {
return TaskRunner::NextAction::kCancel;
@@ -223,14 +223,14 @@ TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
ASSERT_TRUE(getTaskRunner().isActive());
getTaskRunner().schedule(task);
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
schedulingDone = true;
condition.notify_all();
}
getThreadPool().waitForIdle();
ASSERT_FALSE(getTaskRunner().isActive());
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQUALS(2, i);
ASSERT(opCtx[0]);
ASSERT_OK(status[0]);
@@ -239,7 +239,7 @@ TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
}
TEST_F(TaskRunnerTest, Cancel) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable condition;
Status status = getDetectableErrorStatus();
bool taskRunning = false;
@@ -247,7 +247,7 @@ TEST_F(TaskRunnerTest, Cancel) {
// Running this task causes the task runner to wait for another task that
// is never scheduled.
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
status = theStatus;
taskRunning = true;
condition.notify_all();
@@ -261,7 +261,7 @@ TEST_F(TaskRunnerTest, Cancel) {
getTaskRunner().schedule(task);
ASSERT_TRUE(getTaskRunner().isActive());
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
while (!taskRunning) {
condition.wait(lk);
}
@@ -276,13 +276,13 @@ TEST_F(TaskRunnerTest, Cancel) {
// This status will not be OK if canceling the task runner
// before scheduling the task results in the task being canceled.
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_OK(status);
}
TEST_F(TaskRunnerTest, JoinShouldWaitForTasksToComplete) {
unittest::Barrier barrier(2U);
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
Status status1 = getDetectableErrorStatus();
Status status2 = getDetectableErrorStatus();
@@ -290,7 +290,7 @@ TEST_F(TaskRunnerTest, JoinShouldWaitForTasksToComplete) {
// Upon completion, "task1" requests the task runner to retain the operation context. This has
// the effect of keeping the task runner active.
auto task1 = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
barrier.countDownAndWait();
status1 = theStatus;
return TaskRunner::NextAction::kKeepOperationContext;
@@ -300,7 +300,7 @@ TEST_F(TaskRunnerTest, JoinShouldWaitForTasksToComplete) {
// Upon completion, "task2" requests the task runner to dispose the operation context. After the
// operation context is destroyed, the task runner will go into an inactive state.
auto task2 = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
status2 = theStatus;
return TaskRunner::NextAction::kDisposeOperationContext;
};
@@ -314,13 +314,13 @@ TEST_F(TaskRunnerTest, JoinShouldWaitForTasksToComplete) {
// This status should be OK because we ensured that the task
// was scheduled and invoked before we called cancel().
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_OK(status1);
ASSERT_OK(status2);
}
TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable condition;
Status status = getDetectableErrorStatus();
bool taskRunning = false;
@@ -328,7 +328,7 @@ TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
// Running this task causes the task runner to wait for another task that
// is never scheduled.
auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
status = theStatus;
taskRunning = true;
condition.notify_all();
@@ -338,7 +338,7 @@ TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
getTaskRunner().schedule(task);
ASSERT_TRUE(getTaskRunner().isActive());
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
while (!taskRunning) {
condition.wait(lk);
}
@@ -350,7 +350,7 @@ TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
// This status will not be OK if canceling the task runner
// before scheduling the task results in the task being canceled.
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_OK(status);
}
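[Illustrative aside, not part of the patch] Function-local mutexes in the tests above become anonymous latches: auto mutex = MONGO_MAKE_LATCH(); with no diagnostic name, while the guards are again templated on Latch. A condensed, hypothetical rendering of that test-local pattern:

// Sketch only -- not part of this patch. Test-local latch without a diagnostic
// name, guarded the same way as the named members elsewhere in the commit.
#include "mongo/platform/mutex.h"

namespace mongo {

bool runTaskOnce() {
    auto mutex = MONGO_MAKE_LATCH();  // was: stdx::mutex mutex;
    bool called = false;

    auto task = [&] {
        stdx::lock_guard<Latch> lk(mutex);  // was stdx::lock_guard<stdx::mutex>
        called = true;
    };
    task();

    stdx::lock_guard<Latch> lk(mutex);  // read back under the same latch
    return called;
}

}  // namespace mongo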
diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
index 8060e7f9488..2a9eb7932f3 100644
--- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp
@@ -1529,7 +1529,6 @@ TEST_F(TopoCoordTest, ReplSetGetStatus) {
Date_t appliedWallTime = Date_t() + Seconds(oplogProgress.getSecs());
OpTime oplogDurable(Timestamp(1, 1), 19);
Date_t durableWallTime = Date_t() + Seconds(oplogDurable.getSecs());
- ;
OpTime lastCommittedOpTime(Timestamp(5, 1), 20);
Date_t lastCommittedWallTime = Date_t() + Seconds(lastCommittedOpTime.getSecs());
OpTime readConcernMajorityOpTime(Timestamp(4, 1), 20);
diff --git a/src/mongo/db/repl_index_build_state.h b/src/mongo/db/repl_index_build_state.h
index 94d88ce78c4..fbd2639e2b9 100644
--- a/src/mongo/db/repl_index_build_state.h
+++ b/src/mongo/db/repl_index_build_state.h
@@ -38,7 +38,7 @@
#include "mongo/db/catalog/commit_quorum_options.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/future.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/uuid.h"
@@ -104,7 +104,7 @@ struct ReplIndexBuildState {
IndexBuildProtocol protocol;
// Protects the state below.
- mutable stdx::mutex mutex;
+ mutable Mutex mutex = MONGO_MAKE_LATCH("ReplIndexBuildState::mutex");
// Secondaries do not set this information, so it is only set on primaries or on
// transition to primary.
diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp
index a3854cb9038..def2a02bac2 100644
--- a/src/mongo/db/s/active_migrations_registry.cpp
+++ b/src/mongo/db/s/active_migrations_registry.cpp
@@ -60,7 +60,7 @@ ActiveMigrationsRegistry& ActiveMigrationsRegistry::get(OperationContext* opCtx)
StatusWith<ScopedDonateChunk> ActiveMigrationsRegistry::registerDonateChunk(
const MoveChunkRequest& args) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeReceiveChunkState) {
return _activeReceiveChunkState->constructErrorStatus();
}
@@ -80,7 +80,7 @@ StatusWith<ScopedDonateChunk> ActiveMigrationsRegistry::registerDonateChunk(
StatusWith<ScopedReceiveChunk> ActiveMigrationsRegistry::registerReceiveChunk(
const NamespaceString& nss, const ChunkRange& chunkRange, const ShardId& fromShardId) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeReceiveChunkState) {
return _activeReceiveChunkState->constructErrorStatus();
}
@@ -95,7 +95,7 @@ StatusWith<ScopedReceiveChunk> ActiveMigrationsRegistry::registerReceiveChunk(
}
boost::optional<NamespaceString> ActiveMigrationsRegistry::getActiveDonateChunkNss() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeMoveChunkState) {
return _activeMoveChunkState->args.getNss();
}
@@ -106,7 +106,7 @@ boost::optional<NamespaceString> ActiveMigrationsRegistry::getActiveDonateChunkN
BSONObj ActiveMigrationsRegistry::getActiveMigrationStatusReport(OperationContext* opCtx) {
boost::optional<NamespaceString> nss;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeMoveChunkState) {
nss = _activeMoveChunkState->args.getNss();
@@ -132,13 +132,13 @@ BSONObj ActiveMigrationsRegistry::getActiveMigrationStatusReport(OperationContex
}
void ActiveMigrationsRegistry::_clearDonateChunk() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_activeMoveChunkState);
_activeMoveChunkState.reset();
}
void ActiveMigrationsRegistry::_clearReceiveChunk() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_activeReceiveChunkState);
_activeReceiveChunkState.reset();
}
diff --git a/src/mongo/db/s/active_migrations_registry.h b/src/mongo/db/s/active_migrations_registry.h
index 2f5dc3b56ae..e885bc23b91 100644
--- a/src/mongo/db/s/active_migrations_registry.h
+++ b/src/mongo/db/s/active_migrations_registry.h
@@ -33,8 +33,8 @@
#include <memory>
#include "mongo/db/s/migration_session_id.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/move_chunk_request.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/notification.h"
namespace mongo {
@@ -152,7 +152,7 @@ private:
void _clearReceiveChunk();
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ActiveMigrationsRegistry::_mutex");
// If there is an active moveChunk operation, this field contains the original request
boost::optional<ActiveMoveChunkState> _activeMoveChunkState;
diff --git a/src/mongo/db/s/active_move_primaries_registry.cpp b/src/mongo/db/s/active_move_primaries_registry.cpp
index fa383581038..4f4a5cf945e 100644
--- a/src/mongo/db/s/active_move_primaries_registry.cpp
+++ b/src/mongo/db/s/active_move_primaries_registry.cpp
@@ -56,7 +56,7 @@ ActiveMovePrimariesRegistry& ActiveMovePrimariesRegistry::get(OperationContext*
StatusWith<ScopedMovePrimary> ActiveMovePrimariesRegistry::registerMovePrimary(
const ShardMovePrimary& requestArgs) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeMovePrimaryState) {
if (_activeMovePrimaryState->requestArgs == requestArgs) {
return {ScopedMovePrimary(nullptr, false, _activeMovePrimaryState->notification)};
@@ -71,7 +71,7 @@ StatusWith<ScopedMovePrimary> ActiveMovePrimariesRegistry::registerMovePrimary(
}
boost::optional<NamespaceString> ActiveMovePrimariesRegistry::getActiveMovePrimaryNss() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_activeMovePrimaryState) {
return _activeMovePrimaryState->requestArgs.get_shardsvrMovePrimary();
}
@@ -80,7 +80,7 @@ boost::optional<NamespaceString> ActiveMovePrimariesRegistry::getActiveMovePrima
}
void ActiveMovePrimariesRegistry::_clearMovePrimary() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_activeMovePrimaryState);
_activeMovePrimaryState.reset();
}
diff --git a/src/mongo/db/s/active_move_primaries_registry.h b/src/mongo/db/s/active_move_primaries_registry.h
index 38b19a6c94f..94f55657cba 100644
--- a/src/mongo/db/s/active_move_primaries_registry.h
+++ b/src/mongo/db/s/active_move_primaries_registry.h
@@ -99,7 +99,7 @@ private:
void _clearMovePrimary();
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ActiveMovePrimariesRegistry::_mutex");
// If there is an active movePrimary operation going on, this field contains the request that
// initiated it.
diff --git a/src/mongo/db/s/active_rename_collection_registry.cpp b/src/mongo/db/s/active_rename_collection_registry.cpp
index ae9b50b4dcf..6d4e1e533b9 100644
--- a/src/mongo/db/s/active_rename_collection_registry.cpp
+++ b/src/mongo/db/s/active_rename_collection_registry.cpp
@@ -97,14 +97,14 @@ StatusWith<ScopedRenameCollection> ActiveRenameCollectionRegistry::registerRenam
}
void ActiveRenameCollectionRegistry::_clearRenameCollection(std::string nss) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto iter = _activeRenameCollectionMap.find(nss);
invariant(iter != _activeRenameCollectionMap.end());
_activeRenameCollectionMap.erase(nss);
}
void ActiveRenameCollectionRegistry::_setEmptyOrError(std::string nss, Status status) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto iter = _activeRenameCollectionMap.find(nss);
invariant(iter != _activeRenameCollectionMap.end());
auto activeRenameCollectionState = iter->second;
diff --git a/src/mongo/db/s/active_rename_collection_registry.h b/src/mongo/db/s/active_rename_collection_registry.h
index 50028b21f81..765f7627169 100644
--- a/src/mongo/db/s/active_rename_collection_registry.h
+++ b/src/mongo/db/s/active_rename_collection_registry.h
@@ -101,7 +101,7 @@ private:
void _setEmptyOrError(std::string nss, Status status);
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ActiveRenameCollectionRegistry::_mutex");
// Map containing any collections currently being renamed
StringMap<std::shared_ptr<ActiveRenameCollectionState>> _activeRenameCollectionMap;
diff --git a/src/mongo/db/s/active_shard_collection_registry.cpp b/src/mongo/db/s/active_shard_collection_registry.cpp
index 6a01fdd90ee..d2bda7ece20 100644
--- a/src/mongo/db/s/active_shard_collection_registry.cpp
+++ b/src/mongo/db/s/active_shard_collection_registry.cpp
@@ -91,7 +91,7 @@ ActiveShardCollectionRegistry& ActiveShardCollectionRegistry::get(OperationConte
StatusWith<ScopedShardCollection> ActiveShardCollectionRegistry::registerShardCollection(
const ShardsvrShardCollection& request) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
std::string nss = request.get_shardsvrShardCollection().get().ns();
auto iter = _activeShardCollectionMap.find(nss);
@@ -114,7 +114,7 @@ StatusWith<ScopedShardCollection> ActiveShardCollectionRegistry::registerShardCo
}
void ActiveShardCollectionRegistry::_clearShardCollection(std::string nss) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto iter = _activeShardCollectionMap.find(nss);
invariant(iter != _activeShardCollectionMap.end());
_activeShardCollectionMap.erase(nss);
@@ -122,7 +122,7 @@ void ActiveShardCollectionRegistry::_clearShardCollection(std::string nss) {
void ActiveShardCollectionRegistry::_setUUIDOrError(std::string nss,
StatusWith<boost::optional<UUID>> swUUID) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto iter = _activeShardCollectionMap.find(nss);
invariant(iter != _activeShardCollectionMap.end());
auto activeShardCollectionState = iter->second;
diff --git a/src/mongo/db/s/active_shard_collection_registry.h b/src/mongo/db/s/active_shard_collection_registry.h
index da734aee1c9..91423d65d7c 100644
--- a/src/mongo/db/s/active_shard_collection_registry.h
+++ b/src/mongo/db/s/active_shard_collection_registry.h
@@ -32,8 +32,8 @@
#include <boost/optional.hpp>
#include <memory>
+#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/shard_collection_gen.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/string_map.h"
@@ -108,7 +108,7 @@ private:
void _setUUIDOrError(std::string nss, StatusWith<boost::optional<UUID>> swUUID);
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ActiveShardCollectionRegistry::_mutex");
// Map containing any collections currently being sharded
StringMap<std::shared_ptr<ActiveShardCollectionState>> _activeShardCollectionMap;
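Class members use the named form of the macro, as the registry headers above show. The sketch below condenses that shape into a standalone example; SomeRegistry and its field are placeholders, and the suggestion that the string is used for latch diagnostics is an inference from the argument, not something this patch states.

    #include "mongo/platform/mutex.h"   // replaces "mongo/stdx/mutex.h"

    namespace mongo {

    class SomeRegistry {
    private:
        // Protects the state below; the string gives the latch a stable identity,
        // presumably for diagnostics such as lock reporting.
        Mutex _mutex = MONGO_MAKE_LATCH("SomeRegistry::_mutex");
    };

    }  // namespace mongo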
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index a920dd64ba9..9a2fadb8327 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -164,7 +164,7 @@ Balancer::Balancer(ServiceContext* serviceContext)
Balancer::~Balancer() {
// The balancer thread must have been stopped
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
invariant(_state == kStopped);
}
@@ -182,7 +182,7 @@ Balancer* Balancer::get(OperationContext* operationContext) {
}
void Balancer::initiateBalancer(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
invariant(_state == kStopped);
_state = kRunning;
@@ -194,7 +194,7 @@ void Balancer::initiateBalancer(OperationContext* opCtx) {
}
void Balancer::interruptBalancer() {
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
if (_state != kRunning)
return;
@@ -218,7 +218,7 @@ void Balancer::interruptBalancer() {
void Balancer::waitForBalancerToStop() {
{
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
if (_state == kStopped)
return;
@@ -228,7 +228,7 @@ void Balancer::waitForBalancerToStop() {
_thread.join();
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
_state = kStopped;
_thread = {};
@@ -236,7 +236,7 @@ void Balancer::waitForBalancerToStop() {
}
void Balancer::joinCurrentRound(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> scopedLock(_mutex);
+ stdx::unique_lock<Latch> scopedLock(_mutex);
const auto numRoundsAtStart = _numBalancerRounds;
opCtx->waitForConditionOrInterrupt(_condVar, scopedLock, [&] {
return !_inBalancerRound || _numBalancerRounds != numRoundsAtStart;
@@ -289,7 +289,7 @@ void Balancer::report(OperationContext* opCtx, BSONObjBuilder* builder) {
const auto mode = balancerConfig->getBalancerMode();
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
builder->append("mode", BalancerSettingsType::kBalancerModes[mode]);
builder->append("inBalancerRound", _inBalancerRound);
builder->append("numBalancerRounds", _numBalancerRounds);
@@ -303,7 +303,7 @@ void Balancer::_mainThread() {
log() << "CSRS balancer is starting";
{
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
_threadOperationContext = opCtx.get();
}
@@ -423,7 +423,7 @@ void Balancer::_mainThread() {
}
{
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
invariant(_state == kStopping);
invariant(_migrationManagerInterruptThread.joinable());
}
@@ -432,7 +432,7 @@ void Balancer::_mainThread() {
_migrationManager.drainActiveMigrations();
{
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
_migrationManagerInterruptThread = {};
_threadOperationContext = nullptr;
}
@@ -441,19 +441,19 @@ void Balancer::_mainThread() {
}
bool Balancer::_stopRequested() {
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
return (_state != kRunning);
}
void Balancer::_beginRound(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_inBalancerRound = true;
_condVar.notify_all();
}
void Balancer::_endRound(OperationContext* opCtx, Milliseconds waitTimeout) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_inBalancerRound = false;
_numBalancerRounds++;
_condVar.notify_all();
@@ -464,7 +464,7 @@ void Balancer::_endRound(OperationContext* opCtx, Milliseconds waitTimeout) {
}
void Balancer::_sleepFor(OperationContext* opCtx, Milliseconds waitTimeout) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condVar.wait_for(lock, waitTimeout.toSystemDuration(), [&] { return _state != kRunning; });
}
@@ -672,7 +672,7 @@ void Balancer::_splitOrMarkJumbo(OperationContext* opCtx,
}
void Balancer::notifyPersistedBalancerSettingsChanged() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condVar.notify_all();
}
diff --git a/src/mongo/db/s/balancer/balancer.h b/src/mongo/db/s/balancer/balancer.h
index d33d6c1ddc0..4e22590bf1d 100644
--- a/src/mongo/db/s/balancer/balancer.h
+++ b/src/mongo/db/s/balancer/balancer.h
@@ -32,8 +32,8 @@
#include "mongo/db/s/balancer/balancer_chunk_selection_policy.h"
#include "mongo/db/s/balancer/balancer_random.h"
#include "mongo/db/s/balancer/migration_manager.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
namespace mongo {
@@ -208,7 +208,7 @@ private:
const BSONObj& minKey);
// Protects the state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("Balancer::_mutex");
// Indicates the current state of the balancer
State _state{kStopped};
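The Balancer hunks also show that condition-variable waits only change in how the lock is spelled. A reduced sketch of that interaction, using the member names from the hunks above; the surrounding class definition and State enum are assumed rather than reproduced here.

    // Member declarations (from balancer.h after this patch):
    //   Mutex _mutex = MONGO_MAKE_LATCH("Balancer::_mutex");
    //   stdx::condition_variable _condVar;

    void Balancer::_sleepFor(OperationContext* opCtx, Milliseconds waitTimeout) {
        stdx::unique_lock<Latch> lock(_mutex);   // only the lock's template argument changes
        _condVar.wait_for(lock, waitTimeout.toSystemDuration(),
                          [&] { return _state != kRunning; });
    }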
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 0a988cf1b13..4af124368e4 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -210,7 +210,7 @@ Status MigrationManager::executeManualMigration(
void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kStopped);
invariant(_migrationRecoveryMap.empty());
_state = State::kRecovering;
@@ -285,7 +285,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state == State::kStopping) {
_migrationRecoveryMap.clear();
return;
@@ -367,7 +367,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
scopedGuard.dismiss();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state == State::kRecovering) {
_state = State::kEnabled;
_condVar.notify_all();
@@ -383,7 +383,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
void MigrationManager::interruptAndDisableMigrations() {
auto executor = Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor();
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kEnabled || _state == State::kRecovering);
_state = State::kStopping;
@@ -402,7 +402,7 @@ void MigrationManager::interruptAndDisableMigrations() {
}
void MigrationManager::drainActiveMigrations() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_state == State::kStopped)
return;
@@ -421,7 +421,7 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
// Ensure we are not stopped in order to avoid doing the extra work
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kEnabled && _state != State::kRecovering) {
return std::make_shared<Notification<RemoteCommandResponse>>(
Status(ErrorCodes::BalancerInterrupted,
@@ -457,7 +457,7 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
secondaryThrottle,
waitForDelete);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_state != State::kEnabled && _state != State::kRecovering) {
return std::make_shared<Notification<RemoteCommandResponse>>(
@@ -522,7 +522,7 @@ void MigrationManager::_schedule(WithLock lock,
ThreadClient tc(getThreadName(), service);
auto opCtx = cc().makeOperationContext();
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_complete(lock, opCtx.get(), itMigration, args.response);
});
@@ -573,12 +573,12 @@ void MigrationManager::_checkDrained(WithLock) {
}
void MigrationManager::_waitForRecovery() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condVar.wait(lock, [this] { return _state != State::kRecovering; });
}
void MigrationManager::_abandonActiveMigrationsAndEnableManager(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_state == State::kStopping) {
// The balancer was interrupted. Let the next balancer recover the state.
return;
@@ -605,7 +605,7 @@ Status MigrationManager::_processRemoteCommandResponse(
const RemoteCommandResponse& remoteCommandResponse,
ScopedMigrationRequest* scopedMigrationRequest) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
Status commandStatus(ErrorCodes::InternalError, "Uninitialized value.");
// Check for local errors sending the remote command caused by stepdown.
diff --git a/src/mongo/db/s/balancer/migration_manager.h b/src/mongo/db/s/balancer/migration_manager.h
index 4f6c1288571..0e517b7e067 100644
--- a/src/mongo/db/s/balancer/migration_manager.h
+++ b/src/mongo/db/s/balancer/migration_manager.h
@@ -38,10 +38,10 @@
#include "mongo/db/s/balancer/balancer_policy.h"
#include "mongo/db/s/balancer/type_migration.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -260,7 +260,7 @@ private:
stdx::unordered_map<NamespaceString, std::list<MigrationType>> _migrationRecoveryMap;
// Protects the class state below.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MigrationManager::_mutex");
// Always start the migration manager in a stopped state.
State _state{State::kStopped};
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index 049ab0ae261..c7dd1e22250 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -234,12 +234,12 @@ ChunkSplitter& ChunkSplitter::get(ServiceContext* serviceContext) {
}
void ChunkSplitter::onShardingInitialization(bool isPrimary) {
- stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+ stdx::lock_guard<Latch> scopedLock(_mutex);
_isPrimary = isPrimary;
}
void ChunkSplitter::onStepUp() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (_isPrimary) {
return;
}
@@ -249,7 +249,7 @@ void ChunkSplitter::onStepUp() {
}
void ChunkSplitter::onStepDown() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_isPrimary) {
return;
}
diff --git a/src/mongo/db/s/chunk_splitter.h b/src/mongo/db/s/chunk_splitter.h
index ef774dc017c..a05683fc6e7 100644
--- a/src/mongo/db/s/chunk_splitter.h
+++ b/src/mongo/db/s/chunk_splitter.h
@@ -107,7 +107,7 @@ private:
long dataWritten);
// Protects the state below.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ChunkSplitter::_mutex");
// The ChunkSplitter is only active on a primary node.
bool _isPrimary{false};
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index 27b0a47a7ef..9fba4d4c1b2 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -134,7 +134,7 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
bool writeOpLog = false;
{
- stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
+ stdx::lock_guard<Latch> scopedLock(csr->_metadataManager->_managerLock);
if (self->isEmpty()) {
LOG(1) << "No further range deletions scheduled on " << nss.ns();
return boost::none;
@@ -181,7 +181,7 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
<< "ns" << nss.ns() << "epoch" << epoch << "min"
<< range->getMin() << "max" << range->getMax()));
} catch (const DBException& e) {
- stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
+ stdx::lock_guard<Latch> scopedLock(csr->_metadataManager->_managerLock);
csr->_metadataManager->_clearAllCleanups(
scopedLock,
e.toStatus("cannot push startRangeDeletion record to Op Log,"
@@ -254,7 +254,7 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
auto* const self = forTestOnly ? forTestOnly : &metadataManager->_rangesToClean;
- stdx::lock_guard<stdx::mutex> scopedLock(csr->_metadataManager->_managerLock);
+ stdx::lock_guard<Latch> scopedLock(csr->_metadataManager->_managerLock);
if (!replicationStatus.isOK()) {
LOG(0) << "Error when waiting for write concern after removing " << nss << " range "
@@ -304,7 +304,7 @@ bool CollectionRangeDeleter::_checkCollectionMetadataStillValid(
if (!scopedCollectionMetadata) {
LOG(0) << "Abandoning any range deletions because the metadata for " << nss.ns()
<< " was reset";
- stdx::lock_guard<stdx::mutex> lk(metadataManager->_managerLock);
+ stdx::lock_guard<Latch> lk(metadataManager->_managerLock);
metadataManager->_clearAllCleanups(lk);
return false;
}
@@ -319,7 +319,7 @@ bool CollectionRangeDeleter::_checkCollectionMetadataStillValid(
<< nss.ns();
}
- stdx::lock_guard<stdx::mutex> lk(metadataManager->_managerLock);
+ stdx::lock_guard<Latch> lk(metadataManager->_managerLock);
metadataManager->_clearAllCleanups(lk);
return false;
}
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index 7e38463b55e..687c9a877b1 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -58,7 +58,7 @@ public:
: _factory(std::move(factory)) {}
CollectionShardingState& getOrCreate(const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto it = _collections.find(nss.ns());
if (it == _collections.end()) {
@@ -74,7 +74,7 @@ public:
BSONObjBuilder versionB(builder->subobjStart("versions"));
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
for (auto& coll : _collections) {
const auto optMetadata = coll.second->getCurrentMetadataIfKnown();
@@ -93,7 +93,7 @@ private:
std::unique_ptr<CollectionShardingStateFactory> _factory;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CollectionShardingStateMap::_mutex");
CollectionsMap _collections;
};
diff --git a/src/mongo/db/s/collection_sharding_state_factory_shard.cpp b/src/mongo/db/s/collection_sharding_state_factory_shard.cpp
index 49a4f118ce5..b0c800c92cb 100644
--- a/src/mongo/db/s/collection_sharding_state_factory_shard.cpp
+++ b/src/mongo/db/s/collection_sharding_state_factory_shard.cpp
@@ -58,7 +58,7 @@ public:
private:
executor::TaskExecutor* _getExecutor() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_taskExecutor) {
const std::string kExecName("CollectionRangeDeleter-TaskExecutor");
@@ -75,7 +75,7 @@ private:
}
// Serializes the instantiation of the task executor
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CollectionShardingStateFactoryShard::_mutex");
std::unique_ptr<executor::TaskExecutor> _taskExecutor{nullptr};
};
diff --git a/src/mongo/db/s/config/namespace_serializer.cpp b/src/mongo/db/s/config/namespace_serializer.cpp
index c132fe177b2..6c69eaa668d 100644
--- a/src/mongo/db/s/config/namespace_serializer.cpp
+++ b/src/mongo/db/s/config/namespace_serializer.cpp
@@ -49,7 +49,7 @@ NamespaceSerializer::ScopedLock::ScopedLock(StringData ns, NamespaceSerializer&
: _ns(ns.toString()), _nsSerializer(nsSerializer) {}
NamespaceSerializer::ScopedLock::~ScopedLock() {
- stdx::unique_lock<stdx::mutex> lock(_nsSerializer._mutex);
+ stdx::unique_lock<Latch> lock(_nsSerializer._mutex);
auto iter = _nsSerializer._inProgressMap.find(_ns);
iter->second->numWaiting--;
@@ -62,7 +62,7 @@ NamespaceSerializer::ScopedLock::~ScopedLock() {
}
NamespaceSerializer::ScopedLock NamespaceSerializer::lock(OperationContext* opCtx, StringData nss) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto iter = _inProgressMap.find(nss);
if (iter == _inProgressMap.end()) {
diff --git a/src/mongo/db/s/config/namespace_serializer.h b/src/mongo/db/s/config/namespace_serializer.h
index 912171dcdbc..f0e6c4b158c 100644
--- a/src/mongo/db/s/config/namespace_serializer.h
+++ b/src/mongo/db/s/config/namespace_serializer.h
@@ -36,8 +36,8 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/string_map.h"
namespace mongo {
@@ -72,7 +72,7 @@ private:
bool isInProgress = true;
};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("NamespaceSerializer::_mutex");
StringMap<std::shared_ptr<NSLock>> _inProgressMap;
};
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 424db73a9d0..557529099ff 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -100,7 +100,7 @@ ShardingCatalogManager::~ShardingCatalogManager() {
}
void ShardingCatalogManager::startup() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_started) {
return;
}
@@ -114,7 +114,7 @@ void ShardingCatalogManager::startup() {
void ShardingCatalogManager::shutDown() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
@@ -126,7 +126,7 @@ void ShardingCatalogManager::shutDown() {
Status ShardingCatalogManager::initializeConfigDatabaseIfNeeded(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_configInitialized) {
return {ErrorCodes::AlreadyInitialized,
"Config database was previously loaded into memory"};
@@ -146,14 +146,14 @@ Status ShardingCatalogManager::initializeConfigDatabaseIfNeeded(OperationContext
return status;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_configInitialized = true;
return Status::OK();
}
void ShardingCatalogManager::discardCachedConfigDatabaseInitializationState() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_configInitialized = false;
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index 821c2c037fa..0966ecba966 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -34,6 +34,7 @@
#include "mongo/db/repl/optime_with.h"
#include "mongo/db/s/config/namespace_serializer.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
@@ -41,7 +42,6 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/request_types/rename_collection_gen.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -533,7 +533,7 @@ private:
// (S) Self-synchronizing; access in any way from any context.
//
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardingCatalogManager::_mutex");
// True if shutDown() has been called. False, otherwise.
bool _inShutdown{false}; // (M)
diff --git a/src/mongo/db/s/database_sharding_state.cpp b/src/mongo/db/s/database_sharding_state.cpp
index f3557c5791e..643c36d9dcd 100644
--- a/src/mongo/db/s/database_sharding_state.cpp
+++ b/src/mongo/db/s/database_sharding_state.cpp
@@ -53,7 +53,7 @@ public:
DatabaseShardingStateMap() {}
DatabaseShardingState& getOrCreate(const StringData dbName) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto it = _databases.find(dbName);
if (it == _databases.end()) {
@@ -69,7 +69,7 @@ public:
private:
using DatabasesMap = StringMap<std::shared_ptr<DatabaseShardingState>>;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DatabaseShardingStateMap::_mutex");
DatabasesMap _databases;
};
diff --git a/src/mongo/db/s/implicit_create_collection.cpp b/src/mongo/db/s/implicit_create_collection.cpp
index 7ea8c1e1345..b0ccfc17e37 100644
--- a/src/mongo/db/s/implicit_create_collection.cpp
+++ b/src/mongo/db/s/implicit_create_collection.cpp
@@ -46,8 +46,8 @@
#include "mongo/s/grid.h"
#include "mongo/s/request_types/create_collection_gen.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
@@ -73,7 +73,7 @@ public:
invariant(!opCtx->lockState()->isLocked());
{
- stdx::unique_lock<stdx::mutex> lg(_mutex);
+ stdx::unique_lock<Latch> lg(_mutex);
while (_isInProgress) {
auto status = opCtx->waitForConditionOrInterruptNoAssert(_cvIsInProgress, lg);
if (!status.isOK()) {
@@ -85,7 +85,7 @@ public:
}
ON_BLOCK_EXIT([&] {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_isInProgress = false;
_cvIsInProgress.notify_one();
});
@@ -128,7 +128,7 @@ public:
private:
const NamespaceString _ns;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CreateCollectionSerializer::_mutex");
stdx::condition_variable _cvIsInProgress;
bool _isInProgress = false;
};
@@ -136,7 +136,7 @@ private:
class CreateCollectionSerializerMap {
public:
std::shared_ptr<CreateCollectionSerializer> getForNs(const NamespaceString& ns) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto iter = _inProgressMap.find(ns.ns());
if (iter == _inProgressMap.end()) {
std::tie(iter, std::ignore) =
@@ -147,12 +147,12 @@ public:
}
void cleanupNs(const NamespaceString& ns) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_inProgressMap.erase(ns.ns());
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CreateCollectionSerializerMap::_mutex");
std::map<std::string, std::shared_ptr<CreateCollectionSerializer>> _inProgressMap;
};
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 5d832418367..52f606b4031 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -173,7 +173,7 @@ public:
}
~RangePreserver() {
- stdx::lock_guard<stdx::mutex> managerLock(_metadataManager->_managerLock);
+ stdx::lock_guard<Latch> managerLock(_metadataManager->_managerLock);
invariant(_metadataTracker->usageCounter != 0);
if (--_metadataTracker->usageCounter == 0) {
@@ -232,7 +232,7 @@ void MetadataManager::_clearAllCleanups(WithLock, Status status) {
boost::optional<ScopedCollectionMetadata> MetadataManager::getActiveMetadata(
std::shared_ptr<MetadataManager> self, const boost::optional<LogicalTime>& atClusterTime) {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
if (_metadata.empty()) {
return boost::none;
@@ -269,7 +269,7 @@ boost::optional<ScopedCollectionMetadata> MetadataManager::getActiveMetadata(
}
size_t MetadataManager::numberOfMetadataSnapshots() const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
if (_metadata.empty())
return 0;
@@ -277,7 +277,7 @@ size_t MetadataManager::numberOfMetadataSnapshots() const {
}
int MetadataManager::numberOfEmptyMetadataSnapshots() const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
int emptyMetadataSnapshots = 0;
for (const auto& collMetadataTracker : _metadata) {
@@ -289,7 +289,7 @@ int MetadataManager::numberOfEmptyMetadataSnapshots() const {
}
void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
// Collection is becoming sharded
if (_metadata.empty()) {
@@ -352,7 +352,7 @@ void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
}
void MetadataManager::clearFilteringMetadata() {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
_receivingChunks.clear();
_clearAllCleanups(lg);
_metadata.clear();
@@ -394,7 +394,7 @@ void MetadataManager::_retireExpiredMetadata(WithLock lock) {
}
void MetadataManager::toBSONPending(BSONArrayBuilder& bb) const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
for (auto it = _receivingChunks.begin(); it != _receivingChunks.end(); ++it) {
BSONArrayBuilder pendingBB(bb.subarrayStart());
@@ -405,7 +405,7 @@ void MetadataManager::toBSONPending(BSONArrayBuilder& bb) const {
}
void MetadataManager::append(BSONObjBuilder* builder) const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
_rangesToClean.append(builder);
@@ -450,7 +450,7 @@ void MetadataManager::_pushListToClean(WithLock, std::list<Deletion> ranges) {
}
auto MetadataManager::beginReceive(ChunkRange const& range) -> CleanupNotification {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
if (_overlapsInUseChunk(lg, range)) {
@@ -467,7 +467,7 @@ auto MetadataManager::beginReceive(ChunkRange const& range) -> CleanupNotificati
}
void MetadataManager::forgetReceive(ChunkRange const& range) {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
// This is potentially a partially received chunk, which needs to be cleaned up. We know none
@@ -486,7 +486,7 @@ void MetadataManager::forgetReceive(ChunkRange const& range) {
auto MetadataManager::cleanUpRange(ChunkRange const& range, Date_t whenToDelete)
-> CleanupNotification {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
auto* const activeMetadata = _metadata.back().get();
@@ -523,7 +523,7 @@ auto MetadataManager::cleanUpRange(ChunkRange const& range, Date_t whenToDelete)
}
size_t MetadataManager::numberOfRangesToCleanStillInUse() const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
size_t count = 0;
for (auto& tracker : _metadata) {
count += tracker->orphans.size();
@@ -532,13 +532,13 @@ size_t MetadataManager::numberOfRangesToCleanStillInUse() const {
}
size_t MetadataManager::numberOfRangesToClean() const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
return _rangesToClean.size();
}
auto MetadataManager::trackOrphanedDataCleanup(ChunkRange const& range) const
-> boost::optional<CleanupNotification> {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
auto overlaps = _overlapsInUseCleanups(lg, range);
if (overlaps) {
return overlaps;
@@ -591,7 +591,7 @@ auto MetadataManager::_overlapsInUseCleanups(WithLock, ChunkRange const& range)
}
boost::optional<ChunkRange> MetadataManager::getNextOrphanRange(BSONObj const& from) const {
- stdx::lock_guard<stdx::mutex> lg(_managerLock);
+ stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
return _metadata.back()->metadata->getNextOrphanRange(_receivingChunks, from);
}
diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h
index 0eb43d529c2..90a0a7e233e 100644
--- a/src/mongo/db/s/metadata_manager.h
+++ b/src/mongo/db/s/metadata_manager.h
@@ -240,7 +240,7 @@ private:
executor::TaskExecutor* const _executor;
// Mutex to protect the state below
- mutable stdx::mutex _managerLock;
+ mutable Mutex _managerLock = MONGO_MAKE_LATCH("MetadataManager::_managerLock");
// Contains a list of collection metadata for the same collection epoch, ordered in
// chronological order based on the refreshes that occurred. The entry at _metadata.back() is
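One detail worth noting from the MetadataManager hunks: const accessors keep locking because the latch member remains mutable, exactly as the old stdx::mutex did. A minimal standalone illustration; the class name and the counter member are invented for the example.

    class MetadataManagerLike {
    public:
        size_t numberOfRangesToClean() const {
            // Locking in a const method works because the latch is declared mutable.
            stdx::lock_guard<Latch> lg(_managerLock);
            return _rangesToClean;
        }

    private:
        mutable Mutex _managerLock = MONGO_MAKE_LATCH("MetadataManager::_managerLock");
        size_t _rangesToClean = 0;
    };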
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 1e367cf7aea..e5d3480fa3d 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -42,10 +42,10 @@
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/shard_server_test_fixture.h"
-#include "mongo/stdx/condition_variable.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index b2d8544b21a..7b891cf8e18 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -292,7 +292,7 @@ Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* opCtx) {
// between cancellations for different migration sessions. It is thus possible that a second
// migration from a different donor, but the same recipient, would certainly abort an already
// running migration.
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_state = kCloning;
return Status::OK();
@@ -321,7 +321,7 @@ Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
}
iteration++;
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
const std::size_t cloneLocsRemaining = _cloneLocs.size();
@@ -551,14 +551,14 @@ void MigrationChunkClonerSourceLegacy::_addToTransferModsQueue(
const repl::OpTime& prePostImageOpTime) {
switch (op) {
case 'd': {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_deleted.push_back(idObj);
_memoryUsed += idObj.firstElement().size() + 5;
} break;
case 'i':
case 'u': {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_reload.push_back(idObj);
_memoryUsed += idObj.firstElement().size() + 5;
} break;
@@ -574,7 +574,7 @@ void MigrationChunkClonerSourceLegacy::_addToTransferModsQueue(
}
bool MigrationChunkClonerSourceLegacy::_addedOperationToOutstandingOperationTrackRequests() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_acceptingNewOperationTrackRequests) {
return false;
}
@@ -584,7 +584,7 @@ bool MigrationChunkClonerSourceLegacy::_addedOperationToOutstandingOperationTrac
}
void MigrationChunkClonerSourceLegacy::_drainAllOutstandingOperationTrackRequests(
- stdx::unique_lock<stdx::mutex>& lk) {
+ stdx::unique_lock<Latch>& lk) {
invariant(_state == kDone);
_acceptingNewOperationTrackRequests = false;
_allOutstandingOperationTrackRequestsDrained.wait(
@@ -598,7 +598,7 @@ void MigrationChunkClonerSourceLegacy::_incrementOutstandingOperationTrackReques
}
void MigrationChunkClonerSourceLegacy::_decrementOutstandingOperationTrackRequests() {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
--_outstandingOperationTrackRequests;
if (_outstandingOperationTrackRequests == 0) {
_allOutstandingOperationTrackRequestsDrained.notify_all();
@@ -606,7 +606,7 @@ void MigrationChunkClonerSourceLegacy::_decrementOutstandingOperationTrackReques
}
uint64_t MigrationChunkClonerSourceLegacy::getCloneBatchBufferAllocationSize() {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
return std::min(static_cast<uint64_t>(BSONObjMaxUserSize),
_averageObjectSizeForCloneLocs * _cloneLocs.size());
@@ -621,7 +621,7 @@ Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* opCtx,
internalQueryExecYieldIterations.load(),
Milliseconds(internalQueryExecYieldPeriodMS.load()));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto iter = _cloneLocs.begin();
for (; iter != _cloneLocs.end(); ++iter) {
@@ -666,7 +666,7 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
{
// All clone data must have been drained before starting to fetch the incremental changes.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_cloneLocs.empty());
// The "snapshot" for delete and update list must be taken under a single lock. This is to
@@ -685,7 +685,7 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
builder->append("size", totalDocSize);
// Put back remaining ids we didn't consume
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_deleted.splice(_deleted.cbegin(), deleteList);
_reload.splice(_reload.cbegin(), updateList);
@@ -693,7 +693,7 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
}
void MigrationChunkClonerSourceLegacy::_cleanup(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_state = kDone;
_drainAllOutstandingOperationTrackRequests(lk);
@@ -800,7 +800,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
}
if (!isLargeChunk) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_cloneLocs.insert(recordId);
}
@@ -829,7 +829,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
<< _args.getMaxKey()};
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_averageObjectSizeForCloneLocs = collectionAverageObjectSize + 12;
return Status::OK();
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index e77998d907d..34b41503bec 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -40,10 +40,10 @@
#include "mongo/db/s/migration_chunk_cloner_source.h"
#include "mongo/db/s/migration_session_id.h"
#include "mongo/db/s/session_catalog_migration_source.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/move_chunk_request.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
@@ -285,7 +285,7 @@ private:
* function. Should only be used in the cleanup for this class. Should use a lock wrapped
* around this class's mutex.
*/
- void _drainAllOutstandingOperationTrackRequests(stdx::unique_lock<stdx::mutex>& lk);
+ void _drainAllOutstandingOperationTrackRequests(stdx::unique_lock<Latch>& lk);
/**
* Appends to the builder the list of _id of documents that were deleted during migration.
@@ -325,7 +325,7 @@ private:
std::unique_ptr<SessionCatalogMigrationSource> _sessionCatalogSource;
// Protects the entries below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MigrationChunkClonerSourceLegacy::_mutex");
// The current state of the cloner
State _state{kNew};
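Finally, helpers that receive the caller's lock by reference change their parameter type in lockstep, as the cloner hunks above show. A compressed sketch of that hand-off; the drain helper's body is omitted and the state handling is simplified.

    void MigrationChunkClonerSourceLegacy::_cleanup(OperationContext* opCtx) {
        stdx::unique_lock<Latch> lk(_mutex);
        _state = kDone;
        // The helper's signature now spells the same lock type:
        //   void _drainAllOutstandingOperationTrackRequests(stdx::unique_lock<Latch>& lk);
        _drainAllOutstandingOperationTrackRequests(lk);
    }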
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 8cbb3b2875c..307512ae2d8 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -225,12 +225,12 @@ MigrationDestinationManager* MigrationDestinationManager::get(OperationContext*
}
MigrationDestinationManager::State MigrationDestinationManager::getState() const {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
return _state;
}
void MigrationDestinationManager::setState(State newState) {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_state = newState;
_stateChangedCV.notify_all();
}
@@ -238,7 +238,7 @@ void MigrationDestinationManager::setState(State newState) {
void MigrationDestinationManager::_setStateFail(StringData msg) {
log() << msg;
{
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_errmsg = msg.toString();
_state = FAIL;
_stateChangedCV.notify_all();
@@ -250,7 +250,7 @@ void MigrationDestinationManager::_setStateFail(StringData msg) {
void MigrationDestinationManager::_setStateFailWarn(StringData msg) {
warning() << msg;
{
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_errmsg = msg.toString();
_state = FAIL;
_stateChangedCV.notify_all();
@@ -260,7 +260,7 @@ void MigrationDestinationManager::_setStateFailWarn(StringData msg) {
}
bool MigrationDestinationManager::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isActive(lk);
}
@@ -272,7 +272,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
OperationContext* opCtx,
bool waitForSteadyOrDone) {
if (waitForSteadyOrDone) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
try {
opCtx->waitForConditionOrInterruptFor(_stateChangedCV, lock, Seconds(1), [&]() -> bool {
return _state != READY && _state != CLONE && _state != CATCHUP;
@@ -283,7 +283,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
}
b.append("waited", true);
}
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
b.appendBool("active", _sessionId.is_initialized());
@@ -314,7 +314,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
}
BSONObj MigrationDestinationManager::getMigrationStatusReport() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_isActive(lk)) {
return migrationutil::makeMigrationStatusDocument(
_nss, _fromShard, _toShard, false, _min, _max);
@@ -329,7 +329,7 @@ Status MigrationDestinationManager::start(OperationContext* opCtx,
const StartChunkCloneRequest cloneRequest,
const OID& epoch,
const WriteConcernOptions& writeConcern) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_sessionId);
invariant(!_scopedReceiveChunk);
@@ -437,7 +437,7 @@ repl::OpTime MigrationDestinationManager::cloneDocumentsFromDonor(
}
Status MigrationDestinationManager::abort(const MigrationSessionId& sessionId) {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
if (!_sessionId) {
return Status::OK();
@@ -458,7 +458,7 @@ Status MigrationDestinationManager::abort(const MigrationSessionId& sessionId) {
}
void MigrationDestinationManager::abortWithoutSessionIdCheck() {
- stdx::lock_guard<stdx::mutex> sl(_mutex);
+ stdx::lock_guard<Latch> sl(_mutex);
_state = ABORT;
_stateChangedCV.notify_all();
_errmsg = "aborted without session id check";
@@ -466,7 +466,7 @@ void MigrationDestinationManager::abortWithoutSessionIdCheck() {
Status MigrationDestinationManager::startCommit(const MigrationSessionId& sessionId) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
if (_state != STEADY) {
return {ErrorCodes::CommandFailed,
@@ -734,7 +734,7 @@ void MigrationDestinationManager::_migrateThread() {
_forgetPending(opCtx.get(), ChunkRange(_min, _max));
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_sessionId.reset();
_scopedReceiveChunk.reset();
_isActiveCV.notify_all();
@@ -846,7 +846,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
}
{
- stdx::lock_guard<stdx::mutex> statsLock(_mutex);
+ stdx::lock_guard<Latch> statsLock(_mutex);
_numCloned += batchNumCloned;
ShardingStatistics::get(opCtx).countDocsClonedOnRecipient.addAndFetch(
batchNumCloned);
diff --git a/src/mongo/db/s/migration_destination_manager.h b/src/mongo/db/s/migration_destination_manager.h
index 18c008900cc..607eec9a68a 100644
--- a/src/mongo/db/s/migration_destination_manager.h
+++ b/src/mongo/db/s/migration_destination_manager.h
@@ -41,9 +41,9 @@
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/migration_session_id.h"
#include "mongo/db/s/session_catalog_migration_destination.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/shard_id.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/timer.h"
@@ -178,7 +178,7 @@ private:
bool _isActive(WithLock) const;
// Mutex to guard all fields
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MigrationDestinationManager::_mutex");
// Migration session ID uniquely identifies the migration and indicates whether the prepare
// method has been called.
diff --git a/src/mongo/db/s/namespace_metadata_change_notifications.cpp b/src/mongo/db/s/namespace_metadata_change_notifications.cpp
index 6a288834ce7..ecf63039105 100644
--- a/src/mongo/db/s/namespace_metadata_change_notifications.cpp
+++ b/src/mongo/db/s/namespace_metadata_change_notifications.cpp
@@ -36,7 +36,7 @@ namespace mongo {
NamespaceMetadataChangeNotifications::NamespaceMetadataChangeNotifications() = default;
NamespaceMetadataChangeNotifications::~NamespaceMetadataChangeNotifications() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_notificationsList.empty());
}
@@ -44,7 +44,7 @@ NamespaceMetadataChangeNotifications::ScopedNotification
NamespaceMetadataChangeNotifications::createNotification(const NamespaceString& nss) {
auto notifToken = std::make_shared<NotificationToken>(nss);
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto& notifList = _notificationsList[nss];
notifToken->itToErase = notifList.insert(notifList.end(), notifToken);
@@ -53,7 +53,7 @@ NamespaceMetadataChangeNotifications::createNotification(const NamespaceString&
}
void NamespaceMetadataChangeNotifications::notifyChange(const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto mapIt = _notificationsList.find(nss);
if (mapIt == _notificationsList.end()) {
@@ -70,7 +70,7 @@ void NamespaceMetadataChangeNotifications::notifyChange(const NamespaceString& n
void NamespaceMetadataChangeNotifications::_unregisterNotificationToken(
std::shared_ptr<NotificationToken> token) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!token->itToErase) {
return;
diff --git a/src/mongo/db/s/namespace_metadata_change_notifications.h b/src/mongo/db/s/namespace_metadata_change_notifications.h
index ba7c51e86a0..12df62bfb95 100644
--- a/src/mongo/db/s/namespace_metadata_change_notifications.h
+++ b/src/mongo/db/s/namespace_metadata_change_notifications.h
@@ -33,7 +33,7 @@
#include <map>
#include "mongo/db/namespace_string.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/notification.h"
namespace mongo {
@@ -114,7 +114,7 @@ private:
void _unregisterNotificationToken(std::shared_ptr<NotificationToken> token);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("NamespaceMetadataChangeNotifications::_mutex");
std::map<NamespaceString, NotificationsList> _notificationsList;
};
diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp
index ae7ca172c5f..9e55b7e2aad 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination.cpp
@@ -316,7 +316,7 @@ SessionCatalogMigrationDestination::~SessionCatalogMigrationDestination() {
void SessionCatalogMigrationDestination::start(ServiceContext* service) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_state == State::NotStarted);
_state = State::Migrating;
_isStateChanged.notify_all();
@@ -340,7 +340,7 @@ void SessionCatalogMigrationDestination::start(ServiceContext* service) {
}
void SessionCatalogMigrationDestination::finish() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state != State::ErrorOccurred) {
_state = State::Committing;
_isStateChanged.notify_all();
@@ -375,7 +375,7 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service
while (true) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state == State::ErrorOccurred) {
return;
}
@@ -393,7 +393,7 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service
if (oplogArray.isEmpty()) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state == State::Committing) {
// The migration is considered done only when it gets an empty result from
// the source shard while this is in state committing. This is to make sure
@@ -414,7 +414,7 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service
// We depleted the buffer at least once, transition to ready for commit.
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Note: only transition to "ready to commit" if state is not error/force stop.
if (_state == State::Migrating) {
_state = State::ReadyToCommit;
@@ -455,19 +455,19 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service
waitForWriteConcern(uniqueOpCtx.get(), lastResult.oplogTime, kMajorityWC, &unusedWCResult));
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_state = State::Done;
_isStateChanged.notify_all();
}
}
std::string SessionCatalogMigrationDestination::getErrMsg() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _errMsg;
}
void SessionCatalogMigrationDestination::_errorOccurred(StringData errMsg) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_state = State::ErrorOccurred;
_errMsg = errMsg.toString();
@@ -475,7 +475,7 @@ void SessionCatalogMigrationDestination::_errorOccurred(StringData errMsg) {
}
SessionCatalogMigrationDestination::State SessionCatalogMigrationDestination::getState() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _state;
}
diff --git a/src/mongo/db/s/session_catalog_migration_destination.h b/src/mongo/db/s/session_catalog_migration_destination.h
index 89c43be2e62..185eecbb9ba 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.h
+++ b/src/mongo/db/s/session_catalog_migration_destination.h
@@ -36,9 +36,9 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/s/migration_session_id.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/shard_id.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -116,7 +116,7 @@ private:
stdx::thread _thread;
// Protects _state and _errMsg.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SessionCatalogMigrationDestination::_mutex");
stdx::condition_variable _isStateChanged;
State _state = State::NotStarted;
std::string _errMsg; // valid only if _state == ErrorOccurred.
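
The hunks above show the recurring shape of this change: a named Mutex created with MONGO_MAKE_LATCH guarding a small state machine, with lock_guard/unique_lock now parameterized on Latch and a condition variable signalling state transitions. The following is a minimal standalone sketch of that shape, assuming a plain std::mutex as a stand-in for mongo::Mutex; the class and member names are invented for illustration only.

#include <condition_variable>
#include <mutex>
#include <string>

// Stand-in for the mongo::Mutex ("Latch") type used above; assumed here purely
// for illustration.
using Latch = std::mutex;

class MigrationDestinationSketch {
public:
    enum class State { NotStarted, Migrating, Committing, ErrorOccurred, Done };

    void start() {
        std::lock_guard<Latch> lk(_mutex);
        _state = State::Migrating;
        _stateChanged.notify_all();
    }

    void finish() {
        std::lock_guard<Latch> lk(_mutex);
        if (_state != State::ErrorOccurred) {
            _state = State::Committing;
            _stateChanged.notify_all();
        }
    }

    void errorOccurred(std::string msg) {
        std::lock_guard<Latch> lk(_mutex);
        _state = State::ErrorOccurred;
        _errMsg = std::move(msg);
        _stateChanged.notify_all();
    }

    // Blocks until the state machine leaves 'current'.
    State waitForStateChange(State current) {
        std::unique_lock<Latch> lk(_mutex);
        _stateChanged.wait(lk, [&] { return _state != current; });
        return _state;
    }

private:
    Latch _mutex;                           // protects _state and _errMsg
    std::condition_variable _stateChanged;
    State _state = State::NotStarted;
    std::string _errMsg;                    // valid only when _state == ErrorOccurred
};
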
diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp
index f645174986d..15dd677e0ba 100644
--- a/src/mongo/db/s/session_catalog_migration_source.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source.cpp
@@ -181,12 +181,12 @@ bool SessionCatalogMigrationSource::hasMoreOplog() {
return true;
}
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
return _hasNewWrites(lk);
}
void SessionCatalogMigrationSource::onCommitCloneStarted() {
- stdx::lock_guard<stdx::mutex> _lk(_newOplogMutex);
+ stdx::lock_guard<Latch> _lk(_newOplogMutex);
_state = State::kCommitStarted;
if (_newOplogNotification) {
@@ -196,7 +196,7 @@ void SessionCatalogMigrationSource::onCommitCloneStarted() {
}
void SessionCatalogMigrationSource::onCloneCleanup() {
- stdx::lock_guard<stdx::mutex> _lk(_newOplogMutex);
+ stdx::lock_guard<Latch> _lk(_newOplogMutex);
_state = State::kCleanup;
if (_newOplogNotification) {
@@ -207,14 +207,14 @@ void SessionCatalogMigrationSource::onCloneCleanup() {
SessionCatalogMigrationSource::OplogResult SessionCatalogMigrationSource::getLastFetchedOplog() {
{
- stdx::lock_guard<stdx::mutex> _lk(_sessionCloneMutex);
+ stdx::lock_guard<Latch> _lk(_sessionCloneMutex);
if (_lastFetchedOplog) {
return OplogResult(_lastFetchedOplog, false);
}
}
{
- stdx::lock_guard<stdx::mutex> _lk(_newOplogMutex);
+ stdx::lock_guard<Latch> _lk(_newOplogMutex);
return OplogResult(_lastFetchedNewWriteOplog, true);
}
}
@@ -230,7 +230,7 @@ bool SessionCatalogMigrationSource::fetchNextOplog(OperationContext* opCtx) {
std::shared_ptr<Notification<bool>> SessionCatalogMigrationSource::getNotificationForNewOplog() {
invariant(!_hasMoreOplogFromSessionCatalog());
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
if (_newOplogNotification) {
return _newOplogNotification;
@@ -293,13 +293,13 @@ bool SessionCatalogMigrationSource::_handleWriteHistory(WithLock, OperationConte
}
bool SessionCatalogMigrationSource::_hasMoreOplogFromSessionCatalog() {
- stdx::lock_guard<stdx::mutex> _lk(_sessionCloneMutex);
+ stdx::lock_guard<Latch> _lk(_sessionCloneMutex);
return _lastFetchedOplog || !_lastFetchedOplogBuffer.empty() ||
!_sessionOplogIterators.empty() || _currentOplogIterator;
}
bool SessionCatalogMigrationSource::_fetchNextOplogFromSessionCatalog(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_sessionCloneMutex);
+ stdx::unique_lock<Latch> lk(_sessionCloneMutex);
if (!_lastFetchedOplogBuffer.empty()) {
_lastFetchedOplog = _lastFetchedOplogBuffer.back();
@@ -334,7 +334,7 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op
EntryAtOpTimeType entryAtOpTimeType;
{
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
if (_newWriteOpTimeList.empty()) {
_lastFetchedNewWriteOplog.reset();
@@ -369,7 +369,7 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op
}
{
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
_lastFetchedNewWriteOplog = newWriteOplogEntry;
_newWriteOpTimeList.pop_front();
}
@@ -379,7 +379,7 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op
void SessionCatalogMigrationSource::notifyNewWriteOpTime(repl::OpTime opTime,
EntryAtOpTimeType entryAtOpTimeType) {
- stdx::lock_guard<stdx::mutex> lk(_newOplogMutex);
+ stdx::lock_guard<Latch> lk(_newOplogMutex);
_newWriteOpTimeList.emplace_back(opTime, entryAtOpTimeType);
if (_newOplogNotification) {
diff --git a/src/mongo/db/s/session_catalog_migration_source.h b/src/mongo/db/s/session_catalog_migration_source.h
index 06093d4c8e8..df0d9d80259 100644
--- a/src/mongo/db/s/session_catalog_migration_source.h
+++ b/src/mongo/db/s/session_catalog_migration_source.h
@@ -37,9 +37,9 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/session_txn_record_gen.h"
#include "mongo/db/transaction_history_iterator.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -231,7 +231,8 @@ private:
// Protects _sessionCatalogCursor, _sessionOplogIterators, _currentOplogIterator,
// _lastFetchedOplogBuffer, _lastFetchedOplog
- stdx::mutex _sessionCloneMutex;
+ Mutex _sessionCloneMutex =
+ MONGO_MAKE_LATCH("SessionCatalogMigrationSource::_sessionCloneMutex");
// List of remaining session records that need to be cloned.
std::vector<std::unique_ptr<SessionOplogIterator>> _sessionOplogIterators;
@@ -248,7 +249,7 @@ private:
boost::optional<repl::OplogEntry> _lastFetchedOplog;
// Protects _newWriteTsList, _lastFetchedNewWriteOplog, _state, _newOplogNotification
- stdx::mutex _newOplogMutex;
+ Mutex _newOplogMutex = MONGO_MAKE_LATCH("SessionCatalogMigrationSource::_newOplogMutex");
// Stores oplog opTime of new writes that are coming in.
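
As the header hunks above show, this source keeps two independent latches over disjoint state so the clone path and the new-writes path never contend on a single lock. A small sketch of that two-latch layout follows, again assuming std::mutex stands in for mongo::Mutex and with simplified, illustrative member types.

#include <deque>
#include <mutex>
#include <optional>
#include <string>

using Latch = std::mutex;  // stand-in for mongo::Mutex, assumed for illustration

class MigrationSourceSketch {
public:
    // Checks the clone-side state first, then the new-writes side, taking each
    // latch in its own scope so neither is held across the other.
    std::optional<std::string> getLastFetchedOplog() {
        {
            std::lock_guard<Latch> lk(_sessionCloneMutex);
            if (_lastFetchedOplog)
                return _lastFetchedOplog;
        }
        {
            std::lock_guard<Latch> lk(_newOplogMutex);
            return _lastFetchedNewWriteOplog;
        }
    }

    void notifyNewWrite(std::string entry) {
        std::lock_guard<Latch> lk(_newOplogMutex);
        _newWrites.push_back(std::move(entry));
    }

private:
    Latch _sessionCloneMutex;  // protects _lastFetchedOplog
    std::optional<std::string> _lastFetchedOplog;

    Latch _newOplogMutex;  // protects _newWrites, _lastFetchedNewWriteOplog
    std::deque<std::string> _newWrites;
    std::optional<std::string> _lastFetchedNewWriteOplog;
};
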
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 78600e5d488..a974104d1f4 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -374,7 +374,7 @@ void ShardServerCatalogCacheLoader::notifyOfCollectionVersionUpdate(const Namesp
}
void ShardServerCatalogCacheLoader::initializeReplicaSetRole(bool isPrimary) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_role == ReplicaSetRole::None);
if (isPrimary) {
@@ -385,7 +385,7 @@ void ShardServerCatalogCacheLoader::initializeReplicaSetRole(bool isPrimary) {
}
void ShardServerCatalogCacheLoader::onStepDown() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_role != ReplicaSetRole::None);
_contexts.interrupt(ErrorCodes::PrimarySteppedDown);
++_term;
@@ -393,7 +393,7 @@ void ShardServerCatalogCacheLoader::onStepDown() {
}
void ShardServerCatalogCacheLoader::onStepUp() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_role != ReplicaSetRole::None);
++_term;
_role = ReplicaSetRole::Primary;
@@ -401,7 +401,7 @@ void ShardServerCatalogCacheLoader::onStepUp() {
void ShardServerCatalogCacheLoader::shutDown() {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (_inShutdown) {
return;
}
@@ -412,7 +412,7 @@ void ShardServerCatalogCacheLoader::shutDown() {
// Prevent further scheduling, then interrupt ongoing tasks.
_threadPool.shutdown();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_contexts.interrupt(ErrorCodes::InterruptedAtShutdown);
++_term;
}
@@ -430,7 +430,7 @@ std::shared_ptr<Notification<void>> ShardServerCatalogCacheLoader::getChunksSinc
bool isPrimary;
long long term;
std::tie(isPrimary, term) = [&] {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return std::make_tuple(_role == ReplicaSetRole::Primary, _term);
}();
@@ -446,7 +446,7 @@ std::shared_ptr<Notification<void>> ShardServerCatalogCacheLoader::getChunksSinc
// We may have missed an OperationContextGroup interrupt since this operation
// began but before the OperationContext was added to the group. So we'll check
// that we're still in the same _term.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
uassert(ErrorCodes::InterruptedDueToReplStateChange,
"Unable to refresh routing table because replica set state changed or "
"the node is shutting down.",
@@ -473,7 +473,7 @@ void ShardServerCatalogCacheLoader::getDatabase(
bool isPrimary;
long long term;
std::tie(isPrimary, term) = [&] {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return std::make_tuple(_role == ReplicaSetRole::Primary, _term);
}();
@@ -489,7 +489,7 @@ void ShardServerCatalogCacheLoader::getDatabase(
// We may have missed an OperationContextGroup interrupt since this operation began
// but before the OperationContext was added to the group. So we'll check that we're
// still in the same _term.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
uassert(ErrorCodes::InterruptedDueToReplStateChange,
"Unable to refresh database because replica set state changed or the node "
"is shutting down.",
@@ -509,7 +509,7 @@ void ShardServerCatalogCacheLoader::getDatabase(
void ShardServerCatalogCacheLoader::waitForCollectionFlush(OperationContext* opCtx,
const NamespaceString& nss) {
- stdx::unique_lock<stdx::mutex> lg(_mutex);
+ stdx::unique_lock<Latch> lg(_mutex);
const auto initialTerm = _term;
boost::optional<uint64_t> taskNumToWait;
@@ -560,7 +560,7 @@ void ShardServerCatalogCacheLoader::waitForCollectionFlush(OperationContext* opC
void ShardServerCatalogCacheLoader::waitForDatabaseFlush(OperationContext* opCtx,
StringData dbName) {
- stdx::unique_lock<stdx::mutex> lg(_mutex);
+ stdx::unique_lock<Latch> lg(_mutex);
const auto initialTerm = _term;
boost::optional<uint64_t> taskNumToWait;
@@ -636,7 +636,7 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
// Get the max version the loader has.
const ChunkVersion maxLoaderVersion = [&] {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto taskListIt = _collAndChunkTaskLists.find(nss);
if (taskListIt != _collAndChunkTaskLists.end() &&
@@ -707,7 +707,7 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
}
const auto termAfterRefresh = [&] {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _term;
}();
@@ -864,7 +864,7 @@ std::pair<bool, CollectionAndChangedChunks> ShardServerCatalogCacheLoader::_getE
const NamespaceString& nss,
const ChunkVersion& catalogCacheSinceVersion,
const long long term) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
auto taskListIt = _collAndChunkTaskLists.find(nss);
if (taskListIt == _collAndChunkTaskLists.end()) {
@@ -899,7 +899,7 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleCollAndChun
OperationContext* opCtx, const NamespaceString& nss, collAndChunkTask task) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto& list = _collAndChunkTaskLists[nss];
auto wasEmpty = list.empty();
@@ -921,7 +921,7 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleDbTask(Oper
DBTask task) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto& list = _dbTaskLists[dbName.toString()];
auto wasEmpty = list.empty();
@@ -955,7 +955,7 @@ void ShardServerCatalogCacheLoader::_runCollAndChunksTasks(const NamespaceString
}
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// If task completed successfully, remove it from work queue
if (taskFinished) {
@@ -977,7 +977,7 @@ void ShardServerCatalogCacheLoader::_runCollAndChunksTasks(const NamespaceString
<< " caller to refresh this namespace.";
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_collAndChunkTaskLists.erase(nss);
}
return;
@@ -1004,7 +1004,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
}
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// If task completed successfully, remove it from work queue
if (taskFinished) {
@@ -1026,7 +1026,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
<< " caller to refresh this namespace.";
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_dbTaskLists.erase(name);
}
return;
@@ -1039,7 +1039,7 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata(
OperationContext* opCtx, const NamespaceString& nss) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
const collAndChunkTask& task = _collAndChunkTaskLists[nss].front();
invariant(task.dropped || !task.collectionAndChangedChunks->changedChunks.empty());
@@ -1076,7 +1076,7 @@ void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata(
void ShardServerCatalogCacheLoader::_updatePersistedDbMetadata(OperationContext* opCtx,
StringData dbName) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
const DBTask& task = _dbTaskLists[dbName.toString()].front();
@@ -1241,7 +1241,7 @@ void ShardServerCatalogCacheLoader::DbTaskList::pop_front() {
}
void ShardServerCatalogCacheLoader::CollAndChunkTaskList::waitForActiveTaskCompletion(
- stdx::unique_lock<stdx::mutex>& lg) {
+ stdx::unique_lock<Latch>& lg) {
// Increase the use_count of the condition variable shared pointer, because the entire task list
// might get deleted during the unlocked interval
auto condVar = _activeTaskCompletedCondVar;
@@ -1249,7 +1249,7 @@ void ShardServerCatalogCacheLoader::CollAndChunkTaskList::waitForActiveTaskCompl
}
void ShardServerCatalogCacheLoader::DbTaskList::waitForActiveTaskCompletion(
- stdx::unique_lock<stdx::mutex>& lg) {
+ stdx::unique_lock<Latch>& lg) {
// Increase the use_count of the condition variable shared pointer, because the entire task list
// might get deleted during the unlocked interval
auto condVar = _activeTaskCompletedCondVar;
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.h b/src/mongo/db/s/shard_server_catalog_cache_loader.h
index 2a456c5a9ef..4cbdc31e3e0 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.h
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.h
@@ -31,8 +31,8 @@
#include "mongo/db/operation_context_group.h"
#include "mongo/db/s/namespace_metadata_change_notifications.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/s/catalog_cache_loader.h"
-#include "mongo/stdx/condition_variable.h"
#include "mongo/util/concurrency/thread_pool.h"
namespace mongo {
@@ -204,7 +204,7 @@ private:
* same task object on which it was called because it might have been deleted during the
* unlocked period.
*/
- void waitForActiveTaskCompletion(stdx::unique_lock<stdx::mutex>& lg);
+ void waitForActiveTaskCompletion(stdx::unique_lock<Latch>& lg);
/**
* Checks whether 'term' matches the term of the latest task in the task list. This is
@@ -314,7 +314,7 @@ private:
* same task object on which it was called because it might have been deleted during the
* unlocked period.
*/
- void waitForActiveTaskCompletion(stdx::unique_lock<stdx::mutex>& lg);
+ void waitForActiveTaskCompletion(stdx::unique_lock<Latch>& lg);
/**
* Checks whether 'term' matches the term of the latest task in the task list. This is
@@ -484,7 +484,7 @@ private:
NamespaceMetadataChangeNotifications _namespaceNotifications;
// Protects the class state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardServerCatalogCacheLoader::_mutex");
// True if shutDown was called.
bool _inShutdown{false};
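
The waitForActiveTaskCompletion hunks above keep the condition variable alive through the wait by copying its shared_ptr first, because the owning task list may be erased while the lock is released inside wait(). Below is a minimal sketch of that idiom, assuming std::mutex and std::condition_variable as stand-ins for the mongo primitives; the class name is illustrative only.

#include <condition_variable>
#include <memory>
#include <mutex>

using Latch = std::mutex;  // stand-in for mongo::Mutex, assumed for illustration

class TaskListSketch {
public:
    // The local shared_ptr copy bumps the use_count before waiting, so the
    // condition variable outlives this object even if the task list is deleted
    // during the unlocked interval inside wait().
    void waitForActiveTaskCompletion(std::unique_lock<Latch>& lg) {
        auto condVar = _activeTaskCompletedCondVar;
        condVar->wait(lg);
    }

    void signalActiveTaskCompletion() {
        _activeTaskCompletedCondVar->notify_all();
    }

private:
    std::shared_ptr<std::condition_variable> _activeTaskCompletedCondVar =
        std::make_shared<std::condition_variable>();
};
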
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index 29f9f699848..46631bb89bd 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -313,7 +313,7 @@ void ShardingInitializationMongoD::initializeFromShardIdentity(
auto const shardingState = ShardingState::get(opCtx);
auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
- stdx::unique_lock<stdx::mutex> ul(_initSynchronizationMutex);
+ stdx::unique_lock<Latch> ul(_initSynchronizationMutex);
if (shardingState->enabled()) {
uassert(40371, "", shardingState->shardId() == shardIdentity.getShardName());
diff --git a/src/mongo/db/s/sharding_initialization_mongod.h b/src/mongo/db/s/sharding_initialization_mongod.h
index 241488ae3fe..2eaefd22fbe 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.h
+++ b/src/mongo/db/s/sharding_initialization_mongod.h
@@ -114,7 +114,8 @@ public:
private:
// This mutex ensures that only one thread at a time executes the sharding
// initialization/teardown sequence
- stdx::mutex _initSynchronizationMutex;
+ Mutex _initSynchronizationMutex =
+ MONGO_MAKE_LATCH("ShardingInitializationMongod::_initSynchronizationMutex");
// Function for initializing the sharding environment components (i.e. everything on the Grid)
ShardingEnvironmentInitFunc _initFunc;
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index b9c7e634a53..37e5f8930fa 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -57,7 +57,7 @@ ShardingState* ShardingState::get(OperationContext* operationContext) {
}
void ShardingState::setInitialized(ShardId shardId, OID clusterId) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
invariant(_getInitializationState() == InitializationState::kNew);
_shardId = std::move(shardId);
@@ -71,7 +71,7 @@ void ShardingState::setInitialized(Status failedStatus) {
invariant(!failedStatus.isOK());
log() << "Failed to initialize sharding components" << causedBy(failedStatus);
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
invariant(_getInitializationState() == InitializationState::kNew);
_initializationStatus = std::move(failedStatus);
@@ -79,7 +79,7 @@ void ShardingState::setInitialized(Status failedStatus) {
}
boost::optional<Status> ShardingState::initializationStatus() {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
if (_getInitializationState() == InitializationState::kNew)
return boost::none;
@@ -105,13 +105,13 @@ Status ShardingState::canAcceptShardedCommands() const {
ShardId ShardingState::shardId() {
invariant(enabled());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _shardId;
}
OID ShardingState::clusterId() {
invariant(enabled());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _clusterId;
}
diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h
index 4b78d0bdfb4..ab3430fb5ec 100644
--- a/src/mongo/db/s/sharding_state.h
+++ b/src/mongo/db/s/sharding_state.h
@@ -32,8 +32,8 @@
#include <string>
#include "mongo/bson/oid.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/shard_id.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -136,7 +136,7 @@ private:
}
// Protects state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardingState::_mutex");
// State of the initialization of the sharding state along with any potential errors
AtomicWord<unsigned> _initializationState{static_cast<uint32_t>(InitializationState::kNew)};
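
ShardingState pairs the new latch with an atomic initialization flag: hot-path checks read the atomic without locking, while the latch protects the remaining members. A standalone sketch of that split follows, using std::atomic and std::mutex as stand-ins for AtomicWord and mongo::Mutex; the names are invented for illustration.

#include <atomic>
#include <mutex>
#include <string>

using Latch = std::mutex;  // stand-in for mongo::Mutex, assumed for illustration

class ShardingStateSketch {
public:
    enum class InitializationState : unsigned { kNew, kInitialized, kError };

    // Fast, lock-free check used on hot paths.
    bool enabled() const {
        return _initializationState.load() ==
            static_cast<unsigned>(InitializationState::kInitialized);
    }

    void setInitialized(std::string shardId) {
        std::unique_lock<Latch> ul(_mutex);
        _shardId = std::move(shardId);
        _initializationState.store(
            static_cast<unsigned>(InitializationState::kInitialized));
    }

    std::string shardId() {
        std::lock_guard<Latch> lk(_mutex);
        return _shardId;
    }

private:
    Latch _mutex;  // protects _shardId
    std::atomic<unsigned> _initializationState{
        static_cast<unsigned>(InitializationState::kNew)};
    std::string _shardId;
};
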
diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp
index 7836c63ef62..3d3e392980a 100644
--- a/src/mongo/db/s/transaction_coordinator.cpp
+++ b/src/mongo/db/s/transaction_coordinator.cpp
@@ -139,7 +139,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// _participantsDurable (optional)
// Output: _participantsDurable = true
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_participants);
_step = Step::kWritingParticipantList;
@@ -166,7 +166,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
.thenRunOn(Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor())
.then([this] {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_participantsDurable = true;
}
@@ -177,7 +177,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// _decision (optional)
// Output: _decision is set
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_participantsDurable);
_step = Step::kWaitingForVotes;
@@ -195,7 +195,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
_serviceContext, *_sendPrepareScheduler, _lsid, _txnNumber, *_participants)
.then([this](PrepareVoteConsensus consensus) mutable {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_decision = consensus.decision();
}
@@ -218,7 +218,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// _decisionDurable (optional)
// Output: _decisionDurable = true
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_decision);
_step = Step::kWritingDecision;
@@ -242,7 +242,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
})
.then([this] {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_decisionDurable = true;
}
@@ -250,7 +250,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// Input: _decisionDurable
// Output: (none)
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_decisionDurable);
_step = Step::kWaitingForDecisionAcks;
@@ -291,7 +291,7 @@ TransactionCoordinator::TransactionCoordinator(OperationContext* operationContex
// Do a best-effort attempt (i.e., writeConcern w:1) to delete the coordinator's durable
// state.
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_step = Step::kDeletingCoordinatorDoc;
@@ -364,7 +364,7 @@ void TransactionCoordinator::cancelIfCommitNotYetStarted() {
}
bool TransactionCoordinator::_reserveKickOffCommitPromise() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (_kickOffCommitPromiseSet)
return false;
@@ -385,7 +385,7 @@ void TransactionCoordinator::_done(Status status) {
LOG(3) << txn::txnIdToString(_lsid, _txnNumber) << " Two-phase commit completed with "
<< redact(status);
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
const auto tickSource = _serviceContext->getTickSource();
@@ -487,7 +487,7 @@ std::string TransactionCoordinator::_twoPhaseCommitInfoForLog(
}
TransactionCoordinator::Step TransactionCoordinator::getStep() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _step;
}
@@ -496,7 +496,7 @@ void TransactionCoordinator::reportState(BSONObjBuilder& parent) const {
TickSource* tickSource = _serviceContext->getTickSource();
TickSource::Tick currentTick = tickSource->getTicks();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
BSONObjBuilder lsidBuilder(doc.subobjStart("lsid"));
_lsid.serialize(&lsidBuilder);
@@ -543,7 +543,7 @@ std::string TransactionCoordinator::toString(Step step) const {
}
void TransactionCoordinator::_updateAssociatedClient(Client* client) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_transactionCoordinatorMetricsObserver->updateLastClientInfo(client);
}
diff --git a/src/mongo/db/s/transaction_coordinator.h b/src/mongo/db/s/transaction_coordinator.h
index 12005613f89..68745a3e540 100644
--- a/src/mongo/db/s/transaction_coordinator.h
+++ b/src/mongo/db/s/transaction_coordinator.h
@@ -166,7 +166,7 @@ private:
std::unique_ptr<txn::AsyncWorkScheduler> _sendPrepareScheduler;
// Protects the state below
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TransactionCoordinator::_mutex");
// Tracks which step of the 2PC coordination is currently (or was most recently) executing
Step _step{Step::kInactive};
diff --git a/src/mongo/db/s/transaction_coordinator_catalog.cpp b/src/mongo/db/s/transaction_coordinator_catalog.cpp
index 5a5c029833b..fc0612515b2 100644
--- a/src/mongo/db/s/transaction_coordinator_catalog.cpp
+++ b/src/mongo/db/s/transaction_coordinator_catalog.cpp
@@ -52,14 +52,14 @@ void TransactionCoordinatorCatalog::exitStepUp(Status status) {
<< causedBy(status);
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_stepUpCompletionStatus);
_stepUpCompletionStatus = std::move(status);
_stepUpCompleteCV.notify_all();
}
void TransactionCoordinatorCatalog::onStepDown() {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
std::vector<std::shared_ptr<TransactionCoordinator>> coordinatorsToCancel;
for (auto&& [sessionId, coordinatorsForSession] : _coordinatorsBySession) {
@@ -83,7 +83,7 @@ void TransactionCoordinatorCatalog::insert(OperationContext* opCtx,
LOG(3) << "Inserting coordinator " << lsid.getId() << ':' << txnNumber
<< " into in-memory catalog";
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
if (!forStepUp) {
_waitForStepUpToComplete(ul, opCtx);
}
@@ -113,7 +113,7 @@ void TransactionCoordinatorCatalog::insert(OperationContext* opCtx,
std::shared_ptr<TransactionCoordinator> TransactionCoordinatorCatalog::get(
OperationContext* opCtx, const LogicalSessionId& lsid, TxnNumber txnNumber) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
_waitForStepUpToComplete(ul, opCtx);
std::shared_ptr<TransactionCoordinator> coordinatorToReturn;
@@ -133,7 +133,7 @@ std::shared_ptr<TransactionCoordinator> TransactionCoordinatorCatalog::get(
boost::optional<std::pair<TxnNumber, std::shared_ptr<TransactionCoordinator>>>
TransactionCoordinatorCatalog::getLatestOnSession(OperationContext* opCtx,
const LogicalSessionId& lsid) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
_waitForStepUpToComplete(ul, opCtx);
const auto& coordinatorsForSessionIter = _coordinatorsBySession.find(lsid);
@@ -156,7 +156,7 @@ void TransactionCoordinatorCatalog::_remove(const LogicalSessionId& lsid, TxnNum
LOG(3) << "Removing coordinator " << lsid.getId() << ':' << txnNumber
<< " from in-memory catalog";
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const auto& coordinatorsForSessionIter = _coordinatorsBySession.find(lsid);
@@ -181,7 +181,7 @@ void TransactionCoordinatorCatalog::_remove(const LogicalSessionId& lsid, TxnNum
}
void TransactionCoordinatorCatalog::join() {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
while (!_noActiveCoordinatorsCV.wait_for(
ul, stdx::chrono::seconds{5}, [this] { return _coordinatorsBySession.empty(); })) {
@@ -192,11 +192,11 @@ void TransactionCoordinatorCatalog::join() {
}
std::string TransactionCoordinatorCatalog::toString() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _toString(lk);
}
-void TransactionCoordinatorCatalog::_waitForStepUpToComplete(stdx::unique_lock<stdx::mutex>& lk,
+void TransactionCoordinatorCatalog::_waitForStepUpToComplete(stdx::unique_lock<Latch>& lk,
OperationContext* opCtx) {
invariant(lk.owns_lock());
opCtx->waitForConditionOrInterrupt(
@@ -219,7 +219,7 @@ std::string TransactionCoordinatorCatalog::_toString(WithLock wl) const {
}
void TransactionCoordinatorCatalog::filter(FilterPredicate predicate, FilterVisitor visitor) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto sessionIt = _coordinatorsBySession.begin(); sessionIt != _coordinatorsBySession.end();
++sessionIt) {
auto& lsid = sessionIt->first;
diff --git a/src/mongo/db/s/transaction_coordinator_catalog.h b/src/mongo/db/s/transaction_coordinator_catalog.h
index 5768c69bb3c..375fc33d1d9 100644
--- a/src/mongo/db/s/transaction_coordinator_catalog.h
+++ b/src/mongo/db/s/transaction_coordinator_catalog.h
@@ -33,7 +33,7 @@
#include <map>
#include "mongo/db/s/transaction_coordinator.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/concurrency/with_lock.h"
namespace mongo {
@@ -125,7 +125,7 @@ private:
* Blocks in an interruptible wait until the catalog is not marked as having a stepup in
* progress.
*/
- void _waitForStepUpToComplete(stdx::unique_lock<stdx::mutex>& lk, OperationContext* opCtx);
+ void _waitForStepUpToComplete(stdx::unique_lock<Latch>& lk, OperationContext* opCtx);
/**
* Removes the coordinator with the given session id and transaction number from the catalog, if
@@ -142,7 +142,7 @@ private:
std::string _toString(WithLock wl) const;
// Protects the state below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TransactionCoordinatorCatalog::_mutex");
// Contains TransactionCoordinator objects by session id and transaction number. May contain
// more than one coordinator per session. All coordinators for a session that do not correspond
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.cpp b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
index 5d3cf3bfdd5..05061af7fbe 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.cpp
@@ -60,14 +60,14 @@ AsyncWorkScheduler::AsyncWorkScheduler(ServiceContext* serviceContext)
AsyncWorkScheduler::~AsyncWorkScheduler() {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(_quiesced(lg));
}
if (!_parent)
return;
- stdx::lock_guard<stdx::mutex> lg(_parent->_mutex);
+ stdx::lock_guard<Latch> lg(_parent->_mutex);
_parent->_childSchedulers.erase(_itToRemove);
_parent->_notifyAllTasksComplete(lg);
_parent = nullptr;
@@ -129,7 +129,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
auto pf = makePromiseFuture<ResponseStatus>();
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
uassertStatusOK(_shutdownStatus);
auto scheduledCommandHandle =
@@ -157,7 +157,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
} else {
promise->setError([&] {
if (status == ErrorCodes::CallbackCanceled) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
return _shutdownStatus.isOK() ? status : _shutdownStatus;
}
return status;
@@ -172,7 +172,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
return std::move(pf.future).tapAll(
[this, it = std::move(it)](StatusWith<ResponseStatus> s) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
});
@@ -182,7 +182,7 @@ Future<executor::TaskExecutor::ResponseStatus> AsyncWorkScheduler::scheduleRemot
std::unique_ptr<AsyncWorkScheduler> AsyncWorkScheduler::makeChildScheduler() {
auto child = std::make_unique<AsyncWorkScheduler>(_serviceContext);
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_shutdownStatus.isOK())
child->shutdown(_shutdownStatus);
@@ -195,7 +195,7 @@ std::unique_ptr<AsyncWorkScheduler> AsyncWorkScheduler::makeChildScheduler() {
void AsyncWorkScheduler::shutdown(Status status) {
invariant(!status.isOK());
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_shutdownStatus.isOK())
return;
@@ -216,7 +216,7 @@ void AsyncWorkScheduler::shutdown(Status status) {
}
void AsyncWorkScheduler::join() {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
_allListsEmptyCV.wait(ul, [&] {
return _activeOpContexts.empty() && _activeHandles.empty() && _childSchedulers.empty();
});
diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.h b/src/mongo/db/s/transaction_coordinator_futures_util.h
index eb769319aad..a1f25c84744 100644
--- a/src/mongo/db/s/transaction_coordinator_futures_util.h
+++ b/src/mongo/db/s/transaction_coordinator_futures_util.h
@@ -78,7 +78,7 @@ public:
auto pf = makePromiseFuture<ReturnType>();
auto taskCompletionPromise = std::make_shared<Promise<ReturnType>>(std::move(pf.promise));
try {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
uassertStatusOK(_shutdownStatus);
auto scheduledWorkHandle = uassertStatusOK(_executor->scheduleWorkAt(
@@ -119,7 +119,7 @@ public:
return std::move(pf.future).tapAll(
[this, it = std::move(it)](StatusOrStatusWith<ReturnType> s) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_activeHandles.erase(it);
_notifyAllTasksComplete(lg);
});
@@ -210,7 +210,7 @@ private:
ChildIteratorsList::iterator _itToRemove;
// Mutex to protect the shared state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AsyncWorkScheduler::_mutex");
// If shutdown() is called, this contains the first status that was passed to it and is an
// indication that no more operations can be scheduled
@@ -294,7 +294,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
* The first few fields have fixed values. *
******************************************************/
// Protects all state in the SharedBlock.
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("SharedBlock::mutex");
// If any response returns an error prior to a response setting shouldStopIteration to
// ShouldStopIteration::kYes, the promise will be set with that error rather than the global
@@ -332,7 +332,7 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
for (auto&& localFut : futures) {
std::move(localFut)
.then([sharedBlock](IndividualResult res) {
- stdx::unique_lock<stdx::mutex> lk(sharedBlock->mutex);
+ stdx::unique_lock<Latch> lk(sharedBlock->mutex);
if (sharedBlock->shouldStopIteration == ShouldStopIteration::kNo &&
sharedBlock->status.isOK()) {
sharedBlock->shouldStopIteration =
@@ -340,14 +340,14 @@ Future<GlobalResult> collect(std::vector<Future<IndividualResult>>&& futures,
}
})
.onError([sharedBlock](Status s) {
- stdx::unique_lock<stdx::mutex> lk(sharedBlock->mutex);
+ stdx::unique_lock<Latch> lk(sharedBlock->mutex);
if (sharedBlock->shouldStopIteration == ShouldStopIteration::kNo &&
sharedBlock->status.isOK()) {
sharedBlock->status = s;
}
})
.getAsync([sharedBlock](Status s) {
- stdx::unique_lock<stdx::mutex> lk(sharedBlock->mutex);
+ stdx::unique_lock<Latch> lk(sharedBlock->mutex);
sharedBlock->numOutstandingResponses--;
if (sharedBlock->numOutstandingResponses == 0) {
// Unlock before emplacing the result in case any continuations do expensive
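
The collect() hunks above guard a SharedBlock with a single latch that every future continuation locks before recording its result, decrementing an outstanding-response counter and unlocking before any expensive completion work. A simplified sketch of that aggregation pattern follows, using threads rather than the Future chain and std::mutex as a stand-in for mongo::Mutex; names and fields are illustrative only.

#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

using Latch = std::mutex;  // stand-in for mongo::Mutex, assumed for illustration

// Shared state visible to every response handler.
struct SharedBlock {
    Latch mutex;             // protects everything below
    std::string firstError;  // first error seen, if any
    int numOutstandingResponses;

    explicit SharedBlock(int n) : numOutstandingResponses(n) {}
};

// Runs one handler per "response"; the last handler to finish is where the
// real code would fulfil the overall promise.
void collectSketch(const std::vector<bool>& responsesOk) {
    auto shared = std::make_shared<SharedBlock>(static_cast<int>(responsesOk.size()));
    std::vector<std::thread> workers;
    for (bool ok : responsesOk) {
        workers.emplace_back([shared, ok] {
            std::unique_lock<Latch> lk(shared->mutex);
            if (!ok && shared->firstError.empty())
                shared->firstError = "response failed";
            if (--shared->numOutstandingResponses == 0) {
                // Last response: unlock before doing any expensive completion work.
                lk.unlock();
            }
        });
    }
    for (auto& t : workers)
        t.join();
}
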
diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp
index a8e72285cd6..3ac1212a468 100644
--- a/src/mongo/db/s/transaction_coordinator_service.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service.cpp
@@ -171,7 +171,7 @@ void TransactionCoordinatorService::onStepUp(OperationContext* opCtx,
Milliseconds recoveryDelayForTesting) {
joinPreviousRound();
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(!_catalogAndScheduler);
_catalogAndScheduler = std::make_shared<CatalogAndScheduler>(opCtx->getServiceContext());
@@ -234,7 +234,7 @@ void TransactionCoordinatorService::onStepUp(OperationContext* opCtx,
void TransactionCoordinatorService::onStepDown() {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!_catalogAndScheduler)
return;
@@ -249,7 +249,7 @@ void TransactionCoordinatorService::onShardingInitialization(OperationContext* o
if (!isPrimary)
return;
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
invariant(!_catalogAndScheduler);
_catalogAndScheduler = std::make_shared<CatalogAndScheduler>(opCtx->getServiceContext());
@@ -260,7 +260,7 @@ void TransactionCoordinatorService::onShardingInitialization(OperationContext* o
std::shared_ptr<TransactionCoordinatorService::CatalogAndScheduler>
TransactionCoordinatorService::_getCatalogAndScheduler(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
uassert(
ErrorCodes::NotMaster, "Transaction coordinator is not a primary", _catalogAndScheduler);
diff --git a/src/mongo/db/s/transaction_coordinator_service.h b/src/mongo/db/s/transaction_coordinator_service.h
index c200809744f..a4fe1ce16f9 100644
--- a/src/mongo/db/s/transaction_coordinator_service.h
+++ b/src/mongo/db/s/transaction_coordinator_service.h
@@ -146,7 +146,7 @@ private:
std::shared_ptr<CatalogAndScheduler> _catalogAndSchedulerToCleanup;
// Protects the state below
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TransactionCoordinatorService::_mutex");
// The catalog + scheduler instantiated at the last step-up attempt. When nullptr, it means
// onStepUp has not been called yet after the last stepDown (or construction).
diff --git a/src/mongo/db/s/wait_for_majority_service.cpp b/src/mongo/db/s/wait_for_majority_service.cpp
index 0625a84b611..f41ed83c630 100644
--- a/src/mongo/db/s/wait_for_majority_service.cpp
+++ b/src/mongo/db/s/wait_for_majority_service.cpp
@@ -141,7 +141,7 @@ SharedSemiFuture<void> WaitForMajorityService::waitUntilMajority(const repl::OpT
void WaitForMajorityService::_periodicallyWaitForMajority(ServiceContext* service) {
ThreadClient tc("waitForMajority", service);
- stdx::unique_lock lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (!_inShutDown) {
auto opCtx = tc->makeOperationContext();
diff --git a/src/mongo/db/s/wait_for_majority_service.h b/src/mongo/db/s/wait_for_majority_service.h
index 970b475d0d3..90ec771bd40 100644
--- a/src/mongo/db/s/wait_for_majority_service.h
+++ b/src/mongo/db/s/wait_for_majority_service.h
@@ -36,7 +36,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/service_context.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/future.h"
@@ -74,7 +74,7 @@ private:
*/
void _periodicallyWaitForMajority(ServiceContext* service);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WaitForMajorityService::_mutex");
// Contains an ordered list of opTimes to wait to be majority committed.
OpTimeWaitingMap _queuedOpTimes;
diff --git a/src/mongo/db/s/wait_for_majority_service_test.cpp b/src/mongo/db/s/wait_for_majority_service_test.cpp
index d904d253af1..ca89ac04c8b 100644
--- a/src/mongo/db/s/wait_for_majority_service_test.cpp
+++ b/src/mongo/db/s/wait_for_majority_service_test.cpp
@@ -32,7 +32,7 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/s/wait_for_majority_service.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -64,7 +64,7 @@ public:
}
void finishWaitingOneOpTime() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_isTestReady = true;
_isTestReadyCV.notify_one();
@@ -74,7 +74,7 @@ public:
}
Status waitForWriteConcernStub(OperationContext* opCtx, const repl::OpTime& opTime) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_waitForMajorityCallCount++;
_callCountChangedCV.notify_one();
@@ -97,7 +97,7 @@ public:
}
const repl::OpTime& getLastOpTimeWaited() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastOpTimeWaited;
}
@@ -109,7 +109,7 @@ public:
private:
WaitForMajorityService _waitForMajorityService;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WaitForMajorityServiceTest::_mutex");
stdx::condition_variable _isTestReadyCV;
stdx::condition_variable _finishWaitingOneOpTimeCV;
stdx::condition_variable _callCountChangedCV;
diff --git a/src/mongo/db/server_recovery.cpp b/src/mongo/db/server_recovery.cpp
index f7133127f40..c44515a3358 100644
--- a/src/mongo/db/server_recovery.cpp
+++ b/src/mongo/db/server_recovery.cpp
@@ -48,17 +48,17 @@ bool SizeRecoveryState::collectionNeedsSizeAdjustment(const std::string& ident)
}
bool SizeRecoveryState::collectionAlwaysNeedsSizeAdjustment(const std::string& ident) const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _collectionsAlwaysNeedingSizeAdjustment.count(ident) > 0;
}
void SizeRecoveryState::markCollectionAsAlwaysNeedsSizeAdjustment(const std::string& ident) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_collectionsAlwaysNeedingSizeAdjustment.insert(ident);
}
void SizeRecoveryState::clearStateBeforeRecovery() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_collectionsAlwaysNeedingSizeAdjustment.clear();
}
} // namespace mongo
diff --git a/src/mongo/db/server_recovery.h b/src/mongo/db/server_recovery.h
index fbd89f56360..3b9d87a8065 100644
--- a/src/mongo/db/server_recovery.h
+++ b/src/mongo/db/server_recovery.h
@@ -33,7 +33,7 @@
#include <string>
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
/**
@@ -81,7 +81,7 @@ public:
void clearStateBeforeRecovery();
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("SizeRecoveryState::_mutex");
std::set<std::string> _collectionsAlwaysNeedingSizeAdjustment;
};
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index 350e9eae02e..148c2ebbc83 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -52,6 +52,7 @@
#include "mongo/util/str.h"
#include "mongo/util/system_clock_source.h"
#include "mongo/util/system_tick_source.h"
+#include <iostream>
namespace mongo {
namespace {
@@ -96,7 +97,7 @@ ServiceContext::ServiceContext()
_preciseClockSource(std::make_unique<SystemClockSource>()) {}
ServiceContext::~ServiceContext() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& client : _clients) {
severe() << "Client " << client->desc() << " still exists while destroying ServiceContext@"
<< static_cast<void*>(this);
@@ -161,7 +162,7 @@ ServiceContext::UniqueClient ServiceContext::makeClient(std::string desc,
std::unique_ptr<Client> client(new Client(std::move(desc), this, std::move(session)));
onCreate(client.get(), _clientObservers);
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_clients.insert(client.get()).second);
}
return UniqueClient(client.release());
@@ -225,7 +226,7 @@ void ServiceContext::setServiceExecutor(std::unique_ptr<transport::ServiceExecut
void ServiceContext::ClientDeleter::operator()(Client* client) const {
ServiceContext* const service = client->getServiceContext();
{
- stdx::lock_guard<stdx::mutex> lk(service->_mutex);
+ stdx::lock_guard<Latch> lk(service->_mutex);
invariant(service->_clients.erase(client));
}
onDestroy(client, service->_clientObservers);
@@ -291,7 +292,7 @@ Client* ServiceContext::LockedClientsCursor::next() {
}
void ServiceContext::setKillAllOperations() {
- stdx::lock_guard<stdx::mutex> clientLock(_mutex);
+ stdx::lock_guard<Latch> clientLock(_mutex);
// Ensure that all newly created operation contexts will immediately be in the interrupted state
_globalKill.store(true);
@@ -332,17 +333,17 @@ void ServiceContext::unsetKillAllOperations() {
}
void ServiceContext::registerKillOpListener(KillOpListenerInterface* listener) {
- stdx::lock_guard<stdx::mutex> clientLock(_mutex);
+ stdx::lock_guard<Latch> clientLock(_mutex);
_killOpListeners.push_back(listener);
}
void ServiceContext::waitForStartupComplete() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_startupCompleteCondVar.wait(lk, [this] { return _startupComplete; });
}
void ServiceContext::notifyStartupComplete() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_startupComplete = true;
lk.unlock();
_startupCompleteCondVar.notify_all();
diff --git a/src/mongo/db/service_context.h b/src/mongo/db/service_context.h
index 0aa04389245..3d4fdd1609b 100644
--- a/src/mongo/db/service_context.h
+++ b/src/mongo/db/service_context.h
@@ -39,8 +39,8 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/transport/service_executor.h"
#include "mongo/transport/session.h"
@@ -50,6 +50,8 @@
#include "mongo/util/periodic_runner.h"
#include "mongo/util/tick_source.h"
+#include <iostream>
+
namespace mongo {
class AbstractMessagingPort;
@@ -163,7 +165,7 @@ public:
Client* next();
private:
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
ClientSet::const_iterator _curr;
ClientSet::const_iterator _end;
};
@@ -530,7 +532,7 @@ private:
std::unique_ptr<ClientObserver> _observer;
};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ServiceContext::_mutex");
/**
* The periodic runner.
diff --git a/src/mongo/db/service_context_test_fixture.cpp b/src/mongo/db/service_context_test_fixture.cpp
index bd422327e37..98bab228070 100644
--- a/src/mongo/db/service_context_test_fixture.cpp
+++ b/src/mongo/db/service_context_test_fixture.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/client.h"
#include "mongo/db/op_observer_registry.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/diagnostic_info.h"
namespace mongo {
diff --git a/src/mongo/db/service_liaison_mock.cpp b/src/mongo/db/service_liaison_mock.cpp
index ab4397f1980..f6c36f9eb51 100644
--- a/src/mongo/db/service_liaison_mock.cpp
+++ b/src/mongo/db/service_liaison_mock.cpp
@@ -43,12 +43,12 @@ MockServiceLiaisonImpl::MockServiceLiaisonImpl() {
}
LogicalSessionIdSet MockServiceLiaisonImpl::getActiveOpSessions() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _activeSessions;
}
LogicalSessionIdSet MockServiceLiaisonImpl::getOpenCursorSessions(OperationContext* opCtx) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _cursorSessions;
}
@@ -65,32 +65,32 @@ void MockServiceLiaisonImpl::scheduleJob(PeriodicRunner::PeriodicJob job) {
void MockServiceLiaisonImpl::addCursorSession(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cursorSessions.insert(std::move(lsid));
}
void MockServiceLiaisonImpl::removeCursorSession(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cursorSessions.erase(lsid);
}
void MockServiceLiaisonImpl::clearCursorSession() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cursorSessions.clear();
}
void MockServiceLiaisonImpl::add(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cursorSessions.insert(std::move(lsid));
}
void MockServiceLiaisonImpl::remove(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_activeSessions.erase(lsid);
}
void MockServiceLiaisonImpl::clear() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_activeSessions.clear();
}
diff --git a/src/mongo/db/service_liaison_mock.h b/src/mongo/db/service_liaison_mock.h
index 6d500ae5682..72512cbb95b 100644
--- a/src/mongo/db/service_liaison_mock.h
+++ b/src/mongo/db/service_liaison_mock.h
@@ -33,8 +33,8 @@
#include "mongo/db/service_liaison.h"
#include "mongo/executor/async_timer_mock.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
#include "mongo/util/time_support.h"
@@ -87,7 +87,7 @@ private:
boost::optional<SessionKiller::Matcher> _matcher;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MockServiceLiaisonImpl::_mutex");
LogicalSessionIdSet _activeSessions;
LogicalSessionIdSet _cursorSessions;
};
diff --git a/src/mongo/db/service_liaison_mongod.cpp b/src/mongo/db/service_liaison_mongod.cpp
index 94e1fbd9217..6e26c6f16e7 100644
--- a/src/mongo/db/service_liaison_mongod.cpp
+++ b/src/mongo/db/service_liaison_mongod.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/cursor_manager.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/service_liaison_mongod.h b/src/mongo/db/service_liaison_mongod.h
index b1060425f6f..3cf8864b5eb 100644
--- a/src/mongo/db/service_liaison_mongod.h
+++ b/src/mongo/db/service_liaison_mongod.h
@@ -69,7 +69,7 @@ protected:
*/
ServiceContext* _context() override;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ServiceLiaisonMongod::_mutex");
std::vector<PeriodicJobAnchor> _jobs;
};
diff --git a/src/mongo/db/service_liaison_mongos.cpp b/src/mongo/db/service_liaison_mongos.cpp
index 666ca06ea68..9abe73ea5c5 100644
--- a/src/mongo/db/service_liaison_mongos.cpp
+++ b/src/mongo/db/service_liaison_mongos.cpp
@@ -34,9 +34,9 @@
#include "mongo/db/service_liaison_mongos.h"
#include "mongo/db/service_context.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/grid.h"
#include "mongo/s/query/cluster_cursor_manager.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/service_liaison_mongos.h b/src/mongo/db/service_liaison_mongos.h
index ab40801557d..22fc7032d73 100644
--- a/src/mongo/db/service_liaison_mongos.h
+++ b/src/mongo/db/service_liaison_mongos.h
@@ -69,7 +69,7 @@ protected:
*/
ServiceContext* _context() override;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ServiceLiaisonMongos::_mutex");
std::vector<PeriodicJobAnchor> _jobs;
};
diff --git a/src/mongo/db/session_catalog.cpp b/src/mongo/db/session_catalog.cpp
index f3954651690..97fbff47f89 100644
--- a/src/mongo/db/session_catalog.cpp
+++ b/src/mongo/db/session_catalog.cpp
@@ -49,7 +49,7 @@ const auto operationSessionDecoration =
} // namespace
SessionCatalog::~SessionCatalog() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
for (const auto& entry : _sessions) {
ObservableSession session(lg, entry.second->session);
invariant(!session.currentOperation());
@@ -58,7 +58,7 @@ SessionCatalog::~SessionCatalog() {
}
void SessionCatalog::reset_forTest() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_sessions.clear();
}
@@ -79,7 +79,7 @@ SessionCatalog::ScopedCheckedOutSession SessionCatalog::_checkOutSession(Operati
invariant(!opCtx->lockState()->inAWriteUnitOfWork());
invariant(!opCtx->lockState()->isLocked());
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
auto sri = _getOrCreateSessionRuntimeInfo(ul, opCtx, *opCtx->getLogicalSessionId());
// Wait until the session is no longer checked out and until the previously scheduled kill has
@@ -106,7 +106,7 @@ SessionCatalog::SessionToKill SessionCatalog::checkOutSessionForKill(OperationCo
invariant(!operationSessionDecoration(opCtx));
invariant(!opCtx->getTxnNumber());
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
auto sri = _getOrCreateSessionRuntimeInfo(ul, opCtx, killToken.lsidToKill);
invariant(ObservableSession(ul, sri->session)._killed());
@@ -130,7 +130,7 @@ void SessionCatalog::scanSession(const LogicalSessionId& lsid,
std::unique_ptr<SessionRuntimeInfo> sessionToReap;
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto it = _sessions.find(lsid);
if (it != _sessions.end()) {
auto& sri = it->second;
@@ -151,7 +151,7 @@ void SessionCatalog::scanSessions(const SessionKiller::Matcher& matcher,
std::vector<std::unique_ptr<SessionRuntimeInfo>> sessionsToReap;
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
LOG(2) << "Beginning scanSessions. Scanning " << _sessions.size() << " sessions.";
@@ -173,7 +173,7 @@ void SessionCatalog::scanSessions(const SessionKiller::Matcher& matcher,
}
SessionCatalog::KillToken SessionCatalog::killSession(const LogicalSessionId& lsid) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto it = _sessions.find(lsid);
uassert(ErrorCodes::NoSuchSession, "Session not found", it != _sessions.end());
@@ -182,7 +182,7 @@ SessionCatalog::KillToken SessionCatalog::killSession(const LogicalSessionId& ls
}
size_t SessionCatalog::size() const {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
return _sessions.size();
}
@@ -198,7 +198,7 @@ SessionCatalog::SessionRuntimeInfo* SessionCatalog::_getOrCreateSessionRuntimeIn
void SessionCatalog::_releaseSession(SessionRuntimeInfo* sri,
boost::optional<KillToken> killToken) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
// Make sure we have exactly the same session on the map and that it is still associated with an
// operation context (meaning checked-out)
diff --git a/src/mongo/db/session_catalog.h b/src/mongo/db/session_catalog.h
index b9e5e98049d..ea5226915c7 100644
--- a/src/mongo/db/session_catalog.h
+++ b/src/mongo/db/session_catalog.h
@@ -37,8 +37,8 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/session.h"
#include "mongo/db/session_killer.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -153,7 +153,7 @@ private:
void _releaseSession(SessionRuntimeInfo* sri, boost::optional<KillToken> killToken);
// Protects the state below
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("SessionCatalog::_mutex");
// Owns the Session objects for all current Sessions.
SessionRuntimeInfoMap _sessions;
diff --git a/src/mongo/db/session_catalog_test.cpp b/src/mongo/db/session_catalog_test.cpp
index 2ef67c1f884..58e03aa20b4 100644
--- a/src/mongo/db/session_catalog_test.cpp
+++ b/src/mongo/db/session_catalog_test.cpp
@@ -600,9 +600,9 @@ TEST_F(SessionCatalogTestWithDefaultOpCtx, ConcurrentCheckOutAndKill) {
// The main thread won't check in the session until it's killed.
{
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cond;
- stdx::unique_lock<stdx::mutex> lock(m);
+ stdx::unique_lock<Latch> lock(m);
ASSERT_EQ(ErrorCodes::InternalError,
_opCtx->waitForConditionOrInterruptNoAssert(cond, lock));
}
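
The test hunk above creates an anonymous latch via MONGO_MAKE_LATCH() and waits on a condition variable through the operation context, expecting the wait to end by interruption rather than by signal. A rough standalone approximation follows, assuming std::mutex for the latch and a deadline in place of the operation-context interrupt, which a plain condition_variable does not provide.

#include <chrono>
#include <condition_variable>
#include <mutex>

using Latch = std::mutex;  // stand-in for the anonymous MONGO_MAKE_LATCH() above

// Nothing ever signals 'cond' and the predicate never becomes true, so this
// returns false once the deadline passes, mirroring the test's expectation
// that the wait ends without the condition being satisfied.
bool waitUntilSignalledOrDeadline(std::chrono::milliseconds timeout) {
    Latch m;
    std::condition_variable cond;
    std::unique_lock<Latch> lock(m);
    return cond.wait_for(lock, timeout, [] { return false; });
}
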
diff --git a/src/mongo/db/session_killer.cpp b/src/mongo/db/session_killer.cpp
index 2f92bf6dbf2..c7acd2d074c 100644
--- a/src/mongo/db/session_killer.cpp
+++ b/src/mongo/db/session_killer.cpp
@@ -50,7 +50,7 @@ SessionKiller::SessionKiller(ServiceContext* sc, KillFunc killer)
Client::setCurrent(sc->makeClient("SessionKiller"));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// While we're not in shutdown
while (!_inShutdown) {
@@ -72,7 +72,7 @@ SessionKiller::SessionKiller(ServiceContext* sc, KillFunc killer)
SessionKiller::~SessionKiller() {
DESTRUCTOR_GUARD([&] {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
_killerCV.notify_one();
@@ -138,7 +138,7 @@ SessionKiller* SessionKiller::get(OperationContext* ctx) {
std::shared_ptr<SessionKiller::Result> SessionKiller::kill(
OperationContext* opCtx, const KillAllSessionsByPatternSet& toKill) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// Save a shared_ptr to the current reapResults (i.e. the next thing to get killed).
auto reapResults = _reapResults;
@@ -164,7 +164,7 @@ std::shared_ptr<SessionKiller::Result> SessionKiller::kill(
return {reapResults.result, reapResults.result->get_ptr()};
}
-void SessionKiller::_periodicKill(OperationContext* opCtx, stdx::unique_lock<stdx::mutex>& lk) {
+void SessionKiller::_periodicKill(OperationContext* opCtx, stdx::unique_lock<Latch>& lk) {
// Pull our current workload onto the stack. Swap it for empties.
decltype(_nextToReap) nextToReap;
decltype(_reapResults) reapResults;
diff --git a/src/mongo/db/session_killer.h b/src/mongo/db/session_killer.h
index 44f58509d70..8e9cd89cdfa 100644
--- a/src/mongo/db/session_killer.h
+++ b/src/mongo/db/session_killer.h
@@ -37,8 +37,8 @@
#include "mongo/base/status_with.h"
#include "mongo/db/kill_sessions.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/util/net/hostandport.h"
@@ -125,13 +125,13 @@ private:
std::shared_ptr<boost::optional<Result>> result;
};
- void _periodicKill(OperationContext* opCtx, stdx::unique_lock<stdx::mutex>& lk);
+ void _periodicKill(OperationContext* opCtx, stdx::unique_lock<Latch>& lk);
KillFunc _killFunc;
stdx::thread _thread;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SessionKiller::_mutex");
stdx::condition_variable _callerCV;
stdx::condition_variable _killerCV;
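The signature change above keeps _periodicKill taking the caller's already-held lock by reference, now as stdx::unique_lock<Latch>&. Below is a minimal standard-library analogue of that hand-off, including the set-flag-then-notify shutdown sequence; the class and member names are hypothetical and it does not reproduce the SessionKiller's actual work loop.

// Illustration only: passing the held unique_lock into a private helper.
#include <condition_variable>
#include <mutex>
#include <thread>

class PeriodicWorker {
public:
    void run() {
        std::unique_lock<std::mutex> lk(_mutex);  // acquired once, up front
        while (!_inShutdown) {
            _doOneRound(lk);   // helper borrows the lock the loop already holds
            _wakeCV.wait(lk);  // sleep until new work or shutdown
        }
    }

    void shutdown() {
        {
            std::lock_guard<std::mutex> lk(_mutex);
            _inShutdown = true;
        }
        _wakeCV.notify_one();
    }

private:
    // Taking unique_lock& documents that the caller owns _mutex for the whole
    // call, so the helper may drop, reacquire, or wait on it safely.
    void _doOneRound(std::unique_lock<std::mutex>& lk) {
        if (lk.owns_lock())  // always true here; the caller hands us its lock
            ++_rounds;       // stand-in for swapping pending work onto the stack
    }

    std::mutex _mutex;
    std::condition_variable _wakeCV;
    bool _inShutdown = false;
    int _rounds = 0;
};

int main() {
    PeriodicWorker worker;
    std::thread t([&] { worker.run(); });
    worker.shutdown();
    t.join();
    return 0;
}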
diff --git a/src/mongo/db/sessions_collection_config_server.cpp b/src/mongo/db/sessions_collection_config_server.cpp
index 63e16f14321..6e4e8fbc29b 100644
--- a/src/mongo/db/sessions_collection_config_server.cpp
+++ b/src/mongo/db/sessions_collection_config_server.cpp
@@ -96,7 +96,7 @@ Status SessionsCollectionConfigServer::setupSessionsCollection(OperationContext*
return {ErrorCodes::ShardingStateNotInitialized, "sharding state is not yet initialized"};
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
{
auto res = _shardCollectionIfNeeded(opCtx);
if (!res.isOK()) {
diff --git a/src/mongo/db/sessions_collection_config_server.h b/src/mongo/db/sessions_collection_config_server.h
index bdfac76abff..701d055772a 100644
--- a/src/mongo/db/sessions_collection_config_server.h
+++ b/src/mongo/db/sessions_collection_config_server.h
@@ -33,7 +33,7 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/sessions_collection_sharded.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -64,7 +64,7 @@ private:
Status _shardCollectionIfNeeded(OperationContext* opCtx);
Status _generateIndexesIfNeeded(OperationContext* opCtx);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SessionsCollectionConfigServer::_mutex");
};
} // namespace mongo
diff --git a/src/mongo/db/sessions_collection_mock.cpp b/src/mongo/db/sessions_collection_mock.cpp
index 33e75ab842c..00992744589 100644
--- a/src/mongo/db/sessions_collection_mock.cpp
+++ b/src/mongo/db/sessions_collection_mock.cpp
@@ -60,22 +60,22 @@ Status MockSessionsCollectionImpl::removeRecords(const LogicalSessionIdSet& sess
}
void MockSessionsCollectionImpl::add(LogicalSessionRecord record) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sessions.insert({record.getId(), std::move(record)});
}
void MockSessionsCollectionImpl::remove(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sessions.erase(lsid);
}
bool MockSessionsCollectionImpl::has(LogicalSessionId lsid) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _sessions.find(lsid) != _sessions.end();
}
void MockSessionsCollectionImpl::clearSessions() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sessions.clear();
}
@@ -93,7 +93,7 @@ Status MockSessionsCollectionImpl::_refreshSessions(const LogicalSessionRecordSe
}
Status MockSessionsCollectionImpl::_removeRecords(const LogicalSessionIdSet& sessions) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
for (auto& lsid : sessions) {
_sessions.erase(lsid);
}
@@ -104,7 +104,7 @@ Status MockSessionsCollectionImpl::_removeRecords(const LogicalSessionIdSet& ses
StatusWith<LogicalSessionIdSet> MockSessionsCollectionImpl::findRemovedSessions(
OperationContext* opCtx, const LogicalSessionIdSet& sessions) {
LogicalSessionIdSet lsids;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
for (auto& lsid : sessions) {
if (_sessions.find(lsid) == _sessions.end()) {
lsids.emplace(lsid);
diff --git a/src/mongo/db/sessions_collection_mock.h b/src/mongo/db/sessions_collection_mock.h
index a31a4f7fc5a..da6477692fa 100644
--- a/src/mongo/db/sessions_collection_mock.h
+++ b/src/mongo/db/sessions_collection_mock.h
@@ -33,7 +33,7 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/sessions_collection.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
@@ -89,7 +89,7 @@ private:
Status _refreshSessions(const LogicalSessionRecordSet& sessions);
Status _removeRecords(const LogicalSessionIdSet& sessions);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MockSessionsCollectionImpl::_mutex");
SessionMap _sessions;
RefreshHook _refresh;
diff --git a/src/mongo/db/sessions_collection_rs.h b/src/mongo/db/sessions_collection_rs.h
index d073969cdc2..0d52d3f52e4 100644
--- a/src/mongo/db/sessions_collection_rs.h
+++ b/src/mongo/db/sessions_collection_rs.h
@@ -37,7 +37,7 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/sessions_collection.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
@@ -108,7 +108,7 @@ private:
LocalCallback&& localCallback,
RemoteCallback&& remoteCallback);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SessionsCollectionRS::_mutex");
std::unique_ptr<RemoteCommandTargeter> _targeter;
};
diff --git a/src/mongo/db/snapshot_window_util.cpp b/src/mongo/db/snapshot_window_util.cpp
index 05a46b42e13..c06bb078d5e 100644
--- a/src/mongo/db/snapshot_window_util.cpp
+++ b/src/mongo/db/snapshot_window_util.cpp
@@ -38,7 +38,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/snapshot_window_options.h"
#include "mongo/db/storage/storage_engine.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
@@ -55,7 +55,7 @@ namespace SnapshotWindowUtil {
// another, since they act on and modify the same storage parameters. Further guards the static
// variables "_snapshotWindowLastDecreasedAt" and "_snapshotWindowLastIncreasedAt" used in
// increaseTargetSnapshotWindowSize() and decreaseTargetSnapshotWindowSize().
-stdx::mutex snapshotWindowMutex;
+Mutex snapshotWindowMutex;
namespace {
@@ -92,7 +92,7 @@ void increaseTargetSnapshotWindowSize(OperationContext* opCtx) {
return;
}
- stdx::unique_lock<stdx::mutex> lock(snapshotWindowMutex);
+ stdx::unique_lock<Latch> lock(snapshotWindowMutex);
// Tracks the last time that the snapshot window was increased so that it does not go up so fast
// that the storage engine does not have time to improve snapshot availability.
@@ -150,7 +150,7 @@ void decreaseTargetSnapshotWindowSize(OperationContext* opCtx) {
return;
}
- stdx::unique_lock<stdx::mutex> lock(snapshotWindowMutex);
+ stdx::unique_lock<Latch> lock(snapshotWindowMutex);
StorageEngine* engine = opCtx->getServiceContext()->getStorageEngine();
if (engine && engine->isCacheUnderPressure(opCtx)) {
diff --git a/src/mongo/db/stats/server_write_concern_metrics.cpp b/src/mongo/db/stats/server_write_concern_metrics.cpp
index c36431ca3f3..bfc14025d73 100644
--- a/src/mongo/db/stats/server_write_concern_metrics.cpp
+++ b/src/mongo/db/stats/server_write_concern_metrics.cpp
@@ -58,7 +58,7 @@ void ServerWriteConcernMetrics::recordWriteConcernForInserts(
return;
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_insertMetrics.recordWriteConcern(writeConcernOptions, numInserts);
}
@@ -68,7 +68,7 @@ void ServerWriteConcernMetrics::recordWriteConcernForUpdate(
return;
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_updateMetrics.recordWriteConcern(writeConcernOptions);
}
@@ -78,7 +78,7 @@ void ServerWriteConcernMetrics::recordWriteConcernForDelete(
return;
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_deleteMetrics.recordWriteConcern(writeConcernOptions);
}
@@ -87,7 +87,7 @@ BSONObj ServerWriteConcernMetrics::toBSON() const {
return BSONObj();
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
BSONObjBuilder builder;
diff --git a/src/mongo/db/stats/server_write_concern_metrics.h b/src/mongo/db/stats/server_write_concern_metrics.h
index 524c4fce917..b1e17f53e38 100644
--- a/src/mongo/db/stats/server_write_concern_metrics.h
+++ b/src/mongo/db/stats/server_write_concern_metrics.h
@@ -97,7 +97,7 @@ private:
StringMap<std::uint64_t> wTagCounts;
};
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ServerWriteConcernMetrics::_mutex");
WriteConcernMetricsForOperationType _insertMetrics;
WriteConcernMetricsForOperationType _updateMetrics;
WriteConcernMetricsForOperationType _deleteMetrics;
diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine.cpp b/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
index 977ba1d6b52..370ececd244 100644
--- a/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
+++ b/src/mongo/db/storage/biggie/biggie_kv_engine.cpp
@@ -94,7 +94,7 @@ std::unique_ptr<mongo::RecordStore> KVEngine::getRecordStore(OperationContext* o
}
bool KVEngine::trySwapMaster(StringStore& newMaster, uint64_t version) {
- stdx::lock_guard<stdx::mutex> lock(_masterLock);
+ stdx::lock_guard<Latch> lock(_masterLock);
invariant(!newMaster.hasBranch() && !_master.hasBranch());
if (_masterVersion != version)
return false;
diff --git a/src/mongo/db/storage/biggie/biggie_kv_engine.h b/src/mongo/db/storage/biggie/biggie_kv_engine.h
index 97c836b523a..a9a3582cfdd 100644
--- a/src/mongo/db/storage/biggie/biggie_kv_engine.h
+++ b/src/mongo/db/storage/biggie/biggie_kv_engine.h
@@ -154,7 +154,7 @@ public:
* Returns a pair of the current version and copy of tree of the master.
*/
std::pair<uint64_t, StringStore> getMasterInfo() {
- stdx::lock_guard<stdx::mutex> lock(_masterLock);
+ stdx::lock_guard<Latch> lock(_masterLock);
return std::make_pair(_masterVersion, _master);
}
@@ -170,7 +170,7 @@ private:
std::map<std::string, bool> _idents; // TODO : replace with a query to _master.
std::unique_ptr<VisibilityManager> _visibilityManager;
- mutable stdx::mutex _masterLock;
+ mutable Mutex _masterLock = MONGO_MAKE_LATCH("KVEngine::_masterLock");
StringStore _master;
uint64_t _masterVersion = 0;
};
diff --git a/src/mongo/db/storage/biggie/biggie_record_store.cpp b/src/mongo/db/storage/biggie/biggie_record_store.cpp
index 4c47df9cd7b..8e2aadc7041 100644
--- a/src/mongo/db/storage/biggie/biggie_record_store.cpp
+++ b/src/mongo/db/storage/biggie/biggie_record_store.cpp
@@ -120,7 +120,7 @@ bool RecordStore::isCapped() const {
}
void RecordStore::setCappedCallback(CappedCallback* cb) {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
_cappedCallback = cb;
}
@@ -264,7 +264,7 @@ void RecordStore::cappedTruncateAfter(OperationContext* opCtx, RecordId end, boo
auto endIt = workingCopy->upper_bound(_postfix);
while (recordIt != endIt) {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
if (_cappedCallback) {
// Documents are guaranteed to have a RecordId at the end of the KeyString, unlike
// unique indexes.
@@ -357,11 +357,11 @@ void RecordStore::_cappedDeleteAsNeeded(OperationContext* opCtx, StringStore* wo
auto recordIt = workingCopy->lower_bound(_prefix);
// Ensure only one thread at a time can do deletes, otherwise they'll conflict.
- stdx::lock_guard<stdx::mutex> cappedDeleterLock(_cappedDeleterMutex);
+ stdx::lock_guard<Latch> cappedDeleterLock(_cappedDeleterMutex);
while (_cappedAndNeedDelete(opCtx, workingCopy)) {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
RecordId rid = RecordId(extractRecordId(recordIt->first));
if (_isOplog && _visibilityManager->isFirstHidden(rid)) {
diff --git a/src/mongo/db/storage/biggie/biggie_record_store.h b/src/mongo/db/storage/biggie/biggie_record_store.h
index e8dee66da1c..005d49ee293 100644
--- a/src/mongo/db/storage/biggie/biggie_record_store.h
+++ b/src/mongo/db/storage/biggie/biggie_record_store.h
@@ -38,7 +38,7 @@
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace biggie {
@@ -138,10 +138,11 @@ private:
std::string _prefix;
std::string _postfix;
- mutable stdx::mutex _cappedCallbackMutex; // Guards _cappedCallback
+ mutable Mutex _cappedCallbackMutex =
+ MONGO_MAKE_LATCH("RecordStore::_cappedCallbackMutex"); // Guards _cappedCallback
CappedCallback* _cappedCallback;
- mutable stdx::mutex _cappedDeleterMutex;
+ mutable Mutex _cappedDeleterMutex = MONGO_MAKE_LATCH("RecordStore::_cappedDeleterMutex");
AtomicWord<long long> _highestRecordId{1};
AtomicWord<long long> _numRecords{0};
diff --git a/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp b/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp
index d9921bc6472..94a869727b0 100644
--- a/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp
+++ b/src/mongo/db/storage/biggie/biggie_visibility_manager.cpp
@@ -56,7 +56,7 @@ public:
virtual void rollback() {
_visibilityManager->dealtWithRecord(_rid);
- stdx::lock_guard<stdx::mutex> lk(_rs->_cappedCallbackMutex);
+ stdx::lock_guard<Latch> lk(_rs->_cappedCallbackMutex);
if (_rs->_cappedCallback)
_rs->_cappedCallback->notifyCappedWaitersIfNeeded();
}
@@ -68,7 +68,7 @@ private:
};
void VisibilityManager::dealtWithRecord(RecordId rid) {
- stdx::lock_guard<stdx::mutex> lock(_stateLock);
+ stdx::lock_guard<Latch> lock(_stateLock);
_uncommittedRecords.erase(rid);
_opsBecameVisibleCV.notify_all();
}
@@ -76,7 +76,7 @@ void VisibilityManager::dealtWithRecord(RecordId rid) {
void VisibilityManager::addUncommittedRecord(OperationContext* opCtx,
RecordStore* rs,
RecordId rid) {
- stdx::lock_guard<stdx::mutex> lock(_stateLock);
+ stdx::lock_guard<Latch> lock(_stateLock);
_uncommittedRecords.insert(rid);
opCtx->recoveryUnit()->registerChange(std::make_unique<VisibilityManagerChange>(this, rs, rid));
@@ -85,13 +85,13 @@ void VisibilityManager::addUncommittedRecord(OperationContext* opCtx,
}
RecordId VisibilityManager::getAllCommittedRecord() {
- stdx::lock_guard<stdx::mutex> lock(_stateLock);
+ stdx::lock_guard<Latch> lock(_stateLock);
return _uncommittedRecords.empty() ? _highestSeen
: RecordId(_uncommittedRecords.begin()->repr() - 1);
}
bool VisibilityManager::isFirstHidden(RecordId rid) {
- stdx::lock_guard<stdx::mutex> lock(_stateLock);
+ stdx::lock_guard<Latch> lock(_stateLock);
if (_uncommittedRecords.empty())
return false;
return *_uncommittedRecords.begin() == rid;
@@ -100,7 +100,7 @@ bool VisibilityManager::isFirstHidden(RecordId rid) {
void VisibilityManager::waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) {
invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork());
- stdx::unique_lock<stdx::mutex> lock(_stateLock);
+ stdx::unique_lock<Latch> lock(_stateLock);
const RecordId waitFor = _highestSeen;
opCtx->waitForConditionOrInterrupt(_opsBecameVisibleCV, lock, [&] {
return _uncommittedRecords.empty() || *_uncommittedRecords.begin() > waitFor;
diff --git a/src/mongo/db/storage/biggie/biggie_visibility_manager.h b/src/mongo/db/storage/biggie/biggie_visibility_manager.h
index 387b7edc0d0..8370ba0c990 100644
--- a/src/mongo/db/storage/biggie/biggie_visibility_manager.h
+++ b/src/mongo/db/storage/biggie/biggie_visibility_manager.h
@@ -31,7 +31,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/record_id.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
@@ -76,7 +76,8 @@ public:
void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx);
private:
- mutable stdx::mutex _stateLock; // Protects the values below.
+ mutable Mutex _stateLock =
+ MONGO_MAKE_LATCH("VisibilityManager::_stateLock"); // Protects the values below.
RecordId _highestSeen = RecordId();
// Used to wait for all earlier oplog writes to be visible.
diff --git a/src/mongo/db/storage/durable_catalog_impl.cpp b/src/mongo/db/storage/durable_catalog_impl.cpp
index fc88ca957ff..0bc79d049ba 100644
--- a/src/mongo/db/storage/durable_catalog_impl.cpp
+++ b/src/mongo/db/storage/durable_catalog_impl.cpp
@@ -151,7 +151,7 @@ public:
virtual void commit(boost::optional<Timestamp>) {}
virtual void rollback() {
- stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock);
+ stdx::lock_guard<Latch> lk(_catalog->_identsLock);
_catalog->_idents.erase(_ident);
}
@@ -166,7 +166,7 @@ public:
virtual void commit(boost::optional<Timestamp>) {}
virtual void rollback() {
- stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock);
+ stdx::lock_guard<Latch> lk(_catalog->_identsLock);
_catalog->_idents[_ident] = _entry;
}
@@ -471,7 +471,7 @@ void DurableCatalogImpl::init(OperationContext* opCtx) {
}
std::vector<NamespaceString> DurableCatalogImpl::getAllCollections() const {
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
std::vector<NamespaceString> result;
for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) {
result.push_back(NamespaceString(it->first));
@@ -487,7 +487,7 @@ Status DurableCatalogImpl::_addEntry(OperationContext* opCtx,
const string ident = _newUniqueIdent(nss, "collection");
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
Entry& old = _idents[nss.toString()];
if (!old.ident.empty()) {
return Status(ErrorCodes::NamespaceExists, "collection already exists");
@@ -517,7 +517,7 @@ Status DurableCatalogImpl::_addEntry(OperationContext* opCtx,
}
std::string DurableCatalogImpl::getCollectionIdent(const NamespaceString& nss) const {
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
NSToIdentMap::const_iterator it = _idents.find(nss.toString());
invariant(it != _idents.end());
return it->second.ident;
@@ -536,7 +536,7 @@ BSONObj DurableCatalogImpl::_findEntry(OperationContext* opCtx,
RecordId* out) const {
RecordId dl;
{
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
NSToIdentMap::const_iterator it = _idents.find(nss.toString());
invariant(it != _idents.end(), str::stream() << "Did not find collection. Ns: " << nss);
dl = it->second.storedLoc;
@@ -652,7 +652,7 @@ Status DurableCatalogImpl::_replaceEntry(OperationContext* opCtx,
fassert(28522, status);
}
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
const NSToIdentMap::iterator fromIt = _idents.find(fromNss.toString());
invariant(fromIt != _idents.end());
@@ -673,7 +673,7 @@ Status DurableCatalogImpl::_replaceEntry(OperationContext* opCtx,
Status DurableCatalogImpl::_removeEntry(OperationContext* opCtx, const NamespaceString& nss) {
invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
const NSToIdentMap::iterator it = _idents.find(nss.toString());
if (it == _idents.end()) {
return Status(ErrorCodes::NamespaceNotFound, "collection not found");
@@ -693,7 +693,7 @@ std::vector<std::string> DurableCatalogImpl::getAllIdentsForDB(StringData db) co
std::vector<std::string> v;
{
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) {
NamespaceString ns(it->first);
if (ns.db() != db)
@@ -761,7 +761,7 @@ StatusWith<std::string> DurableCatalogImpl::newOrphanedIdent(OperationContext* o
NamespaceString::kOrphanCollectionPrefix + identNs)
.ns();
- stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ stdx::lock_guard<Latch> lk(_identsLock);
Entry& old = _idents[ns];
if (!old.ident.empty()) {
return Status(ErrorCodes::NamespaceExists,
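The rollback handlers above reacquire _identsLock before undoing the in-memory ident map change, because the rollback fires when the storage transaction aborts, outside the scope that originally took the lock. A compact standard-library sketch of that shape follows; MiniCatalog and its members are hypothetical stand-ins, not the DurableCatalogImpl API.

// Illustration only: a rollback action that re-locks before undoing a write.
#include <functional>
#include <map>
#include <mutex>
#include <string>
#include <vector>

class MiniCatalog {
public:
    // Insert an entry and return a rollback action that undoes it.
    std::function<void()> addEntry(const std::string& ns, const std::string& ident) {
        std::lock_guard<std::mutex> lk(_identsLock);
        _idents[ns] = ident;
        // The rollback closure takes the lock again when it eventually runs,
        // since it executes well after addEntry has returned and released it.
        return [this, ns] {
            std::lock_guard<std::mutex> lk(_identsLock);
            _idents.erase(ns);
        };
    }

    bool has(const std::string& ns) const {
        std::lock_guard<std::mutex> lk(_identsLock);
        return _idents.count(ns) != 0;
    }

private:
    mutable std::mutex _identsLock;  // guards _idents
    std::map<std::string, std::string> _idents;
};

int main() {
    MiniCatalog catalog;
    std::vector<std::function<void()>> rollbackActions;
    rollbackActions.push_back(catalog.addEntry("test.coll", "collection-1"));
    // On abort, run the registered rollback actions in reverse order.
    for (auto it = rollbackActions.rbegin(); it != rollbackActions.rend(); ++it)
        (*it)();
    return catalog.has("test.coll") ? 1 : 0;
}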
diff --git a/src/mongo/db/storage/durable_catalog_impl.h b/src/mongo/db/storage/durable_catalog_impl.h
index b7683e9da68..f99ff41da5b 100644
--- a/src/mongo/db/storage/durable_catalog_impl.h
+++ b/src/mongo/db/storage/durable_catalog_impl.h
@@ -40,7 +40,7 @@
#include "mongo/db/storage/bson_collection_catalog_entry.h"
#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/kv/kv_prefix.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -261,7 +261,7 @@ private:
};
typedef std::map<std::string, Entry> NSToIdentMap;
NSToIdentMap _idents;
- mutable stdx::mutex _identsLock;
+ mutable Mutex _identsLock = MONGO_MAKE_LATCH("DurableCatalogImpl::_identsLock");
// Manages the feature document that may be present in the DurableCatalogImpl. '_featureTracker'
// is guaranteed to be non-null after DurableCatalogImpl::init() is called.
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
index 1f689ddd607..597bc513d20 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.cpp
@@ -43,7 +43,7 @@ namespace mongo {
RecoveryUnit* EphemeralForTestEngine::newRecoveryUnit() {
return new EphemeralForTestRecoveryUnit([this]() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
JournalListener::Token token = _journalListener->getToken();
_journalListener->onDurable(token);
});
@@ -55,14 +55,14 @@ Status EphemeralForTestEngine::createRecordStore(OperationContext* opCtx,
const CollectionOptions& options) {
// Register the ident in the `_dataMap` (for `getAllIdents`). Remainder of work done in
// `getRecordStore`.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dataMap[ident] = {};
return Status::OK();
}
std::unique_ptr<RecordStore> EphemeralForTestEngine::getRecordStore(
OperationContext* opCtx, StringData ns, StringData ident, const CollectionOptions& options) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (options.capped) {
return std::make_unique<EphemeralForTestRecordStore>(
ns,
@@ -77,7 +77,7 @@ std::unique_ptr<RecordStore> EphemeralForTestEngine::getRecordStore(
std::unique_ptr<RecordStore> EphemeralForTestEngine::makeTemporaryRecordStore(
OperationContext* opCtx, StringData ident) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dataMap[ident] = {};
return std::make_unique<EphemeralForTestRecordStore>(ident, &_dataMap[ident]);
}
@@ -88,14 +88,14 @@ Status EphemeralForTestEngine::createSortedDataInterface(OperationContext* opCtx
const IndexDescriptor* desc) {
// Register the ident in `_dataMap` (for `getAllIdents`). Remainder of work done in
// `getSortedDataInterface`.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dataMap[ident] = {};
return Status::OK();
}
std::unique_ptr<SortedDataInterface> EphemeralForTestEngine::getSortedDataInterface(
OperationContext* opCtx, StringData ident, const IndexDescriptor* desc) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return getEphemeralForTestBtreeImpl(Ordering::make(desc->keyPattern()),
desc->unique(),
desc->parentNS(),
@@ -105,7 +105,7 @@ std::unique_ptr<SortedDataInterface> EphemeralForTestEngine::getSortedDataInterf
}
Status EphemeralForTestEngine::dropIdent(OperationContext* opCtx, StringData ident) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_dataMap.erase(ident);
return Status::OK();
}
@@ -117,7 +117,7 @@ int64_t EphemeralForTestEngine::getIdentSize(OperationContext* opCtx, StringData
std::vector<std::string> EphemeralForTestEngine::getAllIdents(OperationContext* opCtx) const {
std::vector<std::string> all;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (DataMap::const_iterator it = _dataMap.begin(); it != _dataMap.end(); ++it) {
all.push_back(it->first);
}
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
index a083f9f3a4b..b51b285ef2c 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
@@ -33,7 +33,7 @@
#include "mongo/db/storage/journal_listener.h"
#include "mongo/db/storage/kv/kv_engine.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/string_map.h"
namespace mongo {
@@ -102,13 +102,12 @@ public:
virtual bool hasIdent(OperationContext* opCtx, StringData ident) const {
return _dataMap.find(ident) != _dataMap.end();
- ;
}
std::vector<std::string> getAllIdents(OperationContext* opCtx) const;
void setJournalListener(JournalListener* jl) final {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_journalListener = jl;
}
@@ -127,7 +126,7 @@ public:
private:
typedef StringMap<std::shared_ptr<void>> DataMap;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("EphemeralForTestEngine::_mutex");
DataMap _dataMap; // All actual data is owned in here
// Notified when we write as everything is considered "journalled" since repl depends on it.
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
index 2fdbaaa579e..3bd7ffb0ce5 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
@@ -35,7 +35,7 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
diff --git a/src/mongo/db/storage/flow_control.cpp b/src/mongo/db/storage/flow_control.cpp
index ac010f891ae..c6976fd77b2 100644
--- a/src/mongo/db/storage/flow_control.cpp
+++ b/src/mongo/db/storage/flow_control.cpp
@@ -172,7 +172,7 @@ double FlowControl::_getLocksPerOp() {
Sample backOne;
std::size_t numSamples;
{
- stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex);
+ stdx::lock_guard<Latch> lk(_sampledOpsMutex);
numSamples = _sampledOpsApplied.size();
if (numSamples >= 2) {
backTwo = _sampledOpsApplied[numSamples - 2];
@@ -399,7 +399,7 @@ std::int64_t FlowControl::_approximateOpsBetween(Timestamp prevTs, Timestamp cur
std::int64_t prevApplied = -1;
std::int64_t currApplied = -1;
- stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex);
+ stdx::lock_guard<Latch> lk(_sampledOpsMutex);
for (auto&& sample : _sampledOpsApplied) {
if (prevApplied == -1 && prevTs.asULL() <= std::get<0>(sample)) {
prevApplied = std::get<1>(sample);
@@ -427,7 +427,7 @@ void FlowControl::sample(Timestamp timestamp, std::uint64_t opsApplied) {
return;
}
- stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex);
+ stdx::lock_guard<Latch> lk(_sampledOpsMutex);
_numOpsSinceStartup += opsApplied;
if (_numOpsSinceStartup - _lastSample <
static_cast<std::size_t>(gFlowControlSamplePeriod.load())) {
@@ -469,7 +469,7 @@ void FlowControl::sample(Timestamp timestamp, std::uint64_t opsApplied) {
void FlowControl::_trimSamples(const Timestamp trimTo) {
int numTrimmed = 0;
- stdx::lock_guard<stdx::mutex> lk(_sampledOpsMutex);
+ stdx::lock_guard<Latch> lk(_sampledOpsMutex);
// Always leave at least two samples for calculating `locksPerOp`.
while (_sampledOpsApplied.size() > 2 &&
std::get<0>(_sampledOpsApplied.front()) < trimTo.asULL()) {
diff --git a/src/mongo/db/storage/flow_control.h b/src/mongo/db/storage/flow_control.h
index 64f0d0b1d00..17b465b9d21 100644
--- a/src/mongo/db/storage/flow_control.h
+++ b/src/mongo/db/storage/flow_control.h
@@ -37,7 +37,7 @@
#include "mongo/db/repl/replication_coordinator_fwd.h"
#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -125,7 +125,7 @@ private:
// Use an int64_t as this is serialized to bson which does not support unsigned 64-bit numbers.
AtomicWord<std::int64_t> _isLaggedTimeMicros{0};
- mutable stdx::mutex _sampledOpsMutex;
+ mutable Mutex _sampledOpsMutex = MONGO_MAKE_LATCH("FlowControl::_sampledOpsMutex");
std::deque<Sample> _sampledOpsApplied;
// These values are used in the sampling process.
diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
index ef5b441d989..44337fffc49 100644
--- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
+++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp
@@ -46,7 +46,7 @@ KVDropPendingIdentReaper::KVDropPendingIdentReaper(KVEngine* engine) : _engine(e
void KVDropPendingIdentReaper::addDropPendingIdent(const Timestamp& dropTimestamp,
const NamespaceString& nss,
StringData ident) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
const auto equalRange = _dropPendingIdents.equal_range(dropTimestamp);
const auto& lowerBound = equalRange.first;
const auto& upperBound = equalRange.second;
@@ -65,7 +65,7 @@ void KVDropPendingIdentReaper::addDropPendingIdent(const Timestamp& dropTimestam
}
boost::optional<Timestamp> KVDropPendingIdentReaper::getEarliestDropTimestamp() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
auto it = _dropPendingIdents.cbegin();
if (it == _dropPendingIdents.cend()) {
return boost::none;
@@ -74,7 +74,7 @@ boost::optional<Timestamp> KVDropPendingIdentReaper::getEarliestDropTimestamp()
}
std::set<std::string> KVDropPendingIdentReaper::getAllIdents() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
std::set<std::string> idents;
for (const auto& entry : _dropPendingIdents) {
const auto& identInfo = entry.second;
@@ -87,7 +87,7 @@ std::set<std::string> KVDropPendingIdentReaper::getAllIdents() const {
void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, const Timestamp& ts) {
DropPendingIdents toDrop;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (auto it = _dropPendingIdents.cbegin();
it != _dropPendingIdents.cend() && it->first < ts;
++it) {
@@ -125,7 +125,7 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons
{
// Entries must be removed AFTER drops are completed, so that getEarliestDropTimestamp()
// returns appropriate results.
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (const auto& timestampAndIdentInfo : toDrop) {
const auto& dropTimestamp = timestampAndIdentInfo.first;
// This may return zero if _dropPendingIdents was cleared using clearDropPendingState().
@@ -135,7 +135,7 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons
}
void KVDropPendingIdentReaper::clearDropPendingState() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_dropPendingIdents.clear();
}
diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h
index c249d9af0ba..75f13690a3d 100644
--- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h
+++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h
@@ -38,7 +38,7 @@
#include "mongo/bson/timestamp.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/storage/kv/kv_engine.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -113,7 +113,7 @@ private:
KVEngine* const _engine;
// Guards access to member variables below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("KVDropPendingIdentReaper::_mutex");
// Drop-pending idents. Ordered by drop timestamp.
DropPendingIdents _dropPendingIdents;
diff --git a/src/mongo/db/storage/kv/kv_prefix.cpp b/src/mongo/db/storage/kv/kv_prefix.cpp
index 6b88dc22c3b..1a54a82f6a1 100644
--- a/src/mongo/db/storage/kv/kv_prefix.cpp
+++ b/src/mongo/db/storage/kv/kv_prefix.cpp
@@ -31,7 +31,7 @@
namespace mongo {
int64_t KVPrefix::_nextValue = 0;
-stdx::mutex KVPrefix::_nextValueMutex;
+Mutex KVPrefix::_nextValueMutex = MONGO_MAKE_LATCH();
const KVPrefix KVPrefix::kNotPrefixed = KVPrefix(-1);
std::string KVPrefix::toString() const {
@@ -54,7 +54,7 @@ std::string KVPrefix::toString() const {
return;
}
- stdx::lock_guard<stdx::mutex> lk(_nextValueMutex);
+ stdx::lock_guard<Latch> lk(_nextValueMutex);
_nextValue = largestPrefix._value + 1;
}
@@ -67,7 +67,7 @@ std::string KVPrefix::toString() const {
}
/* static */ KVPrefix KVPrefix::generateNextPrefix() {
- stdx::lock_guard<stdx::mutex> lk(_nextValueMutex);
+ stdx::lock_guard<Latch> lk(_nextValueMutex);
return KVPrefix(_nextValue++);
}
} // namespace mongo
diff --git a/src/mongo/db/storage/kv/kv_prefix.h b/src/mongo/db/storage/kv/kv_prefix.h
index 6a785dc19db..45a1e891c0e 100644
--- a/src/mongo/db/storage/kv/kv_prefix.h
+++ b/src/mongo/db/storage/kv/kv_prefix.h
@@ -33,7 +33,7 @@
#include "mongo/bson/util/builder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/storage/storage_options.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -93,7 +93,7 @@ private:
explicit KVPrefix(int64_t value) : _value(value) {}
int64_t _value;
- static stdx::mutex _nextValueMutex;
+ static Mutex _nextValueMutex;
static int64_t _nextValue;
};
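KVPrefix keeps its class-wide counter behind a static mutex that is declared in the header and defined once in the .cpp, now via MONGO_MAKE_LATCH(). The single-file standard-library sketch below shows the same declare-then-define-out-of-line arrangement; PrefixSketch and its members are invented for illustration.

// Illustration only: a static member mutex serializing a shared counter.
#include <cstdint>
#include <mutex>

class PrefixSketch {
public:
    explicit PrefixSketch(std::int64_t value) : _value(value) {}

    // Hands out strictly increasing prefixes; the static mutex serializes
    // access to the shared counter across all threads.
    static PrefixSketch generateNext() {
        std::lock_guard<std::mutex> lk(_nextValueMutex);
        return PrefixSketch(_nextValue++);
    }

    std::int64_t value() const {
        return _value;
    }

private:
    std::int64_t _value;
    static std::mutex _nextValueMutex;  // declaration; defined out of line below
    static std::int64_t _nextValue;
};

// Out-of-line definitions, the single-file equivalent of kv_prefix.cpp.
std::mutex PrefixSketch::_nextValueMutex;
std::int64_t PrefixSketch::_nextValue = 0;

int main() {
    auto a = PrefixSketch::generateNext();
    auto b = PrefixSketch::generateNext();
    return b.value() > a.value() ? 0 : 1;
}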
diff --git a/src/mongo/db/storage/kv/storage_engine_test.cpp b/src/mongo/db/storage/kv/storage_engine_test.cpp
index 2aae21eafb4..cf3f7d10b70 100644
--- a/src/mongo/db/storage/kv/storage_engine_test.cpp
+++ b/src/mongo/db/storage/kv/storage_engine_test.cpp
@@ -431,13 +431,13 @@ TEST_F(TimestampKVEngineTest, TimestampListeners) {
}
TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
bool changes[4] = {false, false, false, false};
TimestampListener first(checkpoint, [&](Timestamp timestamp) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
+ stdx::lock_guard<Latch> lock(mutex);
if (!changes[0]) {
changes[0] = true;
cv.notify_all();
@@ -445,7 +445,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
});
TimestampListener second(oldest, [&](Timestamp timestamp) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
+ stdx::lock_guard<Latch> lock(mutex);
if (!changes[1]) {
changes[1] = true;
cv.notify_all();
@@ -453,7 +453,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
});
TimestampListener third(stable, [&](Timestamp timestamp) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
+ stdx::lock_guard<Latch> lock(mutex);
if (!changes[2]) {
changes[2] = true;
cv.notify_all();
@@ -461,7 +461,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
});
TimestampListener fourth(stable, [&](Timestamp timestamp) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
+ stdx::lock_guard<Latch> lock(mutex);
if (!changes[3]) {
changes[3] = true;
cv.notify_all();
@@ -474,7 +474,7 @@ TEST_F(TimestampKVEngineTest, TimestampMonitorNotifiesListeners) {
_storageEngine->getTimestampMonitor()->addListener(&fourth);
// Wait until all 4 listeners get notified at least once.
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] {
for (auto const& change : changes) {
if (!change) {
diff --git a/src/mongo/db/storage/mobile/mobile_kv_engine.h b/src/mongo/db/storage/mobile/mobile_kv_engine.h
index 3762ccf0878..0e0b3ab17e3 100644
--- a/src/mongo/db/storage/mobile/mobile_kv_engine.h
+++ b/src/mongo/db/storage/mobile/mobile_kv_engine.h
@@ -35,7 +35,7 @@
#include "mongo/db/storage/kv/kv_engine.h"
#include "mongo/db/storage/mobile/mobile_options.h"
#include "mongo/db/storage/mobile/mobile_session_pool.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
#include "mongo/util/string_map.h"
@@ -124,7 +124,7 @@ public:
std::vector<std::string> getAllIdents(OperationContext* opCtx) const override;
void setJournalListener(JournalListener* jl) override {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_journalListener = jl;
}
@@ -143,7 +143,7 @@ public:
private:
void maybeVacuum(Client* client, Date_t deadline);
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MobileKVEngine::_mutex");
void _initDBPath(const std::string& path);
std::int32_t _setSQLitePragma(const std::string& pragma, sqlite3* session);
diff --git a/src/mongo/db/storage/mobile/mobile_record_store.cpp b/src/mongo/db/storage/mobile/mobile_record_store.cpp
index 7543fcb1617..f60142d95fe 100644
--- a/src/mongo/db/storage/mobile/mobile_record_store.cpp
+++ b/src/mongo/db/storage/mobile/mobile_record_store.cpp
@@ -233,7 +233,7 @@ void MobileRecordStore::_initDataSizeIfNeeded_inlock(OperationContext* opCtx) co
}
long long MobileRecordStore::dataSize(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_dataSizeMutex);
+ stdx::lock_guard<Latch> lock(_dataSizeMutex);
_initDataSizeIfNeeded_inlock(opCtx);
return _dataSize;
}
@@ -255,7 +255,7 @@ void MobileRecordStore::_initNumRecsIfNeeded_inlock(OperationContext* opCtx) con
}
long long MobileRecordStore::numRecords(OperationContext* opCtx) const {
- stdx::lock_guard<stdx::mutex> lock(_numRecsMutex);
+ stdx::lock_guard<Latch> lock(_numRecsMutex);
_initNumRecsIfNeeded_inlock(opCtx);
return _numRecs;
}
@@ -420,7 +420,7 @@ public:
void commit(boost::optional<Timestamp>) override {}
void rollback() override {
- stdx::lock_guard<stdx::mutex> lock(_rs->_numRecsMutex);
+ stdx::lock_guard<Latch> lock(_rs->_numRecsMutex);
_rs->_numRecs -= _diff;
}
@@ -430,7 +430,7 @@ private:
};
void MobileRecordStore::_changeNumRecs(OperationContext* opCtx, int64_t diff) {
- stdx::lock_guard<stdx::mutex> lock(_numRecsMutex);
+ stdx::lock_guard<Latch> lock(_numRecsMutex);
opCtx->recoveryUnit()->registerChange(std::make_unique<NumRecsChange>(this, diff));
_initNumRecsIfNeeded_inlock(opCtx);
_numRecs += diff;
@@ -441,7 +441,7 @@ bool MobileRecordStore::_resetNumRecsIfNeeded(OperationContext* opCtx, int64_t n
int64_t currNumRecs = numRecords(opCtx);
if (currNumRecs != newNumRecs) {
wasReset = true;
- stdx::lock_guard<stdx::mutex> lock(_numRecsMutex);
+ stdx::lock_guard<Latch> lock(_numRecsMutex);
_numRecs = newNumRecs;
}
return wasReset;
@@ -457,7 +457,7 @@ public:
void commit(boost::optional<Timestamp>) override {}
void rollback() override {
- stdx::lock_guard<stdx::mutex> lock(_rs->_dataSizeMutex);
+ stdx::lock_guard<Latch> lock(_rs->_dataSizeMutex);
_rs->_dataSize -= _diff;
}
@@ -467,7 +467,7 @@ private:
};
void MobileRecordStore::_changeDataSize(OperationContext* opCtx, int64_t diff) {
- stdx::lock_guard<stdx::mutex> lock(_dataSizeMutex);
+ stdx::lock_guard<Latch> lock(_dataSizeMutex);
opCtx->recoveryUnit()->registerChange(std::make_unique<DataSizeChange>(this, diff));
_initDataSizeIfNeeded_inlock(opCtx);
_dataSize += diff;
@@ -479,7 +479,7 @@ bool MobileRecordStore::_resetDataSizeIfNeeded(OperationContext* opCtx, int64_t
if (currDataSize != _dataSize) {
wasReset = true;
- stdx::lock_guard<stdx::mutex> lock(_dataSizeMutex);
+ stdx::lock_guard<Latch> lock(_dataSizeMutex);
_dataSize = newDataSize;
}
return wasReset;
diff --git a/src/mongo/db/storage/mobile/mobile_record_store.h b/src/mongo/db/storage/mobile/mobile_record_store.h
index b08c14c9e44..d9457edd985 100644
--- a/src/mongo/db/storage/mobile/mobile_record_store.h
+++ b/src/mongo/db/storage/mobile/mobile_record_store.h
@@ -167,7 +167,7 @@ private:
bool _resetNumRecsIfNeeded(OperationContext* opCtx, int64_t newNumRecs);
mutable int64_t _numRecs;
- mutable stdx::mutex _numRecsMutex;
+ mutable Mutex _numRecsMutex = MONGO_MAKE_LATCH("MobileRecordStore::_numRecsMutex");
mutable bool _isNumRecsInitialized = false;
/**
@@ -188,7 +188,7 @@ private:
bool _resetDataSizeIfNeeded(OperationContext* opCtx, int64_t newDataSize);
mutable int64_t _dataSize;
- mutable stdx::mutex _dataSizeMutex;
+ mutable Mutex _dataSizeMutex = MONGO_MAKE_LATCH("MobileRecordStore::_dataSizeMutex");
mutable bool _isDataSizeInitialized = false;
};
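Both counters above stay behind mutable mutexes so that const accessors such as dataSize() and numRecords() can still lock and lazily populate the cached value. A minimal standard-library sketch of that idiom follows; CachedSize and its members are hypothetical.

// Illustration only: a mutable mutex guarding lazily initialized state that is
// read through const member functions.
#include <mutex>

class CachedSize {
public:
    long long dataSize() const {
        std::lock_guard<std::mutex> lk(_dataSizeMutex);  // legal in a const method: the mutex is mutable
        if (!_initialized) {
            _dataSize = _computeFromStorage();
            _initialized = true;
        }
        return _dataSize;
    }

private:
    long long _computeFromStorage() const {
        return 42;  // stand-in for an actual table scan / metadata read
    }

    mutable std::mutex _dataSizeMutex;  // guards the two fields below
    mutable long long _dataSize = 0;
    mutable bool _initialized = false;
};

int main() {
    CachedSize sizes;
    return sizes.dataSize() == 42 ? 0 : 1;
}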
diff --git a/src/mongo/db/storage/mobile/mobile_session_pool.cpp b/src/mongo/db/storage/mobile/mobile_session_pool.cpp
index 179a30cbe5e..a8a211bcc6b 100644
--- a/src/mongo/db/storage/mobile/mobile_session_pool.cpp
+++ b/src/mongo/db/storage/mobile/mobile_session_pool.cpp
@@ -43,7 +43,7 @@
#include "mongo/db/storage/mobile/mobile_session_pool.h"
#include "mongo/db/storage/mobile/mobile_sqlite_statement.h"
#include "mongo/db/storage/mobile/mobile_util.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -105,7 +105,7 @@ MobileSessionPool::~MobileSessionPool() {
}
std::unique_ptr<MobileSession> MobileSessionPool::getSession(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// We should never be able to get here after _shuttingDown is set, because no new operations
// should be allowed to start.
@@ -141,13 +141,13 @@ void MobileSessionPool::releaseSession(MobileSession* session) {
if (!failedDropsQueue.isEmpty())
failedDropsQueue.execAndDequeueOp(session);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_sessions.push_back(session->getSession());
_releasedSessionNotifier.notify_one();
}
void MobileSessionPool::shutDown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shuttingDown = true;
// Retrieve the operation context from the thread's client if the client exists.
diff --git a/src/mongo/db/storage/mobile/mobile_session_pool.h b/src/mongo/db/storage/mobile/mobile_session_pool.h
index 08586e0ece8..031953cdfb3 100644
--- a/src/mongo/db/storage/mobile/mobile_session_pool.h
+++ b/src/mongo/db/storage/mobile/mobile_session_pool.h
@@ -37,7 +37,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mobile/mobile_options.h"
#include "mongo/db/storage/mobile/mobile_session.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
class MobileSession;
@@ -58,7 +58,7 @@ public:
private:
AtomicWord<bool> _isEmpty;
- stdx::mutex _queueMutex;
+ Mutex _queueMutex = MONGO_MAKE_LATCH("MobileDelayedOpQueue::_queueMutex");
std::queue<std::string> _opQueryQueue;
};
@@ -107,7 +107,7 @@ private:
sqlite3* _popSession_inlock();
// This is used to lock the _sessions vector.
- stdx::mutex _mutex;
+ Mutex _mutex;
stdx::condition_variable _releasedSessionNotifier;
std::string _path;
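MobileSessionPool hands sessions out under _mutex and signals _releasedSessionNotifier when one is returned. The generic pool below sketches one way such an acquire/release hand-off can be built with the standard library; SimplePool and its members are invented for illustration and do not mirror the pool's SQLite-specific logic.

// Illustration only: a mutex-guarded pool whose waiters are woken on release.
#include <condition_variable>
#include <mutex>
#include <vector>

template <typename T>
class SimplePool {
public:
    explicit SimplePool(std::vector<T> items) : _items(std::move(items)) {}

    T acquire() {
        std::unique_lock<std::mutex> lk(_mutex);
        // Block until something is available; the predicate guards against
        // spurious wakeups and wakeups that lost the race to another waiter.
        _released.wait(lk, [&] { return !_items.empty(); });
        T item = _items.back();
        _items.pop_back();
        return item;
    }

    void release(T item) {
        {
            std::lock_guard<std::mutex> lk(_mutex);
            _items.push_back(std::move(item));
        }
        _released.notify_one();  // wake one waiter, outside the lock
    }

private:
    std::mutex _mutex;  // guards _items
    std::condition_variable _released;
    std::vector<T> _items;
};

int main() {
    SimplePool<int> pool({1, 2, 3});
    int s = pool.acquire();
    pool.release(s);
    return 0;
}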
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index 8854f359119..a43deca1687 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -900,7 +900,7 @@ StorageEngineImpl::TimestampMonitor::TimestampMonitor(KVEngine* engine, Periodic
StorageEngineImpl::TimestampMonitor::~TimestampMonitor() {
log() << "Timestamp monitor shutting down";
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
invariant(_listeners.empty());
}
@@ -912,7 +912,7 @@ void StorageEngineImpl::TimestampMonitor::startup() {
"TimestampMonitor",
[&](Client* client) {
{
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
if (_listeners.empty()) {
return;
}
@@ -979,7 +979,7 @@ void StorageEngineImpl::TimestampMonitor::startup() {
}
void StorageEngineImpl::TimestampMonitor::notifyAll(TimestampType type, Timestamp newTimestamp) {
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
for (auto& listener : _listeners) {
if (listener->getType() == type) {
listener->notify(newTimestamp);
@@ -988,7 +988,7 @@ void StorageEngineImpl::TimestampMonitor::notifyAll(TimestampType type, Timestam
}
void StorageEngineImpl::TimestampMonitor::addListener(TimestampListener* listener) {
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
if (std::find(_listeners.begin(), _listeners.end(), listener) != _listeners.end()) {
bool listenerAlreadyRegistered = true;
invariant(!listenerAlreadyRegistered);
@@ -997,7 +997,7 @@ void StorageEngineImpl::TimestampMonitor::addListener(TimestampListener* listene
}
void StorageEngineImpl::TimestampMonitor::removeListener(TimestampListener* listener) {
- stdx::lock_guard<stdx::mutex> lock(_monitorMutex);
+ stdx::lock_guard<Latch> lock(_monitorMutex);
if (std::find(_listeners.begin(), _listeners.end(), listener) == _listeners.end()) {
bool listenerNotRegistered = true;
invariant(!listenerNotRegistered);
diff --git a/src/mongo/db/storage/storage_engine_impl.h b/src/mongo/db/storage/storage_engine_impl.h
index 07f2cf6f42d..64f9774f6e8 100644
--- a/src/mongo/db/storage/storage_engine_impl.h
+++ b/src/mongo/db/storage/storage_engine_impl.h
@@ -46,7 +46,7 @@
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/storage/storage_engine_interface.h"
#include "mongo/db/storage/temporary_record_store.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/periodic_runner.h"
namespace mongo {
@@ -290,7 +290,7 @@ public:
PeriodicRunner* _periodicRunner;
// Protects access to _listeners below.
- stdx::mutex _monitorMutex;
+ Mutex _monitorMutex = MONGO_MAKE_LATCH("TimestampMonitor::_monitorMutex");
std::vector<TimestampListener*> _listeners;
// This should remain as the last member variable so that its destructor gets executed first
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 556a01a2efb..87a0fa73e0f 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -187,7 +187,7 @@ public:
while (!_shuttingDown.load()) {
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
MONGO_IDLE_THREAD_BLOCK;
// Check every 10 seconds or sooner in the debug builds
_condvar.wait_for(lock, stdx::chrono::seconds(kDebugBuild ? 1 : 10));
@@ -202,7 +202,7 @@ public:
void shutdown() {
_shuttingDown.store(true);
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
// Wake up the session sweeper thread early, we do not want the shutdown
// to wait for us too long.
_condvar.notify_one();
@@ -214,7 +214,7 @@ private:
WiredTigerSessionCache* _sessionCache;
AtomicWord<bool> _shuttingDown{false};
- stdx::mutex _mutex; // protects _condvar
+ Mutex _mutex = MONGO_MAKE_LATCH("WiredTigerSessionSweeper::_mutex"); // protects _condvar
// The session sweeper thread idles on this condition variable for a particular time duration
// between cleaning up expired sessions. It can be triggered early to expedite shutdown.
stdx::condition_variable _condvar;
@@ -322,7 +322,7 @@ public:
auto opCtx = tc->makeOperationContext();
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
MONGO_IDLE_THREAD_BLOCK;
_condvar.wait_for(lock,
stdx::chrono::seconds(static_cast<std::int64_t>(
@@ -395,7 +395,7 @@ public:
if (oplogNeededForRollback.isOK()) {
// Now that the checkpoint is durable, publish the oplog needed to recover
// from it.
- stdx::lock_guard<stdx::mutex> lk(_oplogNeededForCrashRecoveryMutex);
+ stdx::lock_guard<Latch> lk(_oplogNeededForCrashRecoveryMutex);
_oplogNeededForCrashRecovery.store(
oplogNeededForRollback.getValue().asULL());
}
@@ -440,7 +440,7 @@ public:
_hasTriggeredFirstStableCheckpoint = true;
log() << "Triggering the first stable checkpoint. Initial Data: " << initialData
<< " PrevStable: " << prevStable << " CurrStable: " << currStable;
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condvar.notify_one();
}
}
@@ -454,14 +454,14 @@ public:
* _oplogNeededForCrashRecovery will not change during assignment.
*/
void assignOplogNeededForCrashRecoveryTo(boost::optional<Timestamp>* timestamp) {
- stdx::lock_guard<stdx::mutex> lk(_oplogNeededForCrashRecoveryMutex);
+ stdx::lock_guard<Latch> lk(_oplogNeededForCrashRecoveryMutex);
*timestamp = Timestamp(_oplogNeededForCrashRecovery.load());
}
void shutdown() {
_shuttingDown.store(true);
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
// Wake up the checkpoint thread early, to take a final checkpoint before shutting
// down, if one has not coincidentally just been taken.
_condvar.notify_one();
@@ -473,7 +473,8 @@ private:
WiredTigerKVEngine* _wiredTigerKVEngine;
WiredTigerSessionCache* _sessionCache;
- stdx::mutex _mutex; // protects _condvar
+ Mutex _mutex = MONGO_MAKE_LATCH("WiredTigerCheckpointThread::_mutex");
+ ; // protects _condvar
// The checkpoint thread idles on this condition variable for a particular time duration between
// taking checkpoints. It can be triggered early to expedite immediate checkpointing.
stdx::condition_variable _condvar;
@@ -482,7 +483,8 @@ private:
bool _hasTriggeredFirstStableCheckpoint = false;
- stdx::mutex _oplogNeededForCrashRecoveryMutex;
+ Mutex _oplogNeededForCrashRecoveryMutex =
+ MONGO_MAKE_LATCH("WiredTigerCheckpointThread::_oplogNeededForCrashRecoveryMutex");
AtomicWord<std::uint64_t> _oplogNeededForCrashRecovery;
};
@@ -1064,7 +1066,7 @@ StatusWith<std::vector<std::string>> WiredTigerKVEngine::beginNonBlockingBackup(
uassert(51034, "Cannot open backup cursor with in-memory mode.", !isEphemeral());
// Oplog truncation thread won't remove oplog since the checkpoint pinned by the backup cursor.
- stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex);
+ stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex);
_checkpointThread->assignOplogNeededForCrashRecoveryTo(&_oplogPinnedByBackup);
auto pinOplogGuard = makeGuard([&] { _oplogPinnedByBackup = boost::none; });
@@ -1099,7 +1101,7 @@ StatusWith<std::vector<std::string>> WiredTigerKVEngine::beginNonBlockingBackup(
void WiredTigerKVEngine::endNonBlockingBackup(OperationContext* opCtx) {
_backupSession.reset();
// Oplog truncation thread can now remove the pinned oplog.
- stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex);
+ stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex);
_oplogPinnedByBackup = boost::none;
_backupCursor = nullptr;
}
@@ -1140,7 +1142,7 @@ void WiredTigerKVEngine::syncSizeInfo(bool sync) const {
void WiredTigerKVEngine::setOldestActiveTransactionTimestampCallback(
StorageEngine::OldestActiveTransactionTimestampCallback callback) {
- stdx::lock_guard<stdx::mutex> lk(_oldestActiveTransactionTimestampCallbackMutex);
+ stdx::lock_guard<Latch> lk(_oldestActiveTransactionTimestampCallbackMutex);
_oldestActiveTransactionTimestampCallback = std::move(callback);
};
@@ -1403,7 +1405,7 @@ Status WiredTigerKVEngine::dropIdent(OperationContext* opCtx, StringData ident)
if (ret == EBUSY) {
// this is expected, queue it up
{
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
_identToDrop.push_front(uri);
}
_sessionCache->closeCursorsForQueuedDrops();
@@ -1422,7 +1424,7 @@ std::list<WiredTigerCachedCursor> WiredTigerKVEngine::filterCursorsWithQueuedDro
std::list<WiredTigerCachedCursor>* cache) {
std::list<WiredTigerCachedCursor> toDrop;
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
if (_identToDrop.empty())
return toDrop;
@@ -1456,7 +1458,7 @@ bool WiredTigerKVEngine::haveDropsQueued() const {
_previousCheckedDropsQueued = now;
// Don't wait for the mutex: if we can't get it, report that no drops are queued.
- stdx::unique_lock<stdx::mutex> lk(_identToDropMutex, stdx::defer_lock);
+ stdx::unique_lock<Latch> lk(_identToDropMutex, stdx::defer_lock);
return lk.try_lock() && !_identToDrop.empty();
}
@@ -1466,7 +1468,7 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() {
WiredTigerSession session(_conn);
{
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
numInQueue = _identToDrop.size();
}
@@ -1479,7 +1481,7 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() {
for (int i = 0; i < numToDelete; i++) {
string uri;
{
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
if (_identToDrop.empty())
break;
uri = _identToDrop.front();
@@ -1490,7 +1492,7 @@ void WiredTigerKVEngine::dropSomeQueuedIdents() {
LOG(1) << "WT queued drop of " << uri << " res " << ret;
if (ret == EBUSY) {
- stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ stdx::lock_guard<Latch> lk(_identToDropMutex);
_identToDrop.push_back(uri);
} else {
invariantWTOK(ret);
@@ -1871,7 +1873,7 @@ StatusWith<Timestamp> WiredTigerKVEngine::getOplogNeededForRollback() const {
auto stableTimestamp = _stableTimestamp.load();
// Only one thread can set or execute this callback.
- stdx::lock_guard<stdx::mutex> lk(_oldestActiveTransactionTimestampCallbackMutex);
+ stdx::lock_guard<Latch> lk(_oldestActiveTransactionTimestampCallbackMutex);
boost::optional<Timestamp> oldestActiveTransactionTimestamp;
if (_oldestActiveTransactionTimestampCallback) {
auto status = _oldestActiveTransactionTimestampCallback(Timestamp(stableTimestamp));
@@ -1904,7 +1906,7 @@ boost::optional<Timestamp> WiredTigerKVEngine::getOplogNeededForCrashRecovery()
Timestamp WiredTigerKVEngine::getPinnedOplog() const {
{
- stdx::lock_guard<stdx::mutex> lock(_oplogPinnedByBackupMutex);
+ stdx::lock_guard<Latch> lock(_oplogPinnedByBackupMutex);
if (!storageGlobalParams.allowOplogTruncation) {
// If oplog truncation is not allowed, then return the min timestamp so that no history
// is
@@ -1956,14 +1958,14 @@ bool WiredTigerKVEngine::supportsOplogStones() const {
void WiredTigerKVEngine::startOplogManager(OperationContext* opCtx,
const std::string& uri,
WiredTigerRecordStore* oplogRecordStore) {
- stdx::lock_guard<stdx::mutex> lock(_oplogManagerMutex);
+ stdx::lock_guard<Latch> lock(_oplogManagerMutex);
if (_oplogManagerCount == 0)
_oplogManager->start(opCtx, uri, oplogRecordStore);
_oplogManagerCount++;
}
void WiredTigerKVEngine::haltOplogManager() {
- stdx::unique_lock<stdx::mutex> lock(_oplogManagerMutex);
+ stdx::unique_lock<Latch> lock(_oplogManagerMutex);
invariant(_oplogManagerCount > 0);
_oplogManagerCount--;
if (_oplogManagerCount == 0) {
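One hunk above, haveDropsQueued() builds its lock with stdx::defer_lock and reports no queued drops whenever try_lock() fails, so the hot path never blocks on _identToDropMutex. A standard-library sketch of that non-blocking probe follows; DropQueue and its members are hypothetical.

// Illustration only: a best-effort, non-blocking check of guarded state.
#include <list>
#include <mutex>
#include <string>

class DropQueue {
public:
    void push(std::string uri) {
        std::lock_guard<std::mutex> lk(_mutex);
        _pending.push_front(std::move(uri));
    }

    // Best-effort check used on a hot path: if the mutex is contended, report
    // the queue as empty rather than stalling the caller.
    bool haveDropsQueued() const {
        std::unique_lock<std::mutex> lk(_mutex, std::defer_lock);
        return lk.try_lock() && !_pending.empty();
    }

private:
    mutable std::mutex _mutex;  // guards _pending
    std::list<std::string> _pending;
};

int main() {
    DropQueue queue;
    queue.push("table:collection-1");
    return queue.haveDropsQueued() ? 0 : 1;
}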
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index 7b98f9fd388..39a06e1f213 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -45,7 +45,7 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/elapsed_tracker.h"
namespace mongo {
@@ -409,7 +409,8 @@ private:
std::uint64_t _getCheckpointTimestamp() const;
- mutable stdx::mutex _oldestActiveTransactionTimestampCallbackMutex;
+ mutable Mutex _oldestActiveTransactionTimestampCallbackMutex =
+ MONGO_MAKE_LATCH("::_oldestActiveTransactionTimestampCallbackMutex");
StorageEngine::OldestActiveTransactionTimestampCallback
_oldestActiveTransactionTimestampCallback;
@@ -420,7 +421,7 @@ private:
ClockSource* const _clockSource;
// Mutex to protect use of _oplogManagerCount by this instance of KV engine.
- mutable stdx::mutex _oplogManagerMutex;
+ mutable Mutex _oplogManagerMutex = MONGO_MAKE_LATCH("::_oplogManagerMutex");
std::size_t _oplogManagerCount = 0;
std::unique_ptr<WiredTigerOplogManager> _oplogManager;
@@ -451,15 +452,16 @@ private:
std::string _rsOptions;
std::string _indexOptions;
- mutable stdx::mutex _dropAllQueuesMutex;
- mutable stdx::mutex _identToDropMutex;
+ mutable Mutex _dropAllQueuesMutex = MONGO_MAKE_LATCH("WiredTigerKVEngine::_dropAllQueuesMutex");
+ mutable Mutex _identToDropMutex = MONGO_MAKE_LATCH("WiredTigerKVEngine::_identToDropMutex");
std::list<std::string> _identToDrop;
mutable Date_t _previousCheckedDropsQueued;
std::unique_ptr<WiredTigerSession> _backupSession;
WT_CURSOR* _backupCursor;
- mutable stdx::mutex _oplogPinnedByBackupMutex;
+ mutable Mutex _oplogPinnedByBackupMutex =
+ MONGO_MAKE_LATCH("WiredTigerKVEngine::_oplogPinnedByBackupMutex");
boost::optional<Timestamp> _oplogPinnedByBackup;
Timestamp _recoveryTimestamp;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
index fbf0b9450a3..647fe8de738 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/idle_thread_block.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -74,7 +74,7 @@ void WiredTigerOplogManager::start(OperationContext* opCtx,
// Need to obtain the mutex before starting the thread, as otherwise it may race ahead,
// see _shuttingDown as true, and quit prematurely.
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
_oplogJournalThread = stdx::thread(&WiredTigerOplogManager::_oplogJournalThreadLoop,
this,
WiredTigerRecoveryUnit::get(opCtx)->getSessionCache(),
@@ -86,7 +86,7 @@ void WiredTigerOplogManager::start(OperationContext* opCtx,
void WiredTigerOplogManager::halt() {
{
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
invariant(_isRunning);
_shuttingDown = true;
_isRunning = false;
@@ -120,7 +120,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible(
// Close transaction before we wait.
opCtx->recoveryUnit()->abandonSnapshot();
- stdx::unique_lock<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::unique_lock<Latch> lk(_oplogVisibilityStateMutex);
// Prevent any scheduled journal flushes from being delayed and blocking this wait excessively.
_opsWaitingForVisibility++;
@@ -148,7 +148,7 @@ void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible(
}
void WiredTigerOplogManager::triggerJournalFlush() {
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
if (!_opsWaitingForJournal) {
_opsWaitingForJournal = true;
_opsWaitingForJournalCV.notify_one();
@@ -174,7 +174,7 @@ void WiredTigerOplogManager::_oplogJournalThreadLoop(WiredTigerSessionCache* ses
// waitUntilDurable() call requiring an opCtx parameter.
opCtx->swapLockState(std::make_unique<LockerImpl>());
- stdx::unique_lock<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::unique_lock<Latch> lk(_oplogVisibilityStateMutex);
{
MONGO_IDLE_THREAD_BLOCK;
_opsWaitingForJournalCV.wait(lk,
@@ -251,7 +251,7 @@ std::uint64_t WiredTigerOplogManager::getOplogReadTimestamp() const {
}
void WiredTigerOplogManager::setOplogReadTimestamp(Timestamp ts) {
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
_setOplogReadTimestamp(lk, ts.asULL());
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h
index 9a82985fc28..09258c657f2 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h
@@ -30,8 +30,8 @@
#pragma once
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -60,7 +60,7 @@ public:
void halt();
bool isRunning() {
- stdx::lock_guard<stdx::mutex> lk(_oplogVisibilityStateMutex);
+ stdx::lock_guard<Latch> lk(_oplogVisibilityStateMutex);
return _isRunning && !_shuttingDown;
}
@@ -89,7 +89,8 @@ private:
void _setOplogReadTimestamp(WithLock, uint64_t newTimestamp);
stdx::thread _oplogJournalThread;
- mutable stdx::mutex _oplogVisibilityStateMutex;
+ mutable Mutex _oplogVisibilityStateMutex =
+ MONGO_MAKE_LATCH("WiredTigerOplogManager::_oplogVisibilityStateMutex");
mutable stdx::condition_variable
_opsWaitingForJournalCV; // Signaled to trigger a journal flush.
mutable stdx::condition_variable
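The hunks above show the shape of the conversion: a Mutex member initialized with MONGO_MAKE_LATCH, locked through stdx::lock_guard<Latch> or stdx::unique_lock<Latch>, with the existing stdx::condition_variable waits left in place. A minimal sketch of that pattern, using a hypothetical VisibilityTracker class (the class and member names are illustrative assumptions, not part of this patch):

#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"

namespace mongo {

class VisibilityTracker {
public:
    void shutdown() {
        stdx::lock_guard<Latch> lk(_mutex);
        _shuttingDown = true;
        _cv.notify_all();
    }

    void waitUntilShutdown() {
        stdx::unique_lock<Latch> lk(_mutex);
        // Same wait shape as the oplog journal thread loop above.
        _cv.wait(lk, [&] { return _shuttingDown; });
    }

private:
    // Naming the latch is what MONGO_MAKE_LATCH adds over a bare stdx::mutex.
    mutable Mutex _mutex = MONGO_MAKE_LATCH("VisibilityTracker::_mutex");
    mutable stdx::condition_variable _cv;
    bool _shuttingDown = false;
};

}  // namespace mongo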
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index f0a12735423..0c4c1956e51 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -147,7 +147,7 @@ public:
_oplogStones->_currentRecords.store(0);
_oplogStones->_currentBytes.store(0);
- stdx::lock_guard<stdx::mutex> lk(_oplogStones->_mutex);
+ stdx::lock_guard<Latch> lk(_oplogStones->_mutex);
_oplogStones->_stones.clear();
}
@@ -159,7 +159,7 @@ private:
WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* opCtx, WiredTigerRecordStore* rs)
: _rs(rs) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(rs->isCapped());
invariant(rs->cappedMaxSize() > 0);
@@ -178,13 +178,13 @@ WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* opCtx, WiredTi
}
bool WiredTigerRecordStore::OplogStones::isDead() {
- stdx::lock_guard<stdx::mutex> lk(_oplogReclaimMutex);
+ stdx::lock_guard<Latch> lk(_oplogReclaimMutex);
return _isDead;
}
void WiredTigerRecordStore::OplogStones::kill() {
{
- stdx::lock_guard<stdx::mutex> lk(_oplogReclaimMutex);
+ stdx::lock_guard<Latch> lk(_oplogReclaimMutex);
_isDead = true;
}
_oplogReclaimCv.notify_one();
@@ -192,11 +192,11 @@ void WiredTigerRecordStore::OplogStones::kill() {
void WiredTigerRecordStore::OplogStones::awaitHasExcessStonesOrDead() {
// Wait until kill() is called or there are too many oplog stones.
- stdx::unique_lock<stdx::mutex> lock(_oplogReclaimMutex);
+ stdx::unique_lock<Latch> lock(_oplogReclaimMutex);
while (!_isDead) {
{
MONGO_IDLE_THREAD_BLOCK;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (hasExcessStones_inlock()) {
// There are now excess oplog stones. However, it may be necessary to keep
// additional oplog.
@@ -219,7 +219,7 @@ void WiredTigerRecordStore::OplogStones::awaitHasExcessStonesOrDead() {
boost::optional<WiredTigerRecordStore::OplogStones::Stone>
WiredTigerRecordStore::OplogStones::peekOldestStoneIfNeeded() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!hasExcessStones_inlock()) {
return {};
@@ -229,12 +229,12 @@ WiredTigerRecordStore::OplogStones::peekOldestStoneIfNeeded() const {
}
void WiredTigerRecordStore::OplogStones::popOldestStone() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stones.pop_front();
}
void WiredTigerRecordStore::OplogStones::createNewStoneIfNeeded(RecordId lastRecord) {
- stdx::unique_lock<stdx::mutex> lk(_mutex, stdx::try_to_lock);
+ stdx::unique_lock<Latch> lk(_mutex, stdx::try_to_lock);
if (!lk) {
// Someone else is either already creating a new stone or popping the oldest one. In the
// latter case, we let the next insert trigger the new stone's creation.
@@ -275,7 +275,7 @@ void WiredTigerRecordStore::OplogStones::clearStonesOnCommit(OperationContext* o
void WiredTigerRecordStore::OplogStones::updateStonesAfterCappedTruncateAfter(
int64_t recordsRemoved, int64_t bytesRemoved, RecordId firstRemovedId) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
int64_t numStonesToRemove = 0;
int64_t recordsInStonesToRemove = 0;
@@ -305,7 +305,7 @@ void WiredTigerRecordStore::OplogStones::updateStonesAfterCappedTruncateAfter(
void WiredTigerRecordStore::OplogStones::setMinBytesPerStone(int64_t size) {
invariant(size > 0);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Only allow changing the minimum bytes per stone if no data has been inserted.
invariant(_stones.size() == 0 && _currentRecords.load() == 0);
@@ -457,7 +457,7 @@ void WiredTigerRecordStore::OplogStones::_pokeReclaimThreadIfNeeded() {
}
void WiredTigerRecordStore::OplogStones::adjust(int64_t maxSize) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const unsigned long long kMinStonesToKeep = 10ULL;
const unsigned long long kMaxStonesToKeep = 100ULL;
@@ -699,7 +699,7 @@ WiredTigerRecordStore::WiredTigerRecordStore(WiredTigerKVEngine* kvEngine,
WiredTigerRecordStore::~WiredTigerRecordStore() {
{
- stdx::lock_guard<stdx::mutex> lk(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> lk(_cappedCallbackMutex);
_shuttingDown = true;
}
@@ -784,7 +784,7 @@ const char* WiredTigerRecordStore::name() const {
}
bool WiredTigerRecordStore::inShutdown() const {
- stdx::lock_guard<stdx::mutex> lk(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> lk(_cappedCallbackMutex);
return _shuttingDown;
}
@@ -1060,7 +1060,7 @@ int64_t WiredTigerRecordStore::_cappedDeleteAsNeeded_inlock(OperationContext* op
++docsRemoved;
sizeSaved += old_value.size;
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
if (_shuttingDown)
break;
@@ -1332,12 +1332,12 @@ bool WiredTigerRecordStore::isOpHidden_forTest(const RecordId& id) const {
}
bool WiredTigerRecordStore::haveCappedWaiters() {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
return _cappedCallback && _cappedCallback->haveCappedWaiters();
}
void WiredTigerRecordStore::notifyCappedWaitersIfNeeded() {
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
// This wakes up cursors blocking in await_data.
if (_cappedCallback) {
_cappedCallback->notifyCappedWaitersIfNeeded();
@@ -1743,7 +1743,7 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* opCtx,
// Compute the number and associated sizes of the records to delete.
{
- stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> cappedCallbackLock(_cappedCallbackMutex);
do {
if (_cappedCallback) {
uassertStatusOK(
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 693987af2a6..044d57339d7 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -43,8 +43,8 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/fail_point_service.h"
@@ -212,7 +212,7 @@ public:
Status updateCappedSize(OperationContext* opCtx, long long cappedSize) final;
void setCappedCallback(CappedCallback* cb) {
- stdx::lock_guard<stdx::mutex> lk(_cappedCallbackMutex);
+ stdx::lock_guard<Latch> lk(_cappedCallbackMutex);
_cappedCallback = cb;
}
@@ -343,9 +343,12 @@ private:
RecordId _cappedFirstRecord;
AtomicWord<long long> _cappedSleep;
AtomicWord<long long> _cappedSleepMS;
+
+ // guards _cappedCallback and _shuttingDown
+ mutable Mutex _cappedCallbackMutex =
+ MONGO_MAKE_LATCH("WiredTigerRecordStore::_cappedCallbackMutex");
CappedCallback* _cappedCallback;
bool _shuttingDown;
- mutable stdx::mutex _cappedCallbackMutex; // guards _cappedCallback and _shuttingDown
// See comment in ::cappedDeleteAsNeeded
int _cappedDeleteCheckCount;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
index f6e9371c894..f88334ea85b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
@@ -33,8 +33,8 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -99,7 +99,7 @@ public:
//
size_t numStones() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _stones.size();
}
@@ -129,7 +129,7 @@ private:
WiredTigerRecordStore* _rs;
- stdx::mutex _oplogReclaimMutex;
+ Mutex _oplogReclaimMutex = MONGO_MAKE_LATCH("OplogStones::_oplogReclaimMutex");
stdx::condition_variable _oplogReclaimCv;
// True if '_rs' has been destroyed, e.g. due to repairDatabase being called on the "local"
@@ -143,7 +143,8 @@ private:
AtomicWord<long long> _currentRecords; // Number of records in the stone being filled.
AtomicWord<long long> _currentBytes; // Number of bytes in the stone being filled.
- mutable stdx::mutex _mutex; // Protects against concurrent access to the deque of oplog stones.
+ // Protects against concurrent access to the deque of oplog stones.
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("OplogStones::_mutex");
std::deque<OplogStones::Stone> _stones; // front = oldest, back = newest.
};
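The createNewStoneIfNeeded() hunk above keeps the non-blocking stdx::try_to_lock acquisition, just against a Latch. A minimal sketch of that idiom, with a hypothetical StoneLedger class standing in for the real code (names are illustrative assumptions, not part of this patch):

#include <deque>

#include "mongo/platform/mutex.h"

namespace mongo {

class StoneLedger {
public:
    // Returns false, without blocking, if another thread already holds the latch.
    bool tryAppend(int entry) {
        stdx::unique_lock<Latch> lk(_mutex, stdx::try_to_lock);
        if (!lk) {
            return false;  // Same bail-out as createNewStoneIfNeeded() above.
        }
        _entries.push_back(entry);
        return true;
    }

private:
    mutable Mutex _mutex = MONGO_MAKE_LATCH("StoneLedger::_mutex");
    std::deque<int> _entries;
};

}  // namespace mongo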
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
index 104a5caa151..897b72eb762 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
@@ -266,7 +266,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx,
UniqueWiredTigerSession session = getSession();
WT_SESSION* s = session->getSession();
{
- stdx::unique_lock<stdx::mutex> lk(_journalListenerMutex);
+ stdx::unique_lock<Latch> lk(_journalListenerMutex);
JournalListener::Token token = _journalListener->getToken();
auto config = stableCheckpoint ? "use_timestamp=true" : "use_timestamp=false";
auto checkpointLock = _engine->getCheckpointLock(opCtx);
@@ -280,7 +280,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx,
uint32_t start = _lastSyncTime.load();
// Do the remainder in a critical section that ensures only a single thread at a time
// will attempt to synchronize.
- stdx::unique_lock<stdx::mutex> lk(_lastSyncMutex);
+ stdx::unique_lock<Latch> lk(_lastSyncMutex);
uint32_t current = _lastSyncTime.loadRelaxed(); // synchronized with writes through mutex
if (current != start) {
// Someone else synced already since we read lastSyncTime, so we're done!
@@ -292,7 +292,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx,
// This gets the token (OpTime) from the last write, before flushing (either the journal, or a
// checkpoint), and then reports that token (OpTime) as a durable write.
- stdx::unique_lock<stdx::mutex> jlk(_journalListenerMutex);
+ stdx::unique_lock<Latch> jlk(_journalListenerMutex);
JournalListener::Token token = _journalListener->getToken();
// Initialize on first use.
@@ -316,7 +316,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx,
void WiredTigerSessionCache::waitUntilPreparedUnitOfWorkCommitsOrAborts(OperationContext* opCtx,
std::uint64_t lastCount) {
invariant(opCtx);
- stdx::unique_lock<stdx::mutex> lk(_prepareCommittedOrAbortedMutex);
+ stdx::unique_lock<Latch> lk(_prepareCommittedOrAbortedMutex);
if (lastCount == _prepareCommitOrAbortCounter.loadRelaxed()) {
opCtx->waitForConditionOrInterrupt(_prepareCommittedOrAbortedCond, lk, [&] {
return _prepareCommitOrAbortCounter.loadRelaxed() > lastCount;
@@ -325,14 +325,14 @@ void WiredTigerSessionCache::waitUntilPreparedUnitOfWorkCommitsOrAborts(Operatio
}
void WiredTigerSessionCache::notifyPreparedUnitOfWorkHasCommittedOrAborted() {
- stdx::unique_lock<stdx::mutex> lk(_prepareCommittedOrAbortedMutex);
+ stdx::unique_lock<Latch> lk(_prepareCommittedOrAbortedMutex);
_prepareCommitOrAbortCounter.fetchAndAdd(1);
_prepareCommittedOrAbortedCond.notify_all();
}
void WiredTigerSessionCache::closeAllCursors(const std::string& uri) {
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
for (SessionCache::iterator i = _sessions.begin(); i != _sessions.end(); i++) {
(*i)->closeAllCursors(uri);
}
@@ -342,14 +342,14 @@ void WiredTigerSessionCache::closeCursorsForQueuedDrops() {
// Increment the cursor epoch so that all cursors from this epoch are closed.
_cursorEpoch.fetchAndAdd(1);
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
for (SessionCache::iterator i = _sessions.begin(); i != _sessions.end(); i++) {
(*i)->closeCursorsForQueuedDrops(_engine);
}
}
size_t WiredTigerSessionCache::getIdleSessionsCount() {
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
return _sessions.size();
}
@@ -361,7 +361,7 @@ void WiredTigerSessionCache::closeExpiredIdleSessions(int64_t idleTimeMillis) {
auto cutoffTime = _clockSource->now() - Milliseconds(idleTimeMillis);
{
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
// Discard all sessions that became idle before the cutoff time
for (auto it = _sessions.begin(); it != _sessions.end();) {
auto session = *it;
@@ -381,7 +381,7 @@ void WiredTigerSessionCache::closeAll() {
SessionCache swap;
{
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
_epoch.fetchAndAdd(1);
_sessions.swap(swap);
}
@@ -401,7 +401,7 @@ UniqueWiredTigerSession WiredTigerSessionCache::getSession() {
invariant(!(_shuttingDown.loadRelaxed() & kShuttingDownMask));
{
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
if (!_sessions.empty()) {
// Get the most recently used session so that if we discard sessions, we're
// discarding older ones
@@ -468,7 +468,7 @@ void WiredTigerSessionCache::releaseSession(WiredTigerSession* session) {
session->setIdleExpireTime(_clockSource->now());
if (session->_getEpoch() == currentEpoch) { // check outside of lock to reduce contention
- stdx::lock_guard<stdx::mutex> lock(_cacheLock);
+ stdx::lock_guard<Latch> lock(_cacheLock);
if (session->_getEpoch() == _epoch.load()) { // recheck inside the lock for correctness
returnedToCache = true;
_sessions.push_back(session);
@@ -485,7 +485,7 @@ void WiredTigerSessionCache::releaseSession(WiredTigerSession* session) {
void WiredTigerSessionCache::setJournalListener(JournalListener* jl) {
- stdx::unique_lock<stdx::mutex> lk(_journalListenerMutex);
+ stdx::unique_lock<Latch> lk(_journalListenerMutex);
_journalListener = jl;
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
index 72b55e311ed..9a94f175cdc 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
@@ -37,7 +37,7 @@
#include "mongo/db/storage/journal_listener.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/spin_lock.h"
namespace mongo {
@@ -323,7 +323,7 @@ private:
AtomicWord<unsigned> _shuttingDown;
static const uint32_t kShuttingDownMask = 1 << 31;
- stdx::mutex _cacheLock;
+ Mutex _cacheLock = MONGO_MAKE_LATCH("WiredTigerSessionCache::_cacheLock");
typedef std::vector<WiredTigerSession*> SessionCache;
SessionCache _sessions;
@@ -335,15 +335,16 @@ private:
// Counter and critical section mutex for waitUntilDurable
AtomicWord<unsigned> _lastSyncTime;
- stdx::mutex _lastSyncMutex;
+ Mutex _lastSyncMutex = MONGO_MAKE_LATCH("WiredTigerSessionCache::_lastSyncMutex");
// Mutex and cond var for waiting on prepare commit or abort.
- stdx::mutex _prepareCommittedOrAbortedMutex;
+ Mutex _prepareCommittedOrAbortedMutex =
+ MONGO_MAKE_LATCH("WiredTigerSessionCache::_prepareCommittedOrAbortedMutex");
stdx::condition_variable _prepareCommittedOrAbortedCond;
AtomicWord<std::uint64_t> _prepareCommitOrAbortCounter{0};
// Protects _journalListener.
- stdx::mutex _journalListenerMutex;
+ Mutex _journalListenerMutex = MONGO_MAKE_LATCH("WiredTigerSessionCache::_journalListenerMutex");
// Notified when we commit to the journal.
JournalListener* _journalListener = &NoOpJournalListener::instance;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
index 56c8161d134..76ddde766e0 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
@@ -64,7 +64,7 @@ WiredTigerSizeStorer::WiredTigerSizeStorer(WT_CONNECTION* conn,
}
WiredTigerSizeStorer::~WiredTigerSizeStorer() {
- stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
+ stdx::lock_guard<Latch> cursorLock(_cursorMutex);
_cursor->close(_cursor);
}
@@ -74,7 +74,7 @@ void WiredTigerSizeStorer::store(StringData uri, std::shared_ptr<SizeInfo> sizeI
return;
// Ordering is important: as the entry may be flushed concurrently, set the dirty flag last.
- stdx::lock_guard<stdx::mutex> lk(_bufferMutex);
+ stdx::lock_guard<Latch> lk(_bufferMutex);
auto& entry = _buffer[uri];
// During rollback it is possible to get a new SizeInfo. In that case clear the dirty flag,
// so the SizeInfo can be destructed without triggering the dirty check invariant.
@@ -90,13 +90,13 @@ void WiredTigerSizeStorer::store(StringData uri, std::shared_ptr<SizeInfo> sizeI
std::shared_ptr<WiredTigerSizeStorer::SizeInfo> WiredTigerSizeStorer::load(StringData uri) const {
{
// Check if we can satisfy the read from the buffer.
- stdx::lock_guard<stdx::mutex> bufferLock(_bufferMutex);
+ stdx::lock_guard<Latch> bufferLock(_bufferMutex);
Buffer::const_iterator it = _buffer.find(uri);
if (it != _buffer.end())
return it->second;
}
- stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
+ stdx::lock_guard<Latch> cursorLock(_cursorMutex);
// Intentionally ignoring return value.
ON_BLOCK_EXIT([&] { _cursor->reset(_cursor); });
@@ -125,7 +125,7 @@ std::shared_ptr<WiredTigerSizeStorer::SizeInfo> WiredTigerSizeStorer::load(Strin
void WiredTigerSizeStorer::flush(bool syncToDisk) {
Buffer buffer;
{
- stdx::lock_guard<stdx::mutex> bufferLock(_bufferMutex);
+ stdx::lock_guard<Latch> bufferLock(_bufferMutex);
_buffer.swap(buffer);
}
@@ -133,13 +133,13 @@ void WiredTigerSizeStorer::flush(bool syncToDisk) {
return; // Nothing to do.
Timer t;
- stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
+ stdx::lock_guard<Latch> cursorLock(_cursorMutex);
{
// On failure, place entries back into the map, unless a newer value already exists.
ON_BLOCK_EXIT([this, &buffer]() {
this->_cursor->reset(this->_cursor);
if (!buffer.empty()) {
- stdx::lock_guard<stdx::mutex> bufferLock(this->_bufferMutex);
+ stdx::lock_guard<Latch> bufferLock(this->_bufferMutex);
for (auto& it : buffer)
this->_buffer.try_emplace(it.first, it.second);
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
index 5db2a4e72bc..79e5725ac81 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
@@ -36,7 +36,7 @@
#include "mongo/base/string_data.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/string_map.h"
namespace mongo {
@@ -95,12 +95,13 @@ private:
const WiredTigerSession _session;
const bool _readOnly;
// Guards _cursor. Acquire *before* _bufferMutex.
- mutable stdx::mutex _cursorMutex;
+ mutable Mutex _cursorMutex = MONGO_MAKE_LATCH("WiredTigerSizeStorer::_cursorMutex");
WT_CURSOR* _cursor; // pointer is const after constructor
using Buffer = StringMap<std::shared_ptr<SizeInfo>>;
- mutable stdx::mutex _bufferMutex; // Guards _buffer
+ mutable Mutex _bufferMutex =
+ MONGO_MAKE_LATCH("WiredTigerSessionStorer::_bufferMutex"); // Guards _buffer
Buffer _buffer;
};
} // namespace mongo
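The size storer keeps its documented lock ordering ("Guards _cursor. Acquire *before* _bufferMutex.") while both members become named latches. A minimal sketch of that ordering rule, using a hypothetical FlushBuffer class (the class, members, and long long payload are illustrative assumptions, not part of this patch):

#include <string>

#include "mongo/platform/mutex.h"
#include "mongo/util/string_map.h"

namespace mongo {

class FlushBuffer {
public:
    void add(const std::string& uri, long long size) {
        stdx::lock_guard<Latch> bufferLock(_bufferMutex);
        _buffer[uri] += size;
    }

    void flush() {
        // When both latches are needed, take _cursorMutex first, then _bufferMutex,
        // matching the ordering comment in wiredtiger_size_storer.h above.
        stdx::lock_guard<Latch> cursorLock(_cursorMutex);
        StringMap<long long> pending;
        {
            stdx::lock_guard<Latch> bufferLock(_bufferMutex);
            _buffer.swap(pending);
        }
        // ... write `pending` out while still holding _cursorMutex ...
    }

private:
    // Guards the (hypothetical) cursor. Acquire *before* _bufferMutex.
    mutable Mutex _cursorMutex = MONGO_MAKE_LATCH("FlushBuffer::_cursorMutex");
    mutable Mutex _bufferMutex = MONGO_MAKE_LATCH("FlushBuffer::_bufferMutex");
    StringMap<long long> _buffer;
};

}  // namespace mongo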
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
index 7216bc1727b..dd7c6ce52b5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
@@ -42,14 +42,14 @@
namespace mongo {
void WiredTigerSnapshotManager::setCommittedSnapshot(const Timestamp& timestamp) {
- stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_committedSnapshotMutex);
invariant(!_committedSnapshot || *_committedSnapshot <= timestamp);
_committedSnapshot = timestamp;
}
void WiredTigerSnapshotManager::setLocalSnapshot(const Timestamp& timestamp) {
- stdx::lock_guard<stdx::mutex> lock(_localSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_localSnapshotMutex);
if (timestamp.isNull())
_localSnapshot = boost::none;
else
@@ -57,12 +57,12 @@ void WiredTigerSnapshotManager::setLocalSnapshot(const Timestamp& timestamp) {
}
boost::optional<Timestamp> WiredTigerSnapshotManager::getLocalSnapshot() {
- stdx::lock_guard<stdx::mutex> lock(_localSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_localSnapshotMutex);
return _localSnapshot;
}
void WiredTigerSnapshotManager::dropAllSnapshots() {
- stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_committedSnapshotMutex);
_committedSnapshot = boost::none;
}
@@ -71,7 +71,7 @@ boost::optional<Timestamp> WiredTigerSnapshotManager::getMinSnapshotForNextCommi
return boost::none;
}
- stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_committedSnapshotMutex);
return _committedSnapshot;
}
@@ -81,7 +81,7 @@ Timestamp WiredTigerSnapshotManager::beginTransactionOnCommittedSnapshot(
RoundUpPreparedTimestamps roundUpPreparedTimestamps) const {
WiredTigerBeginTxnBlock txnOpen(session, prepareConflictBehavior, roundUpPreparedTimestamps);
- stdx::lock_guard<stdx::mutex> lock(_committedSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_committedSnapshotMutex);
uassert(ErrorCodes::ReadConcernMajorityNotAvailableYet,
"Committed view disappeared while running operation",
_committedSnapshot);
@@ -99,7 +99,7 @@ Timestamp WiredTigerSnapshotManager::beginTransactionOnLocalSnapshot(
RoundUpPreparedTimestamps roundUpPreparedTimestamps) const {
WiredTigerBeginTxnBlock txnOpen(session, prepareConflictBehavior, roundUpPreparedTimestamps);
- stdx::lock_guard<stdx::mutex> lock(_localSnapshotMutex);
+ stdx::lock_guard<Latch> lock(_localSnapshotMutex);
invariant(_localSnapshot);
LOG(3) << "begin_transaction on local snapshot " << _localSnapshot.get().toString();
auto status = txnOpen.setReadSnapshot(_localSnapshot.get());
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
index 75c9777a502..1726a7d4c2b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
@@ -35,7 +35,7 @@
#include "mongo/bson/timestamp.h"
#include "mongo/db/storage/snapshot_manager.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -91,11 +91,13 @@ public:
private:
// Snapshot to use for reads at a commit timestamp.
- mutable stdx::mutex _committedSnapshotMutex; // Guards _committedSnapshot.
+ mutable Mutex _committedSnapshotMutex = // Guards _committedSnapshot.
+ MONGO_MAKE_LATCH("WiredTigerSnapshotManager::_committedSnapshotMutex");
boost::optional<Timestamp> _committedSnapshot;
// Snapshot to use for reads at a local stable timestamp.
- mutable stdx::mutex _localSnapshotMutex; // Guards _localSnapshot.
+ mutable Mutex _localSnapshotMutex = // Guards _localSnapshot.
+ MONGO_MAKE_LATCH("WiredTigerSnapshotManager::_localSnapshotMutex");
boost::optional<Timestamp> _localSnapshot;
};
} // namespace mongo
diff --git a/src/mongo/db/time_proof_service.cpp b/src/mongo/db/time_proof_service.cpp
index 756d0397d5f..7e29f0b2254 100644
--- a/src/mongo/db/time_proof_service.cpp
+++ b/src/mongo/db/time_proof_service.cpp
@@ -57,7 +57,7 @@ TimeProofService::Key TimeProofService::generateRandomKey() {
}
TimeProofService::TimeProof TimeProofService::getProof(LogicalTime time, const Key& key) {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
auto timeCeil = LogicalTime(Timestamp(time.asTimestamp().asULL() | kRangeMask));
if (_cache && _cache->hasProof(timeCeil, key)) {
return _cache->_proof;
@@ -82,7 +82,7 @@ Status TimeProofService::checkProof(LogicalTime time, const TimeProof& proof, co
}
void TimeProofService::resetCache() {
- stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
+ stdx::lock_guard<Latch> lk(_cacheMutex);
if (_cache) {
_cache = boost::none;
}
diff --git a/src/mongo/db/time_proof_service.h b/src/mongo/db/time_proof_service.h
index f7ca66ab3c5..43b6d97a681 100644
--- a/src/mongo/db/time_proof_service.h
+++ b/src/mongo/db/time_proof_service.h
@@ -32,7 +32,7 @@
#include "mongo/base/status.h"
#include "mongo/crypto/sha1_block.h"
#include "mongo/db/logical_time.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -90,7 +90,7 @@ private:
};
// protects _cache
- stdx::mutex _cacheMutex;
+ Mutex _cacheMutex = MONGO_MAKE_LATCH("TimeProofService::_cacheMutex");
// one-entry cache
boost::optional<CacheEntry> _cache;
diff --git a/src/mongo/db/traffic_recorder.cpp b/src/mongo/db/traffic_recorder.cpp
index 4252cc1cfb5..f13388e1892 100644
--- a/src/mongo/db/traffic_recorder.cpp
+++ b/src/mongo/db/traffic_recorder.cpp
@@ -133,7 +133,7 @@ public:
db.getCursor().write<LittleEndian<uint32_t>>(size);
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_written += size;
}
@@ -150,7 +150,7 @@ public:
} catch (...) {
auto status = exceptionToStatus();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_result = status;
}
});
@@ -173,7 +173,7 @@ public:
// If we couldn't push our packet begin the process of failing the recording
_pcqPipe.producer.close();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If the result was otherwise okay, mark it as failed due to the queue blocking. If
// it failed for another reason, don't overwrite that.
@@ -187,7 +187,7 @@ public:
}
Status shutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_inShutdown) {
_inShutdown = true;
@@ -203,7 +203,7 @@ public:
}
BSONObj getStats() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_trafficStats.setBufferedBytes(_pcqPipe.controller.getStats().queueDepth);
_trafficStats.setCurrentFileSize(_written);
return _trafficStats.toBSON();
@@ -251,7 +251,7 @@ private:
MultiProducerSingleConsumerQueue<TrafficRecordingPacket, CostFunction>::Pipe _pcqPipe;
stdx::thread _thread;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("Recording::_mutex");
bool _inShutdown = false;
TrafficRecorderStats _trafficStats;
size_t _written = 0;
@@ -282,7 +282,7 @@ void TrafficRecorder::start(const StartRecordingTraffic& options) {
!gTrafficRecordingDirectory.empty());
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
uassert(ErrorCodes::BadValue, "Traffic recording already active", !_recording);
@@ -299,7 +299,7 @@ void TrafficRecorder::stop() {
_shouldRecord.store(false);
auto recording = [&] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
uassert(ErrorCodes::BadValue, "Traffic recording not active", _recording);
@@ -314,7 +314,7 @@ void TrafficRecorder::observe(const transport::SessionHandle& ts,
const Message& message) {
if (shouldAlwaysRecordTraffic) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_recording) {
StartRecordingTraffic options;
@@ -347,7 +347,7 @@ void TrafficRecorder::observe(const transport::SessionHandle& ts,
}
// We couldn't queue
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If the recording isn't the one we have in hand, bail (it's been ended, or a new one has
// been created).
@@ -360,7 +360,7 @@ void TrafficRecorder::observe(const transport::SessionHandle& ts,
}
std::shared_ptr<TrafficRecorder::Recording> TrafficRecorder::_getCurrentRecording() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _recording;
}
diff --git a/src/mongo/db/traffic_recorder.h b/src/mongo/db/traffic_recorder.h
index 8bd261cbfb4..964b95fdf80 100644
--- a/src/mongo/db/traffic_recorder.h
+++ b/src/mongo/db/traffic_recorder.h
@@ -34,8 +34,8 @@
#include "mongo/db/service_context.h"
#include "mongo/db/traffic_recorder_gen.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/message.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/transport/session.h"
namespace mongo {
@@ -72,7 +72,7 @@ private:
AtomicWord<bool> _shouldRecord;
// The mutex only protects the last recording shared_ptr
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TrafficRecorder::_mutex");
std::shared_ptr<Recording> _recording;
};
diff --git a/src/mongo/db/ttl_collection_cache.cpp b/src/mongo/db/ttl_collection_cache.cpp
index bb272653d24..d4d9ffe7694 100644
--- a/src/mongo/db/ttl_collection_cache.cpp
+++ b/src/mongo/db/ttl_collection_cache.cpp
@@ -46,19 +46,19 @@ TTLCollectionCache& TTLCollectionCache::get(ServiceContext* ctx) {
}
void TTLCollectionCache::registerTTLInfo(std::pair<UUID, std::string>&& ttlInfo) {
- stdx::lock_guard<stdx::mutex> lock(_ttlInfosLock);
+ stdx::lock_guard<Latch> lock(_ttlInfosLock);
_ttlInfos.push_back(std::move(ttlInfo));
}
void TTLCollectionCache::deregisterTTLInfo(const std::pair<UUID, std::string>& ttlInfo) {
- stdx::lock_guard<stdx::mutex> lock(_ttlInfosLock);
+ stdx::lock_guard<Latch> lock(_ttlInfosLock);
auto collIter = std::find(_ttlInfos.begin(), _ttlInfos.end(), ttlInfo);
fassert(40220, collIter != _ttlInfos.end());
_ttlInfos.erase(collIter);
}
std::vector<std::pair<UUID, std::string>> TTLCollectionCache::getTTLInfos() {
- stdx::lock_guard<stdx::mutex> lock(_ttlInfosLock);
+ stdx::lock_guard<Latch> lock(_ttlInfosLock);
return _ttlInfos;
}
}; // namespace mongo
diff --git a/src/mongo/db/ttl_collection_cache.h b/src/mongo/db/ttl_collection_cache.h
index 761a7f93321..b4b428e005c 100644
--- a/src/mongo/db/ttl_collection_cache.h
+++ b/src/mongo/db/ttl_collection_cache.h
@@ -34,7 +34,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/uuid.h"
/**
@@ -52,7 +52,7 @@ public:
std::vector<std::pair<UUID, std::string>> getTTLInfos();
private:
- stdx::mutex _ttlInfosLock;
+ Mutex _ttlInfosLock = MONGO_MAKE_LATCH("TTLCollectionCache::_ttlInfosLock");
std::vector<std::pair<UUID, std::string>> _ttlInfos; // <CollectionUUID, IndexName>
};
} // namespace mongo
diff --git a/src/mongo/db/views/view_catalog.cpp b/src/mongo/db/views/view_catalog.cpp
index a2de077d063..ef49919f3e0 100644
--- a/src/mongo/db/views/view_catalog.cpp
+++ b/src/mongo/db/views/view_catalog.cpp
@@ -87,7 +87,7 @@ Status ViewCatalog::reload(OperationContext* opCtx, ViewCatalogLookupBehavior lo
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return _reload(lk, opCtx, ViewCatalogLookupBehavior::kValidateDurableViews);
}
@@ -147,7 +147,7 @@ Status ViewCatalog::_reload(WithLock,
}
void ViewCatalog::clear() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_viewMap.clear();
_viewGraph.clear();
@@ -172,7 +172,7 @@ void ViewCatalog::iterate(OperationContext* opCtx, ViewIteratorCallback callback
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_requireValidCatalog(lk);
for (auto&& view : _viewMap) {
callback(*view.second);
@@ -389,7 +389,7 @@ Status ViewCatalog::createView(OperationContext* opCtx,
invariant(opCtx->lockState()->isCollectionLockedForMode(
NamespaceString(viewName.db(), NamespaceString::kSystemDotViewsCollectionName), MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (viewName.db() != viewOn.db())
return Status(ErrorCodes::BadValue,
@@ -422,7 +422,7 @@ Status ViewCatalog::modifyView(OperationContext* opCtx,
const BSONArray& pipeline) {
invariant(opCtx->lockState()->isDbLockedForMode(viewName.db(), MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (viewName.db() != viewOn.db())
return Status(ErrorCodes::BadValue,
@@ -461,7 +461,7 @@ Status ViewCatalog::dropView(OperationContext* opCtx, const NamespaceString& vie
invariant(opCtx->lockState()->isCollectionLockedForMode(
NamespaceString(viewName.db(), NamespaceString::kSystemDotViewsCollectionName), MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_requireValidCatalog(lk);
ON_BLOCK_EXIT([this] { _ignoreExternalChange = false; });
@@ -515,7 +515,7 @@ std::shared_ptr<ViewDefinition> ViewCatalog::lookup(OperationContext* opCtx, Str
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_valid && opCtx->getClient()->isFromUserConnection()) {
// We want to avoid lookups on invalid collection names.
if (!NamespaceString::validCollectionName(ns)) {
@@ -537,7 +537,7 @@ std::shared_ptr<ViewDefinition> ViewCatalog::lookupWithoutValidatingDurableViews
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lookup(lk, opCtx, ns, ViewCatalogLookupBehavior::kAllowInvalidDurableViews);
}
@@ -547,7 +547,7 @@ StatusWith<ResolvedView> ViewCatalog::resolveView(OperationContext* opCtx,
opCtx,
NamespaceString(_durable->getName(), NamespaceString::kSystemDotViewsCollectionName),
MODE_IS);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_requireValidCatalog(lock);
diff --git a/src/mongo/db/views/view_catalog.h b/src/mongo/db/views/view_catalog.h
index a8fd9df9e3e..44103a0ed24 100644
--- a/src/mongo/db/views/view_catalog.h
+++ b/src/mongo/db/views/view_catalog.h
@@ -44,7 +44,7 @@
#include "mongo/db/views/resolved_view.h"
#include "mongo/db/views/view.h"
#include "mongo/db/views/view_graph.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/string_map.h"
@@ -199,7 +199,7 @@ private:
*/
void _requireValidCatalog(WithLock);
- stdx::mutex _mutex; // Protects all members.
+ Mutex _mutex = MONGO_MAKE_LATCH("ViewCatalog::_mutex"); // Protects all members.
ViewMap _viewMap;
ViewMap _viewMapBackup;
std::unique_ptr<DurableViewCatalog> _durable;
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index daba3a54f84..8963c988bfd 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -199,12 +199,12 @@ class PendingValue {
public:
PendingValue(int initialValue) : _value(initialValue) {}
void set(int newValue) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_value = newValue;
_condition.notify_all();
}
void await(int expectedValue) const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_value != expectedValue) {
_condition.wait(lk);
}
@@ -212,7 +212,7 @@ public:
private:
int _value;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("PendingValue::_mutex");
mutable stdx::condition_variable _condition;
};
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index c0f09bbf48b..d06d92ae4aa 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -51,9 +51,9 @@
#include "mongo/db/storage/storage_engine_init.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/dbtests/framework_options.h"
+#include "mongo/platform/mutex.h"
#include "mongo/scripting/dbdirectclient_factory.h"
#include "mongo/scripting/engine.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/exit.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/dbtests/mock/mock_conn_registry.cpp b/src/mongo/dbtests/mock/mock_conn_registry.cpp
index 9faf584bb10..1c9e52b018f 100644
--- a/src/mongo/dbtests/mock/mock_conn_registry.cpp
+++ b/src/mongo/dbtests/mock/mock_conn_registry.cpp
@@ -60,7 +60,7 @@ ConnectionString::ConnectionHook* MockConnRegistry::getConnStrHook() {
}
void MockConnRegistry::addServer(MockRemoteDBServer* server) {
- stdx::lock_guard<stdx::mutex> sl(_registryMutex);
+ stdx::lock_guard<Latch> sl(_registryMutex);
const std::string hostName(server->getServerAddress());
fassert(16533, _registry.count(hostName) == 0);
@@ -69,17 +69,17 @@ void MockConnRegistry::addServer(MockRemoteDBServer* server) {
}
bool MockConnRegistry::removeServer(const std::string& hostName) {
- stdx::lock_guard<stdx::mutex> sl(_registryMutex);
+ stdx::lock_guard<Latch> sl(_registryMutex);
return _registry.erase(hostName) == 1;
}
void MockConnRegistry::clear() {
- stdx::lock_guard<stdx::mutex> sl(_registryMutex);
+ stdx::lock_guard<Latch> sl(_registryMutex);
_registry.clear();
}
std::unique_ptr<MockDBClientConnection> MockConnRegistry::connect(const std::string& connStr) {
- stdx::lock_guard<stdx::mutex> sl(_registryMutex);
+ stdx::lock_guard<Latch> sl(_registryMutex);
fassert(16534, _registry.count(connStr) == 1);
return std::make_unique<MockDBClientConnection>(_registry[connStr], true);
}
diff --git a/src/mongo/dbtests/mock/mock_conn_registry.h b/src/mongo/dbtests/mock/mock_conn_registry.h
index 5796a0b7f73..feb8eb86517 100644
--- a/src/mongo/dbtests/mock/mock_conn_registry.h
+++ b/src/mongo/dbtests/mock/mock_conn_registry.h
@@ -114,7 +114,7 @@ private:
MockConnHook _mockConnStrHook;
// protects _registry
- stdx::mutex _registryMutex;
+ Mutex _registryMutex = MONGO_MAKE_LATCH("MockConnRegistry::_registryMutex");
stdx::unordered_map<std::string, MockRemoteDBServer*> _registry;
};
} // namespace mongo
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 92d741bbb92..95ff64d1e8b 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -239,7 +239,7 @@ private:
Hotel(int nRooms) : _nRooms(nRooms), _checkedIn(0), _maxRooms(0) {}
void checkIn() {
- stdx::lock_guard<stdx::mutex> lk(_frontDesk);
+ stdx::lock_guard<Latch> lk(_frontDesk);
_checkedIn++;
verify(_checkedIn <= _nRooms);
if (_checkedIn > _maxRooms)
@@ -247,12 +247,12 @@ private:
}
void checkOut() {
- stdx::lock_guard<stdx::mutex> lk(_frontDesk);
+ stdx::lock_guard<Latch> lk(_frontDesk);
_checkedIn--;
verify(_checkedIn >= 0);
}
- stdx::mutex _frontDesk;
+ Mutex _frontDesk = MONGO_MAKE_LATCH("Hotel::_frontDesk");
int _nRooms;
int _checkedIn;
int _maxRooms;
diff --git a/src/mongo/embedded/index_builds_coordinator_embedded.cpp b/src/mongo/embedded/index_builds_coordinator_embedded.cpp
index 67be68dfcaf..45d124fefa5 100644
--- a/src/mongo/embedded/index_builds_coordinator_embedded.cpp
+++ b/src/mongo/embedded/index_builds_coordinator_embedded.cpp
@@ -69,7 +69,7 @@ IndexBuildsCoordinatorEmbedded::startIndexBuild(OperationContext* opCtx,
}
auto replState = [&]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto it = _allIndexBuilds.find(buildUUID);
invariant(it != _allIndexBuilds.end());
return it->second;
diff --git a/src/mongo/embedded/periodic_runner_embedded.cpp b/src/mongo/embedded/periodic_runner_embedded.cpp
index 41ae8a49e93..68c94fa541d 100644
--- a/src/mongo/embedded/periodic_runner_embedded.cpp
+++ b/src/mongo/embedded/periodic_runner_embedded.cpp
@@ -52,14 +52,14 @@ PeriodicRunnerEmbedded::PeriodicRunnerEmbedded(ServiceContext* svc, ClockSource*
auto PeriodicRunnerEmbedded::makeJob(PeriodicJob job) -> JobAnchor {
auto impl = std::make_shared<PeriodicJobImpl>(std::move(job), this->_clockSource, this);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_jobs.push_back(impl);
std::push_heap(_jobs.begin(), _jobs.end(), PeriodicJobSorter());
return JobAnchor(impl);
}
bool PeriodicRunnerEmbedded::tryPump() {
- stdx::unique_lock<stdx::mutex> lock(_mutex, stdx::try_to_lock);
+ stdx::unique_lock<Latch> lock(_mutex, stdx::try_to_lock);
if (!lock.owns_lock())
return false;
@@ -71,7 +71,7 @@ bool PeriodicRunnerEmbedded::tryPump() {
PeriodicJobImpl::ExecutionStatus jobExecStatus;
{
- stdx::lock_guard<stdx::mutex> jobLock(job._mutex);
+ stdx::lock_guard<Latch> jobLock(job._mutex);
jobExecStatus = job._execStatus;
}
@@ -104,7 +104,7 @@ bool PeriodicRunnerEmbedded::tryPump() {
// only variable that can be changed from other threads.
PeriodicJobImpl::ExecutionStatus jobExecStatus;
{
- stdx::lock_guard<stdx::mutex> jobLock(job._mutex);
+ stdx::lock_guard<Latch> jobLock(job._mutex);
jobExecStatus = job._execStatus;
}
@@ -142,19 +142,19 @@ PeriodicRunnerEmbedded::PeriodicJobImpl::PeriodicJobImpl(PeriodicJob job,
: _job(std::move(job)), _clockSource(source), _periodicRunner(runner) {}
void PeriodicRunnerEmbedded::PeriodicJobImpl::start() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_execStatus == PeriodicJobImpl::ExecutionStatus::kNotScheduled);
_execStatus = PeriodicJobImpl::ExecutionStatus::kRunning;
}
void PeriodicRunnerEmbedded::PeriodicJobImpl::pause() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_execStatus == PeriodicJobImpl::ExecutionStatus::kRunning);
_execStatus = PeriodicJobImpl::ExecutionStatus::kPaused;
}
void PeriodicRunnerEmbedded::PeriodicJobImpl::resume() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_execStatus == PeriodicJobImpl::ExecutionStatus::kPaused);
_execStatus = PeriodicJobImpl::ExecutionStatus::kRunning;
}
@@ -162,21 +162,21 @@ void PeriodicRunnerEmbedded::PeriodicJobImpl::resume() {
void PeriodicRunnerEmbedded::PeriodicJobImpl::stop() {
// Also take the master lock; the job lock is not held while executing the job and we must make
// sure the user can invalidate it after this call.
- stdx::lock_guard<stdx::mutex> masterLock(_periodicRunner->_mutex);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> masterLock(_periodicRunner->_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (isAlive(lk)) {
_stopWithMasterAndJobLock(masterLock, lk);
}
}
Milliseconds PeriodicRunnerEmbedded::PeriodicJobImpl::getPeriod() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _job.interval;
}
void PeriodicRunnerEmbedded::PeriodicJobImpl::setPeriod(Milliseconds ms) {
- stdx::lock_guard<stdx::mutex> masterLk(_periodicRunner->_mutex);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> masterLk(_periodicRunner->_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_job.interval = ms;
diff --git a/src/mongo/embedded/periodic_runner_embedded.h b/src/mongo/embedded/periodic_runner_embedded.h
index 6d82c50db44..a8549fb0bba 100644
--- a/src/mongo/embedded/periodic_runner_embedded.h
+++ b/src/mongo/embedded/periodic_runner_embedded.h
@@ -33,7 +33,7 @@
#include <vector>
#include "mongo/db/service_context_fwd.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/periodic_runner.h"
@@ -88,7 +88,7 @@ private:
// The mutex protects _execStatus, the variable that can be accessed from other
// threads.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicJobImpl::_mutex");
// The current execution status of the job.
ExecutionStatus _execStatus{ExecutionStatus::kNotScheduled};
@@ -102,7 +102,7 @@ private:
std::vector<std::shared_ptr<PeriodicJobImpl>> _jobs;
std::vector<std::shared_ptr<PeriodicJobImpl>> _Pausedjobs;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicRunnerEmbedded::_mutex");
};
} // namespace mongo
diff --git a/src/mongo/executor/async_multicaster.cpp b/src/mongo/executor/async_multicaster.cpp
index 24e72527d51..3962f8f551d 100644
--- a/src/mongo/executor/async_multicaster.cpp
+++ b/src/mongo/executor/async_multicaster.cpp
@@ -37,8 +37,8 @@
#include "mongo/base/status.h"
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
namespace mongo {
@@ -60,7 +60,7 @@ std::vector<AsyncMulticaster::Reply> AsyncMulticaster::multicast(
struct State {
State(size_t leftToDo) : leftToDo(leftToDo) {}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("State::mutex");
stdx::condition_variable cv;
size_t leftToDo;
size_t running = 0;
@@ -71,7 +71,7 @@ std::vector<AsyncMulticaster::Reply> AsyncMulticaster::multicast(
auto state = std::make_shared<State>(servers.size());
for (const auto& server : servers) {
- stdx::unique_lock<stdx::mutex> lk(state->mutex);
+ stdx::unique_lock<Latch> lk(state->mutex);
// spin up no more than maxConcurrency tasks at once
opCtx->waitForConditionOrInterrupt(
state->cv, lk, [&] { return state->running < _options.maxConcurrency; });
@@ -80,7 +80,7 @@ std::vector<AsyncMulticaster::Reply> AsyncMulticaster::multicast(
uassertStatusOK(_executor->scheduleRemoteCommand(
RemoteCommandRequest{server, theDbName, theCmdObj, opCtx, timeoutMillis},
[state](const TaskExecutor::RemoteCommandCallbackArgs& cbData) {
- stdx::lock_guard<stdx::mutex> lk(state->mutex);
+ stdx::lock_guard<Latch> lk(state->mutex);
state->out.emplace_back(
std::forward_as_tuple(cbData.request.target, cbData.response));
@@ -96,7 +96,7 @@ std::vector<AsyncMulticaster::Reply> AsyncMulticaster::multicast(
}));
}
- stdx::unique_lock<stdx::mutex> lk(state->mutex);
+ stdx::unique_lock<Latch> lk(state->mutex);
opCtx->waitForConditionOrInterrupt(state->cv, lk, [&] { return state->leftToDo == 0; });
return std::move(state->out);
diff --git a/src/mongo/executor/async_multicaster.h b/src/mongo/executor/async_multicaster.h
index c2bc9e0be93..63eaaa4993d 100644
--- a/src/mongo/executor/async_multicaster.h
+++ b/src/mongo/executor/async_multicaster.h
@@ -34,7 +34,7 @@
#include "mongo/executor/remote_command_response.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
diff --git a/src/mongo/executor/async_timer_mock.cpp b/src/mongo/executor/async_timer_mock.cpp
index 6a796b684e7..9153cba5f0b 100644
--- a/src/mongo/executor/async_timer_mock.cpp
+++ b/src/mongo/executor/async_timer_mock.cpp
@@ -48,7 +48,7 @@ void AsyncTimerMockImpl::cancel() {
void AsyncTimerMockImpl::asyncWait(AsyncTimerInterface::Handler handler) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_timeLeft != kZeroMilliseconds) {
_handlers.push_back(handler);
return;
@@ -66,7 +66,7 @@ void AsyncTimerMockImpl::fastForward(Milliseconds time) {
// While holding the lock, change the time and remove
// handlers that have expired
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (time >= _timeLeft) {
_timeLeft = kZeroMilliseconds;
tmp.swap(_handlers);
@@ -82,7 +82,7 @@ void AsyncTimerMockImpl::fastForward(Milliseconds time) {
}
Milliseconds AsyncTimerMockImpl::timeLeft() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _timeLeft;
}
@@ -91,7 +91,7 @@ void AsyncTimerMockImpl::expireAfter(Milliseconds expiration) {
// While holding the lock, reset the time and remove all handlers
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_timeLeft = expiration;
tmp.swap(_handlers);
}
@@ -103,14 +103,14 @@ void AsyncTimerMockImpl::expireAfter(Milliseconds expiration) {
}
int AsyncTimerMockImpl::jobs() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _handlers.size();
}
void AsyncTimerMockImpl::_callAllHandlers(std::error_code ec) {
std::vector<AsyncTimerInterface::Handler> tmp;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
tmp.swap(_handlers);
}
diff --git a/src/mongo/executor/async_timer_mock.h b/src/mongo/executor/async_timer_mock.h
index 13463b679d7..5e3a83e3275 100644
--- a/src/mongo/executor/async_timer_mock.h
+++ b/src/mongo/executor/async_timer_mock.h
@@ -32,7 +32,7 @@
#include <vector>
#include "mongo/executor/async_timer_interface.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_set.h"
namespace mongo {
@@ -84,7 +84,7 @@ public:
private:
void _callAllHandlers(std::error_code ec);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AsyncTimerMockImpl::_mutex");
Milliseconds _timeLeft;
std::vector<AsyncTimerInterface::Handler> _handlers;
};
diff --git a/src/mongo/executor/connection_pool.cpp b/src/mongo/executor/connection_pool.cpp
index 77a83ec4ff4..a0a4c9f0a7c 100644
--- a/src/mongo/executor/connection_pool.cpp
+++ b/src/mongo/executor/connection_pool.cpp
@@ -189,7 +189,7 @@ protected:
size_t target = 0;
};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("LimitController::_mutex");
stdx::unordered_map<PoolId, PoolData> _poolData;
};
diff --git a/src/mongo/executor/connection_pool.h b/src/mongo/executor/connection_pool.h
index 196e99014b7..f46c3c23dfa 100644
--- a/src/mongo/executor/connection_pool.h
+++ b/src/mongo/executor/connection_pool.h
@@ -35,7 +35,7 @@
#include "mongo/executor/egress_tag_closer.h"
#include "mongo/executor/egress_tag_closer_manager.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/transport/session.h"
#include "mongo/transport/transport_layer.h"
@@ -255,7 +255,7 @@ private:
std::shared_ptr<ControllerInterface> _controller;
// The global mutex for specific pool access and the generation counter
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ConnectionPool::_mutex");
PoolId _nextPoolId = 0;
stdx::unordered_map<HostAndPort, std::shared_ptr<SpecificPool>> _pools;
diff --git a/src/mongo/executor/connection_pool_tl.cpp b/src/mongo/executor/connection_pool_tl.cpp
index e2f7711cca7..c3816eab43c 100644
--- a/src/mongo/executor/connection_pool_tl.cpp
+++ b/src/mongo/executor/connection_pool_tl.cpp
@@ -56,7 +56,7 @@ void TLTypeFactory::shutdown() {
// Stop any attempt to schedule timers in the future
_inShutdown.store(true);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
log() << "Killing all outstanding egress activity.";
for (auto collar : _collars) {
@@ -65,12 +65,12 @@ void TLTypeFactory::shutdown() {
}
void TLTypeFactory::fasten(Type* type) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_collars.insert(type);
}
void TLTypeFactory::release(Type* type) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_collars.erase(type);
type->_wasReleased = true;
diff --git a/src/mongo/executor/connection_pool_tl.h b/src/mongo/executor/connection_pool_tl.h
index 7297713b92b..f5bf54ff081 100644
--- a/src/mongo/executor/connection_pool_tl.h
+++ b/src/mongo/executor/connection_pool_tl.h
@@ -79,7 +79,7 @@ private:
std::unique_ptr<NetworkConnectionHook> _onConnectHook;
const ConnectionPool::Options _connPoolOptions;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("TLTypeFactory::_mutex");
AtomicWord<bool> _inShutdown{false};
stdx::unordered_set<Type*> _collars;
};
diff --git a/src/mongo/executor/egress_tag_closer_manager.cpp b/src/mongo/executor/egress_tag_closer_manager.cpp
index fa78a74e183..64c0f0493b8 100644
--- a/src/mongo/executor/egress_tag_closer_manager.cpp
+++ b/src/mongo/executor/egress_tag_closer_manager.cpp
@@ -48,19 +48,19 @@ EgressTagCloserManager& EgressTagCloserManager::get(ServiceContext* svc) {
}
void EgressTagCloserManager::add(EgressTagCloser* etc) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_egressTagClosers.insert(etc);
}
void EgressTagCloserManager::remove(EgressTagCloser* etc) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_egressTagClosers.erase(etc);
}
void EgressTagCloserManager::dropConnections(transport::Session::TagMask tags) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto etc : _egressTagClosers) {
etc->dropConnections(tags);
@@ -68,7 +68,7 @@ void EgressTagCloserManager::dropConnections(transport::Session::TagMask tags) {
}
void EgressTagCloserManager::dropConnections(const HostAndPort& hostAndPort) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto etc : _egressTagClosers) {
etc->dropConnections(hostAndPort);
@@ -78,7 +78,7 @@ void EgressTagCloserManager::dropConnections(const HostAndPort& hostAndPort) {
void EgressTagCloserManager::mutateTags(
const HostAndPort& hostAndPort,
const std::function<transport::Session::TagMask(transport::Session::TagMask)>& mutateFunc) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto etc : _egressTagClosers) {
etc->mutateTags(hostAndPort, mutateFunc);
diff --git a/src/mongo/executor/egress_tag_closer_manager.h b/src/mongo/executor/egress_tag_closer_manager.h
index 418658dc430..91d996ee3dc 100644
--- a/src/mongo/executor/egress_tag_closer_manager.h
+++ b/src/mongo/executor/egress_tag_closer_manager.h
@@ -33,7 +33,7 @@
#include "mongo/db/service_context.h"
#include "mongo/executor/egress_tag_closer.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/transport/session.h"
#include "mongo/util/net/hostandport.h"
@@ -65,7 +65,7 @@ public:
const std::function<transport::Session::TagMask(transport::Session::TagMask)>& mutateFunc);
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("EgressTagCloserManager::_mutex");
stdx::unordered_set<EgressTagCloser*> _egressTagClosers;
};
diff --git a/src/mongo/executor/network_interface_integration_test.cpp b/src/mongo/executor/network_interface_integration_test.cpp
index f4f05bbc538..03cf7d2a1e9 100644
--- a/src/mongo/executor/network_interface_integration_test.cpp
+++ b/src/mongo/executor/network_interface_integration_test.cpp
@@ -167,14 +167,14 @@ public:
RemoteCommandResponse response;
};
IsMasterData waitForIsMaster() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_isMasterCond.wait(lk, [this] { return _isMasterResult != boost::none; });
return std::move(*_isMasterResult);
}
bool hasIsMaster() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isMasterResult != boost::none;
}
@@ -186,7 +186,7 @@ private:
Status validateHost(const HostAndPort& host,
const BSONObj& request,
const RemoteCommandResponse& isMasterReply) override {
- stdx::lock_guard<stdx::mutex> lk(_parent->_mutex);
+ stdx::lock_guard<Latch> lk(_parent->_mutex);
_parent->_isMasterResult = IsMasterData{request, isMasterReply};
_parent->_isMasterCond.notify_all();
return Status::OK();
@@ -204,7 +204,7 @@ private:
NetworkInterfaceTest* _parent;
};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("NetworkInterfaceTest::_mutex");
stdx::condition_variable _isMasterCond;
boost::optional<IsMasterData> _isMasterResult;
};
diff --git a/src/mongo/executor/network_interface_mock.cpp b/src/mongo/executor/network_interface_mock.cpp
index 86e1144b81e..478d72c4b39 100644
--- a/src/mongo/executor/network_interface_mock.cpp
+++ b/src/mongo/executor/network_interface_mock.cpp
@@ -58,14 +58,14 @@ NetworkInterfaceMock::NetworkInterfaceMock()
_executorNextWakeupDate(Date_t::max()) {}
NetworkInterfaceMock::~NetworkInterfaceMock() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(!_hasStarted || inShutdown());
invariant(_scheduled.empty());
invariant(_blackHoled.empty());
}
void NetworkInterfaceMock::logQueues() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
const std::vector<std::pair<std::string, const NetworkOperationList*>> queues{
{"unscheduled", &_unscheduled},
{"scheduled", &_scheduled},
@@ -85,7 +85,7 @@ void NetworkInterfaceMock::logQueues() {
}
std::string NetworkInterfaceMock::getDiagnosticString() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
return str::stream() << "NetworkInterfaceMock -- waitingToRunMask:" << _waitingToRunMask
<< ", now:" << _now_inlock().toString() << ", hasStarted:" << _hasStarted
<< ", inShutdown: " << _inShutdown.load()
@@ -96,7 +96,7 @@ std::string NetworkInterfaceMock::getDiagnosticString() {
}
Date_t NetworkInterfaceMock::now() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _now_inlock();
}
@@ -112,7 +112,7 @@ Status NetworkInterfaceMock::startCommand(const CallbackHandle& cbHandle,
return {ErrorCodes::ShutdownInProgress, "NetworkInterfaceMock shutdown in progress"};
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const Date_t now = _now_inlock();
auto op = NetworkOperation(cbHandle, request, now, std::move(onFinish));
@@ -132,7 +132,7 @@ Status NetworkInterfaceMock::startCommand(const CallbackHandle& cbHandle,
void NetworkInterfaceMock::setHandshakeReplyForHost(
const mongo::HostAndPort& host, mongo::executor::RemoteCommandResponse&& reply) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto it = _handshakeReplies.find(host);
if (it == std::end(_handshakeReplies)) {
auto res = _handshakeReplies.emplace(host, std::move(reply));
@@ -145,7 +145,7 @@ void NetworkInterfaceMock::setHandshakeReplyForHost(
void NetworkInterfaceMock::cancelCommand(const CallbackHandle& cbHandle, const BatonHandle& baton) {
invariant(!inShutdown());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ResponseStatus rs(ErrorCodes::CallbackCanceled, "Network operation canceled", Milliseconds(0));
// We mimic the real NetworkInterface by only delivering the CallbackCanceled status if the
@@ -179,7 +179,7 @@ Status NetworkInterfaceMock::setAlarm(const TaskExecutor::CallbackHandle& cbHand
return {ErrorCodes::ShutdownInProgress, "NetworkInterfaceMock shutdown in progress"};
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (when <= _now_inlock()) {
lk.unlock();
@@ -210,7 +210,7 @@ bool NetworkInterfaceMock::onNetworkThread() {
}
void NetworkInterfaceMock::startup() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_startup_inlock();
}
@@ -225,7 +225,7 @@ void NetworkInterfaceMock::_startup_inlock() {
void NetworkInterfaceMock::shutdown() {
invariant(!inShutdown());
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_hasStarted) {
_startup_inlock();
}
@@ -258,7 +258,7 @@ bool NetworkInterfaceMock::inShutdown() const {
}
void NetworkInterfaceMock::enterNetwork() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (!_isNetworkThreadRunnable_inlock()) {
_shouldWakeNetworkCondition.wait(lk);
}
@@ -267,7 +267,7 @@ void NetworkInterfaceMock::enterNetwork() {
}
void NetworkInterfaceMock::exitNetwork() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_currentlyRunning != kNetworkThread) {
return;
}
@@ -279,7 +279,7 @@ void NetworkInterfaceMock::exitNetwork() {
}
bool NetworkInterfaceMock::hasReadyRequests() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
return _hasReadyRequests_inlock();
}
@@ -294,7 +294,7 @@ bool NetworkInterfaceMock::_hasReadyRequests_inlock() {
}
NetworkInterfaceMock::NetworkOperationIterator NetworkInterfaceMock::getNextReadyRequest() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
while (!_hasReadyRequests_inlock()) {
_waitingToRunMask |= kExecutorThread;
@@ -311,7 +311,7 @@ NetworkInterfaceMock::NetworkOperationIterator NetworkInterfaceMock::getFrontOfU
NetworkInterfaceMock::NetworkOperationIterator NetworkInterfaceMock::getNthUnscheduledRequest(
size_t n) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
invariant(_hasReadyRequests_inlock());
@@ -325,7 +325,7 @@ NetworkInterfaceMock::NetworkOperationIterator NetworkInterfaceMock::getNthUnsch
void NetworkInterfaceMock::scheduleResponse(NetworkOperationIterator noi,
Date_t when,
const ResponseStatus& response) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
NetworkOperationIterator insertBefore = _scheduled.begin();
while ((insertBefore != _scheduled.end()) && (insertBefore->getResponseDate() <= when)) {
@@ -388,13 +388,13 @@ RemoteCommandRequest NetworkInterfaceMock::scheduleErrorResponse(NetworkOperatio
}
void NetworkInterfaceMock::blackHole(NetworkOperationIterator noi) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
_blackHoled.splice(_blackHoled.end(), _processing, noi);
}
void NetworkInterfaceMock::requeueAt(NetworkOperationIterator noi, Date_t dontAskUntil) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
invariant(noi->getNextConsiderationDate() < dontAskUntil);
invariant(_now_inlock() < dontAskUntil);
@@ -409,7 +409,7 @@ void NetworkInterfaceMock::requeueAt(NetworkOperationIterator noi, Date_t dontAs
}
Date_t NetworkInterfaceMock::runUntil(Date_t until) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
invariant(until > _now_inlock());
while (until > _now_inlock()) {
@@ -436,7 +436,7 @@ Date_t NetworkInterfaceMock::runUntil(Date_t until) {
}
void NetworkInterfaceMock::advanceTime(Date_t newTime) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
invariant(newTime > _now_inlock());
_now = newTime;
@@ -446,19 +446,19 @@ void NetworkInterfaceMock::advanceTime(Date_t newTime) {
}
void NetworkInterfaceMock::runReadyNetworkOperations() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_currentlyRunning == kNetworkThread);
_runReadyNetworkOperations_inlock(&lk);
}
void NetworkInterfaceMock::waitForWork() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_currentlyRunning == kExecutorThread);
_waitForWork_inlock(&lk);
}
void NetworkInterfaceMock::waitForWorkUntil(Date_t when) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_currentlyRunning == kExecutorThread);
_executorNextWakeupDate = when;
if (_executorNextWakeupDate <= _now_inlock()) {
@@ -538,7 +538,7 @@ void NetworkInterfaceMock::_connectThenEnqueueOperation_inlock(const HostAndPort
// The completion handler for the postconnect command schedules the original command.
auto postconnectCompletionHandler =
[this, op = std::move(op)](TaskExecutor::ResponseOnAnyStatus rs) mutable {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!rs.isOK()) {
op.setResponse(_now_inlock(), rs);
op.finishResponse();
@@ -566,7 +566,7 @@ void NetworkInterfaceMock::_connectThenEnqueueOperation_inlock(const HostAndPort
}
void NetworkInterfaceMock::setConnectionHook(std::unique_ptr<NetworkConnectionHook> hook) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_hasStarted);
invariant(!_hook);
_hook = std::move(hook);
@@ -574,21 +574,21 @@ void NetworkInterfaceMock::setConnectionHook(std::unique_ptr<NetworkConnectionHo
void NetworkInterfaceMock::setEgressMetadataHook(
std::unique_ptr<rpc::EgressMetadataHook> metadataHook) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_hasStarted);
invariant(!_metadataHook);
_metadataHook = std::move(metadataHook);
}
void NetworkInterfaceMock::signalWorkAvailable() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_waitingToRunMask |= kExecutorThread;
if (_currentlyRunning == kNoThread) {
_shouldWakeExecutorCondition.notify_one();
}
}
-void NetworkInterfaceMock::_runReadyNetworkOperations_inlock(stdx::unique_lock<stdx::mutex>* lk) {
+void NetworkInterfaceMock::_runReadyNetworkOperations_inlock(stdx::unique_lock<Latch>* lk) {
while (!_alarms.empty() && _now_inlock() >= _alarms.top().when) {
auto& alarm = _alarms.top();
@@ -626,7 +626,7 @@ void NetworkInterfaceMock::_runReadyNetworkOperations_inlock(stdx::unique_lock<s
_waitingToRunMask &= ~kNetworkThread;
}
-void NetworkInterfaceMock::_waitForWork_inlock(stdx::unique_lock<stdx::mutex>* lk) {
+void NetworkInterfaceMock::_waitForWork_inlock(stdx::unique_lock<Latch>* lk) {
if (_waitingToRunMask & kExecutorThread) {
_waitingToRunMask &= ~kExecutorThread;
return;
diff --git a/src/mongo/executor/network_interface_mock.h b/src/mongo/executor/network_interface_mock.h
index 8fe6cdb3414..8a5d69b5ad9 100644
--- a/src/mongo/executor/network_interface_mock.h
+++ b/src/mongo/executor/network_interface_mock.h
@@ -36,9 +36,9 @@
#include <vector>
#include "mongo/executor/network_interface.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/metadata/metadata_hook.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/stdx/unordered_set.h"
#include "mongo/util/clock_source.h"
@@ -324,7 +324,7 @@ private:
/**
* Implementation of waitForWork*.
*/
- void _waitForWork_inlock(stdx::unique_lock<stdx::mutex>* lk);
+ void _waitForWork_inlock(stdx::unique_lock<Latch>* lk);
/**
* Returns true if there are ready requests for the network thread to service.
@@ -356,12 +356,12 @@ private:
* reacquire "lk" several times, but will not return until the executor has blocked
* in waitFor*.
*/
- void _runReadyNetworkOperations_inlock(stdx::unique_lock<stdx::mutex>* lk);
+ void _runReadyNetworkOperations_inlock(stdx::unique_lock<Latch>* lk);
// Mutex that synchronizes access to mutable data in this class and its subclasses.
// Fields guarded by the mutex are labeled (M), below, and those that are read-only
// in multi-threaded execution, and so unsynchronized, are labeled (R).
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("NetworkInterfaceMock::_mutex");
// Condition signaled to indicate that the network processing thread should wake up.
stdx::condition_variable _shouldWakeNetworkCondition; // (M)
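Note that the mock's condition variables now wait on stdx::unique_lock<Latch> rather than stdx::unique_lock<stdx::mutex>, which assumes the condition variable type accepts any BasicLockable (the platform/condition_variable.h changes later in this patch rely on the same property). A hedged sketch of that shape, with illustrative names:

#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"

namespace mongo {

// Illustrative only: a tiny ready-flag in the style of the mock above.
class ReadySignal {
public:
    void markReady() {
        stdx::lock_guard<Latch> lk(_mutex);
        _ready = true;
        _cond.notify_all();
    }

    void waitUntilReady() {
        stdx::unique_lock<Latch> lk(_mutex);  // Latch-typed lock, as in the hunks above
        _cond.wait(lk, [this] { return _ready; });
    }

private:
    Mutex _mutex = MONGO_MAKE_LATCH("ReadySignal::_mutex");  // (M) guards _ready
    stdx::condition_variable _cond;
    bool _ready = false;  // (M)
};

}  // namespace mongo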
diff --git a/src/mongo/executor/network_interface_perf_test.cpp b/src/mongo/executor/network_interface_perf_test.cpp
index 205caa22a2a..2f625301dfe 100644
--- a/src/mongo/executor/network_interface_perf_test.cpp
+++ b/src/mongo/executor/network_interface_perf_test.cpp
@@ -66,7 +66,7 @@ int timeNetworkTestMillis(std::size_t operations, NetworkInterface* net) {
auto server = fixture.getServers()[0];
std::atomic<int> remainingOps(operations); // NOLINT
- stdx::mutex mtx;
+ auto mtx = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
Timer t;
@@ -81,7 +81,7 @@ int timeNetworkTestMillis(std::size_t operations, NetworkInterface* net) {
if (--remainingOps) {
return func();
}
- stdx::unique_lock<stdx::mutex> lk(mtx);
+ stdx::unique_lock<Latch> lk(mtx);
cv.notify_one();
};
@@ -93,7 +93,7 @@ int timeNetworkTestMillis(std::size_t operations, NetworkInterface* net) {
func();
- stdx::unique_lock<stdx::mutex> lk(mtx);
+ stdx::unique_lock<Latch> lk(mtx);
cv.wait(lk, [&] { return remainingOps.load() == 0; });
return t.millis();
diff --git a/src/mongo/executor/network_interface_thread_pool.cpp b/src/mongo/executor/network_interface_thread_pool.cpp
index 787bd0a6dac..f40a298aea1 100644
--- a/src/mongo/executor/network_interface_thread_pool.cpp
+++ b/src/mongo/executor/network_interface_thread_pool.cpp
@@ -49,7 +49,7 @@ NetworkInterfaceThreadPool::~NetworkInterfaceThreadPool() {
void NetworkInterfaceThreadPool::_dtorImpl() {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_tasks.empty())
return;
@@ -63,7 +63,7 @@ void NetworkInterfaceThreadPool::_dtorImpl() {
}
void NetworkInterfaceThreadPool::startup() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_started) {
severe() << "Attempting to start pool, but it has already started";
fassertFailed(34358);
@@ -75,7 +75,7 @@ void NetworkInterfaceThreadPool::startup() {
void NetworkInterfaceThreadPool::shutdown() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
@@ -84,7 +84,7 @@ void NetworkInterfaceThreadPool::shutdown() {
void NetworkInterfaceThreadPool::join() {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_joining) {
severe() << "Attempted to join pool more than once";
@@ -100,13 +100,13 @@ void NetworkInterfaceThreadPool::join() {
_net->signalWorkAvailable();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_joiningCondition.wait(
lk, [&] { return _tasks.empty() && (_consumeState == ConsumeState::kNeutral); });
}
void NetworkInterfaceThreadPool::schedule(Task task) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown) {
lk.unlock();
task({ErrorCodes::ShutdownInProgress, "Shutdown in progress"});
@@ -127,7 +127,7 @@ void NetworkInterfaceThreadPool::schedule(Task task) {
* allows us to use the network interface's threads as our own pool, which should reduce context
* switches if our tasks are getting scheduled by network interface tasks.
*/
-void NetworkInterfaceThreadPool::_consumeTasks(stdx::unique_lock<stdx::mutex> lk) {
+void NetworkInterfaceThreadPool::_consumeTasks(stdx::unique_lock<Latch> lk) {
if ((_consumeState != ConsumeState::kNeutral) || _tasks.empty())
return;
@@ -140,7 +140,7 @@ void NetworkInterfaceThreadPool::_consumeTasks(stdx::unique_lock<stdx::mutex> lk
_consumeState = ConsumeState::kScheduled;
lk.unlock();
auto ret = _net->schedule([this](Status status) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_consumeState != ConsumeState::kScheduled)
return;
@@ -149,7 +149,7 @@ void NetworkInterfaceThreadPool::_consumeTasks(stdx::unique_lock<stdx::mutex> lk
invariant(ret.isOK() || ErrorCodes::isShutdownError(ret.code()));
}
-void NetworkInterfaceThreadPool::_consumeTasksInline(stdx::unique_lock<stdx::mutex> lk) noexcept {
+void NetworkInterfaceThreadPool::_consumeTasksInline(stdx::unique_lock<Latch> lk) noexcept {
_consumeState = ConsumeState::kConsuming;
const auto consumingTasksGuard = makeGuard([&] { _consumeState = ConsumeState::kNeutral; });
diff --git a/src/mongo/executor/network_interface_thread_pool.h b/src/mongo/executor/network_interface_thread_pool.h
index 51771393032..946519b56f1 100644
--- a/src/mongo/executor/network_interface_thread_pool.h
+++ b/src/mongo/executor/network_interface_thread_pool.h
@@ -32,8 +32,8 @@
#include <cstdint>
#include <vector>
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/thread_pool_interface.h"
namespace mongo {
@@ -60,14 +60,14 @@ public:
void schedule(Task task) override;
private:
- void _consumeTasks(stdx::unique_lock<stdx::mutex> lk);
- void _consumeTasksInline(stdx::unique_lock<stdx::mutex> lk) noexcept;
+ void _consumeTasks(stdx::unique_lock<Latch> lk);
+ void _consumeTasksInline(stdx::unique_lock<Latch> lk) noexcept;
void _dtorImpl();
NetworkInterface* const _net;
// Protects all of the pool state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("NetworkInterfaceThreadPool::_mutex");
stdx::condition_variable _joiningCondition;
std::vector<Task> _tasks;
bool _started = false;
diff --git a/src/mongo/executor/network_interface_tl.cpp b/src/mongo/executor/network_interface_tl.cpp
index bcd1672e50e..e932130b005 100644
--- a/src/mongo/executor/network_interface_tl.cpp
+++ b/src/mongo/executor/network_interface_tl.cpp
@@ -65,7 +65,7 @@ std::string NetworkInterfaceTL::getDiagnosticString() {
void NetworkInterfaceTL::appendConnectionStats(ConnectionPoolStats* stats) const {
auto pool = [&] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _pool.get();
}();
if (pool)
@@ -74,7 +74,7 @@ void NetworkInterfaceTL::appendConnectionStats(ConnectionPoolStats* stats) const
NetworkInterface::Counters NetworkInterfaceTL::getCounters() const {
invariant(getTestCommandsEnabled());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _counters;
}
@@ -83,7 +83,7 @@ std::string NetworkInterfaceTL::getHostName() {
}
void NetworkInterfaceTL::startup() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_svcCtx) {
_tl = _svcCtx->getTransportLayer();
}
@@ -144,19 +144,19 @@ bool NetworkInterfaceTL::inShutdown() const {
}
void NetworkInterfaceTL::waitForWork() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
MONGO_IDLE_THREAD_BLOCK;
_workReadyCond.wait(lk, [this] { return _isExecutorRunnable; });
}
void NetworkInterfaceTL::waitForWorkUntil(Date_t when) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
MONGO_IDLE_THREAD_BLOCK;
_workReadyCond.wait_until(lk, when.toSystemTimePoint(), [this] { return _isExecutorRunnable; });
}
void NetworkInterfaceTL::signalWorkAvailable() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_isExecutorRunnable) {
_isExecutorRunnable = true;
_workReadyCond.notify_one();
@@ -401,7 +401,7 @@ void NetworkInterfaceTL::_onAcquireConn(std::shared_ptr<CommandState> state,
}
if (getTestCommandsEnabled()) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_counters.timedOut++;
}
@@ -449,7 +449,7 @@ void NetworkInterfaceTL::_onAcquireConn(std::shared_ptr<CommandState> state,
}
if (getTestCommandsEnabled()) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (swr.isOK() && swr.getValue().status.isOK()) {
_counters.succeeded++;
} else {
@@ -467,7 +467,7 @@ void NetworkInterfaceTL::_onAcquireConn(std::shared_ptr<CommandState> state,
void NetworkInterfaceTL::cancelCommand(const TaskExecutor::CallbackHandle& cbHandle,
const BatonHandle& baton) {
- stdx::unique_lock<stdx::mutex> lk(_inProgressMutex);
+ stdx::unique_lock<Latch> lk(_inProgressMutex);
auto it = _inProgress.find(cbHandle);
if (it == _inProgress.end()) {
return;
@@ -485,7 +485,7 @@ void NetworkInterfaceTL::cancelCommand(const TaskExecutor::CallbackHandle& cbHan
}
if (getTestCommandsEnabled()) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_counters.canceled++;
}
@@ -528,7 +528,7 @@ Status NetworkInterfaceTL::setAlarm(const TaskExecutor::CallbackHandle& cbHandle
std::make_shared<AlarmState>(when, cbHandle, _reactor->makeTimer(), std::move(pf.promise));
{
- stdx::lock_guard<stdx::mutex> lk(_inProgressMutex);
+ stdx::lock_guard<Latch> lk(_inProgressMutex);
// If a user has already scheduled an alarm with a handle, make sure they intentionally
// override it by canceling and setting a new one.
@@ -546,7 +546,7 @@ Status NetworkInterfaceTL::setAlarm(const TaskExecutor::CallbackHandle& cbHandle
}
void NetworkInterfaceTL::cancelAlarm(const TaskExecutor::CallbackHandle& cbHandle) {
- stdx::unique_lock<stdx::mutex> lk(_inProgressMutex);
+ stdx::unique_lock<Latch> lk(_inProgressMutex);
auto iter = _inProgressAlarms.find(cbHandle);
@@ -566,7 +566,7 @@ void NetworkInterfaceTL::cancelAlarm(const TaskExecutor::CallbackHandle& cbHandl
void NetworkInterfaceTL::_cancelAllAlarms() {
auto alarms = [&] {
- stdx::unique_lock<stdx::mutex> lk(_inProgressMutex);
+ stdx::unique_lock<Latch> lk(_inProgressMutex);
return std::exchange(_inProgressAlarms, {});
}();
@@ -599,7 +599,7 @@ void NetworkInterfaceTL::_answerAlarm(Status status, std::shared_ptr<AlarmState>
// Erase the AlarmState from the map.
{
- stdx::lock_guard<stdx::mutex> lk(_inProgressMutex);
+ stdx::lock_guard<Latch> lk(_inProgressMutex);
auto iter = _inProgressAlarms.find(state->cbHandle);
if (iter == _inProgressAlarms.end()) {
diff --git a/src/mongo/executor/network_interface_tl.h b/src/mongo/executor/network_interface_tl.h
index 15fdf391876..ee27fdd410f 100644
--- a/src/mongo/executor/network_interface_tl.h
+++ b/src/mongo/executor/network_interface_tl.h
@@ -147,7 +147,7 @@ private:
std::unique_ptr<transport::TransportLayer> _ownedTransportLayer;
transport::ReactorHandle _reactor;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("NetworkInterfaceTL::_mutex");
ConnectionPool::Options _connPoolOpts;
std::unique_ptr<NetworkConnectionHook> _onConnectHook;
std::shared_ptr<ConnectionPool> _pool;
@@ -165,7 +165,7 @@ private:
AtomicWord<State> _state;
stdx::thread _ioThread;
- stdx::mutex _inProgressMutex;
+ Mutex _inProgressMutex = MONGO_MAKE_LATCH("NetworkInterfaceTL::_inProgressMutex");
stdx::unordered_map<TaskExecutor::CallbackHandle, std::weak_ptr<CommandState>> _inProgress;
stdx::unordered_map<TaskExecutor::CallbackHandle, std::shared_ptr<AlarmState>>
_inProgressAlarms;
diff --git a/src/mongo/executor/scoped_task_executor.cpp b/src/mongo/executor/scoped_task_executor.cpp
index e7e2f1bd5ae..057ff4aa971 100644
--- a/src/mongo/executor/scoped_task_executor.cpp
+++ b/src/mongo/executor/scoped_task_executor.cpp
@@ -226,7 +226,7 @@ private:
[id, work = std::forward<Work>(work), self = shared_from_this()](const auto& cargs) {
using ArgsT = std::decay_t<decltype(cargs)>;
- stdx::unique_lock<stdx::mutex> lk(self->_mutex);
+ stdx::unique_lock<Latch> lk(self->_mutex);
auto doWorkAndNotify = [&](const ArgsT& x) noexcept {
lk.unlock();
@@ -301,7 +301,7 @@ private:
}
}
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ScopedTaskExecutor::_mutex");
bool _inShutdown = false;
std::shared_ptr<TaskExecutor> _executor;
size_t _id = 0;
diff --git a/src/mongo/executor/scoped_task_executor.h b/src/mongo/executor/scoped_task_executor.h
index dc166606115..bcdd49e4151 100644
--- a/src/mongo/executor/scoped_task_executor.h
+++ b/src/mongo/executor/scoped_task_executor.h
@@ -34,8 +34,8 @@
#include "mongo/base/status.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/fail_point_service.h"
diff --git a/src/mongo/executor/task_executor.h b/src/mongo/executor/task_executor.h
index d36f5c9bac6..f84321a46b7 100644
--- a/src/mongo/executor/task_executor.h
+++ b/src/mongo/executor/task_executor.h
@@ -38,7 +38,7 @@
#include "mongo/base/string_data.h"
#include "mongo/executor/remote_command_request.h"
#include "mongo/executor/remote_command_response.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/transport/baton.h"
#include "mongo/util/future.h"
#include "mongo/util/out_of_line_executor.h"
diff --git a/src/mongo/executor/thread_pool_mock.cpp b/src/mongo/executor/thread_pool_mock.cpp
index 191537cebff..fb809990e49 100644
--- a/src/mongo/executor/thread_pool_mock.cpp
+++ b/src/mongo/executor/thread_pool_mock.cpp
@@ -43,7 +43,7 @@ ThreadPoolMock::ThreadPoolMock(NetworkInterfaceMock* net, int32_t prngSeed, Opti
: _options(std::move(options)), _prng(prngSeed), _net(net) {}
ThreadPoolMock::~ThreadPoolMock() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_joining)
return;
@@ -53,13 +53,13 @@ ThreadPoolMock::~ThreadPoolMock() {
void ThreadPoolMock::startup() {
LOG(1) << "Starting pool";
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_started);
invariant(!_worker.joinable());
_started = true;
_worker = stdx::thread([this] {
_options.onCreateThread();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
LOG(1) << "Starting to consume tasks";
while (!_joining) {
@@ -77,17 +77,17 @@ void ThreadPoolMock::startup() {
}
void ThreadPoolMock::shutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shutdown(lk);
}
void ThreadPoolMock::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_join(lk);
}
void ThreadPoolMock::schedule(Task task) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown) {
lk.unlock();
@@ -98,7 +98,7 @@ void ThreadPoolMock::schedule(Task task) {
_tasks.emplace_back(std::move(task));
}
-void ThreadPoolMock::_consumeOneTask(stdx::unique_lock<stdx::mutex>& lk) {
+void ThreadPoolMock::_consumeOneTask(stdx::unique_lock<Latch>& lk) {
auto next = static_cast<size_t>(_prng.nextInt64(static_cast<int64_t>(_tasks.size())));
if (next + 1 != _tasks.size()) {
std::swap(_tasks[next], _tasks.back());
@@ -114,14 +114,14 @@ void ThreadPoolMock::_consumeOneTask(stdx::unique_lock<stdx::mutex>& lk) {
lk.lock();
}
-void ThreadPoolMock::_shutdown(stdx::unique_lock<stdx::mutex>& lk) {
+void ThreadPoolMock::_shutdown(stdx::unique_lock<Latch>& lk) {
LOG(1) << "Shutting down pool";
_inShutdown = true;
_net->signalWorkAvailable();
}
-void ThreadPoolMock::_join(stdx::unique_lock<stdx::mutex>& lk) {
+void ThreadPoolMock::_join(stdx::unique_lock<Latch>& lk) {
LOG(1) << "Joining pool";
_joining = true;
diff --git a/src/mongo/executor/thread_pool_mock.h b/src/mongo/executor/thread_pool_mock.h
index e1f8e30a80f..d81f83dfb4c 100644
--- a/src/mongo/executor/thread_pool_mock.h
+++ b/src/mongo/executor/thread_pool_mock.h
@@ -33,8 +33,8 @@
#include <functional>
#include <vector>
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/thread_pool_interface.h"
@@ -73,14 +73,14 @@ public:
void schedule(Task task) override;
private:
- void _consumeOneTask(stdx::unique_lock<stdx::mutex>& lk);
- void _shutdown(stdx::unique_lock<stdx::mutex>& lk);
- void _join(stdx::unique_lock<stdx::mutex>& lk);
+ void _consumeOneTask(stdx::unique_lock<Latch>& lk);
+ void _shutdown(stdx::unique_lock<Latch>& lk);
+ void _join(stdx::unique_lock<Latch>& lk);
// These are the options with which the pool was configured at construction time.
const Options _options;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ThreadPoolMock::_mutex");
stdx::thread _worker;
std::vector<Task> _tasks;
PseudoRandom _prng;
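The private helpers in these pools keep the _inlock convention after the type change: they take the caller's stdx::unique_lock<Latch> by reference so the locking requirement is visible in the signature, and only the helper drops and retakes the latch around user work. A small sketch of that convention with hypothetical names:

#include <functional>
#include <utility>
#include <vector>

#include "mongo/platform/mutex.h"

namespace mongo {

// Hypothetical pool, used only to illustrate the _inlock helper convention.
class TaskList {
public:
    void drainOne() {
        stdx::unique_lock<Latch> lk(_mutex);
        _drainOne_inlock(lk);  // the suffix signals "caller must hold _mutex"
    }

private:
    void _drainOne_inlock(stdx::unique_lock<Latch>& lk) {
        if (_tasks.empty())
            return;
        auto task = std::move(_tasks.back());
        _tasks.pop_back();
        lk.unlock();  // run the task outside the latch, as ThreadPoolMock does
        task();
        lk.lock();
    }

    Mutex _mutex = MONGO_MAKE_LATCH("TaskList::_mutex");
    std::vector<std::function<void()>> _tasks;
};

}  // namespace mongo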
diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp
index 55adfc29984..84153871128 100644
--- a/src/mongo/executor/thread_pool_task_executor.cpp
+++ b/src/mongo/executor/thread_pool_task_executor.cpp
@@ -140,20 +140,20 @@ ThreadPoolTaskExecutor::ThreadPoolTaskExecutor(std::unique_ptr<ThreadPoolInterfa
ThreadPoolTaskExecutor::~ThreadPoolTaskExecutor() {
shutdown();
- auto lk = _join(stdx::unique_lock<stdx::mutex>(_mutex));
+ auto lk = _join(stdx::unique_lock<Latch>(_mutex));
invariant(_state == shutdownComplete);
}
void ThreadPoolTaskExecutor::startup() {
_net->startup();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_state == preStart);
_setState_inlock(running);
_pool->startup();
}
void ThreadPoolTaskExecutor::shutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown_inlock()) {
invariant(_networkInProgressQueue.empty());
invariant(_sleepersQueue.empty());
@@ -176,10 +176,10 @@ void ThreadPoolTaskExecutor::shutdown() {
}
void ThreadPoolTaskExecutor::join() {
- _join(stdx::unique_lock<stdx::mutex>(_mutex));
+ _join(stdx::unique_lock<Latch>(_mutex));
}
-stdx::unique_lock<stdx::mutex> ThreadPoolTaskExecutor::_join(stdx::unique_lock<stdx::mutex> lk) {
+stdx::unique_lock<Latch> ThreadPoolTaskExecutor::_join(stdx::unique_lock<Latch> lk) {
_stateChange.wait(lk, [this] {
// All tasks are spliced into the _poolInProgressQueue immediately after we accept them.
// This occurs in scheduleIntoPool_inlock.
@@ -223,7 +223,7 @@ stdx::unique_lock<stdx::mutex> ThreadPoolTaskExecutor::_join(stdx::unique_lock<s
EventHandle event;
setEventForHandle(&event, std::move(eventState));
signalEvent_inlock(event, std::move(lk));
- lk = stdx::unique_lock<stdx::mutex>(_mutex);
+ lk = stdx::unique_lock<Latch>(_mutex);
}
lk.unlock();
_net->shutdown();
@@ -237,7 +237,7 @@ stdx::unique_lock<stdx::mutex> ThreadPoolTaskExecutor::_join(stdx::unique_lock<s
}
void ThreadPoolTaskExecutor::appendDiagnosticBSON(BSONObjBuilder* b) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// ThreadPool details
// TODO: fill in
@@ -264,7 +264,7 @@ StatusWith<TaskExecutor::EventHandle> ThreadPoolTaskExecutor::makeEvent() {
auto el = makeSingletonEventList();
EventHandle event;
setEventForHandle(&event, el.front());
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_inShutdown_inlock()) {
return {ErrorCodes::ShutdownInProgress, "Shutdown in progress"};
}
@@ -273,7 +273,7 @@ StatusWith<TaskExecutor::EventHandle> ThreadPoolTaskExecutor::makeEvent() {
}
void ThreadPoolTaskExecutor::signalEvent(const EventHandle& event) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
signalEvent_inlock(event, std::move(lk));
}
@@ -284,7 +284,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::onEvent(const E
}
// Unsure if we'll succeed yet, so pass an empty CallbackFn.
auto wq = makeSingletonWorkQueue({}, nullptr);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto eventState = checked_cast<EventState*>(getEventFromHandle(event));
auto cbHandle = enqueueCallbackState_inlock(&eventState->waiters, &wq);
if (!cbHandle.isOK()) {
@@ -304,7 +304,7 @@ StatusWith<stdx::cv_status> ThreadPoolTaskExecutor::waitForEvent(OperationContex
invariant(opCtx);
invariant(event.isValid());
auto eventState = checked_cast<EventState*>(getEventFromHandle(event));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// std::condition_variable::wait() can wake up spuriously, so we have to loop until the event
// is signalled or we time out.
@@ -323,7 +323,7 @@ StatusWith<stdx::cv_status> ThreadPoolTaskExecutor::waitForEvent(OperationContex
void ThreadPoolTaskExecutor::waitForEvent(const EventHandle& event) {
invariant(event.isValid());
auto eventState = checked_cast<EventState*>(getEventFromHandle(event));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (!eventState->isSignaledFlag) {
eventState->isSignaledCondition.wait(lk);
@@ -334,7 +334,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleWork(Ca
// Unsure if we'll succeed yet, so pass an empty CallbackFn.
auto wq = makeSingletonWorkQueue({}, nullptr);
WorkQueue temp;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto cbHandle = enqueueCallbackState_inlock(&temp, &wq);
if (!cbHandle.isOK()) {
return cbHandle;
@@ -352,7 +352,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleWorkAt(
}
auto wq = makeSingletonWorkQueue(std::move(work), nullptr, when);
wq.front()->isTimerOperation = true;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto cbHandle = enqueueCallbackState_inlock(&_sleepersQueue, &wq);
if (!cbHandle.isOK()) {
return cbHandle;
@@ -366,7 +366,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleWorkAt(
}
auto cbState = checked_cast<CallbackState*>(getCallbackFromHandle(cbHandle));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (cbState->canceled.load()) {
return;
}
@@ -455,7 +455,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC
},
baton);
wq.front()->isNetworkOperation = true;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto swCbHandle = enqueueCallbackState_inlock(&_networkInProgressQueue, &wq);
if (!swCbHandle.isOK())
return swCbHandle;
@@ -471,7 +471,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC
CallbackFn newCb = [cb, scheduledRequest, response](const CallbackArgs& cbData) {
remoteCommandFinished(cbData, cb, scheduledRequest, response);
};
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown_inlock()) {
return;
}
@@ -491,7 +491,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC
void ThreadPoolTaskExecutor::cancel(const CallbackHandle& cbHandle) {
invariant(cbHandle.isValid());
auto cbState = checked_cast<CallbackState*>(getCallbackFromHandle(cbHandle));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown_inlock()) {
return;
}
@@ -527,7 +527,7 @@ void ThreadPoolTaskExecutor::wait(const CallbackHandle& cbHandle, Interruptible*
if (cbState->isFinished.load()) {
return;
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!cbState->finishedCondition) {
cbState->finishedCondition.emplace();
}
@@ -569,7 +569,7 @@ ThreadPoolTaskExecutor::EventList ThreadPoolTaskExecutor::makeSingletonEventList
}
void ThreadPoolTaskExecutor::signalEvent_inlock(const EventHandle& event,
- stdx::unique_lock<stdx::mutex> lk) {
+ stdx::unique_lock<Latch> lk) {
invariant(event.isValid());
auto eventState = checked_cast<EventState*>(getEventFromHandle(event));
invariant(!eventState->isSignaledFlag);
@@ -580,20 +580,20 @@ void ThreadPoolTaskExecutor::signalEvent_inlock(const EventHandle& event,
}
void ThreadPoolTaskExecutor::scheduleIntoPool_inlock(WorkQueue* fromQueue,
- stdx::unique_lock<stdx::mutex> lk) {
+ stdx::unique_lock<Latch> lk) {
scheduleIntoPool_inlock(fromQueue, fromQueue->begin(), fromQueue->end(), std::move(lk));
}
void ThreadPoolTaskExecutor::scheduleIntoPool_inlock(WorkQueue* fromQueue,
const WorkQueue::iterator& iter,
- stdx::unique_lock<stdx::mutex> lk) {
+ stdx::unique_lock<Latch> lk) {
scheduleIntoPool_inlock(fromQueue, iter, std::next(iter), std::move(lk));
}
void ThreadPoolTaskExecutor::scheduleIntoPool_inlock(WorkQueue* fromQueue,
const WorkQueue::iterator& begin,
const WorkQueue::iterator& end,
- stdx::unique_lock<stdx::mutex> lk) {
+ stdx::unique_lock<Latch> lk) {
dassert(fromQueue != &_poolInProgressQueue);
std::vector<std::shared_ptr<CallbackState>> todo(begin, end);
_poolInProgressQueue.splice(_poolInProgressQueue.end(), *fromQueue, begin, end);
@@ -626,7 +626,7 @@ void ThreadPoolTaskExecutor::scheduleIntoPool_inlock(WorkQueue* fromQueue,
} else {
_pool->schedule([this, cbState](auto status) {
if (ErrorCodes::isCancelationError(status.code())) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
cbState->canceled.store(1);
} else {
@@ -659,7 +659,7 @@ void ThreadPoolTaskExecutor::runCallback(std::shared_ptr<CallbackState> cbStateA
callback(std::move(args));
}
cbStateArg->isFinished.store(true);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_poolInProgressQueue.erase(cbStateArg->iter);
if (cbStateArg->finishedCondition) {
cbStateArg->finishedCondition->notify_all();
diff --git a/src/mongo/executor/thread_pool_task_executor.h b/src/mongo/executor/thread_pool_task_executor.h
index 9106f596069..35dca3ce6b7 100644
--- a/src/mongo/executor/thread_pool_task_executor.h
+++ b/src/mongo/executor/thread_pool_task_executor.h
@@ -33,8 +33,8 @@
#include <memory>
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/transport/baton.h"
#include "mongo/util/fail_point_service.h"
@@ -149,13 +149,13 @@ private:
/**
* Signals the given event.
*/
- void signalEvent_inlock(const EventHandle& event, stdx::unique_lock<stdx::mutex> lk);
+ void signalEvent_inlock(const EventHandle& event, stdx::unique_lock<Latch> lk);
/**
* Schedules all items from "fromQueue" into the thread pool and moves them into
* _poolInProgressQueue.
*/
- void scheduleIntoPool_inlock(WorkQueue* fromQueue, stdx::unique_lock<stdx::mutex> lk);
+ void scheduleIntoPool_inlock(WorkQueue* fromQueue, stdx::unique_lock<Latch> lk);
/**
* Schedules the given item from "fromQueue" into the thread pool and moves it into
@@ -163,7 +163,7 @@ private:
*/
void scheduleIntoPool_inlock(WorkQueue* fromQueue,
const WorkQueue::iterator& iter,
- stdx::unique_lock<stdx::mutex> lk);
+ stdx::unique_lock<Latch> lk);
/**
* Schedules entries from "begin" through "end" in "fromQueue" into the thread pool
@@ -172,7 +172,7 @@ private:
void scheduleIntoPool_inlock(WorkQueue* fromQueue,
const WorkQueue::iterator& begin,
const WorkQueue::iterator& end,
- stdx::unique_lock<stdx::mutex> lk);
+ stdx::unique_lock<Latch> lk);
/**
* Executes the callback specified by "cbState".
@@ -181,7 +181,7 @@ private:
bool _inShutdown_inlock() const;
void _setState_inlock(State newState);
- stdx::unique_lock<stdx::mutex> _join(stdx::unique_lock<stdx::mutex> lk);
+ stdx::unique_lock<Latch> _join(stdx::unique_lock<Latch> lk);
// The network interface used for remote command execution and waiting.
std::shared_ptr<NetworkInterface> _net;
@@ -190,7 +190,7 @@ private:
std::shared_ptr<ThreadPoolInterface> _pool;
// Mutex guarding all remaining fields.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ThreadPoolTaskExecutor::_mutex");
// Queue containing all items currently scheduled into the thread pool but not yet completed.
WorkQueue _poolInProgressQueue;
diff --git a/src/mongo/idl/mutable_observer_registry.h b/src/mongo/idl/mutable_observer_registry.h
index 89ceaf2b2fb..2dfc0241f34 100644
--- a/src/mongo/idl/mutable_observer_registry.h
+++ b/src/mongo/idl/mutable_observer_registry.h
@@ -32,7 +32,7 @@
#include <vector>
#include "mongo/base/status.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/functional.h"
namespace mongo {
@@ -66,7 +66,7 @@ public:
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MutableObserverRegistry::_mutex");
std::vector<unique_function<void(const T&)>> _registry;
};
diff --git a/src/mongo/logger/console.cpp b/src/mongo/logger/console.cpp
index ebf8049ac71..3646076b245 100644
--- a/src/mongo/logger/console.cpp
+++ b/src/mongo/logger/console.cpp
@@ -44,7 +44,7 @@ namespace mongo {
namespace {
stdx::mutex& consoleMutex() {
- static stdx::mutex instance;
+ static stdx::mutex instance; // NOLINT
return instance;
}
diff --git a/src/mongo/logger/console.h b/src/mongo/logger/console.h
index 7becb076ef7..ecc6b6556ab 100644
--- a/src/mongo/logger/console.h
+++ b/src/mongo/logger/console.h
@@ -31,7 +31,7 @@
#include <iosfwd>
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
diff --git a/src/mongo/logger/log_component_settings.cpp b/src/mongo/logger/log_component_settings.cpp
index f1da736be2c..6adba4da495 100644
--- a/src/mongo/logger/log_component_settings.cpp
+++ b/src/mongo/logger/log_component_settings.cpp
@@ -61,7 +61,7 @@ LogSeverity LogComponentSettings::getMinimumLogSeverity(LogComponent component)
void LogComponentSettings::setMinimumLoggedSeverity(LogComponent component, LogSeverity severity) {
dassert(int(component) >= 0 && int(component) < LogComponent::kNumLogComponents);
- stdx::lock_guard<stdx::mutex> lk(_mtx);
+ stdx::lock_guard<Latch> lk(_mtx);
_setMinimumLoggedSeverityInLock(component, severity);
}
@@ -99,7 +99,7 @@ void LogComponentSettings::_setMinimumLoggedSeverityInLock(LogComponent componen
void LogComponentSettings::clearMinimumLoggedSeverity(LogComponent component) {
dassert(int(component) >= 0 && int(component) < LogComponent::kNumLogComponents);
- stdx::lock_guard<stdx::mutex> lk(_mtx);
+ stdx::lock_guard<Latch> lk(_mtx);
// LogComponent::kDefault must always be configured.
if (component == LogComponent::kDefault) {
diff --git a/src/mongo/logger/log_component_settings.h b/src/mongo/logger/log_component_settings.h
index 23440129d54..3b85f174b31 100644
--- a/src/mongo/logger/log_component_settings.h
+++ b/src/mongo/logger/log_component_settings.h
@@ -32,7 +32,7 @@
#include "mongo/logger/log_component.h"
#include "mongo/logger/log_severity.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace logger {
@@ -87,7 +87,7 @@ private:
// A mutex to synchronize writes to the severity arrays. This mutex is to synchronize changes to
// the entire array, and the atomics are to synchronize individual elements.
- stdx::mutex _mtx;
+ Mutex _mtx = MONGO_MAKE_LATCH("LogComponentSettings::_mtx");
// True if a log severity is explicitly set for a component.
// This differentiates between unconfigured components and components that happen to have
diff --git a/src/mongo/logger/log_severity_limiter.h b/src/mongo/logger/log_severity_limiter.h
index 689de448926..027393c2adf 100644
--- a/src/mongo/logger/log_severity_limiter.h
+++ b/src/mongo/logger/log_severity_limiter.h
@@ -31,7 +31,7 @@
#include "mongo/logger/log_severity.h"
#include "mongo/logger/logstream_builder.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/time_support.h"
@@ -79,7 +79,7 @@ public:
LogSeverity nextFor(const KeyT& key) {
auto now = Date_t::now();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto& cutoff = _cutoffByKey[key];
if (now > cutoff) {
@@ -97,7 +97,7 @@ private:
LogSeverity _limitedLogSeverity;
LogSeverity _normalLogSeverity;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("LogSeverityLimiter::_mutex");
stdx::unordered_map<KeyT, Date_t> _cutoffByKey;
};
diff --git a/src/mongo/logger/ramlog.cpp b/src/mongo/logger/ramlog.cpp
index 67e1fe4b89b..77dca9dd48c 100644
--- a/src/mongo/logger/ramlog.cpp
+++ b/src/mongo/logger/ramlog.cpp
@@ -170,7 +170,7 @@ Status RamLogAppender::append(const logger::MessageEventEphemeral& event) {
RamLog* RamLog::get(const std::string& name) {
if (!_namedLock) {
// Guaranteed to happen before multi-threaded operation.
- _namedLock = new stdx::mutex();
+ _namedLock = new stdx::mutex(); // NOLINT
}
stdx::lock_guard<stdx::mutex> lk(*_namedLock);
@@ -215,7 +215,7 @@ MONGO_INITIALIZER(RamLogCatalog)(InitializerContext*) {
return Status(ErrorCodes::InternalError,
"Inconsistent initialization of RamLogCatalog.");
}
- _namedLock = new stdx::mutex();
+ _namedLock = new stdx::mutex(); // NOLINT
_named = new RM();
}
diff --git a/src/mongo/logger/ramlog.h b/src/mongo/logger/ramlog.h
index 306dc36bff4..7a3ff8cdaaa 100644
--- a/src/mongo/logger/ramlog.h
+++ b/src/mongo/logger/ramlog.h
@@ -39,7 +39,7 @@
#include "mongo/logger/appender.h"
#include "mongo/logger/message_event.h"
#include "mongo/logger/tee.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
@@ -115,7 +115,7 @@ private:
const char* getLine_inlock(unsigned lineNumber) const;
- stdx::mutex _mutex; // Guards all non-static data.
+ stdx::mutex _mutex; // Guards all non-static data. // NOLINT
char lines[N][C];
unsigned h; // current position
unsigned n; // number of lines stored, 0 to N
diff --git a/src/mongo/logger/rotatable_file_writer.h b/src/mongo/logger/rotatable_file_writer.h
index 83fe4716c9a..ab6fe578281 100644
--- a/src/mongo/logger/rotatable_file_writer.h
+++ b/src/mongo/logger/rotatable_file_writer.h
@@ -34,7 +34,7 @@
#include <string>
#include "mongo/base/status.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace logger {
@@ -118,7 +118,7 @@ public:
Status _openFileStream(bool append);
RotatableFileWriter* _writer;
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
};
/**
@@ -128,7 +128,7 @@ public:
private:
friend class RotatableFileWriter::Use;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("RotatableFileWriter::_mutex");
std::string _fileName;
std::unique_ptr<std::ostream> _stream;
};
diff --git a/src/mongo/logv2/console.h b/src/mongo/logv2/console.h
index 5e8abc00521..061ad77cf05 100644
--- a/src/mongo/logv2/console.h
+++ b/src/mongo/logv2/console.h
@@ -31,7 +31,7 @@
#include <iosfwd>
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace logv2 {
diff --git a/src/mongo/logv2/log_component_settings.cpp b/src/mongo/logv2/log_component_settings.cpp
index 0003dfcbe8d..c85d33c8487 100644
--- a/src/mongo/logv2/log_component_settings.cpp
+++ b/src/mongo/logv2/log_component_settings.cpp
@@ -61,7 +61,7 @@ LogSeverity LogComponentSettings::getMinimumLogSeverity(LogComponent component)
void LogComponentSettings::setMinimumLoggedSeverity(LogComponent component, LogSeverity severity) {
dassert(int(component) >= 0 && int(component) < LogComponent::kNumLogComponents);
- stdx::lock_guard<stdx::mutex> lk(_mtx);
+ stdx::lock_guard<Latch> lk(_mtx);
_setMinimumLoggedSeverityInLock(component, severity);
}
@@ -99,7 +99,7 @@ void LogComponentSettings::_setMinimumLoggedSeverityInLock(LogComponent componen
void LogComponentSettings::clearMinimumLoggedSeverity(LogComponent component) {
dassert(int(component) >= 0 && int(component) < LogComponent::kNumLogComponents);
- stdx::lock_guard<stdx::mutex> lk(_mtx);
+ stdx::lock_guard<Latch> lk(_mtx);
// LogComponent::kDefault must always be configured.
if (component == LogComponent::kDefault) {
diff --git a/src/mongo/logv2/log_component_settings.h b/src/mongo/logv2/log_component_settings.h
index c9422786132..72e0b00358c 100644
--- a/src/mongo/logv2/log_component_settings.h
+++ b/src/mongo/logv2/log_component_settings.h
@@ -32,7 +32,7 @@
#include "mongo/logv2/log_component.h"
#include "mongo/logv2/log_severity.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace logv2 {
@@ -87,7 +87,7 @@ private:
// A mutex to synchronize writes to the severity arrays. This mutex is to synchronize changes to
// the entire array, and the atomics are to synchronize individual elements.
- stdx::mutex _mtx;
+ Mutex _mtx = MONGO_MAKE_LATCH("LogComponentSettings::_mtx");
// True if a log severity is explicitly set for a component.
// This differentiates between unconfigured components and components that happen to have
diff --git a/src/mongo/logv2/logv2_bm.cpp b/src/mongo/logv2/logv2_bm.cpp
index 73958db80f8..1b29b84164d 100644
--- a/src/mongo/logv2/logv2_bm.cpp
+++ b/src/mongo/logv2/logv2_bm.cpp
@@ -61,13 +61,13 @@ boost::shared_ptr<std::ostream> makeNullStream() {
// ConsoleAppender can be benchmarked.
class StringstreamConsole {
public:
- stdx::mutex& mutex() {
- static stdx::mutex instance;
+ Mutex& mutex() {
+ static auto instance = MONGO_MAKE_LATCH();
return instance;
}
StringstreamConsole() {
- stdx::unique_lock<stdx::mutex> lk(mutex());
+ stdx::unique_lock<Latch> lk(mutex());
lk.swap(_consoleLock);
_out = makeNullStream();
}
@@ -78,7 +78,7 @@ public:
private:
boost::shared_ptr<std::ostream> _out;
- stdx::unique_lock<stdx::mutex> _consoleLock;
+ stdx::unique_lock<Latch> _consoleLock;
};
// RAII style helper class for init/deinit log system
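Both RotatableFileWriter::Use above and the benchmark's StringstreamConsole hold the latch for an object's whole lifetime by storing a stdx::unique_lock<Latch> member and swapping a freshly acquired lock into it. A minimal sketch of that scoped-use idiom, with illustrative names:

#include <string>

#include "mongo/platform/mutex.h"

namespace mongo {

// Hypothetical writer, used only to show the lock-as-member idiom.
class Journal {
private:
    Mutex _mutex = MONGO_MAKE_LATCH("Journal::_mutex");
    std::string _lines;

public:
    // Holds Journal::_mutex for as long as the Use object lives.
    class Use {
    public:
        explicit Use(Journal* journal) : _journal(journal) {
            stdx::unique_lock<Latch> lk(journal->_mutex);
            lk.swap(_lock);  // same swap-into-member trick as StringstreamConsole
        }

        void append(const std::string& line) {
            _journal->_lines += line;
        }

    private:
        Journal* _journal;
        stdx::unique_lock<Latch> _lock;
    };
};

}  // namespace mongo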
diff --git a/src/mongo/logv2/ramlog.cpp b/src/mongo/logv2/ramlog.cpp
index b14a93b87ca..38076ef1481 100644
--- a/src/mongo/logv2/ramlog.cpp
+++ b/src/mongo/logv2/ramlog.cpp
@@ -43,7 +43,7 @@ using std::string;
namespace {
typedef std::map<string, RamLog*> RM;
-stdx::mutex* _namedLock = NULL;
+stdx::mutex* _namedLock = NULL; // NOLINT
RM* _named = NULL;
} // namespace
@@ -156,7 +156,7 @@ RamLog::LineIterator::LineIterator(RamLog* ramlog)
RamLog* RamLog::get(const std::string& name) {
if (!_namedLock) {
// Guaranteed to happen before multi-threaded operation.
- _namedLock = new stdx::mutex();
+ _namedLock = new stdx::mutex(); // NOLINT
}
stdx::lock_guard<stdx::mutex> lk(*_namedLock);
@@ -201,7 +201,7 @@ MONGO_INITIALIZER(RamLogCatalogV2)(InitializerContext*) {
return Status(ErrorCodes::InternalError,
"Inconsistent initialization of RamLogCatalog.");
}
- _namedLock = new stdx::mutex();
+ _namedLock = new stdx::mutex(); // NOLINT
_named = new RM();
}
diff --git a/src/mongo/logv2/ramlog.h b/src/mongo/logv2/ramlog.h
index 183464f1c5a..5feba166044 100644
--- a/src/mongo/logv2/ramlog.h
+++ b/src/mongo/logv2/ramlog.h
@@ -36,7 +36,7 @@
#include "mongo/base/status.h"
#include "mongo/base/string_data.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
@@ -113,7 +113,8 @@ private:
const char* getLine_inlock(unsigned lineNumber) const;
- stdx::mutex _mutex; // Guards all non-static data.
+ // Guards all non-static data.
+ stdx::mutex _mutex; // NOLINT
char lines[N][C];
unsigned h; // current position
unsigned n; // number of lines stored, 0 to N
diff --git a/src/mongo/platform/condition_variable.cpp b/src/mongo/platform/condition_variable.cpp
index 19abb58b8f8..ef6e64aaff7 100644
--- a/src/mongo/platform/condition_variable.cpp
+++ b/src/mongo/platform/condition_variable.cpp
@@ -27,6 +27,8 @@
* it in the license file.
*/
+#include "mongo/platform/basic.h"
+
#include "mongo/platform/condition_variable.h"
namespace mongo {
diff --git a/src/mongo/platform/condition_variable.h b/src/mongo/platform/condition_variable.h
index 885e4b82ccd..beca4f4dda9 100644
--- a/src/mongo/platform/condition_variable.h
+++ b/src/mongo/platform/condition_variable.h
@@ -28,10 +28,12 @@
*/
#pragma once
-#include "mongo/platform/basic.h"
+
+#include <chrono>
#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"
+#include "mongo/util/duration.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
@@ -46,8 +48,6 @@ public:
};
class ConditionVariable {
- friend class ::mongo::Waitable;
-
public:
static constexpr Milliseconds kUnfulfilledConditionVariableTimeout = Milliseconds(100);
@@ -79,8 +79,10 @@ public:
protected:
template <typename Callback>
- void _runWithNotifyable(Notifyable& notifyable, Callback&& cb) noexcept {
- _condvar._runWithNotifyable(notifyable, cb);
+ friend void runWithNotifyable(ConditionVariable& cv,
+ Notifyable& notifyable,
+ Callback&& cb) noexcept {
+ runWithNotifyable(cv._condvar, notifyable, std::forward<Callback>(cb));
}
private:
@@ -105,7 +107,7 @@ private:
}
if (_conditionVariableActions) {
- if constexpr (std::is_same<decltype(lock), Mutex>::value) {
+ if constexpr (std::is_same<decltype(lock), mongo::Mutex>::value) {
_conditionVariableActions->onUnfulfilledConditionVariable(lock.getName());
} else {
_conditionVariableActions->onUnfulfilledConditionVariable("AnonymousLock");
diff --git a/src/mongo/platform/condition_variable_test.cpp b/src/mongo/platform/condition_variable_test.cpp
index 88e7a40c617..69631e233f2 100644
--- a/src/mongo/platform/condition_variable_test.cpp
+++ b/src/mongo/platform/condition_variable_test.cpp
@@ -39,11 +39,11 @@ namespace mongo {
TEST(ConditionVariable, BasicSingleThread) {
unittest::Barrier barrier(2U);
ConditionVariable cv;
- Mutex m;
+ stdx::mutex m; // NOLINT
bool done = false;
stdx::thread worker([&]() {
- stdx::unique_lock<Mutex> lk(m);
+ stdx::unique_lock<stdx::mutex> lk(m);
barrier.countDownAndWait();
ASSERT(!done);
cv.wait(lk, [&] { return done; });
@@ -52,7 +52,7 @@ TEST(ConditionVariable, BasicSingleThread) {
barrier.countDownAndWait();
{
- stdx::unique_lock<Mutex> lk(m);
+ stdx::unique_lock<stdx::mutex> lk(m);
done = true;
}
cv.notify_one();
diff --git a/src/mongo/platform/mutex.cpp b/src/mongo/platform/mutex.cpp
index 49ececfd25e..ba7230f8e94 100644
--- a/src/mongo/platform/mutex.cpp
+++ b/src/mongo/platform/mutex.cpp
@@ -31,23 +31,19 @@
namespace mongo {
-namespace {
-std::unique_ptr<LockActions> gLockActions;
-}
-
void Mutex::lock() {
- auto hasLock = _mutex.try_lock_for(kContendedLockTimeout.toSystemDuration());
+ auto hasLock = _mutex.try_lock();
if (hasLock) {
return;
}
- if (gLockActions) {
- gLockActions->onContendedLock(_name);
+ if (auto actions = LockActions::getState().actions.load()) {
+ actions->onContendedLock(_name);
}
_mutex.lock();
}
void Mutex::unlock() {
- if (gLockActions) {
- gLockActions->onUnlock(_name);
+ if (auto actions = LockActions::getState().actions.load()) {
+ actions->onUnlock(_name);
}
_mutex.unlock();
}
@@ -55,8 +51,8 @@ bool Mutex::try_lock() {
return _mutex.try_lock();
}
-void Mutex::setLockActions(std::unique_ptr<LockActions> actions) {
- gLockActions = std::move(actions);
+void Mutex::LockActions::add(LockActions* actions) {
+ getState().actions.store(actions);
}
} // namespace mongo
diff --git a/src/mongo/platform/mutex.h b/src/mongo/platform/mutex.h
index ded61a90783..fe72b201d3a 100644
--- a/src/mongo/platform/mutex.h
+++ b/src/mongo/platform/mutex.h
@@ -33,40 +33,80 @@
#include "mongo/base/error_codes.h"
#include "mongo/base/string_data.h"
+#include "mongo/platform/atomic_word.h"
#include "mongo/stdx/mutex.h"
-#include "mongo/util/clock_source_mock.h"
+#include "mongo/util/duration.h"
namespace mongo {
-class LockActions {
+class Latch {
public:
- virtual ~LockActions() = default;
- virtual void onContendedLock(const StringData& name) = 0;
- virtual void onUnlock(const StringData& name) = 0;
+ virtual ~Latch() = default;
+
+ virtual void lock() = 0;
+ virtual void unlock() = 0;
+ virtual bool try_lock() = 0;
};
-class Mutex {
+class Mutex : public Latch {
public:
+ class LockActions;
static constexpr auto kAnonymousMutexStr = "AnonymousMutex"_sd;
- static constexpr Milliseconds kContendedLockTimeout = Milliseconds(100);
Mutex() : Mutex(kAnonymousMutexStr) {}
// Note that StringData is a view type, thus the underlying string for _name must outlive any
// given Mutex
explicit Mutex(const StringData& name) : _name(name) {}
- void lock();
- void unlock();
- bool try_lock();
+ void lock() override;
+ void unlock() override;
+ bool try_lock() override;
const StringData& getName() const {
return _name;
}
- static void setLockActions(std::unique_ptr<LockActions> actions);
-
private:
const StringData _name;
- stdx::timed_mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
+};
+
+/**
+ * A set of actions to run on notable events for a type that models the Lockable concept
+ */
+class Mutex::LockActions {
+ friend class Mutex;
+
+public:
+ virtual ~LockActions() = default;
+ /**
+ * Action to do when a lock cannot be immediately acquired
+ */
+ virtual void onContendedLock(const StringData& name) = 0;
+
+ /**
+ * Action to do when a lock is unlocked
+ */
+ virtual void onUnlock(const StringData& name) = 0;
+
+ /**
+ * Registers a LockActions subclass so that its callbacks are invoked for the events above.
+ *
+ * Note that currently only one LockActions is in use at a time. As part of SERVER-42895,
+ * this will change so that a list of LockActions is maintained.
+ *
+ * A LockActions can only be added, never removed. To deactivate a LockActions subclass,
+ * give that subclass its own switch that turns its functions into no-ops.
+ */
+ static void add(LockActions* actions);
+
+private:
+ static auto& getState() {
+ struct State {
+ AtomicWord<LockActions*> actions{nullptr};
+ };
+ static State state;
+ return state;
+ }
};
} // namespace mongo
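
For context, here is a minimal sketch (not part of this commit) of how a diagnostics hook could plug into the Mutex::LockActions interface declared above. ContentionCounter, gContentionCounter, and installContentionCounter are hypothetical names used only for illustration.

    #include "mongo/platform/atomic_word.h"
    #include "mongo/platform/mutex.h"

    namespace mongo {
    namespace {

    class ContentionCounter final : public Mutex::LockActions {
    public:
        void onContendedLock(const StringData& name) override {
            // Called by Mutex::lock() after a failed try_lock(), before the blocking lock().
            _contended.fetchAndAdd(1);
        }
        void onUnlock(const StringData& name) override {
            // Called on every Mutex::unlock(); keep this cheap.
        }

    private:
        AtomicWord<long long> _contended{0};
    };

    ContentionCounter gContentionCounter;

    }  // namespace

    // Per the comment on LockActions::add(), registration is add-only; a hook that needs to
    // be disabled should carry its own switch that turns its callbacks into no-ops.
    void installContentionCounter() {
        Mutex::LockActions::add(&gContentionCounter);
    }

    }  // namespace mongo
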
diff --git a/src/mongo/s/balancer_configuration.cpp b/src/mongo/s/balancer_configuration.cpp
index 9dacc96a258..ef7e27aa6a9 100644
--- a/src/mongo/s/balancer_configuration.cpp
+++ b/src/mongo/s/balancer_configuration.cpp
@@ -96,7 +96,7 @@ BalancerConfiguration::BalancerConfiguration()
BalancerConfiguration::~BalancerConfiguration() = default;
BalancerSettingsType::BalancerMode BalancerConfiguration::getBalancerMode() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
return _balancerSettings.getMode();
}
@@ -148,7 +148,7 @@ Status BalancerConfiguration::enableAutoSplit(OperationContext* opCtx, bool enab
}
bool BalancerConfiguration::shouldBalance() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
if (_balancerSettings.getMode() == BalancerSettingsType::kOff ||
_balancerSettings.getMode() == BalancerSettingsType::kAutoSplitOnly) {
return false;
@@ -158,7 +158,7 @@ bool BalancerConfiguration::shouldBalance() const {
}
bool BalancerConfiguration::shouldBalanceForAutoSplit() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
if (_balancerSettings.getMode() == BalancerSettingsType::kOff) {
return false;
}
@@ -167,12 +167,12 @@ bool BalancerConfiguration::shouldBalanceForAutoSplit() const {
}
MigrationSecondaryThrottleOptions BalancerConfiguration::getSecondaryThrottle() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
return _balancerSettings.getSecondaryThrottle();
}
bool BalancerConfiguration::waitForDelete() const {
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
return _balancerSettings.waitForDelete();
}
@@ -214,7 +214,7 @@ Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* opCtx)
return settingsObjStatus.getStatus();
}
- stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
+ stdx::lock_guard<Latch> lk(_balancerSettingsMutex);
_balancerSettings = std::move(settings);
return Status::OK();
diff --git a/src/mongo/s/balancer_configuration.h b/src/mongo/s/balancer_configuration.h
index 7bea190a61e..10b174e43e0 100644
--- a/src/mongo/s/balancer_configuration.h
+++ b/src/mongo/s/balancer_configuration.h
@@ -34,8 +34,8 @@
#include <cstdint>
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -283,7 +283,8 @@ private:
Status _refreshAutoSplitSettings(OperationContext* opCtx);
// The latest read balancer settings and a mutex to protect its swaps
- mutable stdx::mutex _balancerSettingsMutex;
+ mutable Mutex _balancerSettingsMutex =
+ MONGO_MAKE_LATCH("BalancerConfiguration::_balancerSettingsMutex");
BalancerSettingsType _balancerSettings;
// Max chunk size after which a chunk would be considered jumbo and won't be moved. This value
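
The two hunks above show the conversion pattern this commit applies across the tree: the stdx::mutex member becomes a named mongo::Mutex built with MONGO_MAKE_LATCH, and lock guards are taken against the Latch interface. A condensed, hypothetical illustration follows (ExampleGuardedState is not a real class; it only mirrors the before/after shape seen above):

    #include "mongo/platform/mutex.h"
    #include "mongo/stdx/mutex.h"

    namespace mongo {

    class ExampleGuardedState {
    public:
        int get() const {
            stdx::lock_guard<Latch> lk(_mutex);  // guard against the Latch interface
            return _value;
        }
        void set(int v) {
            stdx::lock_guard<Latch> lk(_mutex);
            _value = v;
        }

    private:
        // A named Mutex replaces the old stdx::mutex member; since StringData is a view type,
        // the name must outlive the Mutex (a string literal is fine).
        mutable Mutex _mutex = MONGO_MAKE_LATCH("ExampleGuardedState::_mutex");
        int _value = 0;
    };

    }  // namespace mongo
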
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
index 5dae286da5a..1a22526aa14 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
@@ -132,7 +132,7 @@ StatusWith<LockpingsType> DistLockCatalogMock::getPing(OperationContext* opCtx,
GetPingFunc checkerFunc = noGetPingSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _getPingReturnValue;
checkerFunc = _getPingChecker;
}
@@ -146,7 +146,7 @@ Status DistLockCatalogMock::ping(OperationContext* opCtx, StringData processID,
PingFunc checkerFunc = noPingFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _pingReturnValue;
checkerFunc = _pingChecker;
}
@@ -167,7 +167,7 @@ StatusWith<LocksType> DistLockCatalogMock::grabLock(OperationContext* opCtx,
GrabLockFunc checkerFunc = noGrabLockFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _grabLockReturnValue;
checkerFunc = _grabLockChecker;
}
@@ -188,7 +188,7 @@ StatusWith<LocksType> DistLockCatalogMock::overtakeLock(OperationContext* opCtx,
OvertakeLockFunc checkerFunc = noOvertakeLockFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _overtakeLockReturnValue;
checkerFunc = _overtakeLockChecker;
}
@@ -202,7 +202,7 @@ Status DistLockCatalogMock::unlock(OperationContext* opCtx, const OID& lockSessi
UnlockFunc checkerFunc = noUnLockFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _unlockReturnValue;
checkerFunc = _unlockChecker;
}
@@ -218,7 +218,7 @@ Status DistLockCatalogMock::unlock(OperationContext* opCtx,
UnlockFunc checkerFunc = noUnLockFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _unlockReturnValue;
checkerFunc = _unlockChecker;
}
@@ -234,7 +234,7 @@ StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogMock::getServerInfo(
GetServerInfoFunc checkerFunc = noGetServerInfoSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _getServerInfoReturnValue;
checkerFunc = _getServerInfoChecker;
}
@@ -249,7 +249,7 @@ StatusWith<LocksType> DistLockCatalogMock::getLockByTS(OperationContext* opCtx,
GetLockByTSFunc checkerFunc = noGetLockByTSSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _getLockByTSReturnValue;
checkerFunc = _getLockByTSChecker;
}
@@ -263,7 +263,7 @@ StatusWith<LocksType> DistLockCatalogMock::getLockByName(OperationContext* opCtx
GetLockByNameFunc checkerFunc = noGetLockByNameSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _getLockByNameReturnValue;
checkerFunc = _getLockByNameChecker;
}
@@ -277,7 +277,7 @@ Status DistLockCatalogMock::stopPing(OperationContext* opCtx, StringData process
StopPingFunc checkerFunc = noStopPingFuncSet;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
ret = _stopPingReturnValue;
checkerFunc = _stopPingChecker;
}
@@ -288,67 +288,67 @@ Status DistLockCatalogMock::stopPing(OperationContext* opCtx, StringData process
void DistLockCatalogMock::expectGrabLock(DistLockCatalogMock::GrabLockFunc checkerFunc,
StatusWith<LocksType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_grabLockChecker = checkerFunc;
_grabLockReturnValue = returnThis;
}
void DistLockCatalogMock::expectNoGrabLock() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_grabLockChecker = noGrabLockFuncSet;
_grabLockReturnValue = kLocksTypeBadRetValue;
}
void DistLockCatalogMock::expectUnLock(DistLockCatalogMock::UnlockFunc checkerFunc,
Status returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_unlockChecker = checkerFunc;
_unlockReturnValue = returnThis;
}
void DistLockCatalogMock::expectPing(DistLockCatalogMock::PingFunc checkerFunc, Status returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_pingChecker = checkerFunc;
_pingReturnValue = returnThis;
}
void DistLockCatalogMock::expectStopPing(StopPingFunc checkerFunc, Status returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_stopPingChecker = checkerFunc;
_stopPingReturnValue = returnThis;
}
void DistLockCatalogMock::expectGetLockByTS(GetLockByTSFunc checkerFunc,
StatusWith<LocksType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_getLockByTSChecker = checkerFunc;
_getLockByTSReturnValue = returnThis;
}
void DistLockCatalogMock::expectGetLockByName(GetLockByNameFunc checkerFunc,
StatusWith<LocksType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_getLockByNameChecker = checkerFunc;
_getLockByNameReturnValue = returnThis;
}
void DistLockCatalogMock::expectOvertakeLock(OvertakeLockFunc checkerFunc,
StatusWith<LocksType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_overtakeLockChecker = checkerFunc;
_overtakeLockReturnValue = returnThis;
}
void DistLockCatalogMock::expectGetPing(GetPingFunc checkerFunc,
StatusWith<LockpingsType> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_getPingChecker = checkerFunc;
_getPingReturnValue = returnThis;
}
void DistLockCatalogMock::expectGetServerInfo(GetServerInfoFunc checkerFunc,
StatusWith<DistLockCatalog::ServerInfo> returnThis) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_getServerInfoChecker = checkerFunc;
_getServerInfoReturnValue = returnThis;
}
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.h b/src/mongo/s/catalog/dist_lock_catalog_mock.h
index faae634b09e..1eab733dc85 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.h
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.h
@@ -32,10 +32,10 @@
#include <functional>
#include "mongo/base/status_with.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/dist_lock_catalog.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -191,7 +191,7 @@ public:
private:
// Protects all the member variables.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("DistLockCatalogMock::_mutex");
GrabLockFunc _grabLockChecker;
StatusWith<LocksType> _grabLockReturnValue;
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
index 860b464939d..7a422fea7ff 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
@@ -94,7 +94,7 @@ void ReplSetDistLockManager::startUp() {
void ReplSetDistLockManager::shutDown(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_isShutDown = true;
_shutDownCV.notify_all();
}
@@ -118,7 +118,7 @@ std::string ReplSetDistLockManager::getProcessID() {
}
bool ReplSetDistLockManager::isShutDown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _isShutDown;
}
@@ -147,7 +147,7 @@ void ReplSetDistLockManager::doTask() {
std::deque<std::pair<DistLockHandle, boost::optional<std::string>>> toUnlockBatch;
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
toUnlockBatch.swap(_unlockList);
}
@@ -179,7 +179,7 @@ void ReplSetDistLockManager::doTask() {
}
MONGO_IDLE_THREAD_BLOCK;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shutDownCV.wait_for(lk, _pingInterval.toSystemDuration(), [this] { return _isShutDown; });
}
}
@@ -222,7 +222,7 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* opCtx,
const auto& serverInfo = serverInfoStatus.getValue();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto pingIter = _pingHistory.find(lockDoc.getName());
if (pingIter == _pingHistory.end()) {
@@ -504,7 +504,7 @@ Status ReplSetDistLockManager::checkStatus(OperationContext* opCtx,
void ReplSetDistLockManager::queueUnlock(const DistLockHandle& lockSessionID,
const boost::optional<std::string>& name) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_unlockList.push_back(std::make_pair(lockSessionID, name));
}
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.h b/src/mongo/s/catalog/replset_dist_lock_manager.h
index 1814bd96677..38176244f33 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.h
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.h
@@ -34,12 +34,12 @@
#include <string>
#include "mongo/base/string_data.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/dist_lock_catalog.h"
#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/dist_lock_ping_info.h"
#include "mongo/stdx/chrono.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_map.h"
@@ -132,7 +132,7 @@ private:
const Milliseconds _pingInterval; // (I)
const Milliseconds _lockExpiration; // (I)
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ReplSetDistLockManager::_mutex");
std::unique_ptr<stdx::thread> _execThread; // (S)
// Contains the list of locks queued for unlocking. Cases when unlock operation can
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
index eac536aca42..89a420f50d5 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
@@ -36,6 +36,8 @@
#include <vector>
#include "mongo/bson/json.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/dist_lock_catalog_mock.h"
#include "mongo/s/catalog/replset_dist_lock_manager.h"
@@ -44,8 +46,6 @@
#include "mongo/s/catalog/type_locks.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_server_test_fixture.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/system_tick_source.h"
#include "mongo/util/tick_source_mock.h"
@@ -413,7 +413,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
getMockCatalog()->expectGetLockByName([](StringData) {},
{ErrorCodes::LockNotFound, "not found!"});
- stdx::mutex unlockMutex;
+ auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
OID unlockSessionIDPassed;
int unlockCallCount = 0;
@@ -421,7 +421,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
getMockCatalog()->expectUnLock(
[&unlockMutex, &unlockCV, &unlockCallCount, &unlockSessionIDPassed](
const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
unlockCV.notify_all();
@@ -435,7 +435,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
if (unlockCallCount == 0) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
@@ -558,7 +558,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
},
{ErrorCodes::ExceededMemoryLimit, "bad remote server"});
- stdx::mutex unlockMutex;
+ auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
int unlockCallCount = 0;
OID unlockSessionIDPassed;
@@ -566,7 +566,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
getMockCatalog()->expectUnLock(
[&unlockMutex, &unlockCV, &unlockCallCount, &unlockSessionIDPassed](
const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
unlockCallCount++;
unlockSessionIDPassed = lockSessionID;
unlockCV.notify_all();
@@ -580,7 +580,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
if (unlockCallCount == 0) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
@@ -609,13 +609,13 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
* 3. Check that correct process is being pinged.
*/
TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
- stdx::mutex testMutex;
+ auto testMutex = MONGO_MAKE_LATCH();
stdx::condition_variable ping3TimesCV;
std::vector<std::string> processIDList;
getMockCatalog()->expectPing(
[&testMutex, &ping3TimesCV, &processIDList](StringData processIDArg, Date_t ping) {
- stdx::lock_guard<stdx::mutex> lk(testMutex);
+ stdx::lock_guard<Latch> lk(testMutex);
processIDList.push_back(processIDArg.toString());
if (processIDList.size() >= 3) {
@@ -626,7 +626,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
if (processIDList.size() < 3) {
didTimeout = ping3TimesCV.wait_for(lk, kJoinTimeout.toSystemDuration()) ==
stdx::cv_status::timeout;
@@ -659,7 +659,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
* 4. Check that lockSessionID used on all unlock is the same as the one used to grab lock.
*/
TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
- stdx::mutex unlockMutex;
+ auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
const unsigned int kUnlockErrorCount = 3;
std::vector<OID> lockSessionIDPassed;
@@ -667,13 +667,13 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
getMockCatalog()->expectUnLock(
[this, &unlockMutex, &unlockCV, &kUnlockErrorCount, &lockSessionIDPassed](
const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
lockSessionIDPassed.push_back(lockSessionID);
if (lockSessionIDPassed.size() >= kUnlockErrorCount) {
getMockCatalog()->expectUnLock(
[&lockSessionIDPassed, &unlockMutex, &unlockCV](const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
lockSessionIDPassed.push_back(lockSessionID);
unlockCV.notify_all();
},
@@ -705,7 +705,7 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
if (lockSessionIDPassed.size() < kUnlockErrorCount) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
@@ -739,7 +739,7 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
* 5. Check that the lock session id used when lock was called matches with unlock.
*/
TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
- stdx::mutex testMutex;
+ auto testMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
std::vector<OID> lockSessionIDPassed;
std::map<OID, int> unlockIDMap; // id -> count
@@ -761,14 +761,14 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
getMockCatalog()->expectUnLock(
[this, &unlockIDMap, &testMutex, &unlockCV, &mapEntriesGreaterThanTwo](
const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
unlockIDMap[lockSessionID]++;
// Wait until we have seen at least 2 unique lockSessionIDs, each more than twice.
if (unlockIDMap.size() >= 2 && mapEntriesGreaterThanTwo(unlockIDMap)) {
getMockCatalog()->expectUnLock(
[&testMutex, &unlockCV](const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
unlockCV.notify_all();
},
Status::OK());
@@ -792,7 +792,7 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
StringData processId,
Date_t time,
StringData why) {
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
lockSessionIDPassed.push_back(lockSessionIDArg);
},
retLockDoc);
@@ -804,7 +804,7 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(testMutex);
+ stdx::unique_lock<Latch> lk(testMutex);
if (unlockIDMap.size() < 2 || !mapEntriesGreaterThanTwo(unlockIDMap)) {
didTimeout =
@@ -1739,11 +1739,11 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingResultsInError) {
OID unlockSessionIDPassed;
- stdx::mutex unlockMutex;
+ auto unlockMutex = MONGO_MAKE_LATCH();
stdx::condition_variable unlockCV;
getMockCatalog()->expectUnLock(
[&unlockSessionIDPassed, &unlockMutex, &unlockCV](const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
unlockSessionIDPassed = lockSessionID;
unlockCV.notify_all();
},
@@ -1756,7 +1756,7 @@ TEST_F(ReplSetDistLockManagerFixture, LockOvertakingResultsInError) {
bool didTimeout = false;
{
- stdx::unique_lock<stdx::mutex> lk(unlockMutex);
+ stdx::unique_lock<Latch> lk(unlockMutex);
if (!unlockSessionIDPassed.isSet()) {
didTimeout =
unlockCV.wait_for(lk, kJoinTimeout.toSystemDuration()) == stdx::cv_status::timeout;
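
These tests also demonstrate pairing an anonymous latch with a condition variable. Below is a stripped-down sketch of that pattern (a hypothetical test body, mirroring the code above; it assumes the mongo namespace and the same headers the tests use, namely mongo/platform/mutex.h, mongo/stdx/thread.h, and the stdx condition_variable header):

    auto mutex = MONGO_MAKE_LATCH();  // anonymous Mutex, named "AnonymousMutex"
    stdx::condition_variable cv;
    bool done = false;

    stdx::thread worker([&] {
        stdx::lock_guard<Latch> lk(mutex);
        done = true;
        cv.notify_one();
    });

    {
        stdx::unique_lock<Latch> lk(mutex);
        cv.wait(lk, [&] { return done; });  // stdx::condition_variable waits on the
                                            // Latch-typed unique_lock, as in the tests above
    }
    worker.join();
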
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index a73251cd98c..ddb160f3ceb 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -144,7 +144,7 @@ ShardingCatalogClientImpl::ShardingCatalogClientImpl(
ShardingCatalogClientImpl::~ShardingCatalogClientImpl() = default;
void ShardingCatalogClientImpl::startup() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_started) {
return;
}
@@ -156,7 +156,7 @@ void ShardingCatalogClientImpl::startup() {
void ShardingCatalogClientImpl::shutDown(OperationContext* opCtx) {
LOG(1) << "ShardingCatalogClientImpl::shutDown() called.";
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index 3d1446e4805..269305ae323 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -31,9 +31,9 @@
#include "mongo/client/connection_string.h"
#include "mongo/db/repl/optime.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -216,7 +216,7 @@ private:
// (R) Read only, can only be written during initialization.
//
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardingCatalogClientImpl::_mutex");
// Distributed lock manager singleton.
std::unique_ptr<DistLockManager> _distLockManager; // (R)
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 4a3d3c00849..918e2cb1f2e 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -127,7 +127,7 @@ StatusWith<CachedDatabaseInfo> CatalogCache::getDatabase(OperationContext* opCtx
"SERVER-37398.");
try {
while (true) {
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
auto& dbEntry = _databases[dbName];
if (!dbEntry) {
@@ -217,7 +217,7 @@ CatalogCache::RefreshResult CatalogCache::_getCollectionRoutingInfoAt(
const auto dbInfo = std::move(swDbInfo.getValue());
- stdx::unique_lock<stdx::mutex> ul(_mutex);
+ stdx::unique_lock<Latch> ul(_mutex);
const auto itDb = _collectionsByDb.find(nss.db());
if (itDb == _collectionsByDb.end()) {
@@ -312,7 +312,7 @@ StatusWith<CachedCollectionRoutingInfo> CatalogCache::getShardedCollectionRoutin
void CatalogCache::onStaleDatabaseVersion(const StringData dbName,
const DatabaseVersion& databaseVersion) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
const auto itDbEntry = _databases.find(dbName);
if (itDbEntry == _databases.end()) {
@@ -345,7 +345,7 @@ void CatalogCache::onStaleShardVersion(CachedCollectionRoutingInfo&& ccriToInval
// We received StaleShardVersion for a collection we thought was sharded. Either a migration
// occurred to or from a shard we contacted, or the collection was dropped.
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
const auto nss = ccri._cm->getns();
const auto itDb = _collectionsByDb.find(nss.db());
@@ -369,7 +369,7 @@ void CatalogCache::onStaleShardVersion(CachedCollectionRoutingInfo&& ccriToInval
void CatalogCache::checkEpochOrThrow(const NamespaceString& nss,
ChunkVersion targetCollectionVersion) const {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
const auto itDb = _collectionsByDb.find(nss.db());
uassert(StaleConfigInfo(nss, targetCollectionVersion, boost::none),
str::stream() << "could not act as router for " << nss.ns()
@@ -397,7 +397,7 @@ void CatalogCache::checkEpochOrThrow(const NamespaceString& nss,
}
void CatalogCache::invalidateDatabaseEntry(const StringData dbName) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto itDbEntry = _databases.find(dbName);
if (itDbEntry == _databases.end()) {
// The database was dropped.
@@ -407,7 +407,7 @@ void CatalogCache::invalidateDatabaseEntry(const StringData dbName) {
}
void CatalogCache::invalidateShardedCollection(const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto itDb = _collectionsByDb.find(nss.db());
if (itDb == _collectionsByDb.end()) {
@@ -421,7 +421,7 @@ void CatalogCache::invalidateShardedCollection(const NamespaceString& nss) {
}
void CatalogCache::invalidateEntriesThatReferenceShard(const ShardId& shardId) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
log() << "Starting to invalidate databases and collections with data on shard: " << shardId;
@@ -461,7 +461,7 @@ void CatalogCache::invalidateEntriesThatReferenceShard(const ShardId& shardId) {
}
void CatalogCache::purgeCollection(const NamespaceString& nss) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
auto itDb = _collectionsByDb.find(nss.db());
if (itDb == _collectionsByDb.end()) {
@@ -472,13 +472,13 @@ void CatalogCache::purgeCollection(const NamespaceString& nss) {
}
void CatalogCache::purgeDatabase(StringData dbName) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_databases.erase(dbName);
_collectionsByDb.erase(dbName);
}
void CatalogCache::purgeAllDatabases() {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_databases.clear();
_collectionsByDb.clear();
}
@@ -489,7 +489,7 @@ void CatalogCache::report(BSONObjBuilder* builder) const {
size_t numDatabaseEntries;
size_t numCollectionEntries{0};
{
- stdx::lock_guard<stdx::mutex> ul(_mutex);
+ stdx::lock_guard<Latch> ul(_mutex);
numDatabaseEntries = _databases.size();
for (const auto& entry : _collectionsByDb) {
numCollectionEntries += entry.second.size();
@@ -546,7 +546,7 @@ void CatalogCache::_scheduleDatabaseRefresh(WithLock lk,
const auto refreshCallback = [ this, dbName, dbEntry, onRefreshFailed, onRefreshCompleted ](
OperationContext * opCtx, StatusWith<DatabaseType> swDbt) noexcept {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (!swDbt.isOK()) {
onRefreshFailed(lg, swDbt.getStatus());
@@ -657,12 +657,12 @@ void CatalogCache::_scheduleCollectionRefresh(WithLock lk,
onRefreshCompleted(Status::OK(), newRoutingInfo.get());
} catch (const DBException& ex) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
onRefreshFailed(lg, ex.toStatus());
return;
}
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
collEntry->needsRefresh = false;
collEntry->refreshCompletionNotification->set(Status::OK());
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index a087f02802c..fe2f1f60400 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -33,12 +33,12 @@
#include "mongo/base/string_data.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog_cache_loader.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/database_version_gen.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/string_map.h"
@@ -394,7 +394,7 @@ private:
using CollectionsByDbMap = StringMap<CollectionInfoMap>;
// Mutex to serialize access to the structures below
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("CatalogCache::_mutex");
// Map from DB name to the info for that database
DatabaseInfoMap _databases;
diff --git a/src/mongo/s/chunk_writes_tracker.cpp b/src/mongo/s/chunk_writes_tracker.cpp
index 807c526532d..abb20746650 100644
--- a/src/mongo/s/chunk_writes_tracker.cpp
+++ b/src/mongo/s/chunk_writes_tracker.cpp
@@ -52,7 +52,7 @@ bool ChunkWritesTracker::shouldSplit(uint64_t maxChunkSize) {
}
bool ChunkWritesTracker::acquireSplitLock() {
- stdx::lock_guard<stdx::mutex> lk(_mtx);
+ stdx::lock_guard<Latch> lk(_mtx);
if (!_isLockedForSplitting) {
_isLockedForSplitting = true;
diff --git a/src/mongo/s/chunk_writes_tracker.h b/src/mongo/s/chunk_writes_tracker.h
index 141879375c4..85309c5b205 100644
--- a/src/mongo/s/chunk_writes_tracker.h
+++ b/src/mongo/s/chunk_writes_tracker.h
@@ -30,7 +30,7 @@
#pragma once
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -89,7 +89,7 @@ private:
/**
* Protects _splitState when starting a split.
*/
- stdx::mutex _mtx;
+ Mutex _mtx = MONGO_MAKE_LATCH("ChunkWritesTracker::_mtx");
/**
* Whether or not a current split is in progress for this chunk.
diff --git a/src/mongo/s/client/rs_local_client.cpp b/src/mongo/s/client/rs_local_client.cpp
index fdb386a3bcf..e1ec4917f8f 100644
--- a/src/mongo/s/client/rs_local_client.cpp
+++ b/src/mongo/s/client/rs_local_client.cpp
@@ -56,7 +56,7 @@ void RSLocalClient::_updateLastOpTimeFromClient(OperationContext* opCtx,
return;
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (lastOpTimeFromClient >= _lastOpTime) {
// It's always possible for lastOpTimeFromClient to be less than _lastOpTime if another
// thread started and completed a write through this ShardLocal (updating _lastOpTime)
@@ -66,7 +66,7 @@ void RSLocalClient::_updateLastOpTimeFromClient(OperationContext* opCtx,
}
repl::OpTime RSLocalClient::_getLastOpTime() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _lastOpTime;
}
diff --git a/src/mongo/s/client/rs_local_client.h b/src/mongo/s/client/rs_local_client.h
index 7bba5c7eaa0..7dabd19d454 100644
--- a/src/mongo/s/client/rs_local_client.h
+++ b/src/mongo/s/client/rs_local_client.h
@@ -30,8 +30,8 @@
#pragma once
#include "mongo/db/repl/optime.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/client/shard.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -86,7 +86,7 @@ private:
repl::OpTime _getLastOpTime();
// Guards _lastOpTime below.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("RSLocalClient::_mutex");
// Stores the optime that was generated by the last operation to perform a write that was run
// through _runCommand. Used in _exhaustiveFindOnConfig for waiting for that optime to be
diff --git a/src/mongo/s/client/shard_connection.cpp b/src/mongo/s/client/shard_connection.cpp
index 2cc44c44a1f..1e56594331d 100644
--- a/src/mongo/s/client/shard_connection.cpp
+++ b/src/mongo/s/client/shard_connection.cpp
@@ -63,19 +63,19 @@ class ClientConnections;
class ActiveClientConnections {
public:
void add(const ClientConnections* cc) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_clientConnections.insert(cc);
}
void remove(const ClientConnections* cc) {
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
_clientConnections.erase(cc);
}
void appendInfo(BSONObjBuilder* b) const;
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ActiveClientConnections::_mutex");
std::set<const ClientConnections*> _clientConnections;
} activeClientConnections;
@@ -331,7 +331,7 @@ void ActiveClientConnections::appendInfo(BSONObjBuilder* b) const {
BSONArrayBuilder arr(64 * 1024);
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (const auto* conn : _clientConnections) {
BSONObjBuilder bb(arr.subobjStart());
conn->appendInfo(bb);
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 2c92c46e33e..95b3a726eff 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -50,13 +50,13 @@
#include "mongo/executor/task_executor.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/executor/thread_pool_task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/rpc/metadata/egress_metadata_hook_list.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_factory.h"
#include "mongo/s/grid.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/log.h"
#include "mongo/util/map_util.h"
@@ -202,12 +202,12 @@ void ShardRegistry::updateReplSetHosts(const ConnectionString& newConnString) {
newConnString.type() == ConnectionString::CUSTOM); // For dbtests
// to prevent update config shard connection string during init
- stdx::unique_lock<stdx::mutex> lock(_reloadMutex);
+ stdx::unique_lock<Latch> lock(_reloadMutex);
_data.rebuildShardIfExists(newConnString, _shardFactory.get());
}
void ShardRegistry::init() {
- stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
+ stdx::unique_lock<Latch> reloadLock(_reloadMutex);
invariant(_initConfigServerCS.isValid());
auto configShard =
_shardFactory->createShard(ShardRegistry::kConfigServerShardId, _initConfigServerCS);
@@ -282,12 +282,12 @@ void ShardRegistry::_internalReload(const CallbackArgs& cbArgs) {
}
bool ShardRegistry::isUp() const {
- stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
+ stdx::unique_lock<Latch> reloadLock(_reloadMutex);
return _isUp;
}
bool ShardRegistry::reload(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
+ stdx::unique_lock<Latch> reloadLock(_reloadMutex);
if (_reloadState == ReloadState::Reloading) {
// Another thread is already in the process of reloading so no need to do duplicate work.
@@ -444,7 +444,7 @@ ShardRegistryData::ShardRegistryData(OperationContext* opCtx, ShardFactory* shar
}
void ShardRegistryData::swap(ShardRegistryData& other) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_lookup.swap(other._lookup);
_rsLookup.swap(other._rsLookup);
_hostLookup.swap(other._hostLookup);
@@ -452,29 +452,29 @@ void ShardRegistryData::swap(ShardRegistryData& other) {
}
shared_ptr<Shard> ShardRegistryData::getConfigShard() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _configShard;
}
void ShardRegistryData::addConfigShard(std::shared_ptr<Shard> shard) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_configShard = shard;
_addShard(lk, shard, true);
}
shared_ptr<Shard> ShardRegistryData::findByRSName(const string& name) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto i = _rsLookup.find(name);
return (i != _rsLookup.end()) ? i->second : nullptr;
}
shared_ptr<Shard> ShardRegistryData::findByHostAndPort(const HostAndPort& hostAndPort) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return mapFindWithDefault(_hostLookup, hostAndPort);
}
shared_ptr<Shard> ShardRegistryData::findByShardId(const ShardId& shardId) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _findByShardId(lk, shardId);
}
@@ -487,7 +487,7 @@ void ShardRegistryData::toBSON(BSONObjBuilder* result) const {
// Need to copy, then sort by shardId.
std::vector<std::pair<ShardId, std::string>> shards;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
shards.reserve(_lookup.size());
for (auto&& shard : _lookup) {
shards.emplace_back(shard.first, shard.second->getConnString().toString());
@@ -503,7 +503,7 @@ void ShardRegistryData::toBSON(BSONObjBuilder* result) const {
}
void ShardRegistryData::getAllShardIds(std::set<ShardId>& seen) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto i = _lookup.begin(); i != _lookup.end(); ++i) {
const auto& s = i->second;
if (s->getId().toString() == "config") {
@@ -514,7 +514,7 @@ void ShardRegistryData::getAllShardIds(std::set<ShardId>& seen) const {
}
void ShardRegistryData::shardIdSetDifference(std::set<ShardId>& diff) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto i = _lookup.begin(); i != _lookup.end(); ++i) {
invariant(i->second);
auto res = diff.find(i->second->getId());
@@ -526,7 +526,7 @@ void ShardRegistryData::shardIdSetDifference(std::set<ShardId>& diff) const {
void ShardRegistryData::rebuildShardIfExists(const ConnectionString& newConnString,
ShardFactory* factory) {
- stdx::unique_lock<stdx::mutex> updateConnStringLock(_mutex);
+ stdx::unique_lock<Latch> updateConnStringLock(_mutex);
auto it = _rsLookup.find(newConnString.getSetName());
if (it == _rsLookup.end()) {
return;
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index a5917c64413..22a08cef941 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -36,9 +36,9 @@
#include "mongo/db/jsobj.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/client/shard.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -114,7 +114,7 @@ private:
void _rebuildShard(WithLock, ConnectionString const& newConnString, ShardFactory* factory);
// Protects the lookup maps below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("::_mutex");
using ShardMap = stdx::unordered_map<ShardId, std::shared_ptr<Shard>, ShardId::Hasher>;
@@ -302,7 +302,7 @@ private:
ShardRegistryData _data;
// Protects the _reloadState and _initConfigServerCS during startup.
- mutable stdx::mutex _reloadMutex;
+ mutable Mutex _reloadMutex = MONGO_MAKE_LATCH("ShardRegistry::_reloadMutex");
stdx::condition_variable _inReloadCV;
enum class ReloadState {
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index 25f2e1959c0..4e684d8fab9 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -143,7 +143,7 @@ void ShardRemote::updateReplSetMonitor(const HostAndPort& remoteHost,
}
void ShardRemote::updateLastCommittedOpTime(LogicalTime lastCommittedOpTime) {
- stdx::lock_guard<stdx::mutex> lk(_lastCommittedOpTimeMutex);
+ stdx::lock_guard<Latch> lk(_lastCommittedOpTimeMutex);
// A secondary may return a lastCommittedOpTime less than the latest seen so far.
if (lastCommittedOpTime > _lastCommittedOpTime) {
@@ -152,7 +152,7 @@ void ShardRemote::updateLastCommittedOpTime(LogicalTime lastCommittedOpTime) {
}
LogicalTime ShardRemote::getLastCommittedOpTime() const {
- stdx::lock_guard<stdx::mutex> lk(_lastCommittedOpTimeMutex);
+ stdx::lock_guard<Latch> lk(_lastCommittedOpTimeMutex);
return _lastCommittedOpTime;
}
diff --git a/src/mongo/s/client/shard_remote.h b/src/mongo/s/client/shard_remote.h
index 3b19fd8ab0f..cf1b7b2d3f8 100644
--- a/src/mongo/s/client/shard_remote.h
+++ b/src/mongo/s/client/shard_remote.h
@@ -34,7 +34,7 @@
#include "mongo/s/client/shard.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -133,7 +133,8 @@ private:
/**
* Protects _lastCommittedOpTime.
*/
- mutable stdx::mutex _lastCommittedOpTimeMutex;
+ mutable Mutex _lastCommittedOpTimeMutex =
+ MONGO_MAKE_LATCH("ShardRemote::_lastCommittedOpTimeMutex");
/**
* Logical time representing the latest opTime timestamp known to be in this shard's majority
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index 1ffed487065..27907950ac4 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -63,14 +63,14 @@ namespace {
class ConnectionShardStatus {
public:
bool hasAnySequenceSet(DBClientBase* conn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
return seenConnIt != _map.end() && seenConnIt->second.size() > 0;
}
bool getSequence(DBClientBase* conn, const string& ns, unsigned long long* sequence) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
if (seenConnIt == _map.end())
@@ -85,18 +85,18 @@ public:
}
void setSequence(DBClientBase* conn, const string& ns, const unsigned long long& s) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_map[conn->getConnectionId()][ns] = s;
}
void reset(DBClientBase* conn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_map.erase(conn->getConnectionId());
}
private:
// protects _map
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ConnectionShardStatus::_mutex");
// a map from a connection into ChunkManager's sequence number for each namespace
typedef map<unsigned long long, map<string, unsigned long long>> SequenceMap;
diff --git a/src/mongo/s/cluster_identity_loader.cpp b/src/mongo/s/cluster_identity_loader.cpp
index ed61976820a..1962272ca5d 100644
--- a/src/mongo/s/cluster_identity_loader.cpp
+++ b/src/mongo/s/cluster_identity_loader.cpp
@@ -56,14 +56,14 @@ ClusterIdentityLoader* ClusterIdentityLoader::get(OperationContext* operationCon
}
OID ClusterIdentityLoader::getClusterId() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
invariant(_initializationState == InitializationState::kInitialized && _lastLoadResult.isOK());
return _lastLoadResult.getValue();
}
Status ClusterIdentityLoader::loadClusterId(OperationContext* opCtx,
const repl::ReadConcernLevel& readConcernLevel) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_initializationState == InitializationState::kInitialized) {
invariant(_lastLoadResult.isOK());
return Status::OK();
@@ -105,7 +105,7 @@ StatusWith<OID> ClusterIdentityLoader::_fetchClusterIdFromConfig(
}
void ClusterIdentityLoader::discardCachedClusterId() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_initializationState == InitializationState::kUninitialized) {
return;
diff --git a/src/mongo/s/cluster_identity_loader.h b/src/mongo/s/cluster_identity_loader.h
index b5ee563d253..6b6d394f9e1 100644
--- a/src/mongo/s/cluster_identity_loader.h
+++ b/src/mongo/s/cluster_identity_loader.h
@@ -33,8 +33,8 @@
#include "mongo/bson/oid.h"
#include "mongo/db/repl/read_concern_args.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -94,7 +94,7 @@ private:
StatusWith<OID> _fetchClusterIdFromConfig(OperationContext* opCtx,
const repl::ReadConcernLevel& readConcernLevel);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ClusterIdentityLoader::_mutex");
stdx::condition_variable _inReloadCV;
// Used to ensure that only one thread at a time attempts to reload the cluster ID from the
diff --git a/src/mongo/s/cluster_last_error_info.cpp b/src/mongo/s/cluster_last_error_info.cpp
index 4dd79d95fb4..2fe697df461 100644
--- a/src/mongo/s/cluster_last_error_info.cpp
+++ b/src/mongo/s/cluster_last_error_info.cpp
@@ -40,12 +40,12 @@ const Client::Decoration<std::shared_ptr<ClusterLastErrorInfo>> ClusterLastError
Client::declareDecoration<std::shared_ptr<ClusterLastErrorInfo>>();
void ClusterLastErrorInfo::addShardHost(const std::string& shardHost) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_cur->shardHostsWritten.insert(shardHost);
}
void ClusterLastErrorInfo::addHostOpTime(ConnectionString connStr, HostOpTime stat) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_cur->hostOpTimes[connStr] = stat;
}
@@ -56,13 +56,13 @@ void ClusterLastErrorInfo::addHostOpTimes(const HostOpTimeMap& hostOpTimes) {
}
void ClusterLastErrorInfo::newRequest() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
std::swap(_cur, _prev);
_cur->clear();
}
void ClusterLastErrorInfo::disableForCommand() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
RequestInfo* temp = _cur;
_cur = _prev;
_prev = temp;
diff --git a/src/mongo/s/cluster_last_error_info.h b/src/mongo/s/cluster_last_error_info.h
index 0cc07fa27ac..af13045099d 100644
--- a/src/mongo/s/cluster_last_error_info.h
+++ b/src/mongo/s/cluster_last_error_info.h
@@ -63,7 +63,7 @@ public:
* gets shards used on the previous request
*/
std::set<std::string>* getPrevShardHosts() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return &_prev->shardHostsWritten;
}
@@ -71,7 +71,7 @@ public:
* Gets the shards, hosts, and opTimes the client last wrote to with write commands.
*/
const HostOpTimeMap& getPrevHostOpTimes() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _prev->hostOpTimes;
}
@@ -89,7 +89,7 @@ private:
};
// Protects _infos, _cur, and _prev.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ClusterLastErrorInfo::_mutex");
// We use 2 so we can flip for getLastError type operations.
RequestInfo _infos[2];
diff --git a/src/mongo/s/config_server_catalog_cache_loader.cpp b/src/mongo/s/config_server_catalog_cache_loader.cpp
index de8fb1a493f..c4a02e89514 100644
--- a/src/mongo/s/config_server_catalog_cache_loader.cpp
+++ b/src/mongo/s/config_server_catalog_cache_loader.cpp
@@ -160,7 +160,7 @@ void ConfigServerCatalogCacheLoader::onStepUp() {
void ConfigServerCatalogCacheLoader::shutDown() {
{
- stdx::lock_guard<stdx::mutex> lg(_mutex);
+ stdx::lock_guard<Latch> lg(_mutex);
if (_inShutdown) {
return;
}
diff --git a/src/mongo/s/config_server_catalog_cache_loader.h b/src/mongo/s/config_server_catalog_cache_loader.h
index 81c81240100..2da4fb9a8e9 100644
--- a/src/mongo/s/config_server_catalog_cache_loader.h
+++ b/src/mongo/s/config_server_catalog_cache_loader.h
@@ -64,7 +64,7 @@ private:
ThreadPool _threadPool;
// Protects the class state below
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ConfigServerCatalogCacheLoader::_mutex");
// True if shutDown was called.
bool _inShutdown{false};
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
index 97e2ccef518..bda2bc6e929 100644
--- a/src/mongo/s/grid.cpp
+++ b/src/mongo/s/grid.cpp
@@ -96,12 +96,12 @@ void Grid::setShardingInitialized() {
}
Grid::CustomConnectionPoolStatsFn Grid::getCustomConnectionPoolStatsFn() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _customConnectionPoolStatsFn;
}
void Grid::setCustomConnectionPoolStatsFn(CustomConnectionPoolStatsFn statsFn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_customConnectionPoolStatsFn || !statsFn);
_customConnectionPoolStatsFn = std::move(statsFn);
}
@@ -117,7 +117,7 @@ void Grid::setAllowLocalHost(bool allow) {
repl::OpTime Grid::configOpTime() const {
invariant(serverGlobalParams.clusterRole != ClusterRole::ConfigServer);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _configOpTime;
}
@@ -141,7 +141,7 @@ boost::optional<repl::OpTime> Grid::advanceConfigOpTime(OperationContext* opCtx,
boost::optional<repl::OpTime> Grid::_advanceConfigOpTime(const repl::OpTime& opTime) {
invariant(serverGlobalParams.clusterRole != ClusterRole::ConfigServer);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_configOpTime < opTime) {
repl::OpTime prev = _configOpTime;
_configOpTime = opTime;
diff --git a/src/mongo/s/grid.h b/src/mongo/s/grid.h
index 2356b01b029..92b7a761f97 100644
--- a/src/mongo/s/grid.h
+++ b/src/mongo/s/grid.h
@@ -33,10 +33,10 @@
#include <memory>
#include "mongo/db/repl/optime.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -192,7 +192,7 @@ private:
AtomicWord<bool> _shardingInitialized{false};
// Protects _configOpTime.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("Grid::_mutex");
// Last known highest opTime from the config server that should be used when doing reads.
// This value is updated any time a shard or mongos talks to a config server or a shard.
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index f96f9a635b6..bc89f6aa19a 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -115,12 +115,12 @@ AsyncResultsMerger::AsyncResultsMerger(OperationContext* opCtx,
}
AsyncResultsMerger::~AsyncResultsMerger() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_remotesExhausted(lk) || _lifecycleState == kKillComplete);
}
bool AsyncResultsMerger::remotesExhausted() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _remotesExhausted(lk);
}
@@ -135,7 +135,7 @@ bool AsyncResultsMerger::_remotesExhausted(WithLock) const {
}
Status AsyncResultsMerger::setAwaitDataTimeout(Milliseconds awaitDataTimeout) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_tailableMode != TailableModeEnum::kTailableAndAwaitData) {
return Status(ErrorCodes::BadValue,
@@ -155,12 +155,12 @@ Status AsyncResultsMerger::setAwaitDataTimeout(Milliseconds awaitDataTimeout) {
}
bool AsyncResultsMerger::ready() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _ready(lk);
}
void AsyncResultsMerger::detachFromOperationContext() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_opCtx = nullptr;
// If we were about ready to return a boost::none because a tailable cursor reached the end of
// the batch, that should no longer apply to the next use - when we are reattached to a
@@ -170,13 +170,13 @@ void AsyncResultsMerger::detachFromOperationContext() {
}
void AsyncResultsMerger::reattachToOperationContext(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_opCtx);
_opCtx = opCtx;
}
void AsyncResultsMerger::addNewShardCursors(std::vector<RemoteCursor>&& newCursors) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// Create a new entry in the '_remotes' list for each new shard, and add the first cursor batch
// to its buffer. This ensures the shard's initial high water mark is respected, if it exists.
for (auto&& remote : newCursors) {
@@ -189,7 +189,7 @@ void AsyncResultsMerger::addNewShardCursors(std::vector<RemoteCursor>&& newCurso
}
BSONObj AsyncResultsMerger::getHighWaterMark() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto minPromisedSortKey = _getMinPromisedSortKey(lk);
if (!minPromisedSortKey.isEmpty() && !_ready(lk)) {
_highWaterMark = minPromisedSortKey;
@@ -272,7 +272,7 @@ bool AsyncResultsMerger::_readyUnsorted(WithLock) {
}
StatusWith<ClusterQueryResult> AsyncResultsMerger::nextReady() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
dassert(_ready(lk));
if (_lifecycleState != kAlive) {
return Status(ErrorCodes::IllegalOperation, "AsyncResultsMerger killed");
@@ -400,7 +400,7 @@ Status AsyncResultsMerger::_askForNextBatch(WithLock, size_t remoteIndex) {
auto callbackStatus =
_executor->scheduleRemoteCommand(request, [this, remoteIndex](auto const& cbData) {
- stdx::lock_guard<stdx::mutex> lk(this->_mutex);
+ stdx::lock_guard<Latch> lk(this->_mutex);
this->_handleBatchResponse(lk, cbData, remoteIndex);
});
@@ -413,7 +413,7 @@ Status AsyncResultsMerger::_askForNextBatch(WithLock, size_t remoteIndex) {
}
Status AsyncResultsMerger::scheduleGetMores() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _scheduleGetMores(lk);
}
@@ -447,7 +447,7 @@ Status AsyncResultsMerger::_scheduleGetMores(WithLock lk) {
* 3. Remotes that reached maximum retries will be in 'exhausted' state.
*/
StatusWith<executor::TaskExecutor::EventHandle> AsyncResultsMerger::nextEvent() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_lifecycleState != kAlive) {
// Can't schedule further network operations if the ARM is being killed.
@@ -704,7 +704,7 @@ void AsyncResultsMerger::_scheduleKillCursors(WithLock, OperationContext* opCtx)
}
executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* opCtx) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_killCompleteEvent.isValid()) {
invariant(_lifecycleState != kAlive);
diff --git a/src/mongo/s/query/async_results_merger.h b/src/mongo/s/query/async_results_merger.h
index 3cf357dca6b..e0e0f2e94c1 100644
--- a/src/mongo/s/query/async_results_merger.h
+++ b/src/mongo/s/query/async_results_merger.h
@@ -37,9 +37,9 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/db/cursor_id.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/query/async_results_merger_params_gen.h"
#include "mongo/s/query/cluster_query_result.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
@@ -451,7 +451,7 @@ private:
AsyncResultsMergerParams _params;
// Must be acquired before accessing any data members (other than _params, which is read-only).
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("AsyncResultsMerger::_mutex");
// Data tracking the state of our communication with each of the remote nodes.
std::vector<RemoteCursorData> _remotes;
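The hunks above show the core pattern of this patch: a member stdx::mutex becomes a mongo::Mutex named via MONGO_MAKE_LATCH, and guards are instantiated over the Latch interface it satisfies. A minimal sketch of that shape, assuming it is built inside the MongoDB tree; ExampleRegistry and its members are invented for illustration and are not part of this change:

#include <algorithm>
#include <vector>

#include "mongo/platform/mutex.h"

namespace mongo {

class ExampleRegistry {
public:
    void insert(int id) {
        stdx::lock_guard<Latch> lk(_mutex);  // guards now lock through the Latch interface
        _ids.push_back(id);
    }

    bool contains(int id) const {
        stdx::lock_guard<Latch> lk(_mutex);
        return std::find(_ids.begin(), _ids.end(), id) != _ids.end();
    }

private:
    // The string names the latch for diagnostics; members in this patch follow
    // the "ClassName::_member" convention.
    mutable Mutex _mutex = MONGO_MAKE_LATCH("ExampleRegistry::_mutex");
    std::vector<int> _ids;
};

}  // namespace mongo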
diff --git a/src/mongo/s/query/blocking_results_merger_test.cpp b/src/mongo/s/query/blocking_results_merger_test.cpp
index 5d07b0e2c75..c99aff31fcf 100644
--- a/src/mongo/s/query/blocking_results_merger_test.cpp
+++ b/src/mongo/s/query/blocking_results_merger_test.cpp
@@ -157,13 +157,13 @@ TEST_F(ResultsMergerTestFixture, ShouldBeAbleToBlockUntilNextResultIsReadyWithDe
future.default_timed_get();
// Used for synchronizing the background thread with this thread.
- stdx::mutex mutex;
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ auto mutex = MONGO_MAKE_LATCH();
+ stdx::unique_lock<Latch> lk(mutex);
// Issue a blocking wait for the next result asynchronously on a different thread.
future = launchAsync([&]() {
// Block until the main thread has responded to the getMore.
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
auto next = unittest::assertGet(blockingMerger.next(
operationContext(), RouterExecStage::ExecContext::kGetMoreNoResultsYet));
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index d1b39da34b1..03dfd1114f5 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -160,7 +160,7 @@ ClusterCursorManager::~ClusterCursorManager() {
void ClusterCursorManager::shutdown(OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_inShutdown = true;
}
killAllCursors(opCtx);
@@ -176,7 +176,7 @@ StatusWith<CursorId> ClusterCursorManager::registerCursor(
// Read the clock out of the lock.
const auto now = _clockSource->now();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown) {
lk.unlock();
@@ -239,7 +239,7 @@ StatusWith<ClusterCursorManager::PinnedCursor> ClusterCursorManager::checkOutCur
OperationContext* opCtx,
AuthzCheckFn authChecker,
AuthCheck checkSessionAuth) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_inShutdown) {
return Status(ErrorCodes::ShutdownInProgress,
@@ -299,7 +299,7 @@ void ClusterCursorManager::checkInCursor(std::unique_ptr<ClusterClientCursor> cu
cursor->detachFromOperationContext();
cursor->setLastUseDate(now);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
CursorEntry* entry = _getEntry(lk, nss, cursorId);
invariant(entry);
@@ -324,7 +324,7 @@ Status ClusterCursorManager::checkAuthForKillCursors(OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId,
AuthzCheckFn authChecker) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto entry = _getEntry(lk, nss, cursorId);
if (!entry) {
@@ -352,7 +352,7 @@ Status ClusterCursorManager::killCursor(OperationContext* opCtx,
CursorId cursorId) {
invariant(opCtx);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
CursorEntry* entry = _getEntry(lk, nss, cursorId);
if (!entry) {
@@ -376,7 +376,7 @@ Status ClusterCursorManager::killCursor(OperationContext* opCtx,
return Status::OK();
}
-void ClusterCursorManager::detachAndKillCursor(stdx::unique_lock<stdx::mutex> lk,
+void ClusterCursorManager::detachAndKillCursor(stdx::unique_lock<Latch> lk,
OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId) {
@@ -390,7 +390,7 @@ void ClusterCursorManager::detachAndKillCursor(stdx::unique_lock<stdx::mutex> lk
std::size_t ClusterCursorManager::killMortalCursorsInactiveSince(OperationContext* opCtx,
Date_t cutoff) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto pred = [cutoff](CursorId cursorId, const CursorEntry& entry) -> bool {
bool res = entry.getLifetimeType() == CursorLifetime::Mortal &&
@@ -408,14 +408,14 @@ std::size_t ClusterCursorManager::killMortalCursorsInactiveSince(OperationContex
}
void ClusterCursorManager::killAllCursors(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto pred = [](CursorId, const CursorEntry&) -> bool { return true; };
killCursorsSatisfying(std::move(lk), opCtx, std::move(pred));
}
std::size_t ClusterCursorManager::killCursorsSatisfying(
- stdx::unique_lock<stdx::mutex> lk,
+ stdx::unique_lock<Latch> lk,
OperationContext* opCtx,
std::function<bool(CursorId, const CursorEntry&)> pred) {
invariant(opCtx);
@@ -471,7 +471,7 @@ std::size_t ClusterCursorManager::killCursorsSatisfying(
}
ClusterCursorManager::Stats ClusterCursorManager::stats() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
Stats stats;
@@ -504,7 +504,7 @@ ClusterCursorManager::Stats ClusterCursorManager::stats() const {
}
void ClusterCursorManager::appendActiveSessions(LogicalSessionIdSet* lsids) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (const auto& nsContainerPair : _namespaceToContainerMap) {
for (const auto& cursorIdEntryPair : nsContainerPair.second.entryMap) {
@@ -545,7 +545,7 @@ std::vector<GenericCursor> ClusterCursorManager::getIdleCursors(
const OperationContext* opCtx, MongoProcessInterface::CurrentOpUserMode userMode) const {
std::vector<GenericCursor> cursors;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
AuthorizationSession* ctxAuth = AuthorizationSession::get(opCtx->getClient());
@@ -593,7 +593,7 @@ std::pair<Status, int> ClusterCursorManager::killCursorsWithMatchingSessions(
stdx::unordered_set<CursorId> ClusterCursorManager::getCursorsForSession(
LogicalSessionId lsid) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
stdx::unordered_set<CursorId> cursorIds;
@@ -618,7 +618,7 @@ stdx::unordered_set<CursorId> ClusterCursorManager::getCursorsForSession(
boost::optional<NamespaceString> ClusterCursorManager::getNamespaceForCursorId(
CursorId cursorId) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
const auto it = _cursorIdPrefixToNamespaceMap.find(extractPrefixFromCursorId(cursorId));
if (it == _cursorIdPrefixToNamespaceMap.end()) {
diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h
index af2ea16581b..a54e2d8bea3 100644
--- a/src/mongo/s/query/cluster_cursor_manager.h
+++ b/src/mongo/s/query/cluster_cursor_manager.h
@@ -38,11 +38,11 @@
#include "mongo/db/kill_sessions.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/session_killer.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
#include "mongo/s/query/cluster_client_cursor.h"
#include "mongo/s/query/cluster_client_cursor_guard.h"
#include "mongo/s/query/cluster_client_cursor_params.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/time_support.h"
@@ -406,7 +406,7 @@ private:
/**
* Will detach a cursor, release the lock and then call kill() on it.
*/
- void detachAndKillCursor(stdx::unique_lock<stdx::mutex> lk,
+ void detachAndKillCursor(stdx::unique_lock<Latch> lk,
OperationContext* opCtx,
const NamespaceString& nss,
CursorId cursorId);
@@ -443,7 +443,7 @@ private:
*
* Returns the number of cursors killed.
*/
- std::size_t killCursorsSatisfying(stdx::unique_lock<stdx::mutex> lk,
+ std::size_t killCursorsSatisfying(stdx::unique_lock<Latch> lk,
OperationContext* opCtx,
std::function<bool(CursorId, const CursorEntry&)> pred);
@@ -597,7 +597,7 @@ private:
ClockSource* _clockSource;
// Synchronizes access to all private state variables below.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ClusterCursorManager::_mutex");
bool _inShutdown{false};
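ClusterCursorManager also passes an already-held lock into helpers by value; only the template argument of stdx::unique_lock changes. A sketch of that hand-off, with invented names and the same in-tree assumption:

#include <utility>
#include <vector>

#include "mongo/platform/mutex.h"

namespace mongo {

class ExampleManager {
public:
    void killAll() {
        stdx::unique_lock<Latch> lk(_mutex);
        _killAllInlock(std::move(lk));  // transfer ownership of the held lock
    }

private:
    void _killAllInlock(stdx::unique_lock<Latch> lk) {
        _entries.clear();
        lk.unlock();
        // ... work that must not run under _mutex, e.g. scheduling remote cleanup ...
    }

    Mutex _mutex = MONGO_MAKE_LATCH("ExampleManager::_mutex");
    std::vector<int> _entries;
};

}  // namespace mongo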
diff --git a/src/mongo/s/query/establish_cursors.h b/src/mongo/s/query/establish_cursors.h
index 97e72225072..95f6e7ae9d0 100644
--- a/src/mongo/s/query/establish_cursors.h
+++ b/src/mongo/s/query/establish_cursors.h
@@ -37,9 +37,9 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/db/cursor_id.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/platform/mutex.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/query/async_results_merger_params_gen.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
#include "mongo/util/time_support.h"
diff --git a/src/mongo/s/router_transactions_metrics.cpp b/src/mongo/s/router_transactions_metrics.cpp
index ddc8406354f..b3cad2b1edc 100644
--- a/src/mongo/s/router_transactions_metrics.cpp
+++ b/src/mongo/s/router_transactions_metrics.cpp
@@ -223,7 +223,7 @@ void RouterTransactionsMetrics::incrementCommitSuccessful(TransactionRouter::Com
void RouterTransactionsMetrics::incrementAbortCauseMap(std::string abortCause) {
invariant(!abortCause.empty());
- stdx::lock_guard<stdx::mutex> lock(_abortCauseMutex);
+ stdx::lock_guard<Latch> lock(_abortCauseMutex);
auto it = _abortCauseMap.find(abortCause);
if (it == _abortCauseMap.end()) {
_abortCauseMap.emplace(std::pair<std::string, std::int64_t>(std::move(abortCause), 1));
@@ -263,7 +263,7 @@ void RouterTransactionsMetrics::updateStats(RouterTransactionsStats* stats) {
BSONObjBuilder bob;
{
- stdx::lock_guard<stdx::mutex> lock(_abortCauseMutex);
+ stdx::lock_guard<Latch> lock(_abortCauseMutex);
for (auto const& abortCauseEntry : _abortCauseMap) {
bob.append(abortCauseEntry.first, abortCauseEntry.second);
}
diff --git a/src/mongo/s/router_transactions_metrics.h b/src/mongo/s/router_transactions_metrics.h
index ed496fe394c..5c52a8e20d0 100644
--- a/src/mongo/s/router_transactions_metrics.h
+++ b/src/mongo/s/router_transactions_metrics.h
@@ -147,7 +147,7 @@ private:
CommitStats _recoverWithTokenCommitStats;
// Mutual exclusion for _abortCauseMap
- stdx::mutex _abortCauseMutex;
+ Mutex _abortCauseMutex = MONGO_MAKE_LATCH("RouterTransactionsMetrics::_abortCauseMutex");
// Map tracking the total number of each abort cause for any multi-statement transaction that
// was aborted through this router.
diff --git a/src/mongo/s/sharding_task_executor.h b/src/mongo/s/sharding_task_executor.h
index 0f034d144f1..e370a5425a5 100644
--- a/src/mongo/s/sharding_task_executor.h
+++ b/src/mongo/s/sharding_task_executor.h
@@ -34,8 +34,8 @@
#include "mongo/base/status_with.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace executor {
diff --git a/src/mongo/s/sharding_task_executor_pool_controller.h b/src/mongo/s/sharding_task_executor_pool_controller.h
index c077578892f..d9b82233974 100644
--- a/src/mongo/s/sharding_task_executor_pool_controller.h
+++ b/src/mongo/s/sharding_task_executor_pool_controller.h
@@ -35,7 +35,7 @@
#include "mongo/client/replica_set_change_notifier.h"
#include "mongo/executor/connection_pool.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
@@ -194,7 +194,7 @@ private:
ReplicaSetChangeListenerHandle _listener;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ShardingTaskExecutorPoolController::_mutex");
// Entries to _poolDatas are added by addHost() and removed by removeHost()
stdx::unordered_map<PoolId, PoolData> _poolDatas;
diff --git a/src/mongo/scripting/deadline_monitor.h b/src/mongo/scripting/deadline_monitor.h
index 03abfcbdac6..39862ebf58c 100644
--- a/src/mongo/scripting/deadline_monitor.h
+++ b/src/mongo/scripting/deadline_monitor.h
@@ -31,8 +31,8 @@
#include <cstdint>
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/idle_thread_block.h"
@@ -84,7 +84,7 @@ public:
~DeadlineMonitor() {
{
// ensure the monitor thread has been stopped before destruction
- stdx::lock_guard<stdx::mutex> lk(_deadlineMutex);
+ stdx::lock_guard<Latch> lk(_deadlineMutex);
_inShutdown = true;
_newDeadlineAvailable.notify_one();
}
@@ -105,7 +105,7 @@ public:
} else {
deadline = Date_t::max();
}
- stdx::lock_guard<stdx::mutex> lk(_deadlineMutex);
+ stdx::lock_guard<Latch> lk(_deadlineMutex);
if (_tasks.find(task) == _tasks.end()) {
_tasks.emplace(task, deadline);
@@ -123,7 +123,7 @@ public:
* @return true if the task was found and erased
*/
bool stopDeadline(_Task* const task) {
- stdx::lock_guard<stdx::mutex> lk(_deadlineMutex);
+ stdx::lock_guard<Latch> lk(_deadlineMutex);
return _tasks.erase(task);
}
@@ -135,7 +135,7 @@ private:
*/
void deadlineMonitorThread() {
setThreadName("DeadlineMonitor");
- stdx::unique_lock<stdx::mutex> lk(_deadlineMutex);
+ stdx::unique_lock<Latch> lk(_deadlineMutex);
Date_t lastInterruptCycle = Date_t::now();
while (!_inShutdown) {
// get the next interval to wait
@@ -187,8 +187,9 @@ private:
}
using TaskDeadlineMap = stdx::unordered_map<_Task*, Date_t>;
- TaskDeadlineMap _tasks; // map of running tasks with deadlines
- stdx::mutex _deadlineMutex; // protects all non-const members, except _monitorThread
+ TaskDeadlineMap _tasks; // map of running tasks with deadlines
+ // protects all non-const members, except _monitorThread
+ Mutex _deadlineMutex = MONGO_MAKE_LATCH("DeadlineMonitor::_deadlineMutex");
stdx::condition_variable _newDeadlineAvailable; // Signaled for timeout, start and stop
stdx::thread _monitorThread; // the deadline monitor thread
Date_t _nearestDeadlineWallclock = Date_t::max(); // absolute time of the nearest deadline
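DeadlineMonitor keeps its stdx::condition_variable unchanged; waits still take a stdx::unique_lock, now parameterized on Latch. A small sketch of that wait/notify shape (ExampleShutdownFlag is an invented name):

#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"

namespace mongo {

class ExampleShutdownFlag {
public:
    void signalShutdown() {
        stdx::lock_guard<Latch> lk(_mutex);
        _inShutdown = true;
        _cv.notify_all();
    }

    void waitForShutdown() {
        stdx::unique_lock<Latch> lk(_mutex);
        _cv.wait(lk, [&] { return _inShutdown; });  // predicate re-checked under the lock
    }

private:
    Mutex _mutex = MONGO_MAKE_LATCH("ExampleShutdownFlag::_mutex");
    stdx::condition_variable _cv;
    bool _inShutdown = false;
};

}  // namespace mongo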
diff --git a/src/mongo/scripting/deadline_monitor_test.cpp b/src/mongo/scripting/deadline_monitor_test.cpp
index 1ad1fb70cdb..1d95d761ee3 100644
--- a/src/mongo/scripting/deadline_monitor_test.cpp
+++ b/src/mongo/scripting/deadline_monitor_test.cpp
@@ -45,20 +45,20 @@ class TaskGroup {
public:
TaskGroup() : _c(), _killCount(0), _targetKillCount(0) {}
void noteKill() {
- stdx::lock_guard<stdx::mutex> lk(_m);
+ stdx::lock_guard<Latch> lk(_m);
++_killCount;
if (_killCount >= _targetKillCount)
_c.notify_one();
}
void waitForKillCount(uint64_t target) {
- stdx::unique_lock<stdx::mutex> lk(_m);
+ stdx::unique_lock<Latch> lk(_m);
_targetKillCount = target;
while (_killCount < _targetKillCount)
_c.wait(lk);
}
private:
- stdx::mutex _m;
+ Mutex _m = MONGO_MAKE_LATCH("TaskGroup::_m");
stdx::condition_variable _c;
uint64_t _killCount;
uint64_t _targetKillCount;
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
index b5093581889..67edb2ca277 100644
--- a/src/mongo/scripting/engine.cpp
+++ b/src/mongo/scripting/engine.cpp
@@ -332,7 +332,7 @@ namespace {
class ScopeCache {
public:
void release(const string& poolName, const std::shared_ptr<Scope>& scope) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (scope->hasOutOfMemoryException()) {
// make some room
@@ -358,7 +358,7 @@ public:
}
std::shared_ptr<Scope> tryAcquire(OperationContext* opCtx, const string& poolName) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (Pools::iterator it = _pools.begin(); it != _pools.end(); ++it) {
if (it->poolName == poolName) {
@@ -374,7 +374,7 @@ public:
}
void clear() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_pools.clear();
}
@@ -391,7 +391,7 @@ private:
typedef std::deque<ScopeAndPool> Pools; // More-recently used Scopes are kept at the front.
Pools _pools; // protected by _mutex
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ScopeCache::_mutex");
};
ScopeCache scopeCache;
diff --git a/src/mongo/scripting/mozjs/PosixNSPR.cpp b/src/mongo/scripting/mozjs/PosixNSPR.cpp
index 55a821450e6..f264b145f22 100644
--- a/src/mongo/scripting/mozjs/PosixNSPR.cpp
+++ b/src/mongo/scripting/mozjs/PosixNSPR.cpp
@@ -22,9 +22,9 @@
#include <js/Utility.h>
#include <vm/PosixNSPR.h>
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/chrono.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/thread_name.h"
#include "mongo/util/time_support.h"
@@ -185,11 +185,11 @@ PRStatus PR_CallOnceWithArg(PRCallOnceType* once, PRCallOnceWithArgFN func, void
}
class nspr::Lock {
- mongo::stdx::mutex mutex_;
+ mongo::Mutex mutex_;
public:
Lock() {}
- mongo::stdx::mutex& mutex() {
+ mongo::Mutex& mutex() {
return mutex_;
}
};
@@ -265,8 +265,8 @@ uint32_t PR_TicksPerSecond() {
PRStatus PR_WaitCondVar(PRCondVar* cvar, uint32_t timeout) {
if (timeout == PR_INTERVAL_NO_TIMEOUT) {
try {
- mongo::stdx::unique_lock<mongo::stdx::mutex> lk(cvar->lock()->mutex(),
- mongo::stdx::adopt_lock_t());
+ mongo::stdx::unique_lock<mongo::Mutex> lk(cvar->lock()->mutex(),
+ mongo::stdx::adopt_lock_t());
cvar->cond().wait(lk);
lk.release();
@@ -277,8 +277,8 @@ PRStatus PR_WaitCondVar(PRCondVar* cvar, uint32_t timeout) {
}
} else {
try {
- mongo::stdx::unique_lock<mongo::stdx::mutex> lk(cvar->lock()->mutex(),
- mongo::stdx::adopt_lock_t());
+ mongo::stdx::unique_lock<mongo::Mutex> lk(cvar->lock()->mutex(),
+ mongo::stdx::adopt_lock_t());
cvar->cond().wait_for(lk, mongo::Microseconds(timeout).toSystemDuration());
lk.release();
diff --git a/src/mongo/scripting/mozjs/countdownlatch.cpp b/src/mongo/scripting/mozjs/countdownlatch.cpp
index 9f0abbed741..f50735c734c 100644
--- a/src/mongo/scripting/mozjs/countdownlatch.cpp
+++ b/src/mongo/scripting/mozjs/countdownlatch.cpp
@@ -31,10 +31,10 @@
#include "mongo/scripting/mozjs/countdownlatch.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/scripting/mozjs/implscope.h"
#include "mongo/scripting/mozjs/objectwrapper.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
namespace mongo {
@@ -61,17 +61,17 @@ public:
int32_t make(int32_t count) {
uassert(ErrorCodes::JSInterpreterFailure, "argument must be >= 0", count >= 0);
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
int32_t desc = ++_counter;
- _latches.insert(std::make_pair(desc, std::make_shared<Latch>(count)));
+ _latches.insert(std::make_pair(desc, std::make_shared<CountDownLatch>(count)));
return desc;
}
void await(int32_t desc) {
- std::shared_ptr<Latch> latch = get(desc);
- stdx::unique_lock<stdx::mutex> lock(latch->mutex);
+ auto latch = get(desc);
+ stdx::unique_lock<Latch> lock(latch->mutex);
while (latch->count != 0) {
latch->cv.wait(lock);
@@ -79,8 +79,8 @@ public:
}
void countDown(int32_t desc) {
- std::shared_ptr<Latch> latch = get(desc);
- stdx::unique_lock<stdx::mutex> lock(latch->mutex);
+ auto latch = get(desc);
+ stdx::unique_lock<Latch> lock(latch->mutex);
if (latch->count > 0)
latch->count--;
@@ -90,8 +90,8 @@ public:
}
int32_t getCount(int32_t desc) {
- std::shared_ptr<Latch> latch = get(desc);
- stdx::unique_lock<stdx::mutex> lock(latch->mutex);
+ auto latch = get(desc);
+ stdx::unique_lock<Latch> lock(latch->mutex);
return latch->count;
}
@@ -100,16 +100,16 @@ private:
/**
* Latches for communication between threads
*/
- struct Latch {
- Latch(int32_t count) : count(count) {}
+ struct CountDownLatch {
+ CountDownLatch(int32_t count) : count(count) {}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("Latch::mutex");
stdx::condition_variable cv;
int32_t count;
};
- std::shared_ptr<Latch> get(int32_t desc) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ std::shared_ptr<CountDownLatch> get(int32_t desc) {
+ stdx::lock_guard<Latch> lock(_mutex);
auto iter = _latches.find(desc);
uassert(ErrorCodes::JSInterpreterFailure,
@@ -119,9 +119,9 @@ private:
return iter->second;
}
- using Map = stdx::unordered_map<int32_t, std::shared_ptr<Latch>>;
+ using Map = stdx::unordered_map<int32_t, std::shared_ptr<CountDownLatch>>;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("CountDownLatchHolder::_mutex");
Map _latches;
int32_t _counter;
};
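The struct above is renamed from Latch to CountDownLatch, presumably so it no longer collides with the Latch type now used by the guards in the same class; each entry carries its own mutex while the holder's mutex guards only the map. A sketch of that two-level locking, with invented names:

#include <cstdint>
#include <memory>

#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"

namespace mongo {

class ExampleCounterHolder {
public:
    void increment(int32_t desc) {
        auto entry = _get(desc);
        stdx::lock_guard<Latch> lk(entry->mutex);  // lock the entry, not the holder
        ++entry->count;
        entry->cv.notify_all();
    }

private:
    struct Counter {
        Mutex mutex = MONGO_MAKE_LATCH("ExampleCounterHolder::Counter::mutex");
        stdx::condition_variable cv;
        int32_t count = 0;
    };

    std::shared_ptr<Counter> _get(int32_t desc) {
        stdx::lock_guard<Latch> lk(_mutex);  // the holder's mutex guards only the map
        auto& entry = _counters[desc];
        if (!entry)
            entry = std::make_shared<Counter>();
        return entry;
    }

    Mutex _mutex = MONGO_MAKE_LATCH("ExampleCounterHolder::_mutex");
    stdx::unordered_map<int32_t, std::shared_ptr<Counter>> _counters;
};

}  // namespace mongo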
diff --git a/src/mongo/scripting/mozjs/engine.cpp b/src/mongo/scripting/mozjs/engine.cpp
index 1cb2e6ca3c3..c09c97a3f0b 100644
--- a/src/mongo/scripting/mozjs/engine.cpp
+++ b/src/mongo/scripting/mozjs/engine.cpp
@@ -82,7 +82,7 @@ mongo::Scope* MozJSScriptEngine::createScopeForCurrentThread() {
}
void MozJSScriptEngine::interrupt(unsigned opId) {
- stdx::lock_guard<stdx::mutex> intLock(_globalInterruptLock);
+ stdx::lock_guard<Latch> intLock(_globalInterruptLock);
OpIdToScopeMap::iterator iScope = _opToScopeMap.find(opId);
if (iScope == _opToScopeMap.end()) {
// got interrupt request for a scope that no longer exists
@@ -109,7 +109,7 @@ std::string MozJSScriptEngine::printKnownOps_inlock() {
}
void MozJSScriptEngine::interruptAll() {
- stdx::lock_guard<stdx::mutex> interruptLock(_globalInterruptLock);
+ stdx::lock_guard<Latch> interruptLock(_globalInterruptLock);
for (auto&& iScope : _opToScopeMap) {
iScope.second->kill();
@@ -141,7 +141,7 @@ void MozJSScriptEngine::setJSHeapLimitMB(int limit) {
}
void MozJSScriptEngine::registerOperation(OperationContext* opCtx, MozJSImplScope* scope) {
- stdx::lock_guard<stdx::mutex> giLock(_globalInterruptLock);
+ stdx::lock_guard<Latch> giLock(_globalInterruptLock);
auto opId = opCtx->getOpID();
@@ -155,7 +155,7 @@ void MozJSScriptEngine::registerOperation(OperationContext* opCtx, MozJSImplScop
}
void MozJSScriptEngine::unregisterOperation(unsigned int opId) {
- stdx::lock_guard<stdx::mutex> giLock(_globalInterruptLock);
+ stdx::lock_guard<Latch> giLock(_globalInterruptLock);
LOG(2) << "ImplScope " << static_cast<const void*>(this) << " unregistered for op " << opId;
diff --git a/src/mongo/scripting/mozjs/engine.h b/src/mongo/scripting/mozjs/engine.h
index 789137b1663..d49103a7996 100644
--- a/src/mongo/scripting/mozjs/engine.h
+++ b/src/mongo/scripting/mozjs/engine.h
@@ -31,9 +31,9 @@
#include <jsapi.h>
+#include "mongo/platform/mutex.h"
#include "mongo/scripting/deadline_monitor.h"
#include "mongo/scripting/engine.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/mutex.h"
@@ -91,7 +91,7 @@ private:
/**
* This mutex protects _opToScopeMap
*/
- stdx::mutex _globalInterruptLock;
+ Mutex _globalInterruptLock = MONGO_MAKE_LATCH("MozJSScriptEngine::_globalInterruptLock");
using OpIdToScopeMap = stdx::unordered_map<unsigned, MozJSImplScope*>;
OpIdToScopeMap _opToScopeMap; // map of mongo op ids to scopes (protected by
diff --git a/src/mongo/scripting/mozjs/implscope.cpp b/src/mongo/scripting/mozjs/implscope.cpp
index 358106b0de5..c78115b5ae2 100644
--- a/src/mongo/scripting/mozjs/implscope.cpp
+++ b/src/mongo/scripting/mozjs/implscope.cpp
@@ -42,12 +42,12 @@
#include "mongo/base/error_codes.h"
#include "mongo/db/operation_context.h"
#include "mongo/platform/decimal128.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/stack_locator.h"
#include "mongo/scripting/jsexception.h"
#include "mongo/scripting/mozjs/objectwrapper.h"
#include "mongo/scripting/mozjs/valuereader.h"
#include "mongo/scripting/mozjs/valuewriter.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -94,7 +94,7 @@ const int kStackChunkSize = 8192;
 * Runtimes can race on first creation (on some function statics), so we just
* serialize the initial Runtime creation.
*/
-stdx::mutex gRuntimeCreationMutex;
+Mutex gRuntimeCreationMutex;
bool gFirstRuntimeCreated = false;
bool closeToMaxMemory() {
@@ -147,7 +147,7 @@ void MozJSImplScope::unregisterOperation() {
void MozJSImplScope::kill() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
// If we are on the right thread, in the middle of an operation, and we have a registered
// opCtx, then we should check the opCtx for interrupts.
@@ -169,7 +169,7 @@ void MozJSImplScope::interrupt() {
}
bool MozJSImplScope::isKillPending() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return !_killStatus.isOK();
}
@@ -196,7 +196,7 @@ bool MozJSImplScope::_interruptCallback(JSContext* cx) {
// Check our initial kill status (which might be fine).
auto status = [&scope]() -> Status {
- stdx::lock_guard<stdx::mutex> lk(scope->_mutex);
+ stdx::lock_guard<Latch> lk(scope->_mutex);
return scope->_killStatus;
}();
@@ -292,7 +292,7 @@ MozJSImplScope::MozRuntime::MozRuntime(const MozJSScriptEngine* engine) {
}
{
- stdx::unique_lock<stdx::mutex> lk(gRuntimeCreationMutex);
+ stdx::unique_lock<Latch> lk(gRuntimeCreationMutex);
if (gFirstRuntimeCreated) {
// If we've already made a runtime, just proceed
@@ -787,7 +787,7 @@ void MozJSImplScope::gc() {
}
void MozJSImplScope::sleep(Milliseconds ms) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
uassert(ErrorCodes::JSUncatchableError,
"sleep was interrupted by kill",
@@ -866,7 +866,7 @@ void MozJSImplScope::setStatus(Status status) {
bool MozJSImplScope::_checkErrorState(bool success, bool reportError, bool assertOnError) {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_killStatus.isOK()) {
success = false;
setStatus(_killStatus);
diff --git a/src/mongo/scripting/mozjs/implscope.h b/src/mongo/scripting/mozjs/implscope.h
index 5c428528544..79bd262dae9 100644
--- a/src/mongo/scripting/mozjs/implscope.h
+++ b/src/mongo/scripting/mozjs/implscope.h
@@ -414,7 +414,7 @@ private:
std::vector<JS::PersistentRootedValue> _funcs;
InternedStringTable _internedStrings;
Status _killStatus;
- mutable std::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("MozJSImplScope::_mutex");
stdx::condition_variable _sleepCondition;
std::string _error;
unsigned int _opId; // op id for this scope
diff --git a/src/mongo/scripting/mozjs/jsthread.cpp b/src/mongo/scripting/mozjs/jsthread.cpp
index a52e3398529..3b6580abcf6 100644
--- a/src/mongo/scripting/mozjs/jsthread.cpp
+++ b/src/mongo/scripting/mozjs/jsthread.cpp
@@ -38,11 +38,11 @@
#include <vm/PosixNSPR.h>
#include "mongo/db/jsobj.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/scripting/mozjs/implscope.h"
#include "mongo/scripting/mozjs/valuereader.h"
#include "mongo/scripting/mozjs/valuewriter.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/log.h"
#include "mongo/util/stacktrace.h"
@@ -160,12 +160,12 @@ private:
SharedData() = default;
void setErrorStatus(Status status) {
- stdx::lock_guard<stdx::mutex> lck(_statusMutex);
+ stdx::lock_guard<Latch> lck(_statusMutex);
_status = std::move(status);
}
Status getErrorStatus() {
- stdx::lock_guard<stdx::mutex> lck(_statusMutex);
+ stdx::lock_guard<Latch> lck(_statusMutex);
return _status;
}
@@ -179,7 +179,7 @@ private:
std::string _stack;
private:
- stdx::mutex _statusMutex;
+ Mutex _statusMutex = MONGO_MAKE_LATCH("SharedData::_statusMutex");
Status _status = Status::OK();
};
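SharedData's accessors copy the status in and out under the mutex so callers never observe a partially written value. A sketch of that guarded getter/setter pair, with invented names:

#include <string>
#include <utility>

#include "mongo/platform/mutex.h"

namespace mongo {

class ExampleSharedError {
public:
    void set(std::string message) {
        stdx::lock_guard<Latch> lk(_mutex);
        _message = std::move(message);
    }

    std::string get() const {
        stdx::lock_guard<Latch> lk(_mutex);
        return _message;  // return a copy; the lock is released on return
    }

private:
    mutable Mutex _mutex = MONGO_MAKE_LATCH("ExampleSharedError::_mutex");
    std::string _message;
};

}  // namespace mongo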
diff --git a/src/mongo/scripting/mozjs/proxyscope.cpp b/src/mongo/scripting/mozjs/proxyscope.cpp
index fbe83f9ed83..fc3a38d0927 100644
--- a/src/mongo/scripting/mozjs/proxyscope.cpp
+++ b/src/mongo/scripting/mozjs/proxyscope.cpp
@@ -285,7 +285,7 @@ void MozJSProxyScope::runWithoutInterruptionExceptAtGlobalShutdown(Closure&& clo
}
void MozJSProxyScope::runOnImplThread(unique_function<void()> f) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_function = std::move(f);
invariant(_state == State::Idle);
@@ -322,7 +322,7 @@ void MozJSProxyScope::runOnImplThread(unique_function<void()> f) {
void MozJSProxyScope::shutdownThread() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_state == State::Idle);
@@ -369,7 +369,7 @@ void MozJSProxyScope::implThread(void* arg) {
const auto unbindImplScope = makeGuard([&proxy] { proxy->_implScope = nullptr; });
while (true) {
- stdx::unique_lock<stdx::mutex> lk(proxy->_mutex);
+ stdx::unique_lock<Latch> lk(proxy->_mutex);
{
MONGO_IDLE_THREAD_BLOCK;
proxy->_implCondvar.wait(lk, [proxy] {
diff --git a/src/mongo/scripting/mozjs/proxyscope.h b/src/mongo/scripting/mozjs/proxyscope.h
index e4948a6bc9f..d0d4c5a3423 100644
--- a/src/mongo/scripting/mozjs/proxyscope.h
+++ b/src/mongo/scripting/mozjs/proxyscope.h
@@ -32,9 +32,9 @@
#include "vm/PosixNSPR.h"
#include "mongo/client/dbclient_cursor.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/scripting/mozjs/engine.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/functional.h"
@@ -195,7 +195,7 @@ private:
* This mutex protects _function, _state and _status as channels for
* function invocation and exception handling
*/
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("MozJSProxyScope::_mutex");
unique_function<void()> _function;
State _state;
Status _status;
diff --git a/src/mongo/shell/bench.cpp b/src/mongo/shell/bench.cpp
index f5ca0b2af04..1f318f938dd 100644
--- a/src/mongo/shell/bench.cpp
+++ b/src/mongo/shell/bench.cpp
@@ -764,7 +764,7 @@ BenchRunState::~BenchRunState() {
}
void BenchRunState::waitForState(State awaitedState) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
switch (awaitedState) {
case BRS_RUNNING:
@@ -792,7 +792,7 @@ void BenchRunState::tellWorkersToCollectStats() {
}
void BenchRunState::assertFinished() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
verify(0 == _numUnstartedWorkers + _numActiveWorkers);
}
@@ -805,7 +805,7 @@ bool BenchRunState::shouldWorkerCollectStats() const {
}
void BenchRunState::onWorkerStarted() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
verify(_numUnstartedWorkers > 0);
--_numUnstartedWorkers;
++_numActiveWorkers;
@@ -815,7 +815,7 @@ void BenchRunState::onWorkerStarted() {
}
void BenchRunState::onWorkerFinished() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
verify(_numActiveWorkers > 0);
--_numActiveWorkers;
if (_numActiveWorkers + _numUnstartedWorkers == 0) {
@@ -1376,7 +1376,7 @@ void BenchRunWorker::run() {
BenchRunner::BenchRunner(BenchRunConfig* config) : _brState(config->parallel), _config(config) {
_oid.init();
- stdx::lock_guard<stdx::mutex> lk(_staticMutex);
+ stdx::lock_guard<Latch> lk(_staticMutex);
_activeRuns[_oid] = this;
}
@@ -1438,7 +1438,7 @@ void BenchRunner::stop() {
}
{
- stdx::lock_guard<stdx::mutex> lk(_staticMutex);
+ stdx::lock_guard<Latch> lk(_staticMutex);
_activeRuns.erase(_oid);
}
}
@@ -1449,7 +1449,7 @@ BenchRunner* BenchRunner::createWithConfig(const BSONObj& configArgs) {
}
BenchRunner* BenchRunner::get(OID oid) {
- stdx::lock_guard<stdx::mutex> lk(_staticMutex);
+ stdx::lock_guard<Latch> lk(_staticMutex);
return _activeRuns[oid];
}
@@ -1523,7 +1523,7 @@ BSONObj BenchRunner::finish(BenchRunner* runner) {
return zoo;
}
-stdx::mutex BenchRunner::_staticMutex;
+Mutex BenchRunner::_staticMutex = MONGO_MAKE_LATCH("BenchRunner");
std::map<OID, BenchRunner*> BenchRunner::_activeRuns;
/**
diff --git a/src/mongo/shell/bench.h b/src/mongo/shell/bench.h
index f73d2149abe..ea714789d3c 100644
--- a/src/mongo/shell/bench.h
+++ b/src/mongo/shell/bench.h
@@ -38,8 +38,8 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/ops/write_ops_parsers.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/timer.h"
@@ -449,7 +449,7 @@ public:
void onWorkerFinished();
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("BenchRunState::_mutex");
stdx::condition_variable _stateChangeCondition;
@@ -599,7 +599,7 @@ public:
private:
// TODO: Same as for createWithConfig.
- static stdx::mutex _staticMutex;
+ static Mutex _staticMutex;
static std::map<OID, BenchRunner*> _activeRuns;
OID _oid;
diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp
index bb39c1d4060..b923ffc1fa7 100644
--- a/src/mongo/shell/dbshell.cpp
+++ b/src/mongo/shell/dbshell.cpp
@@ -158,7 +158,7 @@ private:
// This needs to use a mutex rather than an atomic bool because we need to ensure that no more
// logging will happen once we return from disable().
- static inline stdx::mutex mx;
+ static inline Mutex mx = MONGO_MAKE_LATCH("ShellConsoleAppender::mx");
static inline bool loggingEnabled = true;
};
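A C++17 inline static member can carry its MONGO_MAKE_LATCH initializer directly in the header, as the shell console appender does above. A sketch of that form (ExampleAppender is invented):

#include "mongo/platform/mutex.h"

namespace mongo {

class ExampleAppender {
public:
    static void disable() {
        stdx::lock_guard<Latch> lk(_mx);
        _enabled = false;  // nothing else appends once the lock is released
    }

    static bool isEnabled() {
        stdx::lock_guard<Latch> lk(_mx);
        return _enabled;
    }

private:
    static inline Mutex _mx = MONGO_MAKE_LATCH("ExampleAppender::_mx");
    static inline bool _enabled = true;
};

}  // namespace mongo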
diff --git a/src/mongo/shell/shell_utils.cpp b/src/mongo/shell/shell_utils.cpp
index 9fc6818866a..fc9f9e4d420 100644
--- a/src/mongo/shell/shell_utils.cpp
+++ b/src/mongo/shell/shell_utils.cpp
@@ -51,13 +51,13 @@
#include "mongo/client/dbclient_base.h"
#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/hasher.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
#include "mongo/scripting/engine.h"
#include "mongo/shell/bench.h"
#include "mongo/shell/shell_options.h"
#include "mongo/shell/shell_utils_extended.h"
#include "mongo/shell/shell_utils_launcher.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
#include "mongo/util/processinfo.h"
@@ -558,14 +558,14 @@ void ConnectionRegistry::registerConnection(DBClientBase& client) {
BSONObj info;
if (client.runCommand("admin", BSON("whatsmyuri" << 1), info)) {
std::string connstr = client.getServerAddress();
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_connectionUris[connstr].insert(info["you"].str());
}
}
void ConnectionRegistry::killOperationsOnAllConnections(bool withPrompt) const {
Prompter prompter("do you want to kill the current op(s) on the server?");
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
for (auto& connection : _connectionUris) {
auto status = ConnectionString::parse(connection.first);
if (!status.isOK()) {
@@ -660,6 +660,6 @@ bool fileExists(const std::string& file) {
}
-stdx::mutex& mongoProgramOutputMutex(*(new stdx::mutex()));
+Mutex& mongoProgramOutputMutex(*(new Mutex()));
} // namespace shell_utils
} // namespace mongo
diff --git a/src/mongo/shell/shell_utils.h b/src/mongo/shell/shell_utils.h
index a5b8a0ce50c..1d3e4099998 100644
--- a/src/mongo/shell/shell_utils.h
+++ b/src/mongo/shell/shell_utils.h
@@ -35,7 +35,7 @@
#include <string>
#include "mongo/db/jsobj.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
@@ -84,14 +84,14 @@ public:
private:
std::map<std::string, std::set<std::string>> _connectionUris;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ConnectionRegistry::_mutex");
};
extern ConnectionRegistry connectionRegistry;
// This mutex helps the shell serialize output on exit, to avoid deadlocks at shutdown. So
// it also protects the global dbexitCalled.
-extern stdx::mutex& mongoProgramOutputMutex;
+extern Mutex& mongoProgramOutputMutex;
// Helper to tell if a file exists cross platform
// TODO: Remove this when we have a cross platform file utility library
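The shell's process-wide output mutex is a deliberately leaked heap allocation exposed through a reference, presumably so it survives static destruction at exit; the declaration keeps that shape with the new type. A sketch of the idiom, with invented names:

#include <string>

#include "mongo/platform/mutex.h"

namespace mongo {
namespace shell_utils_example {

// Never destroyed: the referenced Mutex outlives every static destructor.
Mutex& exampleOutputMutex(*(new Mutex()));

void appendLine(std::string& buffer, const std::string& line) {
    stdx::lock_guard<Latch> lk(exampleOutputMutex);
    buffer += line;
    buffer += '\n';
}

}  // namespace shell_utils_example
}  // namespace mongo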
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index dba5ded531e..bf82542cd13 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -134,7 +134,7 @@ void safeClose(int fd) {
}
}
-stdx::mutex _createProcessMtx;
+Mutex _createProcessMtx;
} // namespace
ProgramOutputMultiplexer programOutputLogger;
@@ -239,7 +239,7 @@ void ProgramOutputMultiplexer::appendLine(int port,
ProcessId pid,
const std::string& name,
const std::string& line) {
- stdx::lock_guard<stdx::mutex> lk(mongoProgramOutputMutex);
+ stdx::lock_guard<Latch> lk(mongoProgramOutputMutex);
boost::iostreams::tee_device<std::ostream, std::stringstream> teeDevice(cout, _buffer);
boost::iostreams::stream<decltype(teeDevice)> teeStream(teeDevice);
if (port > 0) {
@@ -250,12 +250,12 @@ void ProgramOutputMultiplexer::appendLine(int port,
}
string ProgramOutputMultiplexer::str() const {
- stdx::lock_guard<stdx::mutex> lk(mongoProgramOutputMutex);
+ stdx::lock_guard<Latch> lk(mongoProgramOutputMutex);
return _buffer.str();
}
void ProgramOutputMultiplexer::clear() {
- stdx::lock_guard<stdx::mutex> lk(mongoProgramOutputMutex);
+ stdx::lock_guard<Latch> lk(mongoProgramOutputMutex);
_buffer.str("");
}
@@ -405,7 +405,7 @@ void ProgramRunner::start() {
//
// Holding the lock for the duration of those events prevents the leaks and thus the
// associated deadlocks.
- stdx::lock_guard<stdx::mutex> lk(_createProcessMtx);
+ stdx::lock_guard<Latch> lk(_createProcessMtx);
int status = pipe(pipeEnds);
if (status != 0) {
const auto ewd = errnoWithDescription();
diff --git a/src/mongo/shell/shell_utils_launcher.h b/src/mongo/shell/shell_utils_launcher.h
index bad1d2bdba7..c93e77ec34a 100644
--- a/src/mongo/shell/shell_utils_launcher.h
+++ b/src/mongo/shell/shell_utils_launcher.h
@@ -37,8 +37,8 @@
#include <vector>
#include "mongo/bson/bsonobj.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/process_id.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/stdx/unordered_set.h"
diff --git a/src/mongo/stdx/condition_variable.h b/src/mongo/stdx/condition_variable.h
index a27567ee13b..21baa7d26cf 100644
--- a/src/mongo/stdx/condition_variable.h
+++ b/src/mongo/stdx/condition_variable.h
@@ -206,7 +206,7 @@ private:
AtomicWord<unsigned long long> _notifyableCount;
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
std::list<Notifyable*> _notifyables;
};
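Inside the stdx primitives themselves a raw stdx::mutex remains, annotated with // NOLINT, apparently to exempt those few lines from the lint check that otherwise flags direct stdx::mutex use. A sketch of the exemption form:

#include "mongo/stdx/mutex.h"

namespace mongo {

// Raw stdx::mutex is kept only where the Mutex wrapper cannot be used, such as
// low-level primitives or benchmarks comparing against std; the annotation
// tells lint tooling to skip the line.
stdx::mutex gLowLevelMutex;  // NOLINT

void exampleCriticalSection(int& counter) {
    stdx::lock_guard<stdx::mutex> lk(gLowLevelMutex);
    ++counter;
}

}  // namespace mongo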
diff --git a/src/mongo/stdx/condition_variable_bm.cpp b/src/mongo/stdx/condition_variable_bm.cpp
index a78e8e29411..0b032d99bee 100644
--- a/src/mongo/stdx/condition_variable_bm.cpp
+++ b/src/mongo/stdx/condition_variable_bm.cpp
@@ -59,7 +59,7 @@ volatile bool alwaysTrue = true;
void BM_stdWaitWithTruePredicate(benchmark::State& state) {
std::condition_variable cv; // NOLINT
- stdx::mutex mutex;
+ stdx::mutex mutex; // NOLINT
stdx::unique_lock<stdx::mutex> lk(mutex);
for (auto _ : state) {
@@ -70,7 +70,7 @@ void BM_stdWaitWithTruePredicate(benchmark::State& state) {
void BM_stdxWaitWithTruePredicate(benchmark::State& state) {
stdx::condition_variable cv;
- stdx::mutex mutex;
+ stdx::mutex mutex; // NOLINT
stdx::unique_lock<stdx::mutex> lk(mutex);
for (auto _ : state) {
diff --git a/src/mongo/tools/bridge.cpp b/src/mongo/tools/bridge.cpp
index 6f965e01760..a709769cf6f 100644
--- a/src/mongo/tools/bridge.cpp
+++ b/src/mongo/tools/bridge.cpp
@@ -41,11 +41,11 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
#include "mongo/rpc/factory.h"
#include "mongo/rpc/message.h"
#include "mongo/rpc/reply_builder_interface.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/tools/bridge_commands.h"
#include "mongo/tools/mongobridge_options.h"
@@ -116,7 +116,7 @@ public:
HostSettings getHostSettings(boost::optional<HostAndPort> host) {
if (host) {
- stdx::lock_guard<stdx::mutex> lk(_settingsMutex);
+ stdx::lock_guard<Latch> lk(_settingsMutex);
return (_settings)[*host];
}
return {};
@@ -132,7 +132,7 @@ public:
private:
static const ServiceContext::Decoration<BridgeContext> _get;
- stdx::mutex _settingsMutex;
+ Mutex _settingsMutex = MONGO_MAKE_LATCH("BridgeContext::_settingsMutex");
HostSettingsMap _settings;
};
diff --git a/src/mongo/tools/bridge_commands.cpp b/src/mongo/tools/bridge_commands.cpp
index a94153904d4..aa3bc583a15 100644
--- a/src/mongo/tools/bridge_commands.cpp
+++ b/src/mongo/tools/bridge_commands.cpp
@@ -48,7 +48,7 @@ const char kHostFieldName[] = "host";
class CmdDelayMessagesFrom final : public BridgeCommand {
public:
- Status run(const BSONObj& cmdObj, stdx::mutex* settingsMutex, HostSettingsMap* settings) final {
+ Status run(const BSONObj& cmdObj, Mutex* settingsMutex, HostSettingsMap* settings) final {
invariant(settingsMutex);
invariant(settings);
@@ -69,7 +69,7 @@ public:
HostAndPort host(hostName);
{
- stdx::lock_guard<stdx::mutex> lk(*settingsMutex);
+ stdx::lock_guard<Latch> lk(*settingsMutex);
auto& hostSettings = (*settings)[host];
hostSettings.state = HostSettings::State::kForward;
hostSettings.delay = Milliseconds{newDelay};
@@ -80,7 +80,7 @@ public:
class CmdAcceptConnectionsFrom final : public BridgeCommand {
public:
- Status run(const BSONObj& cmdObj, stdx::mutex* settingsMutex, HostSettingsMap* settings) final {
+ Status run(const BSONObj& cmdObj, Mutex* settingsMutex, HostSettingsMap* settings) final {
invariant(settingsMutex);
invariant(settings);
@@ -92,7 +92,7 @@ public:
HostAndPort host(hostName);
{
- stdx::lock_guard<stdx::mutex> lk(*settingsMutex);
+ stdx::lock_guard<Latch> lk(*settingsMutex);
auto& hostSettings = (*settings)[host];
hostSettings.state = HostSettings::State::kForward;
}
@@ -102,7 +102,7 @@ public:
class CmdRejectConnectionsFrom final : public BridgeCommand {
public:
- Status run(const BSONObj& cmdObj, stdx::mutex* settingsMutex, HostSettingsMap* settings) final {
+ Status run(const BSONObj& cmdObj, Mutex* settingsMutex, HostSettingsMap* settings) final {
invariant(settingsMutex);
invariant(settings);
@@ -114,7 +114,7 @@ public:
HostAndPort host(hostName);
{
- stdx::lock_guard<stdx::mutex> lk(*settingsMutex);
+ stdx::lock_guard<Latch> lk(*settingsMutex);
auto& hostSettings = (*settings)[host];
hostSettings.state = HostSettings::State::kHangUp;
}
@@ -124,7 +124,7 @@ public:
class CmdDiscardMessagesFrom final : public BridgeCommand {
public:
- Status run(const BSONObj& cmdObj, stdx::mutex* settingsMutex, HostSettingsMap* settings) final {
+ Status run(const BSONObj& cmdObj, Mutex* settingsMutex, HostSettingsMap* settings) final {
invariant(settingsMutex);
invariant(settings);
@@ -151,7 +151,7 @@ public:
HostAndPort host(hostName);
{
- stdx::lock_guard<stdx::mutex> lk(*settingsMutex);
+ stdx::lock_guard<Latch> lk(*settingsMutex);
auto& hostSettings = (*settings)[host];
hostSettings.state = HostSettings::State::kDiscard;
hostSettings.loss = newLoss;
diff --git a/src/mongo/tools/bridge_commands.h b/src/mongo/tools/bridge_commands.h
index 1df953a8be4..8b32fbba86c 100644
--- a/src/mongo/tools/bridge_commands.h
+++ b/src/mongo/tools/bridge_commands.h
@@ -29,7 +29,7 @@
#pragma once
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/time_support.h"
@@ -58,9 +58,7 @@ public:
virtual ~BridgeCommand() = 0;
- virtual Status run(const BSONObj& cmdObj,
- stdx::mutex* settingsMutex,
- HostSettingsMap* settings) = 0;
+ virtual Status run(const BSONObj& cmdObj, Mutex* settingsMutex, HostSettingsMap* settings) = 0;
};
} // namespace mongo
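Bridge commands receive the settings mutex by pointer and lock it through the same Latch guard; the signature simply swaps stdx::mutex* for Mutex*. A sketch of locking through a pointer parameter, with invented stand-in types:

#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"

namespace mongo {

// Invented stand-ins for the bridge's host-settings types.
struct ExampleSettings {
    int delayMillis = 0;
};
using ExampleSettingsMap = stdx::unordered_map<int, ExampleSettings>;

void exampleSetDelay(Mutex* settingsMutex, ExampleSettingsMap* settings, int host, int delay) {
    invariant(settingsMutex);
    invariant(settings);

    stdx::lock_guard<Latch> lk(*settingsMutex);  // dereference and lock as usual
    (*settings)[host].delayMillis = delay;
}

}  // namespace mongo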
diff --git a/src/mongo/transport/baton_asio_linux.h b/src/mongo/transport/baton_asio_linux.h
index 3536bc16ab4..dd5062cab72 100644
--- a/src/mongo/transport/baton_asio_linux.h
+++ b/src/mongo/transport/baton_asio_linux.h
@@ -38,7 +38,7 @@
#include "mongo/base/checked_cast.h"
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/transport/baton.h"
#include "mongo/transport/session_asio.h"
@@ -158,7 +158,7 @@ public:
auto pf = makePromiseFuture<void>();
auto id = timer.id();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_opCtx) {
return kDetached;
@@ -178,7 +178,7 @@ public:
bool cancelSession(Session& session) noexcept override {
const auto id = session.id();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_sessions.find(id) == _sessions.end()) {
return false;
@@ -192,7 +192,7 @@ public:
bool cancelTimer(const ReactorTimer& timer) noexcept override {
const auto id = timer.id();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_timersById.find(id) == _timersById.end()) {
return false;
@@ -211,7 +211,7 @@ public:
}
void schedule(Task func) noexcept override {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_opCtx) {
func(kDetached);
@@ -261,7 +261,7 @@ public:
promise.emplaceValue();
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_scheduled.size()) {
auto toRun = std::exchange(_scheduled, {});
@@ -273,7 +273,7 @@ public:
}
});
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// If anything was scheduled, run it now. No need to poll
if (_scheduled.size()) {
@@ -374,7 +374,7 @@ private:
auto id = session.id();
auto pf = makePromiseFuture<void>();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (!_opCtx) {
return kDetached;
@@ -394,7 +394,7 @@ private:
decltype(_timers) timers;
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_opCtx->getBaton().get() == this);
_opCtx->setBaton(nullptr);
@@ -438,10 +438,10 @@ private:
* the eventfd. If not, we run inline.
*/
template <typename Callback>
- void _safeExecute(stdx::unique_lock<stdx::mutex> lk, Callback&& cb) {
+ void _safeExecute(stdx::unique_lock<Latch> lk, Callback&& cb) {
if (_inPoll) {
_scheduled.push_back([cb = std::forward<Callback>(cb), this](Status) mutable {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
cb();
});
@@ -455,7 +455,7 @@ private:
return EventFDHolder::getForClient(_opCtx->getClient());
}
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("BatonASIO::_mutex");
OperationContext* _opCtx;
diff --git a/src/mongo/transport/service_entry_point_impl.h b/src/mongo/transport/service_entry_point_impl.h
index 2e3f5219e21..1788ef8a146 100644
--- a/src/mongo/transport/service_entry_point_impl.h
+++ b/src/mongo/transport/service_entry_point_impl.h
@@ -32,8 +32,8 @@
#include <list>
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/variant.h"
#include "mongo/transport/service_entry_point.h"
#include "mongo/transport/service_executor_reserved.h"
@@ -81,7 +81,7 @@ private:
ServiceContext* const _svcCtx;
AtomicWord<std::size_t> _nWorkers;
- mutable stdx::mutex _sessionsMutex;
+ mutable Mutex _sessionsMutex = MONGO_MAKE_LATCH("ServiceEntryPointImpl::_sessionsMutex");
stdx::condition_variable _shutdownCondition;
SSMList _sessions;
diff --git a/src/mongo/transport/service_executor_adaptive.cpp b/src/mongo/transport/service_executor_adaptive.cpp
index 3f35fe07c78..848ca8eb531 100644
--- a/src/mongo/transport/service_executor_adaptive.cpp
+++ b/src/mongo/transport/service_executor_adaptive.cpp
@@ -160,7 +160,7 @@ Status ServiceExecutorAdaptive::shutdown(Milliseconds timeout) {
_scheduleCondition.notify_one();
_controllerThread.join();
- stdx::unique_lock<stdx::mutex> lk(_threadsMutex);
+ stdx::unique_lock<Latch> lk(_threadsMutex);
_reactorHandle->stop();
bool result =
_deathCondition.wait_for(lk, timeout.toSystemDuration(), [&] { return _threads.empty(); });
@@ -285,7 +285,7 @@ bool ServiceExecutorAdaptive::_isStarved() const {
* by schedule().
*/
void ServiceExecutorAdaptive::_controllerThreadRoutine() {
- stdx::mutex noopLock;
+ auto noopLock = MONGO_MAKE_LATCH();
setThreadName("worker-controller"_sd);
// Setup the timers/timeout values for stuck thread detection.
@@ -294,7 +294,7 @@ void ServiceExecutorAdaptive::_controllerThreadRoutine() {
// Get the initial values for our utilization percentage calculations
auto getTimerTotals = [this]() {
- stdx::unique_lock<stdx::mutex> lk(_threadsMutex);
+ stdx::unique_lock<Latch> lk(_threadsMutex);
auto first = _getThreadTimerTotal(ThreadTimer::kExecuting, lk);
auto second = _getThreadTimerTotal(ThreadTimer::kRunning, lk);
return std::make_pair(first, second);
@@ -428,7 +428,7 @@ void ServiceExecutorAdaptive::_controllerThreadRoutine() {
}
void ServiceExecutorAdaptive::_startWorkerThread(ThreadCreationReason reason) {
- stdx::unique_lock<stdx::mutex> lk(_threadsMutex);
+ stdx::unique_lock<Latch> lk(_threadsMutex);
auto it = _threads.emplace(_threads.begin(), _tickSource);
auto num = _threads.size();
@@ -452,7 +452,7 @@ void ServiceExecutorAdaptive::_startWorkerThread(ThreadCreationReason reason) {
}
Milliseconds ServiceExecutorAdaptive::_getThreadJitter() const {
- static stdx::mutex jitterMutex;
+ static auto jitterMutex = MONGO_MAKE_LATCH();
static std::default_random_engine randomEngine = [] {
std::random_device seed;
return std::default_random_engine(seed());
@@ -464,7 +464,7 @@ Milliseconds ServiceExecutorAdaptive::_getThreadJitter() const {
std::uniform_int_distribution<> jitterDist(-jitterParam, jitterParam);
- stdx::lock_guard<stdx::mutex> lk(jitterMutex);
+ stdx::lock_guard<Latch> lk(jitterMutex);
auto jitter = jitterDist(randomEngine);
if (jitter > _config->workerThreadRunTime().count())
jitter = 0;
@@ -485,8 +485,8 @@ void ServiceExecutorAdaptive::_accumulateTaskMetrics(MetricsArray* outArray,
}
}
-void ServiceExecutorAdaptive::_accumulateAllTaskMetrics(
- MetricsArray* outputMetricsArray, const stdx::unique_lock<stdx::mutex>& lk) const {
+void ServiceExecutorAdaptive::_accumulateAllTaskMetrics(MetricsArray* outputMetricsArray,
+ const stdx::unique_lock<Latch>& lk) const {
_accumulateTaskMetrics(outputMetricsArray, _accumulatedMetrics);
for (auto& thread : _threads) {
_accumulateTaskMetrics(outputMetricsArray, thread.threadMetrics);
@@ -494,7 +494,7 @@ void ServiceExecutorAdaptive::_accumulateAllTaskMetrics(
}
TickSource::Tick ServiceExecutorAdaptive::_getThreadTimerTotal(
- ThreadTimer which, const stdx::unique_lock<stdx::mutex>& lk) const {
+ ThreadTimer which, const stdx::unique_lock<Latch>& lk) const {
TickSource::Tick accumulator;
switch (which) {
case ThreadTimer::kRunning:
@@ -539,7 +539,7 @@ void ServiceExecutorAdaptive::_workerThreadRoutine(
_accumulateTaskMetrics(&_accumulatedMetrics, state->threadMetrics);
{
- stdx::lock_guard<stdx::mutex> lk(_threadsMutex);
+ stdx::lock_guard<Latch> lk(_threadsMutex);
_threads.erase(state);
}
_deathCondition.notify_one();
@@ -631,7 +631,7 @@ StringData ServiceExecutorAdaptive::_threadStartedByToString(
}
void ServiceExecutorAdaptive::appendStats(BSONObjBuilder* bob) const {
- stdx::unique_lock<stdx::mutex> lk(_threadsMutex);
+ stdx::unique_lock<Latch> lk(_threadsMutex);
*bob << kExecutorLabel << kExecutorName //
<< kTotalQueued << _totalQueued.load() //
<< kTotalExecuted << _totalExecuted.load() //
diff --git a/src/mongo/transport/service_executor_adaptive.h b/src/mongo/transport/service_executor_adaptive.h
index 4c3b670549b..a0def9da063 100644
--- a/src/mongo/transport/service_executor_adaptive.h
+++ b/src/mongo/transport/service_executor_adaptive.h
@@ -35,7 +35,7 @@
#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/stdx/thread.h"
#include "mongo/transport/service_executor.h"
#include "mongo/transport/service_executor_task_names.h"
@@ -138,7 +138,7 @@ private:
CumulativeTickTimer(TickSource* ts) : _timer(ts) {}
TickSource::Tick markStopped() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_running);
_running = false;
auto curTime = _timer.sinceStartTicks();
@@ -147,14 +147,14 @@ private:
}
void markRunning() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(!_running);
_timer.reset();
_running = true;
}
TickSource::Tick totalTime() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (!_running)
return _accumulator;
return _timer.sinceStartTicks() + _accumulator;
@@ -162,7 +162,7 @@ private:
private:
TickTimer _timer;
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("CumulativeTickTimer::_mutex");
TickSource::Tick _accumulator = 0;
bool _running = false;
};
@@ -202,15 +202,15 @@ private:
void _accumulateTaskMetrics(MetricsArray* outArray, const MetricsArray& inputArray) const;
void _accumulateAllTaskMetrics(MetricsArray* outputMetricsArray,
- const stdx::unique_lock<stdx::mutex>& lk) const;
+ const stdx::unique_lock<Latch>& lk) const;
TickSource::Tick _getThreadTimerTotal(ThreadTimer which,
- const stdx::unique_lock<stdx::mutex>& lk) const;
+ const stdx::unique_lock<Latch>& lk) const;
ReactorHandle _reactorHandle;
std::unique_ptr<Options> _config;
- mutable stdx::mutex _threadsMutex;
+ mutable Mutex _threadsMutex = MONGO_MAKE_LATCH("ServiceExecutorAdaptive::_threadsMutex");
ThreadList _threads;
std::array<int64_t, static_cast<size_t>(ThreadCreationReason::kMax)> _threadStartCounters;
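Function-local and block-scoped latches use MONGO_MAKE_LATCH with no name and auto deduction, as in the controller thread's noopLock and the jitter helper above; the tests below do the same for their blocking mutexes. A sketch of both forms, with invented names:

#include "mongo/platform/mutex.h"

namespace mongo {

int exampleNextJitter(int bound) {
    // A function-static latch shared by every caller of this helper.
    static auto jitterMutex = MONGO_MAKE_LATCH();
    static int counter = 0;

    stdx::lock_guard<Latch> lk(jitterMutex);
    counter = (counter + 1) % (bound > 0 ? bound : 1);
    return counter;
}

void exampleBlockedTask() {
    // A purely local latch used only to park this function on a lock.
    auto blockedMutex = MONGO_MAKE_LATCH();
    stdx::unique_lock<Latch> lk(blockedMutex);
    // ... hold the lock while simulating a stuck task ...
}

}  // namespace mongo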
diff --git a/src/mongo/transport/service_executor_adaptive_test.cpp b/src/mongo/transport/service_executor_adaptive_test.cpp
index 8e27d91549d..e4abf9f276a 100644
--- a/src/mongo/transport/service_executor_adaptive_test.cpp
+++ b/src/mongo/transport/service_executor_adaptive_test.cpp
@@ -114,11 +114,11 @@ protected:
std::shared_ptr<asio::io_context> asioIoCtx;
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("ServiceExecutorAdaptiveFixture::mutex");
AtomicWord<int> waitFor{-1};
stdx::condition_variable cond;
std::function<void()> notifyCallback = [this] {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
invariant(waitFor.load() != -1);
waitFor.fetchAndSubtract(1);
cond.notify_one();
@@ -126,7 +126,7 @@ protected:
};
void waitForCallback(int expected, boost::optional<Milliseconds> timeout = boost::none) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
invariant(waitFor.load() != -1);
if (timeout) {
ASSERT_TRUE(cond.wait_for(
@@ -163,8 +163,8 @@ protected:
* that those threads retire when they become idle.
*/
TEST_F(ServiceExecutorAdaptiveFixture, TestStuckTask) {
- stdx::mutex blockedMutex;
- stdx::unique_lock<stdx::mutex> blockedLock(blockedMutex);
+ auto blockedMutex = MONGO_MAKE_LATCH();
+ stdx::unique_lock<Latch> blockedLock(blockedMutex);
auto exec = makeAndStartExecutor<TestOptions>();
auto guard = makeGuard([&] {
@@ -178,7 +178,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckTask) {
ASSERT_OK(exec->schedule(
[this, &blockedMutex] {
notifyCallback();
- stdx::unique_lock<stdx::mutex> lk(blockedMutex);
+ stdx::unique_lock<Latch> lk(blockedMutex);
notifyCallback();
},
ServiceExecutor::kEmptyFlags,
@@ -208,8 +208,8 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckTask) {
* threads are running a task for longer than the stuckThreadTimeout.
*/
TEST_F(ServiceExecutorAdaptiveFixture, TestStuckThreads) {
- stdx::mutex blockedMutex;
- stdx::unique_lock<stdx::mutex> blockedLock(blockedMutex);
+ auto blockedMutex = MONGO_MAKE_LATCH();
+ stdx::unique_lock<Latch> blockedLock(blockedMutex);
auto exec = makeAndStartExecutor<TestOptions>();
auto guard = makeGuard([&] {
@@ -221,7 +221,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckThreads) {
auto blockedTask = [this, &blockedMutex] {
log() << "waiting on blocked mutex";
notifyCallback();
- stdx::unique_lock<stdx::mutex> lk(blockedMutex);
+ stdx::unique_lock<Latch> lk(blockedMutex);
notifyCallback();
};
@@ -260,8 +260,8 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStuckThreads) {
TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
auto exec = makeAndStartExecutor<TestOptions>();
- // Mutex so we don't attempt to call schedule and shutdown concurrently
- stdx::mutex scheduleMutex;
+ // Mutex so we don't attempt to call schedule and shutdown concurrently
+ auto scheduleMutex = MONGO_MAKE_LATCH();
auto guard = makeGuard([&] { ASSERT_OK(exec->shutdown(config->workerThreadRunTime() * 2)); });
@@ -274,7 +274,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
stdx::this_thread::sleep_for(config->maxQueueLatency().toSystemDuration() * 5);
{
- stdx::unique_lock<stdx::mutex> lock(scheduleMutex);
+ stdx::unique_lock<Latch> lock(scheduleMutex);
if (scheduleNew) {
ASSERT_OK(exec->schedule(task,
@@ -298,7 +298,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestStarvation) {
stdx::this_thread::sleep_for(config->workerThreadRunTime().toSystemDuration() * 2);
ASSERT_EQ(exec->threadsRunning(), 2);
- stdx::unique_lock<stdx::mutex> lock(scheduleMutex);
+ stdx::unique_lock<Latch> lock(scheduleMutex);
scheduleNew = false;
}
@@ -310,7 +310,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestRecursion) {
auto exec = makeAndStartExecutor<RecursionOptions>();
AtomicWord<int> remainingTasks{config->recursionLimit() - 1};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
std::function<void()> task;
@@ -334,7 +334,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestRecursion) {
log() << "Completing task recursively";
};
- stdx::unique_lock<stdx::mutex> lock(mutex);
+ stdx::unique_lock<Latch> lock(mutex);
ASSERT_OK(exec->schedule(
task, ServiceExecutor::kEmptyFlags, ServiceExecutorTaskName::kSSMProcessMessage));
@@ -352,8 +352,8 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestRecursion) {
* with new normal tasks
*/
TEST_F(ServiceExecutorAdaptiveFixture, TestDeferredTasks) {
- stdx::mutex blockedMutex;
- stdx::unique_lock<stdx::mutex> blockedLock(blockedMutex);
+ auto blockedMutex = MONGO_MAKE_LATCH();
+ stdx::unique_lock<Latch> blockedLock(blockedMutex);
auto exec = makeAndStartExecutor<TestOptions>();
auto guard = makeGuard([&] {
@@ -366,7 +366,7 @@ TEST_F(ServiceExecutorAdaptiveFixture, TestDeferredTasks) {
log() << "Scheduling a blocking task";
ASSERT_OK(exec->schedule(
[this, &blockedMutex] {
- stdx::unique_lock<stdx::mutex> lk(blockedMutex);
+ stdx::unique_lock<Latch> lk(blockedMutex);
notifyCallback();
},
ServiceExecutor::kEmptyFlags,
diff --git a/src/mongo/transport/service_executor_reserved.cpp b/src/mongo/transport/service_executor_reserved.cpp
index 24820ab1d91..902bf98d7c3 100644
--- a/src/mongo/transport/service_executor_reserved.cpp
+++ b/src/mongo/transport/service_executor_reserved.cpp
@@ -62,7 +62,7 @@ ServiceExecutorReserved::ServiceExecutorReserved(ServiceContext* ctx,
Status ServiceExecutorReserved::start() {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_stillRunning.store(true);
_numStartingThreads = _reservedThreads;
}
@@ -80,7 +80,7 @@ Status ServiceExecutorReserved::start() {
Status ServiceExecutorReserved::_startWorker() {
log() << "Starting new worker thread for " << _name << " service executor";
return launchServiceWorkerThread([this] {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_numRunningWorkerThreads.addAndFetch(1);
auto numRunningGuard = makeGuard([&] {
_numRunningWorkerThreads.subtractAndFetch(1);
@@ -142,7 +142,7 @@ Status ServiceExecutorReserved::_startWorker() {
Status ServiceExecutorReserved::shutdown(Milliseconds timeout) {
LOG(3) << "Shutting down reserved executor";
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_stillRunning.store(false);
_threadWakeup.notify_all();
@@ -178,7 +178,7 @@ Status ServiceExecutorReserved::schedule(Task task,
return Status::OK();
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_readyTasks.push_back(std::move(task));
_threadWakeup.notify_one();
@@ -186,7 +186,7 @@ Status ServiceExecutorReserved::schedule(Task task,
}
void ServiceExecutorReserved::appendStats(BSONObjBuilder* bob) const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
*bob << kExecutorLabel << kExecutorName << kThreadsRunning
<< static_cast<int>(_numRunningWorkerThreads.loadRelaxed()) << kReadyThreads
<< static_cast<int>(_numReadyThreads) << kStartingThreads
diff --git a/src/mongo/transport/service_executor_reserved.h b/src/mongo/transport/service_executor_reserved.h
index d83a07566f5..53bd3b00ade 100644
--- a/src/mongo/transport/service_executor_reserved.h
+++ b/src/mongo/transport/service_executor_reserved.h
@@ -33,8 +33,8 @@
#include "mongo/base/status.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/transport/service_executor.h"
#include "mongo/transport/service_executor_task_names.h"
@@ -74,7 +74,7 @@ private:
AtomicWord<bool> _stillRunning{false};
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ServiceExecutorReserved::_mutex");
stdx::condition_variable _threadWakeup;
stdx::condition_variable _shutdownCondition;
diff --git a/src/mongo/transport/service_executor_synchronous.cpp b/src/mongo/transport/service_executor_synchronous.cpp
index 79fc88e0033..25104fd46dd 100644
--- a/src/mongo/transport/service_executor_synchronous.cpp
+++ b/src/mongo/transport/service_executor_synchronous.cpp
@@ -67,7 +67,7 @@ Status ServiceExecutorSynchronous::shutdown(Milliseconds timeout) {
_stillRunning.store(false);
- stdx::unique_lock<stdx::mutex> lock(_shutdownMutex);
+ stdx::unique_lock<Latch> lock(_shutdownMutex);
bool result = _shutdownCondition.wait_for(lock, timeout.toSystemDuration(), [this]() {
return _numRunningWorkerThreads.load() == 0;
});
diff --git a/src/mongo/transport/service_executor_synchronous.h b/src/mongo/transport/service_executor_synchronous.h
index ebe381d9fe2..192583bded7 100644
--- a/src/mongo/transport/service_executor_synchronous.h
+++ b/src/mongo/transport/service_executor_synchronous.h
@@ -33,8 +33,8 @@
#include "mongo/base/status.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/transport/service_executor.h"
#include "mongo/transport/service_executor_task_names.h"
@@ -66,7 +66,7 @@ private:
AtomicWord<bool> _stillRunning{false};
- mutable stdx::mutex _shutdownMutex;
+ mutable Mutex _shutdownMutex = MONGO_MAKE_LATCH("ServiceExecutorSynchronous::_shutdownMutex");
stdx::condition_variable _shutdownCondition;
AtomicWord<size_t> _numRunningWorkerThreads{0};
diff --git a/src/mongo/transport/service_executor_test.cpp b/src/mongo/transport/service_executor_test.cpp
index a7482e09f17..d91cba4fbb7 100644
--- a/src/mongo/transport/service_executor_test.cpp
+++ b/src/mongo/transport/service_executor_test.cpp
@@ -178,13 +178,13 @@ protected:
void scheduleBasicTask(ServiceExecutor* exec, bool expectSuccess) {
stdx::condition_variable cond;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
auto task = [&cond, &mutex] {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cond.notify_all();
};
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
auto status = exec->schedule(
std::move(task), ServiceExecutor::kEmptyFlags, ServiceExecutorTaskName::kSSMStartSession);
if (expectSuccess) {
diff --git a/src/mongo/transport/service_state_machine.h b/src/mongo/transport/service_state_machine.h
index 840204e30f8..8125fd941b3 100644
--- a/src/mongo/transport/service_state_machine.h
+++ b/src/mongo/transport/service_state_machine.h
@@ -37,7 +37,7 @@
#include "mongo/config.h"
#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/transport/message_compressor_base.h"
#include "mongo/transport/service_entry_point.h"
diff --git a/src/mongo/transport/service_state_machine_test.cpp b/src/mongo/transport/service_state_machine_test.cpp
index 02447e5a289..2cc54156c6b 100644
--- a/src/mongo/transport/service_state_machine_test.cpp
+++ b/src/mongo/transport/service_state_machine_test.cpp
@@ -282,19 +282,19 @@ private:
class SimpleEvent {
public:
void signal() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_signaled = true;
_cond.notify_one();
}
void wait() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cond.wait(lk, [this] { return _signaled; });
_signaled = false;
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SimpleEvent::_mutex");
stdx::condition_variable _cond;
bool _signaled = false;
};
diff --git a/src/mongo/transport/session_asio.h b/src/mongo/transport/session_asio.h
index 434371863e5..34547494aeb 100644
--- a/src/mongo/transport/session_asio.h
+++ b/src/mongo/transport/session_asio.h
@@ -222,7 +222,7 @@ protected:
#ifdef MONGO_CONFIG_SSL
// The unique_lock here is held by TransportLayerASIO to synchronize with the asyncConnect
// timeout callback. It will be unlocked before the SSL handshake actually begins.
- Future<void> handshakeSSLForEgressWithLock(stdx::unique_lock<stdx::mutex> lk,
+ Future<void> handshakeSSLForEgressWithLock(stdx::unique_lock<Latch> lk,
const HostAndPort& target) {
if (!_tl->_egressSSLContext) {
return Future<void>::makeReady(Status(ErrorCodes::SSLHandshakeFailed,
@@ -254,8 +254,8 @@ protected:
// For synchronous connections where we don't have an async timer, just take a dummy lock and
// pass it to the WithLock version of handshakeSSLForEgress
Future<void> handshakeSSLForEgress(const HostAndPort& target) {
- stdx::mutex mutex;
- return handshakeSSLForEgressWithLock(stdx::unique_lock<stdx::mutex>(mutex), target);
+ auto mutex = MONGO_MAKE_LATCH();
+ return handshakeSSLForEgressWithLock(stdx::unique_lock<Latch>(mutex), target);
}
#endif
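The dummy-lock idiom above, a locally created latch locked and handed off by value, lets the synchronous connect path reuse the same lock-taking signature as the asynchronous, timer-guarded path. A sketch of the shape, with hypothetical function names:

#include "mongo/platform/mutex.h"

namespace mongo {
void runWithLock(stdx::unique_lock<Latch> lk) {
    // The callee owns the lock and releases it once its setup is done.
    lk.unlock();
    // ... continue without holding the latch ...
}

void run() {
    auto mutex = MONGO_MAKE_LATCH();
    runWithLock(stdx::unique_lock<Latch>(mutex));
}
}  // namespace mongo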
diff --git a/src/mongo/transport/transport_layer_asio.cpp b/src/mongo/transport/transport_layer_asio.cpp
index 58153a870e4..c2aef1e19e2 100644
--- a/src/mongo/transport/transport_layer_asio.cpp
+++ b/src/mongo/transport/transport_layer_asio.cpp
@@ -530,7 +530,7 @@ Future<SessionHandle> TransportLayerASIO::asyncConnect(HostAndPort peer,
AtomicWord<bool> done{false};
Promise<SessionHandle> promise;
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("AsyncConnectState::mutex");
GenericSocket socket;
ASIOReactorTimer timeoutTimer;
WrappedResolver resolver;
@@ -562,7 +562,7 @@ Future<SessionHandle> TransportLayerASIO::asyncConnect(HostAndPort peer,
connector->resolvedEndpoint));
std::error_code ec;
- stdx::lock_guard<stdx::mutex> lk(connector->mutex);
+ stdx::lock_guard<Latch> lk(connector->mutex);
connector->resolver.cancel();
if (connector->session) {
connector->session->end();
@@ -583,7 +583,7 @@ Future<SessionHandle> TransportLayerASIO::asyncConnect(HostAndPort peer,
<< " took " << timeAfter - timeBefore;
}
- stdx::lock_guard<stdx::mutex> lk(connector->mutex);
+ stdx::lock_guard<Latch> lk(connector->mutex);
connector->resolvedEndpoint = results.front();
connector->socket.open(connector->resolvedEndpoint->protocol());
@@ -595,7 +595,7 @@ Future<SessionHandle> TransportLayerASIO::asyncConnect(HostAndPort peer,
return connector->socket.async_connect(*connector->resolvedEndpoint, UseFuture{});
})
.then([this, connector, sslMode]() -> Future<void> {
- stdx::unique_lock<stdx::mutex> lk(connector->mutex);
+ stdx::unique_lock<Latch> lk(connector->mutex);
connector->session =
std::make_shared<ASIOSession>(this, std::move(connector->socket), false);
connector->session->ensureAsync();
@@ -780,7 +780,7 @@ Status TransportLayerASIO::setup() {
}
Status TransportLayerASIO::start() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_running.store(true);
if (_listenerOptions.isIngress()) {
@@ -812,7 +812,7 @@ Status TransportLayerASIO::start() {
}
void TransportLayerASIO::shutdown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_running.store(false);
// Loop through the acceptors and cancel their calls to async_accept. This will prevent new
diff --git a/src/mongo/transport/transport_layer_asio.h b/src/mongo/transport/transport_layer_asio.h
index 90008fe3c89..fef6aeecea9 100644
--- a/src/mongo/transport/transport_layer_asio.h
+++ b/src/mongo/transport/transport_layer_asio.h
@@ -36,8 +36,8 @@
#include "mongo/base/status_with.h"
#include "mongo/config.h"
#include "mongo/db/server_options.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/transport/transport_layer.h"
#include "mongo/transport/transport_mode.h"
@@ -160,7 +160,7 @@ private:
SSLParams::SSLModes _sslMode() const;
#endif
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("TransportLayerASIO::_mutex");
// There are three reactors that are used by TransportLayerASIO. The _ingressReactor contains
// all the accepted sockets and all ingress networking activity. The _acceptorReactor contains
diff --git a/src/mongo/transport/transport_layer_asio_test.cpp b/src/mongo/transport/transport_layer_asio_test.cpp
index 08dcd99dcae..53f979d9cd8 100644
--- a/src/mongo/transport/transport_layer_asio_test.cpp
+++ b/src/mongo/transport/transport_layer_asio_test.cpp
@@ -48,7 +48,7 @@ namespace {
class ServiceEntryPointUtil : public ServiceEntryPoint {
public:
void startSession(transport::SessionHandle session) override {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_sessions.push_back(std::move(session));
log() << "started session";
_cv.notify_one();
@@ -58,7 +58,7 @@ public:
log() << "end all sessions";
std::vector<transport::SessionHandle> old_sessions;
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
old_sessions.swap(_sessions);
}
old_sessions.clear();
@@ -75,7 +75,7 @@ public:
void appendStats(BSONObjBuilder*) const override {}
size_t numOpenSessions() const override {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return _sessions.size();
}
@@ -88,12 +88,12 @@ public:
}
void waitForConnect() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_cv.wait(lock, [&] { return !_sessions.empty(); });
}
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("::_mutex");
stdx::condition_variable _cv;
std::vector<transport::SessionHandle> _sessions;
transport::TransportLayer* _transport = nullptr;
@@ -107,7 +107,7 @@ public:
SockAddr sa{"localhost", _port, AF_INET};
s.connect(sa);
log() << "connection: port " << _port;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_cv.wait(lk, [&] { return _stop; });
log() << "connection: Rx stop request";
}};
@@ -115,7 +115,7 @@ public:
void stop() {
{
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_stop = true;
}
log() << "connection: Tx stop request";
@@ -125,7 +125,7 @@ public:
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("SimpleConnectionThread::_mutex");
stdx::condition_variable _cv;
stdx::thread _thr;
bool _stop = false;
@@ -196,7 +196,7 @@ public:
}
bool waitForTimeout(boost::optional<Milliseconds> timeout = boost::none) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
bool ret = true;
if (timeout) {
ret = _cond.wait_for(lk, timeout->toSystemDuration(), [this] { return _finished; });
@@ -210,7 +210,7 @@ public:
protected:
void notifyComplete() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_finished = true;
_cond.notify_one();
}
@@ -221,7 +221,7 @@ protected:
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("TimeoutSEP::_mutex");
stdx::condition_variable _cond;
bool _finished = false;
diff --git a/src/mongo/transport/transport_layer_manager.cpp b/src/mongo/transport/transport_layer_manager.cpp
index dcc91cf3e3d..57a49fb563f 100644
--- a/src/mongo/transport/transport_layer_manager.cpp
+++ b/src/mongo/transport/transport_layer_manager.cpp
@@ -53,7 +53,7 @@ TransportLayerManager::TransportLayerManager() = default;
template <typename Callable>
void TransportLayerManager::_foreach(Callable&& cb) const {
{
- stdx::lock_guard<stdx::mutex> lk(_tlsMutex);
+ stdx::lock_guard<Latch> lk(_tlsMutex);
for (auto&& tl : _tls) {
cb(tl.get());
}
@@ -111,7 +111,7 @@ Status TransportLayerManager::setup() {
Status TransportLayerManager::addAndStartTransportLayer(std::unique_ptr<TransportLayer> tl) {
auto ptr = tl.get();
{
- stdx::lock_guard<stdx::mutex> lk(_tlsMutex);
+ stdx::lock_guard<Latch> lk(_tlsMutex);
_tls.emplace_back(std::move(tl));
}
return ptr->start();
diff --git a/src/mongo/transport/transport_layer_manager.h b/src/mongo/transport/transport_layer_manager.h
index 1dd5ef38527..3bc0e6ba5c6 100644
--- a/src/mongo/transport/transport_layer_manager.h
+++ b/src/mongo/transport/transport_layer_manager.h
@@ -32,7 +32,7 @@
#include <vector>
#include "mongo/base/status.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/transport/session.h"
#include "mongo/transport/transport_layer.h"
#include "mongo/util/time_support.h"
@@ -91,7 +91,7 @@ public:
static std::unique_ptr<TransportLayer> makeAndStartDefaultEgressTransportLayer();
BatonHandle makeBaton(OperationContext* opCtx) const override {
- stdx::lock_guard<stdx::mutex> lk(_tlsMutex);
+ stdx::lock_guard<Latch> lk(_tlsMutex);
// TODO: figure out what to do about managers with more than one transport layer.
invariant(_tls.size() == 1);
return _tls[0]->makeBaton(opCtx);
@@ -101,7 +101,7 @@ private:
template <typename Callable>
void _foreach(Callable&& cb) const;
- mutable stdx::mutex _tlsMutex;
+ mutable Mutex _tlsMutex = MONGO_MAKE_LATCH("TransportLayerManager::_tlsMutex");
std::vector<std::unique_ptr<TransportLayer>> _tls;
};
diff --git a/src/mongo/unittest/barrier.h b/src/mongo/unittest/barrier.h
index 6b3d102fc6f..4332b2480d7 100644
--- a/src/mongo/unittest/barrier.h
+++ b/src/mongo/unittest/barrier.h
@@ -29,8 +29,8 @@
#pragma once
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
namespace unittest {
@@ -60,7 +60,7 @@ private:
size_t _threadCount;
size_t _threadsWaiting;
uint64_t _generation;
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
stdx::condition_variable _condition;
};
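A handful of classes, like Barrier above, keep a bare stdx::mutex; the trailing NOLINT marks these as deliberate exceptions, presumably for the cpplint rule this patch adds to flag direct stdx::mutex use. Sketch of the escape hatch, with a hypothetical class name:

#include "mongo/stdx/mutex.h"

class LowLevelHelper {
private:
    stdx::mutex _mutex;  // NOLINT -- intentionally not migrated to the Latch machinery
};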
diff --git a/src/mongo/unittest/unittest.cpp b/src/mongo/unittest/unittest.cpp
index 1aee738eb4a..83f7fa89434 100644
--- a/src/mongo/unittest/unittest.cpp
+++ b/src/mongo/unittest/unittest.cpp
@@ -46,7 +46,7 @@
#include "mongo/logger/logger.h"
#include "mongo/logger/message_event_utf8_encoder.h"
#include "mongo/logger/message_log_domain.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/stacktrace.h"
@@ -251,7 +251,7 @@ public:
}
private:
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
bool _enabled = false;
logger::MessageEventDetailsEncoder _encoder;
std::vector<std::string>* _lines;
diff --git a/src/mongo/util/alarm.cpp b/src/mongo/util/alarm.cpp
index b3236a9ef5b..95a3a88dfd3 100644
--- a/src/mongo/util/alarm.cpp
+++ b/src/mongo/util/alarm.cpp
@@ -50,7 +50,7 @@ public:
return {ErrorCodes::ShutdownInProgress, "The alarm scheduler was shutdown"};
}
- stdx::unique_lock<stdx::mutex> lk(service->_mutex);
+ stdx::unique_lock<Latch> lk(service->_mutex);
if (_done) {
return {ErrorCodes::AlarmAlreadyFulfilled, "The alarm has already been canceled"};
}
@@ -80,7 +80,7 @@ AlarmSchedulerPrecise::~AlarmSchedulerPrecise() {
}
AlarmScheduler::Alarm AlarmSchedulerPrecise::alarmAt(Date_t date) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (_shutdown) {
Alarm ret;
ret.future = Future<void>::makeReady(
@@ -107,7 +107,7 @@ void AlarmSchedulerPrecise::processExpiredAlarms(
std::vector<Promise<void>> toExpire;
AlarmMapIt it;
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
for (it = _alarms.begin(); it != _alarms.end();) {
if (hook && !(*hook)(processed + 1)) {
break;
@@ -135,22 +135,22 @@ void AlarmSchedulerPrecise::processExpiredAlarms(
}
Date_t AlarmSchedulerPrecise::nextAlarm() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return (_alarms.empty()) ? Date_t::max() : _alarms.begin()->first;
}
void AlarmSchedulerPrecise::clearAllAlarms() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_clearAllAlarmsImpl(lk);
}
void AlarmSchedulerPrecise::clearAllAlarmsAndShutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shutdown = true;
_clearAllAlarmsImpl(lk);
}
-void AlarmSchedulerPrecise::_clearAllAlarmsImpl(stdx::unique_lock<stdx::mutex>& lk) {
+void AlarmSchedulerPrecise::_clearAllAlarmsImpl(stdx::unique_lock<Latch>& lk) {
std::vector<Promise<void>> toExpire;
for (AlarmMapIt it = _alarms.begin(); it != _alarms.end();) {
toExpire.push_back(std::move(it->second.promise));
diff --git a/src/mongo/util/alarm.h b/src/mongo/util/alarm.h
index 449284a3b21..9727a133e69 100644
--- a/src/mongo/util/alarm.h
+++ b/src/mongo/util/alarm.h
@@ -32,7 +32,7 @@
#include <memory>
#include "mongo/base/status.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/functional.h"
#include "mongo/util/future.h"
@@ -185,9 +185,9 @@ private:
using AlarmMap = std::multimap<Date_t, AlarmData>;
using AlarmMapIt = AlarmMap::iterator;
- void _clearAllAlarmsImpl(stdx::unique_lock<stdx::mutex>& lk);
+ void _clearAllAlarmsImpl(stdx::unique_lock<Latch>& lk);
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AlarmSchedulerPrecise::_mutex");
bool _shutdown = false;
AlarmMap _alarms;
};
diff --git a/src/mongo/util/alarm_runner_background_thread.cpp b/src/mongo/util/alarm_runner_background_thread.cpp
index 4d22f84e87d..d0a27927246 100644
--- a/src/mongo/util/alarm_runner_background_thread.cpp
+++ b/src/mongo/util/alarm_runner_background_thread.cpp
@@ -34,13 +34,13 @@
namespace mongo {
void AlarmRunnerBackgroundThread::start() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_running = true;
_thread = stdx::thread(&AlarmRunnerBackgroundThread::_threadRoutine, this);
}
void AlarmRunnerBackgroundThread::shutdown() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_running = false;
lk.unlock();
_condVar.notify_one();
@@ -56,7 +56,7 @@ AlarmRunnerBackgroundThread::_initializeSchedulers(std::vector<AlarmSchedulerHan
invariant(!schedulers.empty());
const auto registerHook = [this](Date_t next, const std::shared_ptr<AlarmScheduler>& which) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (next >= _nextAlarm) {
return;
}
@@ -81,7 +81,7 @@ AlarmRunnerBackgroundThread::_initializeSchedulers(std::vector<AlarmSchedulerHan
}
void AlarmRunnerBackgroundThread::_threadRoutine() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_running) {
const auto clockSource = _schedulers.front()->clockSource();
const auto now = clockSource->now();
diff --git a/src/mongo/util/alarm_runner_background_thread.h b/src/mongo/util/alarm_runner_background_thread.h
index 179f6350480..5f2b5d486cc 100644
--- a/src/mongo/util/alarm_runner_background_thread.h
+++ b/src/mongo/util/alarm_runner_background_thread.h
@@ -28,7 +28,7 @@
*/
#pragma once
-#include "mongo/stdx/condition_variable.h"
+#include "mongo/platform/condition_variable.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/alarm.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -64,7 +64,7 @@ private:
void _threadRoutine();
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("AlarmRunnerBackgroundThread::_mutex");
stdx::condition_variable _condVar;
bool _running = false;
Date_t _nextAlarm = Date_t::max();
diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp
index e5b40238230..7f34141dfad 100644
--- a/src/mongo/util/background.cpp
+++ b/src/mongo/util/background.cpp
@@ -36,8 +36,8 @@
#include <functional>
#include "mongo/config.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/idle_thread_block.h"
#include "mongo/util/concurrency/mutex.h"
@@ -80,7 +80,7 @@ private:
void _runTask(PeriodicTask* task);
// _mutex protects the _shutdownRequested flag and the _tasks vector.
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicTaskRunner::_mutex");
// The condition variable is used to sleep for the interval between task
// executions, and is notified when the _shutdownRequested flag is toggled.
@@ -129,7 +129,7 @@ bool runnerDestroyed = false;
struct BackgroundJob::JobStatus {
JobStatus() : state(NotStarted) {}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("JobStatus::mutex");
stdx::condition_variable done;
State state;
};
@@ -154,7 +154,7 @@ void BackgroundJob::jobBody() {
{
// It is illegal to access any state owned by this BackgroundJob after leaving this
// scope, with the exception of the call to 'delete this' below.
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
_status->state = Done;
_status->done.notify_all();
}
@@ -164,7 +164,7 @@ void BackgroundJob::jobBody() {
}
void BackgroundJob::go() {
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
massert(17234,
str::stream() << "backgroundJob already running: " << name(),
_status->state != Running);
@@ -178,7 +178,7 @@ void BackgroundJob::go() {
}
Status BackgroundJob::cancel() {
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
if (_status->state == Running)
return Status(ErrorCodes::IllegalOperation, "Cannot cancel a running BackgroundJob");
@@ -194,7 +194,7 @@ Status BackgroundJob::cancel() {
bool BackgroundJob::wait(unsigned msTimeOut) {
verify(!_selfDelete); // you cannot call wait on a self-deleting job
const auto deadline = Date_t::now() + Milliseconds(msTimeOut);
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
while (_status->state != Done) {
if (msTimeOut) {
if (stdx::cv_status::timeout ==
@@ -208,12 +208,12 @@ bool BackgroundJob::wait(unsigned msTimeOut) {
}
BackgroundJob::State BackgroundJob::getState() const {
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
return _status->state;
}
bool BackgroundJob::running() const {
- stdx::unique_lock<stdx::mutex> l(_status->mutex);
+ stdx::unique_lock<Latch> l(_status->mutex);
return _status->state == Running;
}
@@ -268,12 +268,12 @@ Status PeriodicTask::stopRunningPeriodicTasks(int gracePeriodMillis) {
}
void PeriodicTaskRunner::add(PeriodicTask* task) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_tasks.push_back(task);
}
void PeriodicTaskRunner::remove(PeriodicTask* task) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
for (size_t i = 0; i != _tasks.size(); i++) {
if (_tasks[i] == task) {
_tasks[i] = nullptr;
@@ -284,7 +284,7 @@ void PeriodicTaskRunner::remove(PeriodicTask* task) {
Status PeriodicTaskRunner::stop(int gracePeriodMillis) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_shutdownRequested = true;
_cond.notify_one();
}
@@ -300,7 +300,7 @@ void PeriodicTaskRunner::run() {
// Use a shorter cycle time in debug mode to help catch race conditions.
const Seconds waitTime(kDebugBuild ? 5 : 60);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (!_shutdownRequested) {
{
MONGO_IDLE_THREAD_BLOCK;
diff --git a/src/mongo/util/background_job_test.cpp b/src/mongo/util/background_job_test.cpp
index efca4fdbfa1..f95090d11a3 100644
--- a/src/mongo/util/background_job_test.cpp
+++ b/src/mongo/util/background_job_test.cpp
@@ -30,7 +30,7 @@
#include "mongo/platform/basic.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/background.h"
@@ -114,7 +114,7 @@ TEST(BackgroundJobLifeCycle, Go) {
virtual void run() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
ASSERT_FALSE(_hasRun);
_hasRun = true;
}
@@ -127,7 +127,7 @@ TEST(BackgroundJobLifeCycle, Go) {
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("Job::_mutex");
bool _hasRun;
Notification<void> _n;
};
diff --git a/src/mongo/util/background_thread_clock_source.h b/src/mongo/util/background_thread_clock_source.h
index 4d106780601..b7c8feed705 100644
--- a/src/mongo/util/background_thread_clock_source.h
+++ b/src/mongo/util/background_thread_clock_source.h
@@ -34,8 +34,8 @@
#include <thread>
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/time_support.h"
@@ -93,7 +93,7 @@ private:
const Milliseconds _granularity;
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
stdx::condition_variable _condition;
bool _inShutdown = false;
bool _started = false;
diff --git a/src/mongo/util/clock_source.cpp b/src/mongo/util/clock_source.cpp
index 6e81e0708fa..6f54a1bcc76 100644
--- a/src/mongo/util/clock_source.cpp
+++ b/src/mongo/util/clock_source.cpp
@@ -27,10 +27,10 @@
* it in the license file.
*/
+#include "mongo/util/clock_source.h"
#include "mongo/platform/basic.h"
-
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
-#include "mongo/util/clock_source.h"
#include "mongo/util/waitable.h"
namespace mongo {
@@ -55,7 +55,7 @@ stdx::cv_status ClockSource::waitForConditionUntil(stdx::condition_variable& cv,
}
struct AlarmInfo {
- stdx::mutex controlMutex;
+ Mutex controlMutex = MONGO_MAKE_LATCH("AlarmInfo::controlMutex");
BasicLockableAdapter* waitLock;
stdx::condition_variable* waitCV;
stdx::cv_status cvWaitResult = stdx::cv_status::no_timeout;
@@ -66,7 +66,7 @@ stdx::cv_status ClockSource::waitForConditionUntil(stdx::condition_variable& cv,
const auto waiterThreadId = stdx::this_thread::get_id();
bool invokedAlarmInline = false;
invariant(setAlarm(deadline, [alarmInfo, waiterThreadId, &invokedAlarmInline] {
- stdx::lock_guard<stdx::mutex> controlLk(alarmInfo->controlMutex);
+ stdx::lock_guard<Latch> controlLk(alarmInfo->controlMutex);
alarmInfo->cvWaitResult = stdx::cv_status::timeout;
if (!alarmInfo->waitLock) {
return;
@@ -86,7 +86,7 @@ stdx::cv_status ClockSource::waitForConditionUntil(stdx::condition_variable& cv,
Waitable::wait(waitable, this, cv, m);
}
m.unlock();
- stdx::lock_guard<stdx::mutex> controlLk(alarmInfo->controlMutex);
+ stdx::lock_guard<Latch> controlLk(alarmInfo->controlMutex);
m.lock();
alarmInfo->waitLock = nullptr;
alarmInfo->waitCV = nullptr;
diff --git a/src/mongo/util/clock_source.h b/src/mongo/util/clock_source.h
index 34b004f2712..c27d22b1dd3 100644
--- a/src/mongo/util/clock_source.h
+++ b/src/mongo/util/clock_source.h
@@ -39,7 +39,7 @@
namespace mongo {
-class Date_t;
+class Waitable;
/**
* An interface for getting the current wall clock time.
diff --git a/src/mongo/util/clock_source_mock.cpp b/src/mongo/util/clock_source_mock.cpp
index d05eb765722..0319e67b481 100644
--- a/src/mongo/util/clock_source_mock.cpp
+++ b/src/mongo/util/clock_source_mock.cpp
@@ -29,6 +29,7 @@
#include "mongo/platform/basic.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source_mock.h"
#include <algorithm>
diff --git a/src/mongo/util/clock_source_mock.h b/src/mongo/util/clock_source_mock.h
index 24c6851a240..689a03832f7 100644
--- a/src/mongo/util/clock_source_mock.h
+++ b/src/mongo/util/clock_source_mock.h
@@ -69,7 +69,7 @@ private:
using Alarm = std::pair<Date_t, unique_function<void()>>;
void _processAlarms(stdx::unique_lock<stdx::mutex> lk);
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
Date_t _now{Date_t::fromMillisSinceEpoch(1)};
std::vector<Alarm> _alarms;
};
diff --git a/src/mongo/util/concurrency/notification.h b/src/mongo/util/concurrency/notification.h
index 44bc7efc9ac..50e6b5e2302 100644
--- a/src/mongo/util/concurrency/notification.h
+++ b/src/mongo/util/concurrency/notification.h
@@ -32,8 +32,8 @@
#include <boost/optional.hpp>
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/duration.h"
#include "mongo/util/time_support.h"
@@ -59,7 +59,7 @@ public:
* block).
*/
explicit operator bool() const {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return !!_value;
}
@@ -68,7 +68,7 @@ public:
* If the wait is interrupted, throws an exception.
*/
T& get(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
opCtx->waitForConditionOrInterrupt(_condVar, lock, [this]() -> bool { return !!_value; });
return _value.get();
}
@@ -78,7 +78,7 @@ public:
* This variant of get cannot be interrupted.
*/
T& get() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (!_value) {
_condVar.wait(lock);
}
@@ -91,7 +91,7 @@ public:
* call. Must only be called once for the lifetime of the notification.
*/
void set(T value) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(!_value);
_value = std::move(value);
_condVar.notify_all();
@@ -104,13 +104,13 @@ public:
* If the wait is interrupted, throws an exception.
*/
bool waitFor(OperationContext* opCtx, Milliseconds waitTimeout) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
return opCtx->waitForConditionOrInterruptFor(
_condVar, lock, waitTimeout, [&]() { return !!_value; });
}
private:
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("Notification::_mutex");
stdx::condition_variable _condVar;
// Protected by mutex and only moves from not-set to set once
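Notification shows the condition-variable half of the migration: stdx::condition_variable stays, but it now waits on a stdx::unique_lock<Latch>. A stripped-down sketch (names illustrative):

#include "mongo/platform/condition_variable.h"
#include "mongo/platform/mutex.h"

namespace mongo {
class Flag {
public:
    void set() {
        stdx::lock_guard<Latch> lk(_mutex);
        _set = true;
        _cv.notify_all();
    }

    void wait() {
        stdx::unique_lock<Latch> lk(_mutex);
        _cv.wait(lk, [&] { return _set; });
    }

private:
    Mutex _mutex = MONGO_MAKE_LATCH("Flag::_mutex");
    stdx::condition_variable _cv;
    bool _set = false;
};
}  // namespace mongo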
diff --git a/src/mongo/util/concurrency/spin_lock.h b/src/mongo/util/concurrency/spin_lock.h
index 7f237dc3175..5c5a17b4b74 100644
--- a/src/mongo/util/concurrency/spin_lock.h
+++ b/src/mongo/util/concurrency/spin_lock.h
@@ -37,7 +37,7 @@
#include "mongo/config.h"
#include "mongo/platform/compiler.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -86,7 +86,7 @@ public:
}
private:
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
};
#else
diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp
index fd8d23377ea..ceaf9fcaf7e 100644
--- a/src/mongo/util/concurrency/thread_pool.cpp
+++ b/src/mongo/util/concurrency/thread_pool.cpp
@@ -79,7 +79,7 @@ ThreadPool::Options cleanUpOptions(ThreadPool::Options&& options) {
ThreadPool::ThreadPool(Options options) : _options(cleanUpOptions(std::move(options))) {}
ThreadPool::~ThreadPool() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_shutdown_inlock();
if (shutdownComplete != _state) {
_join_inlock(&lk);
@@ -94,7 +94,7 @@ ThreadPool::~ThreadPool() {
}
void ThreadPool::startup() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
if (_state != preStart) {
severe() << "Attempting to start pool " << _options.poolName
<< ", but it has already started";
@@ -110,7 +110,7 @@ void ThreadPool::startup() {
}
void ThreadPool::shutdown() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_shutdown_inlock();
}
@@ -130,11 +130,11 @@ void ThreadPool::_shutdown_inlock() {
}
void ThreadPool::join() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_join_inlock(&lk);
}
-void ThreadPool::_join_inlock(stdx::unique_lock<stdx::mutex>* lk) {
+void ThreadPool::_join_inlock(stdx::unique_lock<Latch>* lk) {
_stateChange.wait(*lk, [this] {
switch (_state) {
case preStart:
@@ -177,7 +177,7 @@ void ThreadPool::_drainPendingTasks() {
<< _options.threadNamePrefix << _nextThreadId++;
setThreadName(threadName);
_options.onCreateThread(threadName);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (!_pendingTasks.empty()) {
_doOneTask(&lock);
}
@@ -186,7 +186,7 @@ void ThreadPool::_drainPendingTasks() {
}
void ThreadPool::schedule(Task task) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
switch (_state) {
case joinRequired:
@@ -221,7 +221,7 @@ void ThreadPool::schedule(Task task) {
}
void ThreadPool::waitForIdle() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
// If there are any pending tasks, or non-idle threads, the pool is not idle.
while (!_pendingTasks.empty() || _numIdleThreads < _threads.size()) {
_poolIsIdle.wait(lk);
@@ -229,7 +229,7 @@ void ThreadPool::waitForIdle() {
}
ThreadPool::Stats ThreadPool::getStats() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
Stats result;
result.options = _options;
result.numThreads = _threads.size();
@@ -257,7 +257,7 @@ void ThreadPool::_workerThreadBody(ThreadPool* pool, const std::string& threadNa
}
void ThreadPool::_consumeTasks() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_state == running) {
if (_pendingTasks.empty()) {
if (_threads.size() > _options.minThreads) {
@@ -331,7 +331,7 @@ void ThreadPool::_consumeTasks() {
fassertFailedNoTrace(28703);
}
-void ThreadPool::_doOneTask(stdx::unique_lock<stdx::mutex>* lk) noexcept {
+void ThreadPool::_doOneTask(stdx::unique_lock<Latch>* lk) noexcept {
invariant(!_pendingTasks.empty());
LOG(3) << "Executing a task on behalf of pool " << _options.poolName;
Task task = std::move(_pendingTasks.front());
diff --git a/src/mongo/util/concurrency/thread_pool.h b/src/mongo/util/concurrency/thread_pool.h
index bbae97d1ebe..c382df9544d 100644
--- a/src/mongo/util/concurrency/thread_pool.h
+++ b/src/mongo/util/concurrency/thread_pool.h
@@ -34,8 +34,8 @@
#include <string>
#include <vector>
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/concurrency/thread_pool_interface.h"
#include "mongo/util/time_support.h"
@@ -189,7 +189,7 @@ private:
/**
* Implementation of join once _mutex is owned by "lk".
*/
- void _join_inlock(stdx::unique_lock<stdx::mutex>* lk);
+ void _join_inlock(stdx::unique_lock<Latch>* lk);
/**
* Runs the remaining tasks on a new thread as part of the join process, blocking until
@@ -201,7 +201,7 @@ private:
* Executes one task from _pendingTasks. "lk" must own _mutex, and _pendingTasks must have at
* least one entry.
*/
- void _doOneTask(stdx::unique_lock<stdx::mutex>* lk) noexcept;
+ void _doOneTask(stdx::unique_lock<Latch>* lk) noexcept;
/**
* Changes the lifecycle state (_state) of the pool and wakes up any threads waiting for a state
@@ -213,7 +213,7 @@ private:
const Options _options;
// Mutex guarding all non-const member variables.
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ThreadPool::_mutex");
// This variable represents the lifecycle state of the pool.
//
diff --git a/src/mongo/util/concurrency/thread_pool_test.cpp b/src/mongo/util/concurrency/thread_pool_test.cpp
index 1d85b8b95df..5812a860eab 100644
--- a/src/mongo/util/concurrency/thread_pool_test.cpp
+++ b/src/mongo/util/concurrency/thread_pool_test.cpp
@@ -34,8 +34,8 @@
#include <boost/optional.hpp>
#include "mongo/base/init.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/barrier.h"
#include "mongo/unittest/death_test.h"
@@ -70,7 +70,7 @@ protected:
}
void blockingWork() {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
++count1;
cv1.notify_all();
while (!flag2) {
@@ -78,7 +78,7 @@ protected:
}
}
- stdx::mutex mutex;
+ Mutex mutex = MONGO_MAKE_LATCH("ThreadPoolTest::mutex");
stdx::condition_variable cv1;
stdx::condition_variable cv2;
size_t count1 = 0U;
@@ -86,7 +86,7 @@ protected:
private:
void tearDown() override {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
flag2 = true;
cv2.notify_all();
lk.unlock();
@@ -103,7 +103,7 @@ TEST_F(ThreadPoolTest, MinPoolSize0) {
auto& pool = makePool(options);
pool.startup();
ASSERT_EQ(0U, pool.getStats().numThreads);
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
pool.schedule([this](auto status) {
ASSERT_OK(status);
blockingWork();
@@ -155,7 +155,7 @@ TEST_F(ThreadPoolTest, MaxPoolSize20MinPoolSize15) {
options.maxIdleThreadAge = Milliseconds(100);
auto& pool = makePool(options);
pool.startup();
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
for (size_t i = 0U; i < 30U; ++i) {
pool.schedule([this, i](auto status) {
ASSERT_OK(status) << i;
@@ -223,7 +223,7 @@ DEATH_TEST(ThreadPoolTest,
// mutex-lock is blocked waiting for the mutex, so the independent thread must be blocked inside
// of join(), until the pool thread finishes. At this point, if we destroy the pool, its
// destructor should trigger a fatal error due to double-join.
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
ThreadPool::Options options;
options.minThreads = 2;
options.poolName = "DoubleJoinPool";
@@ -233,10 +233,10 @@ DEATH_TEST(ThreadPoolTest,
while (pool->getStats().numThreads < 2U) {
sleepmillis(50);
}
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
pool->schedule([&mutex](auto status) {
ASSERT_OK(status);
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
});
stdx::thread t([&pool] {
pool->shutdown();
diff --git a/src/mongo/util/concurrency/thread_pool_test_common.cpp b/src/mongo/util/concurrency/thread_pool_test_common.cpp
index 5f32e649c65..33ff3cb303b 100644
--- a/src/mongo/util/concurrency/thread_pool_test_common.cpp
+++ b/src/mongo/util/concurrency/thread_pool_test_common.cpp
@@ -35,8 +35,8 @@
#include <memory>
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/death_test.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/thread_pool_interface.h"
@@ -203,10 +203,10 @@ COMMON_THREAD_POOL_TEST(RepeatedScheduleDoesntSmashStack) {
auto& pool = getThreadPool();
std::function<void()> func;
std::size_t n = 0;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable condvar;
func = [&pool, &n, &func, &condvar, &mutex, depth]() {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
if (n < depth) {
n++;
lk.unlock();
@@ -223,7 +223,7 @@ COMMON_THREAD_POOL_TEST(RepeatedScheduleDoesntSmashStack) {
pool.startup();
pool.join();
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
condvar.wait(lk, [&n, depth] { return n == depth; });
}
diff --git a/src/mongo/util/concurrency/ticketholder.cpp b/src/mongo/util/concurrency/ticketholder.cpp
index e30746807ae..a6abd154b2e 100644
--- a/src/mongo/util/concurrency/ticketholder.cpp
+++ b/src/mongo/util/concurrency/ticketholder.cpp
@@ -128,7 +128,7 @@ void TicketHolder::release() {
}
Status TicketHolder::resize(int newSize) {
- stdx::lock_guard<stdx::mutex> lk(_resizeMutex);
+ stdx::lock_guard<Latch> lk(_resizeMutex);
if (newSize < 5)
return Status(ErrorCodes::BadValue,
@@ -174,12 +174,12 @@ TicketHolder::TicketHolder(int num) : _outof(num), _num(num) {}
TicketHolder::~TicketHolder() = default;
bool TicketHolder::tryAcquire() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _tryAcquire();
}
void TicketHolder::waitForTicket(OperationContext* opCtx) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (opCtx) {
opCtx->waitForConditionOrInterrupt(_newTicket, lk, [this] { return _tryAcquire(); });
@@ -189,7 +189,7 @@ void TicketHolder::waitForTicket(OperationContext* opCtx) {
}
bool TicketHolder::waitForTicketUntil(OperationContext* opCtx, Date_t until) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
if (opCtx) {
return opCtx->waitForConditionOrInterruptUntil(
@@ -202,14 +202,14 @@ bool TicketHolder::waitForTicketUntil(OperationContext* opCtx, Date_t until) {
void TicketHolder::release() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_num++;
}
_newTicket.notify_one();
}
Status TicketHolder::resize(int newSize) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
int used = _outof.load() - _num;
if (used > newSize) {
diff --git a/src/mongo/util/concurrency/ticketholder.h b/src/mongo/util/concurrency/ticketholder.h
index 8ab3d4a39d9..d67e6bd04d1 100644
--- a/src/mongo/util/concurrency/ticketholder.h
+++ b/src/mongo/util/concurrency/ticketholder.h
@@ -33,8 +33,8 @@
#endif
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/mutex.h"
#include "mongo/util/time_support.h"
@@ -87,13 +87,13 @@ private:
// You can read _outof without a lock, but have to hold _resizeMutex to change.
AtomicWord<int> _outof;
- stdx::mutex _resizeMutex;
+ Mutex _resizeMutex = MONGO_MAKE_LATCH("TicketHolder::_resizeMutex");
#else
bool _tryAcquire();
AtomicWord<int> _outof;
int _num;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("TicketHolder::_mutex");
stdx::condition_variable _newTicket;
#endif
};
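The comment above ("you can read _outof without a lock, but have to hold _resizeMutex to change") describes a read-mostly idiom the new latch keeps intact: lock-free reads through AtomicWord, writes serialized under the latch. A small sketch with hypothetical names:

#include "mongo/platform/atomic_word.h"
#include "mongo/platform/mutex.h"

namespace mongo {
class Limit {
public:
    int get() const {
        return _outof.load();  // readers need no lock
    }

    void resize(int newSize) {
        stdx::lock_guard<Latch> lk(_resizeMutex);  // writers serialize here
        _outof.store(newSize);
    }

private:
    AtomicWord<int> _outof{0};
    Mutex _resizeMutex = MONGO_MAKE_LATCH("Limit::_resizeMutex");
};
}  // namespace mongo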
diff --git a/src/mongo/util/concurrency/with_lock.h b/src/mongo/util/concurrency/with_lock.h
index d5c55a16cb3..9d7f24bed8e 100644
--- a/src/mongo/util/concurrency/with_lock.h
+++ b/src/mongo/util/concurrency/with_lock.h
@@ -29,7 +29,7 @@
#pragma once
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include <utility>
@@ -56,7 +56,7 @@ namespace mongo {
*
* A call to such a function looks like this:
*
- * stdx::lock_guard<stdx::mutex> lk(_mutex);
+ * stdx::lock_guard<Latch> lk(_mutex);
* _clobber(lk, opCtx); // instead of _clobber_inlock(opCtx)
*
* Note that the formal argument need not (and should not) be named unless it is needed to pass
@@ -68,11 +68,11 @@ namespace mongo {
*
*/
struct WithLock {
- template <typename Mutex>
- WithLock(stdx::lock_guard<Mutex> const&) noexcept {}
+ template <typename LatchT>
+ WithLock(stdx::lock_guard<LatchT> const&) noexcept {}
- template <typename Mutex>
- WithLock(stdx::unique_lock<Mutex> const& lock) noexcept {
+ template <typename LatchT>
+ WithLock(stdx::unique_lock<LatchT> const& lock) noexcept {
invariant(lock.owns_lock());
}
@@ -88,9 +88,9 @@ struct WithLock {
// No moving a lock_guard<> or unique_lock<> in.
template <typename Mutex>
- WithLock(stdx::lock_guard<Mutex>&&) = delete;
+ WithLock(stdx::lock_guard<Latch>&&) = delete;
template <typename Mutex>
- WithLock(stdx::unique_lock<Mutex>&&) = delete;
+ WithLock(stdx::unique_lock<Latch>&&) = delete;
/*
* Produces a WithLock without benefit of any actual lock, for use in cases where a lock is not
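WithLock keeps working unchanged with the Latch-based guards, since its converting constructors accept any lock_guard or unique_lock specialization by const reference. The call pattern described in the comment above, fleshed out as a sketch (names illustrative):

#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"

namespace mongo {
class Counter {
public:
    void clobber() {
        stdx::lock_guard<Latch> lk(_mutex);
        _clobber(lk);  // instead of a _clobber_inlock() naming convention
    }

private:
    void _clobber(WithLock) {
        ++_value;  // the WithLock parameter proves the caller holds _mutex
    }

    Mutex _mutex = MONGO_MAKE_LATCH("Counter::_mutex");
    int _value = 0;
};
}  // namespace mongo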
diff --git a/src/mongo/util/concurrency/with_lock_test.cpp b/src/mongo/util/concurrency/with_lock_test.cpp
index 0bfe2b3829e..5724f899471 100644
--- a/src/mongo/util/concurrency/with_lock_test.cpp
+++ b/src/mongo/util/concurrency/with_lock_test.cpp
@@ -31,7 +31,7 @@
#include "mongo/platform/basic.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/log.h"
@@ -46,15 +46,15 @@ struct Beerp {
explicit Beerp(int i) {
_blerp(WithLock::withoutLock(), i);
}
- Beerp(stdx::lock_guard<stdx::mutex> const& lk, int i) {
+ Beerp(stdx::lock_guard<Latch> const& lk, int i) {
_blerp(lk, i);
}
int bleep(char n) {
- stdx::lock_guard<stdx::mutex> lk(_m);
+ stdx::lock_guard<Latch> lk(_m);
return _bloop(lk, n - '0');
}
int bleep(int i) {
- stdx::unique_lock<stdx::mutex> lk(_m);
+ stdx::unique_lock<Latch> lk(_m);
return _bloop(lk, i);
}
@@ -66,7 +66,7 @@ private:
log() << i << " bleep" << (i == 1 ? "\n" : "s\n");
return i;
}
- stdx::mutex _m;
+ Mutex _m = MONGO_MAKE_LATCH("Beerp::_m");
};
TEST(WithLockTest, OverloadSet) {
@@ -74,8 +74,8 @@ TEST(WithLockTest, OverloadSet) {
ASSERT_EQ(1, b.bleep('1'));
ASSERT_EQ(2, b.bleep(2));
- stdx::mutex m;
- stdx::lock_guard<stdx::mutex> lk(m);
+ auto m = MONGO_MAKE_LATCH();
+ stdx::lock_guard<Latch> lk(m);
Beerp(lk, 3);
}
diff --git a/src/mongo/util/diagnostic_info.cpp b/src/mongo/util/diagnostic_info.cpp
index 9681414e797..ea6cbe376df 100644
--- a/src/mongo/util/diagnostic_info.cpp
+++ b/src/mongo/util/diagnostic_info.cpp
@@ -33,12 +33,17 @@
#include "mongo/util/diagnostic_info.h"
+#include "mongo/config.h"
+
#if defined(__linux__)
#include <elf.h>
-#include <execinfo.h>
#include <link.h>
#endif
+#if defined(MONGO_CONFIG_HAVE_EXECINFO_BACKTRACE)
+#include <execinfo.h>
+#endif
+
#include <fmt/format.h>
#include <fmt/ostream.h>
@@ -52,8 +57,6 @@
using namespace fmt::literals;
namespace mongo {
-// Maximum number of stack frames to appear in a backtrace.
-const unsigned int kMaxBackTraceFrames = 100;
namespace {
MONGO_FAIL_POINT_DEFINE(currentOpSpawnsThreadWaitingForLatch);
@@ -67,10 +70,10 @@ public:
void setIsContended(bool value);
private:
- Mutex _testMutex{kBlockedOpMutexName};
+ Mutex _testMutex = MONGO_MAKE_LATCH(kBlockedOpMutexName);
stdx::condition_variable _cv;
- stdx::mutex _m;
+ stdx::mutex _m; // NOLINT
struct State {
bool isContended = false;
@@ -127,34 +130,45 @@ void BlockedOp::setIsContended(bool value) {
_cv.notify_one();
}
-const auto gDiagnosticHandle = Client::declareDecoration<DiagnosticInfo::Diagnostic>();
+struct DiagnosticInfoHandle {
+ stdx::mutex mutex; // NOLINT
+ boost::optional<DiagnosticInfo> maybeInfo = boost::none;
+};
+const auto getDiagnosticInfoHandle = Client::declareDecoration<DiagnosticInfoHandle>();
MONGO_INITIALIZER(LockActions)(InitializerContext* context) {
-
- class LockActionsSubclass : public LockActions {
+ class LockActionsSubclass : public Mutex::LockActions {
void onContendedLock(const StringData& name) override {
- if (haveClient()) {
- DiagnosticInfo::Diagnostic::set(
- Client::getCurrent(),
- std::make_shared<DiagnosticInfo>(takeDiagnosticInfo(name)));
- }
-
- if (currentOpSpawnsThreadWaitingForLatch.shouldFail() &&
- (name == kBlockedOpMutexName)) {
- gBlockedOp.setIsContended(true);
+ auto client = Client::getCurrent();
+ if (client) {
+ auto& handle = getDiagnosticInfoHandle(client);
+ stdx::lock_guard<stdx::mutex> lk(handle.mutex);
+ handle.maybeInfo.emplace(DiagnosticInfo::capture(name));
+
+ if (currentOpSpawnsThreadWaitingForLatch.shouldFail() &&
+ (name == kBlockedOpMutexName)) {
+ gBlockedOp.setIsContended(true);
+ }
}
}
- void onUnlock(const StringData&) override {
- DiagnosticInfo::Diagnostic::clearDiagnostic();
+ void onUnlock(const StringData& name) override {
+ auto client = Client::getCurrent();
+ if (client) {
+ auto& handle = getDiagnosticInfoHandle(client);
+ stdx::lock_guard<stdx::mutex> lk(handle.mutex);
+ handle.maybeInfo.reset();
+ }
}
};
- std::unique_ptr<LockActions> mutexPointer = std::make_unique<LockActionsSubclass>();
- Mutex::setLockActions(std::move(mutexPointer));
+ // Intentionally leaked because Latches are used in detached threads
+ static auto& actions = *new LockActionsSubclass;
+ Mutex::LockActions::add(&actions);
return Status::OK();
}
+/*
MONGO_INITIALIZER(ConditionVariableActions)(InitializerContext* context) {
class ConditionVariableActionsSubclass : public ConditionVariableActions {
@@ -162,7 +176,7 @@ MONGO_INITIALIZER(ConditionVariableActions)(InitializerContext* context) {
if (haveClient()) {
DiagnosticInfo::Diagnostic::set(
Client::getCurrent(),
- std::make_shared<DiagnosticInfo>(takeDiagnosticInfo(name)));
+ std::make_shared<DiagnosticInfo>(capture(name)));
}
}
void onFulfilledConditionVariable() override {
@@ -176,28 +190,10 @@ MONGO_INITIALIZER(ConditionVariableActions)(InitializerContext* context) {
return Status::OK();
}
+*/
} // namespace
-auto DiagnosticInfo::Diagnostic::get(Client* const client) -> std::shared_ptr<DiagnosticInfo> {
- auto& handle = gDiagnosticHandle(client);
- stdx::lock_guard lk(handle.m);
- return handle.diagnostic;
-}
-
-void DiagnosticInfo::Diagnostic::set(Client* const client,
- std::shared_ptr<DiagnosticInfo> newDiagnostic) {
- auto& handle = gDiagnosticHandle(client);
- stdx::lock_guard lk(handle.m);
- handle.diagnostic = newDiagnostic;
-}
-
-void DiagnosticInfo::Diagnostic::clearDiagnostic() {
- if (haveClient()) {
- DiagnosticInfo::Diagnostic::set(Client::getCurrent(), nullptr);
- }
-}
-
#if defined(__linux__)
namespace {
@@ -232,8 +228,6 @@ MONGO_INITIALIZER(InitializeDynamicObjectMap)(InitializerContext* context) {
return Status::OK();
};
-} // anonymous namespace
-
int DynamicObjectMap::addToMap(dl_phdr_info* info, size_t size, void* data) {
auto& addr_map = *reinterpret_cast<decltype(DynamicObjectMap::_map)*>(data);
for (int j = 0; j < info->dlpi_phnum; j++) {
@@ -271,30 +265,33 @@ DiagnosticInfo::StackFrame DynamicObjectMap::getFrame(void* instructionPtr) cons
return DiagnosticInfo::StackFrame{frame.objectPath, fileOffset};
}
+} // namespace
+#endif // linux
+
+#if defined(MONGO_CONFIG_HAVE_EXECINFO_BACKTRACE) && defined(__linux__)
// iterates through the backtrace instruction pointers to
// find the instruction pointer that refers to a segment in the addr_map
DiagnosticInfo::StackTrace DiagnosticInfo::makeStackTrace() const {
DiagnosticInfo::StackTrace trace;
- for (auto addr : _backtraceAddresses) {
- trace.frames.emplace_back(gDynamicObjectMap.getFrame(addr));
+ for (auto address : _backtrace.data) {
+ trace.frames.emplace_back(gDynamicObjectMap.getFrame(address));
}
return trace;
}
-static std::vector<void*> getBacktraceAddresses() {
- std::vector<void*> backtraceAddresses(kMaxBackTraceFrames, 0);
- int addressCount = backtrace(backtraceAddresses.data(), kMaxBackTraceFrames);
- // backtrace will modify the vector's underlying array without updating its size
- backtraceAddresses.resize(static_cast<unsigned int>(addressCount));
- return backtraceAddresses;
+auto DiagnosticInfo::getBacktrace() -> Backtrace {
+ Backtrace list;
+ auto len = ::backtrace(list.data.data(), list.data.size());
+ list.data.resize(len);
+ return list;
}
#else
DiagnosticInfo::StackTrace DiagnosticInfo::makeStackTrace() const {
return DiagnosticInfo::StackTrace();
}
-static std::vector<void*> getBacktraceAddresses() {
- return std::vector<void*>();
+auto DiagnosticInfo::getBacktrace() -> Backtrace {
+ return {};
}
#endif
@@ -311,7 +308,7 @@ bool operator==(const DiagnosticInfo::StackTrace& trace1,
bool operator==(const DiagnosticInfo& info1, const DiagnosticInfo& info2) {
return info1._captureName == info2._captureName && info1._timestamp == info2._timestamp &&
- info1._backtraceAddresses == info2._backtraceAddresses;
+ info1._backtrace.data == info2._backtrace.data;
}
std::string DiagnosticInfo::StackFrame::toString() const {
@@ -335,15 +332,16 @@ std::string DiagnosticInfo::StackTrace::toString() const {
std::string DiagnosticInfo::toString() const {
return "{{ \"name\": \"{}\", \"time\": \"{}\", \"backtraceSize\": {} }}"_format(
- _captureName.toString(), _timestamp.toString(), _backtraceAddresses.size());
+ _captureName.toString(), _timestamp.toString(), _backtrace.data.size());
}
-DiagnosticInfo takeDiagnosticInfo(const StringData& captureName) {
+DiagnosticInfo DiagnosticInfo::capture(const StringData& captureName, Options options) {
// uses backtrace to retrieve an array of instruction pointers for currently active
// function calls of the program
return DiagnosticInfo(getGlobalServiceContext()->getFastClockSource()->now(),
captureName,
- getBacktraceAddresses());
+ options.shouldTakeBacktrace ? DiagnosticInfo::getBacktrace()
+ : Backtrace{{}});
}
DiagnosticInfo::BlockedOpGuard::~BlockedOpGuard() {
@@ -365,4 +363,10 @@ auto DiagnosticInfo::maybeMakeBlockedOpForTest(Client* client) -> std::unique_pt
return guard;
}
+boost::optional<DiagnosticInfo> DiagnosticInfo::get(Client& client) {
+ auto& handle = getDiagnosticInfoHandle(client);
+ stdx::lock_guard<stdx::mutex> lk(handle.mutex);
+ return handle.maybeInfo;
+}
+
} // namespace mongo
diff --git a/src/mongo/util/diagnostic_info.h b/src/mongo/util/diagnostic_info.h
index a2c3339d3a5..1e578cecd80 100644
--- a/src/mongo/util/diagnostic_info.h
+++ b/src/mongo/util/diagnostic_info.h
@@ -53,19 +53,15 @@ public:
~BlockedOpGuard();
};
- struct Diagnostic {
- static std::shared_ptr<DiagnosticInfo> get(Client*);
- static void set(Client*, std::shared_ptr<DiagnosticInfo>);
- static void clearDiagnostic();
- stdx::mutex m;
- std::shared_ptr<DiagnosticInfo> diagnostic;
- };
+ static boost::optional<DiagnosticInfo> get(Client& client);
virtual ~DiagnosticInfo() = default;
- DiagnosticInfo(const DiagnosticInfo&) = delete;
- DiagnosticInfo& operator=(const DiagnosticInfo&) = delete;
- DiagnosticInfo(DiagnosticInfo&&) = default;
- DiagnosticInfo& operator=(DiagnosticInfo&&) = default;
+
+ // Maximum number of stack frames to appear in a backtrace.
+ static constexpr size_t kMaxBackTraceFrames = 100ull;
+ struct Backtrace {
+ std::vector<void*> data = std::vector<void*>(kMaxBackTraceFrames, nullptr);
+ };
struct StackFrame {
std::string toString() const;
@@ -97,10 +93,23 @@ public:
StackTrace makeStackTrace() const;
- static std::vector<void*> getBacktraceAddresses();
+ static Backtrace getBacktrace();
std::string toString() const;
- friend DiagnosticInfo takeDiagnosticInfo(const StringData& captureName);
+
+ /**
+ * Simple options struct to go with DiagnosticInfo::capture
+ */
+ struct Options {
+ Options() : shouldTakeBacktrace{false} {}
+
+ bool shouldTakeBacktrace;
+ };
+
+ /**
+ * Captures the diagnostic information based on the caller's context.
+ */
+ static DiagnosticInfo capture(const StringData& captureName, Options options = Options{});
/**
* This function checks the FailPoint currentOpSpawnsThreadWaitingForLatch and potentially
@@ -117,14 +126,10 @@ private:
Date_t _timestamp;
StringData _captureName;
- std::vector<void*> _backtraceAddresses;
-
- DiagnosticInfo(const Date_t& timestamp,
- const StringData& captureName,
- std::vector<void*> backtraceAddresses)
- : _timestamp(timestamp),
- _captureName(captureName),
- _backtraceAddresses(backtraceAddresses) {}
+ Backtrace _backtrace;
+
+ DiagnosticInfo(const Date_t& timestamp, const StringData& captureName, Backtrace backtrace)
+ : _timestamp(timestamp), _captureName(captureName), _backtrace(std::move(backtrace)) {}
};
@@ -136,9 +141,4 @@ inline std::ostream& operator<<(std::ostream& s, const DiagnosticInfo& info) {
return s << info.toString();
}
-/**
- * Captures the diagnostic information based on the caller's context.
- */
-DiagnosticInfo takeDiagnosticInfo(const StringData& captureName);
-
} // namespace mongo
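Taken together, the header changes above replace the free takeDiagnosticInfo() function with DiagnosticInfo::capture() plus an Options struct. A minimal usage sketch (illustrative only, not part of the patch; it uses just the names introduced above):

    DiagnosticInfo::Options options;
    options.shouldTakeBacktrace = true;  // default is false, so no backtrace is collected unless asked for
    auto info = DiagnosticInfo::capture("exampleCapture"_sd, std::move(options));
    auto trace = info.makeStackTrace();  // frames stay empty when no backtrace was taken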
diff --git a/src/mongo/util/diagnostic_info_test.cpp b/src/mongo/util/diagnostic_info_test.cpp
index 853962f515e..dc8ebdda058 100644
--- a/src/mongo/util/diagnostic_info_test.cpp
+++ b/src/mongo/util/diagnostic_info_test.cpp
@@ -33,6 +33,8 @@
#include <string>
+#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/compiler.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
#include "mongo/util/log.h"
@@ -47,7 +49,7 @@ TEST(DiagnosticInfo, BasicSingleThread) {
setGlobalServiceContext(std::move(serviceContext));
// take the initial diagnostic info
- DiagnosticInfo capture1 = takeDiagnosticInfo("capture1"_sd);
+ DiagnosticInfo capture1 = DiagnosticInfo::capture("capture1"_sd);
ASSERT_EQ(capture1.getCaptureName(), "capture1");
// mock time advancing and check that the current time is greater than capture1's timestamp
@@ -55,7 +57,7 @@ TEST(DiagnosticInfo, BasicSingleThread) {
ASSERT_LT(capture1.getTimestamp(), clockSourcePointer->now());
// take a second diagnostic capture and compare its fields to the first
- DiagnosticInfo capture2 = takeDiagnosticInfo("capture2"_sd);
+ DiagnosticInfo capture2 = DiagnosticInfo::capture("capture2"_sd);
ASSERT_LT(capture1.getTimestamp(), capture2.getTimestamp());
ASSERT_EQ(capture2.getCaptureName(), "capture2");
ASSERT_NE(capture2, capture1);
@@ -65,7 +67,7 @@ TEST(DiagnosticInfo, BasicSingleThread) {
}
using MaybeDiagnosticInfo = boost::optional<DiagnosticInfo>;
-void recurseAndCaptureInfo(MaybeDiagnosticInfo& info, size_t i);
+void recurseAndCaptureInfo(MaybeDiagnosticInfo& info, AtomicWord<int>& i);
TEST(DiagnosticInfo, StackTraceTest) {
// set up serviceContext and clock source
@@ -75,7 +77,11 @@ TEST(DiagnosticInfo, StackTraceTest) {
setGlobalServiceContext(std::move(serviceContext));
MaybeDiagnosticInfo infoRecurse0;
- recurseAndCaptureInfo(infoRecurse0, 0);
+ {
+ AtomicWord<int> i{0};
+ recurseAndCaptureInfo(infoRecurse0, i);
+ }
+
ASSERT(infoRecurse0);
log() << *infoRecurse0;
auto trace0 = infoRecurse0->makeStackTrace();
@@ -120,34 +126,33 @@ TEST(DiagnosticInfo, StackTraceTest) {
};
{
- volatile size_t i = 3; // NOLINT
+ constexpr auto k = 3;
+ AtomicWord<int> i{k};
MaybeDiagnosticInfo infoRecurse;
recurseAndCaptureInfo(infoRecurse, i);
- testRecursion(i, infoRecurse);
+ testRecursion(k, infoRecurse);
}
{
- volatile size_t i = 10; // NOLINT
+ constexpr auto k = 10;
+ AtomicWord<int> i{k};
MaybeDiagnosticInfo infoRecurse;
recurseAndCaptureInfo(infoRecurse, i);
- testRecursion(i, infoRecurse);
+ testRecursion(k, infoRecurse);
}
#else
ASSERT_TRUE(trace0.frames.empty());
#endif
}
-MONGO_COMPILER_NOINLINE void recurseAndCaptureInfo(MaybeDiagnosticInfo& info, size_t i) {
- // Prevent tail-call optimization.
-#ifndef _WIN32
- asm volatile(""); // NOLINT
-#endif
-
- if (i == 0) {
- info = takeDiagnosticInfo("Recursion!"_sd);
+MONGO_COMPILER_NOINLINE void recurseAndCaptureInfo(MaybeDiagnosticInfo& info, AtomicWord<int>& i) {
+ if (i.fetchAndSubtract(1) == 0) {
+ DiagnosticInfo::Options options;
+ options.shouldTakeBacktrace = true;
+ info = DiagnosticInfo::capture("Recursion!"_sd, std::move(options));
return;
}
- recurseAndCaptureInfo(info, --i);
+ recurseAndCaptureInfo(info, i);
}
} // namespace mongo
diff --git a/src/mongo/util/exit.cpp b/src/mongo/util/exit.cpp
index b92b59253ea..49b741c4493 100644
--- a/src/mongo/util/exit.cpp
+++ b/src/mongo/util/exit.cpp
@@ -37,8 +37,8 @@
#include <functional>
#include <stack>
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/log.h"
#include "mongo/util/quick_exit.h"
@@ -47,7 +47,7 @@ namespace mongo {
namespace {
-stdx::mutex shutdownMutex;
+Mutex shutdownMutex;
stdx::condition_variable shutdownTasksComplete;
boost::optional<ExitCode> shutdownExitCode;
bool shutdownTasksInProgress = false;
@@ -83,7 +83,7 @@ bool globalInShutdownDeprecated() {
}
ExitCode waitForShutdown() {
- stdx::unique_lock<stdx::mutex> lk(shutdownMutex);
+ stdx::unique_lock<Latch> lk(shutdownMutex);
shutdownTasksComplete.wait(lk, [] {
const auto shutdownStarted = static_cast<bool>(shutdownExitCode);
return shutdownStarted && !shutdownTasksInProgress;
@@ -93,7 +93,7 @@ ExitCode waitForShutdown() {
}
void registerShutdownTask(unique_function<void(const ShutdownTaskArgs&)> task) {
- stdx::lock_guard<stdx::mutex> lock(shutdownMutex);
+ stdx::lock_guard<Latch> lock(shutdownMutex);
invariant(!globalInShutdownDeprecated());
shutdownTasks.emplace(std::move(task));
}
@@ -102,7 +102,7 @@ void shutdown(ExitCode code, const ShutdownTaskArgs& shutdownArgs) {
decltype(shutdownTasks) localTasks;
{
- stdx::unique_lock<stdx::mutex> lock(shutdownMutex);
+ stdx::unique_lock<Latch> lock(shutdownMutex);
if (shutdownTasksInProgress) {
// Someone better have called shutdown in some form already.
@@ -138,7 +138,7 @@ void shutdown(ExitCode code, const ShutdownTaskArgs& shutdownArgs) {
runTasks(std::move(localTasks), shutdownArgs);
{
- stdx::lock_guard<stdx::mutex> lock(shutdownMutex);
+ stdx::lock_guard<Latch> lock(shutdownMutex);
shutdownTasksInProgress = false;
shutdownTasksComplete.notify_all();
@@ -151,7 +151,7 @@ void shutdownNoTerminate(const ShutdownTaskArgs& shutdownArgs) {
decltype(shutdownTasks) localTasks;
{
- stdx::lock_guard<stdx::mutex> lock(shutdownMutex);
+ stdx::lock_guard<Latch> lock(shutdownMutex);
if (globalInShutdownDeprecated())
return;
@@ -166,7 +166,7 @@ void shutdownNoTerminate(const ShutdownTaskArgs& shutdownArgs) {
runTasks(std::move(localTasks), shutdownArgs);
{
- stdx::lock_guard<stdx::mutex> lock(shutdownMutex);
+ stdx::lock_guard<Latch> lock(shutdownMutex);
shutdownTasksInProgress = false;
shutdownExitCode.emplace(EXIT_CLEAN);
}
diff --git a/src/mongo/util/fail_point.cpp b/src/mongo/util/fail_point.cpp
index c40a4b829d6..389a809ef23 100644
--- a/src/mongo/util/fail_point.cpp
+++ b/src/mongo/util/fail_point.cpp
@@ -93,7 +93,7 @@ void FailPoint::setMode(Mode mode, ValType val, BSONObj extra) {
* 3. Sets the new mode.
*/
- stdx::lock_guard<stdx::mutex> scoped(_modMutex);
+ stdx::lock_guard<Latch> scoped(_modMutex);
// Step 1
disable();
@@ -259,7 +259,7 @@ StatusWith<FailPoint::ModeOptions> FailPoint::parseBSON(const BSONObj& obj) {
BSONObj FailPoint::toBSON() const {
BSONObjBuilder builder;
- stdx::lock_guard<stdx::mutex> scoped(_modMutex);
+ stdx::lock_guard<Latch> scoped(_modMutex);
builder.append("mode", _mode);
builder.append("data", _data);
diff --git a/src/mongo/util/fail_point.h b/src/mongo/util/fail_point.h
index 57ee76bca9d..daf39bcba49 100644
--- a/src/mongo/util/fail_point.h
+++ b/src/mongo/util/fail_point.h
@@ -35,7 +35,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -314,7 +314,7 @@ private:
BSONObj _data;
// protects _mode, _timesOrPeriod, _data
- mutable stdx::mutex _modMutex;
+ mutable Mutex _modMutex = MONGO_MAKE_LATCH("FailPoint::_modMutex");
};
} // namespace mongo
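The FailPoint hunk above is representative of the mechanical pattern applied across this commit: a member stdx::mutex becomes a named Mutex built with MONGO_MAKE_LATCH, and lock guards are declared against the Latch alias. A minimal sketch of that pattern (the Example class is hypothetical; Mutex, MONGO_MAKE_LATCH, and Latch are the names used throughout the diff):

    class Example {
    public:
        void increment() {
            stdx::lock_guard<Latch> lk(_mutex);  // guards are taken against Latch, not the concrete Mutex type
            ++_counter;
        }

    private:
        // The string argument names the latch, presumably for diagnostics/reporting.
        Mutex _mutex = MONGO_MAKE_LATCH("Example::_mutex");
        int _counter = 0;
    };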
diff --git a/src/mongo/util/fail_point_test.cpp b/src/mongo/util/fail_point_test.cpp
index 1880b0d18a1..a99d7132a40 100644
--- a/src/mongo/util/fail_point_test.cpp
+++ b/src/mongo/util/fail_point_test.cpp
@@ -36,6 +36,7 @@
#include <string>
#include <vector>
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/fail_point.h"
@@ -46,6 +47,7 @@
using mongo::BSONObj;
using mongo::FailPoint;
using mongo::FailPointEnableBlock;
+
namespace stdx = mongo::stdx;
namespace mongo_test {
@@ -157,7 +159,7 @@ public:
void stopTest() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
_inShutdown = true;
}
for (auto& t : _tasks) {
@@ -179,7 +181,7 @@ private:
}
});
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
if (_inShutdown)
break;
}
@@ -200,7 +202,7 @@ private:
} catch (const std::logic_error&) {
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
if (_inShutdown)
break;
}
@@ -209,7 +211,7 @@ private:
void simpleTask() {
while (true) {
static_cast<void>(MONGO_unlikely(_fp.shouldFail()));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
if (_inShutdown)
break;
}
@@ -223,7 +225,7 @@ private:
_fp.setMode(FailPoint::alwaysOn, 0, BSON("a" << 44));
}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<mongo::Latch> lk(_mutex);
if (_inShutdown)
break;
}
@@ -231,7 +233,8 @@ private:
FailPoint _fp;
std::vector<stdx::thread> _tasks;
- stdx::mutex _mutex;
+
+ mongo::Mutex _mutex = MONGO_MAKE_LATCH();
bool _inShutdown = false;
};
diff --git a/src/mongo/util/future_impl.h b/src/mongo/util/future_impl.h
index 64aa57a22d8..db83f1c0cbc 100644
--- a/src/mongo/util/future_impl.h
+++ b/src/mongo/util/future_impl.h
@@ -38,8 +38,8 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/type_traits.h"
#include "mongo/stdx/utility.h"
#include "mongo/util/assert_util.h"
@@ -362,7 +362,7 @@ public:
if (state.load(std::memory_order_acquire) == SSBState::kFinished)
return;
- stdx::unique_lock<stdx::mutex> lk(mx);
+ stdx::unique_lock<Latch> lk(mx);
if (!cv) {
cv.emplace();
@@ -430,7 +430,7 @@ public:
Children localChildren;
- stdx::unique_lock<stdx::mutex> lk(mx);
+ stdx::unique_lock<Latch> lk(mx);
localChildren.swap(children);
if (cv) {
// This must be done inside the lock to correctly synchronize with wait().
@@ -483,8 +483,8 @@ public:
// These are only used to signal completion to blocking waiters. Benchmarks showed that it was
// worth deferring the construction of cv, so it can be avoided when it isn't necessary.
- stdx::mutex mx; // F (not that it matters)
- boost::optional<stdx::condition_variable> cv; // F (but guarded by mutex)
+ Mutex mx = MONGO_MAKE_LATCH("FutureResolution"); // F
+ boost::optional<stdx::condition_variable> cv; // F (but guarded by mutex)
// This holds the children created from a SharedSemiFuture. When this SharedState is completed,
// the result will be copied in to each of the children. This allows their continuations to have
diff --git a/src/mongo/util/heap_profiler.cpp b/src/mongo/util/heap_profiler.cpp
index 05a8a59f03a..2784eac8cfc 100644
--- a/src/mongo/util/heap_profiler.cpp
+++ b/src/mongo/util/heap_profiler.cpp
@@ -282,8 +282,10 @@ private:
// >1: sample every sampleIntervalBytes bytes allocated - less accurate but fast and small
std::atomic_size_t sampleIntervalBytes; // NOLINT
- stdx::mutex hashtable_mutex; // guards updates to both object and stack hash tables
- stdx::mutex stackinfo_mutex; // guards against races updating the StackInfo bson representation
+ // guards updates to both object and stack hash tables
+ stdx::mutex hashtable_mutex; // NOLINT
+ // guards against races updating the StackInfo bson representation
+ stdx::mutex stackinfo_mutex; // NOLINT
// cumulative bytes allocated - determines when samples are taken
std::atomic_size_t bytesAllocated{0}; // NOLINT
diff --git a/src/mongo/util/interruptible.h b/src/mongo/util/interruptible.h
index 446e61849cc..6e182d6bbd7 100644
--- a/src/mongo/util/interruptible.h
+++ b/src/mongo/util/interruptible.h
@@ -331,9 +331,9 @@ public:
* Sleeps until "deadline"; throws an exception if the interruptible is interrupted before then.
*/
void sleepUntil(Date_t deadline) {
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
invariant(!waitForConditionOrInterruptUntil(cv, lk, deadline, [] { return false; }));
}
@@ -342,9 +342,9 @@ public:
* then.
*/
void sleepFor(Milliseconds duration) {
- stdx::mutex m;
+ auto m = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
- stdx::unique_lock<stdx::mutex> lk(m);
+ stdx::unique_lock<Latch> lk(m);
invariant(!waitForConditionOrInterruptFor(cv, lk, duration, [] { return false; }));
}
diff --git a/src/mongo/util/invalidating_lru_cache.h b/src/mongo/util/invalidating_lru_cache.h
index 7f899151b48..6cef29f63c8 100644
--- a/src/mongo/util/invalidating_lru_cache.h
+++ b/src/mongo/util/invalidating_lru_cache.h
@@ -34,8 +34,8 @@
#include <boost/optional.hpp>
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/concurrency/with_lock.h"
@@ -140,7 +140,7 @@ public:
* cache.
*/
boost::optional<std::shared_ptr<Value>> get(const Key& key) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
auto myGeneration = _generation;
auto cacheIt = _cache.find(key);
@@ -192,7 +192,7 @@ public:
* Returns a vector of info about items in the cache for testing/reporting purposes
*/
std::vector<CachedItemInfo> getCacheInfo() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
std::vector<CachedItemInfo> ret;
ret.reserve(_active.size() + _cache.size());
@@ -255,7 +255,7 @@ private:
private:
InvalidatingLRUCache<Key, Value, Invalidator>* _cache;
- stdx::unique_lock<stdx::mutex> _lk;
+ stdx::unique_lock<Latch> _lk;
std::vector<std::shared_ptr<Value>> _activePtrsToDestroy;
};
@@ -331,7 +331,7 @@ private:
auto _makeDeleterWithLock(const Key& key, uint64_t myGeneration) -> auto {
return [this, key, myGeneration](Value* d) {
std::unique_ptr<Value> owned(d);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
auto it = _active.find(key);
if (it != _active.end() && it->second.expired()) {
_active.erase(it);
@@ -345,7 +345,7 @@ private:
};
}
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("InvalidatingLRUCache::_mutex");
// The generation count - items will not be returned to the cache if their generation count
// does not match the current generation count
diff --git a/src/mongo/util/lockable_adapter_test.cpp b/src/mongo/util/lockable_adapter_test.cpp
index f5325635c74..f0431d19c0e 100644
--- a/src/mongo/util/lockable_adapter_test.cpp
+++ b/src/mongo/util/lockable_adapter_test.cpp
@@ -68,7 +68,7 @@ public:
int unlockCalls{0};
private:
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
};
} // namespace
@@ -76,7 +76,7 @@ private:
TEST(BasicLockableAdapter, TestWithConditionVariable) {
bool ready = false;
stdx::condition_variable_any cv;
- stdx::mutex mut;
+ stdx::mutex mut; // NOLINT
auto result = stdx::async(stdx::launch::async, [&ready, &mut, &cv] {
stdx::lock_guard lock(mut);
@@ -93,7 +93,7 @@ TEST(BasicLockableAdapter, TestWithConditionVariable) {
TEST(BasicLockableAdapter, TestWithMutexTypes) {
{
- stdx::mutex mut;
+ stdx::mutex mut; // NOLINT
callUnderLock(mut);
}
diff --git a/src/mongo/util/net/http_client_curl.cpp b/src/mongo/util/net/http_client_curl.cpp
index fc5307bcb45..ca51d73111f 100644
--- a/src/mongo/util/net/http_client_curl.cpp
+++ b/src/mongo/util/net/http_client_curl.cpp
@@ -44,7 +44,7 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/net/http_client.h"
@@ -131,17 +131,17 @@ private:
}
static void _lockShare(CURL*, curl_lock_data, curl_lock_access, void* ctx) {
- reinterpret_cast<stdx::mutex*>(ctx)->lock();
+ reinterpret_cast<Mutex*>(ctx)->lock();
}
static void _unlockShare(CURL*, curl_lock_data, void* ctx) {
- reinterpret_cast<stdx::mutex*>(ctx)->unlock();
+ reinterpret_cast<Mutex*>(ctx)->unlock();
}
private:
bool _initialized = false;
CURLSH* _share = nullptr;
- stdx::mutex _shareMutex;
+ Mutex _shareMutex = MONGO_MAKE_LATCH("CurlLibraryManager::_shareMutex");
} curlLibraryManager;
/**
diff --git a/src/mongo/util/net/ssl_manager_openssl.cpp b/src/mongo/util/net/ssl_manager_openssl.cpp
index f3933dc640f..864e55d40f1 100644
--- a/src/mongo/util/net/ssl_manager_openssl.cpp
+++ b/src/mongo/util/net/ssl_manager_openssl.cpp
@@ -346,7 +346,7 @@ private:
class ThreadIDManager {
public:
unsigned long reserveID() {
- stdx::unique_lock<stdx::mutex> lock(_idMutex);
+ stdx::unique_lock<Latch> lock(_idMutex);
if (!_idLast.empty()) {
unsigned long ret = _idLast.top();
_idLast.pop();
@@ -356,13 +356,14 @@ private:
}
void releaseID(unsigned long id) {
- stdx::unique_lock<stdx::mutex> lock(_idMutex);
+ stdx::unique_lock<Latch> lock(_idMutex);
_idLast.push(id);
}
private:
// Machinery for producing IDs that are unique for the life of a thread.
- stdx::mutex _idMutex; // Protects _idNext and _idLast.
+ Mutex _idMutex =
+ MONGO_MAKE_LATCH("ThreadIDManager::_idMutex"); // Protects _idNext and _idLast.
unsigned long _idNext = 0; // Stores the next thread ID to use, if none already allocated.
std::stack<unsigned long, std::vector<unsigned long>>
_idLast; // Stores old thread IDs, for reuse.
@@ -476,7 +477,7 @@ private:
/** Either returns a cached password, or prompts the user to enter one. */
StatusWith<StringData> fetchPassword() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (_password->size()) {
return StringData(_password->c_str());
}
@@ -501,7 +502,7 @@ private:
}
private:
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PasswordFetcher::_mutex");
SecureString _password; // Protected by _mutex
std::string _prompt;
diff --git a/src/mongo/util/options_parser/options_parser_test.cpp b/src/mongo/util/options_parser/options_parser_test.cpp
index 65b61b08ab5..7c2b223633e 100644
--- a/src/mongo/util/options_parser/options_parser_test.cpp
+++ b/src/mongo/util/options_parser/options_parser_test.cpp
@@ -3473,7 +3473,6 @@ TEST(Constraints, MutuallyExclusiveConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_NOT_OK(environment.validate());
- ;
environment = moe::Environment();
argv.clear();
@@ -3482,7 +3481,6 @@ TEST(Constraints, MutuallyExclusiveConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_OK(environment.validate());
- ;
ASSERT_OK(environment.get(moe::Key("option1"), &value));
environment = moe::Environment();
@@ -3492,7 +3490,6 @@ TEST(Constraints, MutuallyExclusiveConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_OK(environment.validate());
- ;
ASSERT_OK(environment.get(moe::Key("section.option2"), &value));
}
@@ -3517,7 +3514,6 @@ TEST(Constraints, RequiresOtherConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_NOT_OK(environment.validate());
- ;
environment = moe::Environment();
argv.clear();
@@ -3527,7 +3523,6 @@ TEST(Constraints, RequiresOtherConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_OK(environment.validate());
- ;
ASSERT_OK(environment.get(moe::Key("option1"), &value));
ASSERT_OK(environment.get(moe::Key("section.option2"), &value));
@@ -3538,7 +3533,6 @@ TEST(Constraints, RequiresOtherConstraint) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
ASSERT_OK(environment.validate());
- ;
ASSERT_OK(environment.get(moe::Key("section.option2"), &value));
}
diff --git a/src/mongo/util/periodic_runner.h b/src/mongo/util/periodic_runner.h
index e9dcfa67489..210bd3c4ecf 100644
--- a/src/mongo/util/periodic_runner.h
+++ b/src/mongo/util/periodic_runner.h
@@ -35,7 +35,7 @@
#include <boost/optional.hpp>
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
diff --git a/src/mongo/util/periodic_runner_impl.cpp b/src/mongo/util/periodic_runner_impl.cpp
index 98a517cf7d9..dc9f091505d 100644
--- a/src/mongo/util/periodic_runner_impl.cpp
+++ b/src/mongo/util/periodic_runner_impl.cpp
@@ -77,7 +77,7 @@ void PeriodicRunnerImpl::PeriodicJobImpl::_run() {
}
startPromise.emplaceValue();
- stdx::unique_lock lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
while (_execStatus != ExecutionStatus::CANCELED) {
// Wait until it's unpaused or canceled
_condvar.wait(lk, [&] { return _execStatus != ExecutionStatus::PAUSED; });
@@ -120,14 +120,14 @@ void PeriodicRunnerImpl::PeriodicJobImpl::start() {
}
void PeriodicRunnerImpl::PeriodicJobImpl::pause() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_execStatus == PeriodicJobImpl::ExecutionStatus::RUNNING);
_execStatus = PeriodicJobImpl::ExecutionStatus::PAUSED;
}
void PeriodicRunnerImpl::PeriodicJobImpl::resume() {
{
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
invariant(_execStatus == PeriodicJobImpl::ExecutionStatus::PAUSED);
_execStatus = PeriodicJobImpl::ExecutionStatus::RUNNING;
}
@@ -136,7 +136,7 @@ void PeriodicRunnerImpl::PeriodicJobImpl::resume() {
void PeriodicRunnerImpl::PeriodicJobImpl::stop() {
auto lastExecStatus = [&] {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return std::exchange(_execStatus, ExecutionStatus::CANCELED);
}();
@@ -158,12 +158,12 @@ void PeriodicRunnerImpl::PeriodicJobImpl::stop() {
}
Milliseconds PeriodicRunnerImpl::PeriodicJobImpl::getPeriod() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
return _job.interval;
}
void PeriodicRunnerImpl::PeriodicJobImpl::setPeriod(Milliseconds ms) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_job.interval = ms;
if (_execStatus == PeriodicJobImpl::ExecutionStatus::RUNNING) {
diff --git a/src/mongo/util/periodic_runner_impl.h b/src/mongo/util/periodic_runner_impl.h
index a921a66c59f..07ed7db0ebd 100644
--- a/src/mongo/util/periodic_runner_impl.h
+++ b/src/mongo/util/periodic_runner_impl.h
@@ -32,8 +32,8 @@
#include <memory>
#include <vector>
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/future.h"
@@ -82,7 +82,7 @@ private:
stdx::thread _thread;
SharedPromise<void> _stopPromise;
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("PeriodicJobImpl::_mutex");
stdx::condition_variable _condvar;
/**
* The current execution status of the job.
diff --git a/src/mongo/util/periodic_runner_impl_test.cpp b/src/mongo/util/periodic_runner_impl_test.cpp
index 86f2f1a96d6..21018ea09b7 100644
--- a/src/mongo/util/periodic_runner_impl_test.cpp
+++ b/src/mongo/util/periodic_runner_impl_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/util/periodic_runner_impl.h"
#include "mongo/db/service_context_test_fixture.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/clock_source_mock.h"
namespace mongo {
@@ -75,14 +75,14 @@ TEST_F(PeriodicRunnerImplTest, OneJobTest) {
int count = 0;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&count, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
count++;
}
cv.notify_all();
@@ -96,7 +96,7 @@ TEST_F(PeriodicRunnerImplTest, OneJobTest) {
for (int i = 0; i < 10; i++) {
clockSource().advance(interval);
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&count, &i] { return count > i; });
}
}
@@ -108,14 +108,14 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobDoesNotRunWithoutStart) {
int count = 0;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&count, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
count++;
}
cv.notify_all();
@@ -133,14 +133,14 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobRunsCorrectlyWithStart) {
int count = 0;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&count, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
count++;
}
cv.notify_all();
@@ -152,7 +152,7 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobRunsCorrectlyWithStart) {
// Fast forward ten times, we should run all ten times.
for (int i = 0; i < 10; i++) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return count == i + 1; });
}
clockSource().advance(interval);
@@ -166,14 +166,14 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobPausesCorrectly) {
bool isPaused = false;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
// This will fail if pause does not work correctly.
ASSERT_FALSE(isPaused);
hasExecuted = true;
@@ -186,12 +186,12 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobPausesCorrectly) {
jobAnchor.start();
// Wait for the first execution.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return hasExecuted; });
}
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
isPaused = true;
jobAnchor.pause();
}
@@ -211,13 +211,13 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobResumesCorrectly) {
int count = 0;
Milliseconds interval{5};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
PeriodicRunner::PeriodicJob job("job",
[&count, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
count++;
}
cv.notify_all();
@@ -228,7 +228,7 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobResumesCorrectly) {
jobAnchor.start();
// Wait for the first execution.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return count == 1; });
}
@@ -242,7 +242,7 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobResumesCorrectly) {
clockSource().advance(interval);
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
// Wait for count to increment due to job execution.
cv.wait(lk, [&] { return count == i + 1; });
}
@@ -264,7 +264,7 @@ TEST_F(PeriodicRunnerImplTest, OnePausableJobResumesCorrectly) {
// Wait for count to increase. Test will hang if resume() does not work correctly.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return count > numIterationsBeforePause; });
}
@@ -277,14 +277,14 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsTest) {
Milliseconds intervalA{5};
Milliseconds intervalB{10};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add two jobs, ensure they both run the proper number of times
PeriodicRunner::PeriodicJob jobA("job",
[&countA, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
countA++;
}
cv.notify_all();
@@ -294,7 +294,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsTest) {
PeriodicRunner::PeriodicJob jobB("job",
[&countB, &mutex, &cv](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
countB++;
}
cv.notify_all();
@@ -311,7 +311,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsTest) {
for (int i = 0; i <= 10; i++) {
clockSource().advance(intervalA);
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&countA, &countB, &i] { return (countA > i && countB >= i / 2); });
}
}
@@ -320,7 +320,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsTest) {
}
TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
stdx::condition_variable doneCv;
bool a = false;
@@ -328,7 +328,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
PeriodicRunner::PeriodicJob jobA("job",
[&](Client*) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
a = true;
cv.notify_one();
@@ -339,7 +339,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
PeriodicRunner::PeriodicJob jobB("job",
[&](Client*) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
b = true;
cv.notify_one();
@@ -357,7 +357,7 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
clockSource().advance(Milliseconds(1));
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
doneCv.wait(lk, [&] { return a && b; });
ASSERT(a);
@@ -370,14 +370,14 @@ TEST_F(PeriodicRunnerImplTest, TwoJobsDontDeadlock) {
TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
size_t timesCalled = 0;
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
stdx::condition_variable cv;
// Add a job, ensure that it runs once
PeriodicRunner::PeriodicJob job("job",
[&](Client*) {
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
timesCalled++;
}
cv.notify_one();
@@ -388,7 +388,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
jobAnchor.start();
// Wait for the first execution.
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return timesCalled; });
}
@@ -397,7 +397,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// if we change the period to a longer duration, that doesn't trigger a run
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQ(timesCalled, 1ul);
}
@@ -405,7 +405,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// We actually changed the period
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQ(timesCalled, 1ul);
}
@@ -413,7 +413,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// Now we hit the new cutoff
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return timesCalled == 2ul; });
}
@@ -421,7 +421,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// Haven't hit it
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT_EQ(timesCalled, 2ul);
}
@@ -430,7 +430,7 @@ TEST_F(PeriodicRunnerImplTest, ChangingIntervalWorks) {
// shortening triggers the period
{
- stdx::unique_lock<stdx::mutex> lk(mutex);
+ stdx::unique_lock<Latch> lk(mutex);
cv.wait(lk, [&] { return timesCalled == 3ul; });
}
diff --git a/src/mongo/util/processinfo.h b/src/mongo/util/processinfo.h
index 43cde512599..5040484b46b 100644
--- a/src/mongo/util/processinfo.h
+++ b/src/mongo/util/processinfo.h
@@ -34,8 +34,8 @@
#include <string>
#include "mongo/db/jsobj.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/process_id.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
diff --git a/src/mongo/util/producer_consumer_queue.h b/src/mongo/util/producer_consumer_queue.h
index 05b39eff7db..44a87f93aec 100644
--- a/src/mongo/util/producer_consumer_queue.h
+++ b/src/mongo/util/producer_consumer_queue.h
@@ -35,8 +35,8 @@
#include <numeric>
#include "mongo/db/operation_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/util/concurrency/with_lock.h"
#include "mongo/util/interruptible.h"
#include "mongo/util/scopeguard.h"
@@ -468,7 +468,7 @@ public:
//
// Leaves T unchanged if an interrupt exception is thrown while waiting for space
void push(T&& t, Interruptible* interruptible = Interruptible::notInterruptible()) {
- _pushRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ _pushRunner([&](stdx::unique_lock<Latch>& lk) {
auto cost = _invokeCostFunc(t, lk);
uassert(ErrorCodes::ProducerConsumerQueueBatchTooLarge,
str::stream() << "cost of item (" << cost
@@ -496,7 +496,7 @@ public:
void pushMany(StartIterator start,
EndIterator last,
Interruptible* interruptible = Interruptible::notInterruptible()) {
- return _pushRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ return _pushRunner([&](stdx::unique_lock<Latch>& lk) {
size_t cost = 0;
for (auto iter = start; iter != last; ++iter) {
cost += _invokeCostFunc(*iter, lk);
@@ -521,12 +521,12 @@ public:
// Leaves T unchanged if it fails
bool tryPush(T&& t) {
return _pushRunner(
- [&](stdx::unique_lock<stdx::mutex>& lk) { return _tryPush(lk, std::move(t)); });
+ [&](stdx::unique_lock<Latch>& lk) { return _tryPush(lk, std::move(t)); });
}
// Pops one T out of the queue
T pop(Interruptible* interruptible = Interruptible::notInterruptible()) {
- return _popRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ return _popRunner([&](stdx::unique_lock<Latch>& lk) {
_waitForNonEmpty(lk, interruptible);
return _pop(lk);
});
@@ -538,7 +538,7 @@ public:
// Returns the popped values, along with the cost value of the items extracted
std::pair<std::deque<T>, size_t> popMany(
Interruptible* interruptible = Interruptible::notInterruptible()) {
- return _popRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ return _popRunner([&](stdx::unique_lock<Latch>& lk) {
_waitForNonEmpty(lk, interruptible);
return std::make_pair(std::exchange(_queue, {}), std::exchange(_current, 0));
});
@@ -554,7 +554,7 @@ public:
//
std::pair<std::deque<T>, size_t> popManyUpTo(
size_t budget, Interruptible* interruptible = Interruptible::notInterruptible()) {
- return _popRunner([&](stdx::unique_lock<stdx::mutex>& lk) {
+ return _popRunner([&](stdx::unique_lock<Latch>& lk) {
_waitForNonEmpty(lk, interruptible);
if (_current <= budget) {
@@ -584,13 +584,13 @@ public:
// Attempts a non-blocking pop of a value
boost::optional<T> tryPop() {
- return _popRunner([&](stdx::unique_lock<stdx::mutex>& lk) { return _tryPop(lk); });
+ return _popRunner([&](stdx::unique_lock<Latch>& lk) { return _tryPop(lk); });
}
// Closes the producer end. Consumers will continue to consume until the queue is exhausted, at
// which time they will begin to throw with an interruption dbexception
void closeProducerEnd() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_producerEndClosed = true;
@@ -599,7 +599,7 @@ public:
// Closes the consumer end. This causes all callers to throw with an interruption dbexception
void closeConsumerEnd() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
_consumerEndClosed = true;
_producerEndClosed = true;
@@ -608,7 +608,7 @@ public:
}
Stats getStats() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<Latch> lk(_mutex);
Stats stats;
stats.queueDepth = _current;
stats.waitingConsumers = _consumers;
@@ -804,7 +804,7 @@ private:
template <typename Callback>
auto _pushRunner(Callback&& cb) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_checkProducerClosed(lk);
@@ -815,7 +815,7 @@ private:
template <typename Callback>
auto _popRunner(Callback&& cb) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::unique_lock<Latch> lk(_mutex);
_checkConsumerClosed(lk);
@@ -866,9 +866,7 @@ private:
return t;
}
- void _waitForSpace(stdx::unique_lock<stdx::mutex>& lk,
- size_t cost,
- Interruptible* interruptible) {
+ void _waitForSpace(stdx::unique_lock<Latch>& lk, size_t cost, Interruptible* interruptible) {
// We do some pre-flight checks to avoid creating a cv if we don't need one
_checkProducerClosed(lk);
@@ -885,7 +883,7 @@ private:
});
}
- void _waitForNonEmpty(stdx::unique_lock<stdx::mutex>& lk, Interruptible* interruptible) {
+ void _waitForNonEmpty(stdx::unique_lock<Latch>& lk, Interruptible* interruptible) {
typename Consumers::Waiter waiter(_consumers);
interruptible->waitForConditionOrInterrupt(_consumers.cv(), lk, [&] {
@@ -894,7 +892,7 @@ private:
});
}
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("ProducerConsumerQueue::_mutex");
Options _options;
diff --git a/src/mongo/util/producer_consumer_queue_test.cpp b/src/mongo/util/producer_consumer_queue_test.cpp
index ba39482d0d0..d474c7bb7f9 100644
--- a/src/mongo/util/producer_consumer_queue_test.cpp
+++ b/src/mongo/util/producer_consumer_queue_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/util/producer_consumer_queue.h"
#include "mongo/db/service_context.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/assert_util.h"
@@ -622,7 +622,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(popManyUpToPopWithBlockingWithSpecialCost,
PRODUCER_CONSUMER_QUEUE_TEST(singleProducerMultiConsumer, runPermutations<false, true>) {
typename Helper::template ProducerConsumerQueue<MoveOnly> pcq{};
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
size_t successes = 0;
size_t failures = 0;
@@ -632,10 +632,10 @@ PRODUCER_CONSUMER_QUEUE_TEST(singleProducerMultiConsumer, runPermutations<false,
{
try {
pcq.pop(opCtx);
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
successes++;
} catch (const ExceptionFor<ErrorCodes::ProducerConsumerQueueConsumed>&) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
failures++;
}
}
@@ -665,7 +665,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerSingleConsumer, runPermutations<true,
pcq.push(MoveOnly(1));
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
size_t success = 0;
size_t failure = 0;
@@ -675,10 +675,10 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerSingleConsumer, runPermutations<true,
{
try {
pcq.push(MoveOnly(1), opCtx);
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
success++;
} catch (const ExceptionFor<ErrorCodes::ProducerConsumerQueueEndClosed>&) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
failure++;
}
}
@@ -688,7 +688,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerSingleConsumer, runPermutations<true,
pcq.pop();
while (true) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
if (success == 1)
break;
stdx::this_thread::yield();
@@ -744,7 +744,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerMiddleWaiterBreaks, runPermutations<tr
pcq.push(MoveOnly(1));
- stdx::mutex mutex;
+ auto mutex = MONGO_MAKE_LATCH();
bool failed = false;
OperationContext* threadBopCtx = nullptr;
@@ -757,7 +757,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerMiddleWaiterBreaks, runPermutations<tr
auto threadB = helper.runThread("ProducerB", [&](OperationContext* opCtx) {
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
threadBopCtx = opCtx;
}
@@ -773,7 +773,7 @@ PRODUCER_CONSUMER_QUEUE_TEST(multiProducerMiddleWaiterBreaks, runPermutations<tr
};
{
- stdx::lock_guard<stdx::mutex> lk(mutex);
+ stdx::lock_guard<Latch> lk(mutex);
ASSERT(threadBopCtx != nullptr);
}
diff --git a/src/mongo/util/queue.h b/src/mongo/util/queue.h
index c3a56d4db21..ec927066172 100644
--- a/src/mongo/util/queue.h
+++ b/src/mongo/util/queue.h
@@ -34,9 +34,9 @@
#include <limits>
#include <queue>
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/chrono.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
namespace mongo {
@@ -61,12 +61,12 @@ public:
BlockingQueue(size_t size, GetSizeFn f) : _maxSize(size), _getSize(f) {}
void pushEvenIfFull(T const& t) {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
pushImpl_inlock(t, _getSize(t));
}
void push(T const& t) {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_clearing = false;
size_t tSize = _getSize(t);
_waitForSpace_inlock(tSize, lk);
@@ -89,7 +89,7 @@ public:
return;
}
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
const auto startedEmpty = _queue.empty();
_clearing = false;
@@ -111,12 +111,12 @@ public:
* NOTE: Should only be used in a single producer case.
*/
void waitForSpace(size_t size) {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_waitForSpace_inlock(size, lk);
}
bool empty() const {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
return _queue.empty();
}
@@ -124,7 +124,7 @@ public:
* The size as measured by the size function. Default to counting each item
*/
size_t size() const {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
return _currentSize;
}
@@ -139,12 +139,12 @@ public:
* The number/count of items in the queue ( _queue.size() )
*/
size_t count() const {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
return _queue.size();
}
void clear() {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
_clearing = true;
_queue = std::queue<T>();
_currentSize = 0;
@@ -153,7 +153,7 @@ public:
}
bool tryPop(T& t) {
- stdx::lock_guard<stdx::mutex> lk(_lock);
+ stdx::lock_guard<Latch> lk(_lock);
if (_queue.empty())
return false;
@@ -166,7 +166,7 @@ public:
}
T blockingPop() {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_clearing = false;
while (_queue.empty() && !_clearing)
_cvNoLongerEmpty.wait(lk);
@@ -191,7 +191,7 @@ public:
bool blockingPop(T& t, int maxSecondsToWait) {
using namespace stdx::chrono;
const auto deadline = system_clock::now() + seconds(maxSecondsToWait);
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_clearing = false;
while (_queue.empty() && !_clearing) {
if (stdx::cv_status::timeout == _cvNoLongerEmpty.wait_until(lk, deadline))
@@ -213,7 +213,7 @@ public:
bool blockingPeek(T& t, int maxSecondsToWait) {
using namespace stdx::chrono;
const auto deadline = system_clock::now() + seconds(maxSecondsToWait);
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
_clearing = false;
while (_queue.empty() && !_clearing) {
if (stdx::cv_status::timeout == _cvNoLongerEmpty.wait_until(lk, deadline))
@@ -229,7 +229,7 @@ public:
// Obviously, this should only be used when you have
// only one consumer
bool peek(T& t) {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
if (_queue.empty()) {
return false;
}
@@ -242,7 +242,7 @@ public:
* Returns the item most recently added to the queue or nothing if the queue is empty.
*/
boost::optional<T> lastObjectPushed() const {
- stdx::unique_lock<stdx::mutex> lk(_lock);
+ stdx::unique_lock<Latch> lk(_lock);
if (_queue.empty()) {
return {};
}
@@ -254,7 +254,7 @@ private:
/**
* Returns when enough space is available.
*/
- void _waitForSpace_inlock(size_t size, stdx::unique_lock<stdx::mutex>& lk) {
+ void _waitForSpace_inlock(size_t size, stdx::unique_lock<Latch>& lk) {
while (_currentSize + size > _maxSize) {
_cvNoLongerFull.wait(lk);
}
@@ -268,7 +268,7 @@ private:
_cvNoLongerEmpty.notify_one();
}
- mutable stdx::mutex _lock;
+ mutable Mutex _lock = MONGO_MAKE_LATCH("BlockingQueue::_lock");
std::queue<T> _queue;
const size_t _maxSize;
size_t _currentSize = 0;
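For context, a small sketch of the BlockingQueue interface being converted above (illustrative only; the constructor, push, and tryPop signatures appear in this hunk, while the element type and size function are made up for the example):

    // Each item counts as one unit against a capacity of 100.
    BlockingQueue<int> queue(100, [](const int&) { return static_cast<size_t>(1); });
    queue.push(42);           // waits for space if the queue is full
    int out;
    if (queue.tryPop(out)) {  // non-blocking; returns false when empty
        // consume out
    }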
diff --git a/src/mongo/util/signal_handlers_synchronous.cpp b/src/mongo/util/signal_handlers_synchronous.cpp
index da60a37fd1f..9d9e6896c03 100644
--- a/src/mongo/util/signal_handlers_synchronous.cpp
+++ b/src/mongo/util/signal_handlers_synchronous.cpp
@@ -157,12 +157,12 @@ public:
}
private:
- static stdx::mutex _streamMutex;
+ static stdx::mutex _streamMutex; // NOLINT
static thread_local int terminateDepth;
stdx::unique_lock<stdx::mutex> _lk;
};
-stdx::mutex MallocFreeOStreamGuard::_streamMutex;
+stdx::mutex MallocFreeOStreamGuard::_streamMutex; // NOLINT
thread_local int MallocFreeOStreamGuard::terminateDepth = 0;
// must hold MallocFreeOStreamGuard to call
diff --git a/src/mongo/util/stacktrace_windows.cpp b/src/mongo/util/stacktrace_windows.cpp
index cbf449a75d2..00e734b0555 100644
--- a/src/mongo/util/stacktrace_windows.cpp
+++ b/src/mongo/util/stacktrace_windows.cpp
@@ -122,7 +122,7 @@ public:
private:
boost::optional<HANDLE> _processHandle;
- stdx::mutex _mutex;
+ stdx::mutex _mutex; // NOLINT
DWORD _origOptions;
};
diff --git a/src/mongo/util/synchronized_value.h b/src/mongo/util/synchronized_value.h
index a49585b9426..28033cd7bf8 100644
--- a/src/mongo/util/synchronized_value.h
+++ b/src/mongo/util/synchronized_value.h
@@ -29,7 +29,7 @@
#pragma once
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/mutex.h"
namespace mongo {
@@ -46,7 +46,7 @@ public:
/**
* Take lock on construction to guard value.
*/
- explicit update_guard(T& value, stdx::mutex& mtx) : _lock(mtx), _value(value) {}
+ explicit update_guard(T& value, Mutex& mtx) : _lock(mtx), _value(value) {}
~update_guard() = default;
// Only move construction is permitted so that synchronized_value may return update_guard
@@ -81,7 +81,7 @@ public:
private:
// Held lock from synchronized_value
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
// Reference to the value from synchronized_value
T& _value;
@@ -96,7 +96,7 @@ public:
/**
* Take lock on construction to guard value.
*/
- explicit const_update_guard(const T& value, stdx::mutex& mtx) : _lock(mtx), _value(value) {}
+ explicit const_update_guard(const T& value, Mutex& mtx) : _lock(mtx), _value(value) {}
~const_update_guard() = default;
// Only move construction is permitted so that synchronized_value may return const_update_guard
@@ -121,7 +121,7 @@ public:
private:
// Held lock from synchronized_value
- stdx::unique_lock<stdx::mutex> _lock;
+ stdx::unique_lock<Latch> _lock;
// Reference to the value from synchronized_value
const T& _value;
@@ -156,7 +156,7 @@ public:
// Support assigning from the contained value
synchronized_value& operator=(const T& value) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_value = value;
}
return *this;
@@ -164,7 +164,7 @@ public:
synchronized_value& operator=(T&& value) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_value = std::move(value);
}
return *this;
@@ -174,7 +174,7 @@ public:
* Return a copy of the protected object.
*/
T get() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _value;
}
@@ -204,26 +204,26 @@ public:
bool operator==(synchronized_value const& rhs) const {
// TODO: C++17 - move from std::lock to std::scoped_lock
std::lock(_mutex, rhs._mutex);
- stdx::lock_guard<stdx::mutex> lk1(_mutex, stdx::adopt_lock);
- stdx::lock_guard<stdx::mutex> lk2(rhs._mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk1(_mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk2(rhs._mutex, stdx::adopt_lock);
return _value == rhs._value;
}
bool operator!=(synchronized_value const& rhs) const {
// TODO: C++17 - move from std::lock to std::scoped_lock
std::lock(_mutex, rhs._mutex);
- stdx::lock_guard<stdx::mutex> lk1(_mutex, stdx::adopt_lock);
- stdx::lock_guard<stdx::mutex> lk2(rhs._mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk1(_mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk2(rhs._mutex, stdx::adopt_lock);
return _value != rhs._value;
}
bool operator==(T const& rhs) const {
- stdx::lock_guard<stdx::mutex> lock1(_mutex);
+ stdx::lock_guard<Latch> lock1(_mutex);
return _value == rhs;
}
bool operator!=(T const& rhs) const {
- stdx::lock_guard<stdx::mutex> lock1(_mutex);
+ stdx::lock_guard<Latch> lock1(_mutex);
return _value != rhs;
}
@@ -250,12 +250,12 @@ private:
T _value;
// Mutex to guard value
- mutable stdx::mutex _mutex;
+ mutable Mutex _mutex = MONGO_MAKE_LATCH("synchronized_value::_mutex");
};
template <class T>
bool operator==(const synchronized_value<T>& lhs, const T& rhs) {
- stdx::lock_guard<stdx::mutex> lock(lhs._mutex);
+ stdx::lock_guard<Latch> lock(lhs._mutex);
return lhs._value == rhs;
}
@@ -267,7 +267,7 @@ bool operator!=(const synchronized_value<T>& lhs, const T& rhs) {
template <class T>
bool operator==(const T& lhs, const synchronized_value<T>& rhs) {
- stdx::lock_guard<stdx::mutex> lock(rhs._mutex);
+ stdx::lock_guard<Latch> lock(rhs._mutex);
return lhs == rhs._value;
}
@@ -281,8 +281,8 @@ template <class T>
bool operator==(const synchronized_value<T>& lhs, const synchronized_value<T>& rhs) {
// TODO: C++17 - move from std::lock to std::scoped_lock
std::lock(lhs._mutex, rhs._mutex);
- stdx::lock_guard<stdx::mutex> lk1(lhs._mutex, stdx::adopt_lock);
- stdx::lock_guard<stdx::mutex> lk2(rhs._mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk1(lhs._mutex, stdx::adopt_lock);
+ stdx::lock_guard<Latch> lk2(rhs._mutex, stdx::adopt_lock);
return lhs._value == rhs._value;
}
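A hedged usage sketch for the synchronized_value members touched above (illustrative only; it relies on assignment from T, get(), and operator== as shown in these hunks, and assumes the wrapper is default-constructible, which the hunks do not show):

    synchronized_value<std::string> role;   // assumed default-constructible
    role = std::string{"primary"};          // operator=(T&&) takes the internal lock
    if (role == std::string{"primary"}) {   // operator==(T const&) also locks
        std::string copy = role.get();      // returns a copy of the protected value
    }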
diff --git a/src/mongo/util/time_support.h b/src/mongo/util/time_support.h
index 3639d41efd3..9511a735f5c 100644
--- a/src/mongo/util/time_support.h
+++ b/src/mongo/util/time_support.h
@@ -36,8 +36,8 @@
#include "mongo/base/status_with.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/chrono.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/duration.h"
namespace mongo {
diff --git a/src/mongo/util/uuid.cpp b/src/mongo/util/uuid.cpp
index 66835454a0c..d729777cf30 100644
--- a/src/mongo/util/uuid.cpp
+++ b/src/mongo/util/uuid.cpp
@@ -34,15 +34,15 @@
#include "mongo/util/uuid.h"
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/platform/mutex.h"
#include "mongo/platform/random.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/util/hex.h"
namespace mongo {
namespace {
-stdx::mutex uuidGenMutex;
+Mutex uuidGenMutex;
auto uuidGen = SecureRandom::create();
// Regex to match valid version 4 UUIDs with variant bits set
@@ -100,7 +100,7 @@ UUID UUID::gen() {
int64_t randomWords[2];
{
- stdx::lock_guard<stdx::mutex> lk(uuidGenMutex);
+ stdx::lock_guard<Latch> lk(uuidGenMutex);
// Generate 128 random bits
randomWords[0] = uuidGen->nextInt64();
diff --git a/src/mongo/watchdog/watchdog.cpp b/src/mongo/watchdog/watchdog.cpp
index bc12a7fb1eb..360b98a0be9 100644
--- a/src/mongo/watchdog/watchdog.cpp
+++ b/src/mongo/watchdog/watchdog.cpp
@@ -61,7 +61,7 @@ WatchdogPeriodicThread::WatchdogPeriodicThread(Milliseconds period, StringData t
void WatchdogPeriodicThread::start() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_state = State::kStarted;
@@ -76,7 +76,7 @@ void WatchdogPeriodicThread::shutdown() {
stdx::thread thread;
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
bool started = (_state == State::kStarted);
@@ -101,7 +101,7 @@ void WatchdogPeriodicThread::shutdown() {
}
void WatchdogPeriodicThread::setPeriod(Milliseconds period) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
bool wasEnabled = _enabled;
@@ -130,7 +130,7 @@ void WatchdogPeriodicThread::doLoop() {
auto preciseClockSource = client->getServiceContext()->getPreciseClockSource();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
// Ensure state is starting from a clean slate.
resetState();
@@ -144,7 +144,7 @@ void WatchdogPeriodicThread::doLoop() {
Date_t startTime = preciseClockSource->now();
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
MONGO_IDLE_THREAD_BLOCK;
@@ -257,7 +257,7 @@ void WatchdogMonitor::start() {
_watchdogMonitorThread.start();
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
invariant(_state == State::kNotStarted);
_state = State::kStarted;
@@ -266,7 +266,7 @@ void WatchdogMonitor::start() {
void WatchdogMonitor::setPeriod(Milliseconds duration) {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
if (duration > Milliseconds(0)) {
dassert(duration >= Milliseconds(1));
@@ -290,7 +290,7 @@ void WatchdogMonitor::setPeriod(Milliseconds duration) {
void WatchdogMonitor::shutdown() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
bool started = (_state == State::kStarted);
diff --git a/src/mongo/watchdog/watchdog.h b/src/mongo/watchdog/watchdog.h
index bd29038a12a..289d2dadac9 100644
--- a/src/mongo/watchdog/watchdog.h
+++ b/src/mongo/watchdog/watchdog.h
@@ -35,8 +35,8 @@
#include <vector>
#include "mongo/platform/atomic_word.h"
-#include "mongo/stdx/condition_variable.h"
-#include "mongo/stdx/mutex.h"
+#include "mongo/platform/condition_variable.h"
+#include "mongo/platform/mutex.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/duration.h"
@@ -204,7 +204,7 @@ private:
stdx::thread _thread;
// Lock to protect _state and control _thread
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WatchdogPeriodicThread::_mutex");
stdx::condition_variable _condvar;
};
@@ -367,7 +367,7 @@ private:
};
// Lock to protect _state and control _thread
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("WatchdogMonitor::_mutex");
// State of watchdog
State _state{State::kNotStarted};
diff --git a/src/mongo/watchdog/watchdog_test.cpp b/src/mongo/watchdog/watchdog_test.cpp
index ed62f7b9060..bc8bac2cc7b 100644
--- a/src/mongo/watchdog/watchdog_test.cpp
+++ b/src/mongo/watchdog/watchdog_test.cpp
@@ -54,7 +54,7 @@ public:
void run(OperationContext* opCtx) final {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
++_counter;
}
@@ -70,7 +70,7 @@ public:
void waitForCount() {
invariant(_wait != 0);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (_counter < _wait) {
_condvar.wait(lock);
}
@@ -80,7 +80,7 @@ public:
std::uint32_t getCounter() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _counter;
}
}
@@ -88,7 +88,7 @@ public:
private:
std::uint32_t _counter{0};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("TestPeriodicThread::_mutex");
stdx::condition_variable _condvar;
std::uint32_t _wait{0};
};
@@ -198,7 +198,7 @@ class TestCounterCheck : public WatchdogCheck {
public:
void run(OperationContext* opCtx) final {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
++_counter;
}
@@ -218,7 +218,7 @@ public:
void waitForCount() {
invariant(_wait != 0);
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
while (_counter < _wait) {
_condvar.wait(lock);
}
@@ -226,7 +226,7 @@ public:
std::uint32_t getCounter() {
{
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
return _counter;
}
}
@@ -234,7 +234,7 @@ public:
private:
std::uint32_t _counter{0};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("TestCounterCheck::_mutex");
stdx::condition_variable _condvar;
std::uint32_t _wait{0};
};
@@ -274,14 +274,14 @@ TEST_F(WatchdogCheckThreadTest, Basic) {
class ManualResetEvent {
public:
void set() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ stdx::lock_guard<Latch> lock(_mutex);
_set = true;
_condvar.notify_one();
}
void wait() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
+ stdx::unique_lock<Latch> lock(_mutex);
_condvar.wait(lock, [this]() { return _set; });
}
@@ -289,7 +289,7 @@ public:
private:
bool _set{false};
- stdx::mutex _mutex;
+ Mutex _mutex = MONGO_MAKE_LATCH("ManualResetEvent::_mutex");
stdx::condition_variable _condvar;
};