author    Billy Donahue <BillyDonahue@users.noreply.github.com>  2022-07-27 18:17:24 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-07-27 19:38:08 +0000
commit    958ad9abfc80861d3f43f44da694e83464b01e1d (patch)
tree      ca14e7097c1cb8ab20dfad7fa6888511f0226650
parent    f8a1ac19be6279e7ace012dafa8cfcaa028d49e1 (diff)
download  mongo-958ad9abfc80861d3f43f44da694e83464b01e1d.tar.gz
SERVER-68246 rewrite calls to boost::optional get and is_initialized
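
The change is mechanical: every call to boost::optional<T>::get() is rewritten as value(), and every is_initialized() as has_value(), matching the std::optional member names. A minimal illustrative sketch of the pattern (not code from this tree; assumes Boost 1.68 or newer, where has_value() was added):

    #include <boost/optional.hpp>
    #include <iostream>

    int main() {
        boost::optional<int> count = 42;

        // Old spelling, removed throughout by this commit:
        //   if (count.is_initialized()) { std::cout << count.get(); }

        // New spelling, mirroring the std::optional interface:
        if (count.has_value()) {
            std::cout << count.value() << '\n';
        }
        return 0;
    }
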
-rw-r--r-- src/mongo/bson/util/simple8b_type_util_test.cpp 88
-rw-r--r-- src/mongo/client/authenticate.cpp 2
-rw-r--r-- src/mongo/client/connpool.cpp 3
-rw-r--r-- src/mongo/client/internal_auth.cpp 2
-rw-r--r-- src/mongo/client/mongo_uri_connect.cpp 2
-rw-r--r-- src/mongo/client/read_preference.cpp 2
-rw-r--r-- src/mongo/client/sasl_aws_client_protocol.cpp 4
-rw-r--r-- src/mongo/client/sdam/server_selection_json_test_runner.cpp 2
-rw-r--r-- src/mongo/client/server_discovery_monitor_test.cpp 2
-rw-r--r-- src/mongo/crypto/fle_tags.cpp 6
-rw-r--r-- src/mongo/db/auth/address_restriction.cpp 8
-rw-r--r-- src/mongo/db/auth/auth_name.cpp 2
-rw-r--r-- src/mongo/db/auth/authorization_checks.cpp 2
-rw-r--r-- src/mongo/db/auth/authorization_manager_impl.cpp 2
-rw-r--r-- src/mongo/db/auth/authorization_session_for_test.cpp 2
-rw-r--r-- src/mongo/db/auth/authorization_session_impl.cpp 32
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_local.cpp 16
-rw-r--r-- src/mongo/db/catalog/capped_utils.cpp 2
-rw-r--r-- src/mongo/db/catalog/catalog_control.cpp 8
-rw-r--r-- src/mongo/db/catalog/catalog_helper.cpp 2
-rw-r--r-- src/mongo/db/catalog/coll_mod.cpp 2
-rw-r--r-- src/mongo/db/catalog/collection.cpp 2
-rw-r--r-- src/mongo/db/catalog/collection_catalog.cpp 8
-rw-r--r-- src/mongo/db/catalog/collection_catalog_helper.cpp 2
-rw-r--r-- src/mongo/db/catalog/collection_impl.cpp 8
-rw-r--r-- src/mongo/db/catalog/collection_options_test.cpp 2
-rw-r--r-- src/mongo/db/catalog/collection_options_validation.cpp 2
-rw-r--r-- src/mongo/db/catalog/create_collection.cpp 4
-rw-r--r-- src/mongo/db/catalog/database_impl.cpp 10
-rw-r--r-- src/mongo/db/catalog/drop_collection.cpp 2
-rw-r--r-- src/mongo/db/catalog/drop_indexes.cpp 8
-rw-r--r-- src/mongo/db/catalog/index_build_block.cpp 4
-rw-r--r-- src/mongo/db/catalog/index_build_entry_test.cpp 4
-rw-r--r-- src/mongo/db/catalog/index_catalog.cpp 2
-rw-r--r-- src/mongo/db/catalog/index_catalog_entry_impl.cpp 2
-rw-r--r-- src/mongo/db/catalog/index_catalog_impl.cpp 7
-rw-r--r-- src/mongo/db/catalog/list_indexes.cpp 2
-rw-r--r-- src/mongo/db/catalog/multi_index_block.cpp 12
-rw-r--r-- src/mongo/db/catalog/rename_collection.cpp 6
-rw-r--r-- src/mongo/db/catalog/rename_collection_test.cpp 2
-rw-r--r-- src/mongo/db/catalog/validate_adaptor.cpp 2
-rw-r--r-- src/mongo/db/catalog/validate_results.cpp 2
-rw-r--r-- src/mongo/db/catalog_raii.cpp 2
-rw-r--r-- src/mongo/db/coll_mod_reply_validation.cpp 8
-rw-r--r-- src/mongo/db/commands/command_mirroring_test.cpp 2
-rw-r--r-- src/mongo/db/commands/connection_status.cpp 4
-rw-r--r-- src/mongo/db/commands/create_indexes.cpp 4
-rw-r--r-- src/mongo/db/commands/feature_compatibility_version.cpp 2
-rw-r--r-- src/mongo/db/commands/find_cmd.cpp 4
-rw-r--r-- src/mongo/db/commands/generic.cpp 2
-rw-r--r-- src/mongo/db/commands/getmore_cmd.cpp 4
-rw-r--r-- src/mongo/db/commands/http_client.cpp 2
-rw-r--r-- src/mongo/db/commands/kill_sessions_command.cpp 2
-rw-r--r-- src/mongo/db/commands/list_databases.cpp 6
-rw-r--r-- src/mongo/db/commands/list_indexes.cpp 2
-rw-r--r-- src/mongo/db/commands/mr_common.cpp 2
-rw-r--r-- src/mongo/db/commands/run_aggregate.cpp 4
-rw-r--r-- src/mongo/db/commands/set_feature_compatibility_version_command.cpp 2
-rw-r--r-- src/mongo/db/commands/tenant_migration_donor_cmds.cpp 4
-rw-r--r-- src/mongo/db/commands/test_commands.cpp 2
-rw-r--r-- src/mongo/db/commands/txn_cmds.cpp 3
-rw-r--r-- src/mongo/db/commands/user_management_commands.cpp 2
-rw-r--r-- src/mongo/db/commands/user_management_commands_common.cpp 6
-rw-r--r-- src/mongo/db/commands/validate_db_metadata_cmd.cpp 3
-rw-r--r-- src/mongo/db/commands/write_commands.cpp 2
-rw-r--r-- src/mongo/db/curop.cpp 12
-rw-r--r-- src/mongo/db/curop_failpoint_helpers.cpp 2
-rw-r--r-- src/mongo/db/cursor_manager.cpp 2
-rw-r--r-- src/mongo/db/db_raii.cpp 2
-rw-r--r-- src/mongo/db/dbdirectclient_test.cpp 6
-rw-r--r-- src/mongo/db/error_labels.cpp 17
-rw-r--r-- src/mongo/db/exec/bucket_unpacker.cpp 10
-rw-r--r-- src/mongo/db/exec/sbe/abt/abt_lower.cpp 8
-rw-r--r-- src/mongo/db/exec/sbe/sbe_hash_lookup_test.cpp 2
-rw-r--r-- src/mongo/db/exec/sbe/stages/bson_scan.cpp 2
-rw-r--r-- src/mongo/db/exec/sbe/stages/column_scan.cpp 4
-rw-r--r-- src/mongo/db/exec/sbe/stages/ix_scan.cpp 6
-rw-r--r-- src/mongo/db/exec/sbe/stages/scan.cpp 26
-rw-r--r-- src/mongo/db/exec/working_set.cpp 2
-rw-r--r-- src/mongo/db/exhaust_cursor_currentop_integration_test.cpp 2
-rw-r--r-- src/mongo/db/fle_crud.cpp 24
-rw-r--r-- src/mongo/db/free_mon/free_mon_controller_test.cpp 56
-rw-r--r-- src/mongo/db/free_mon/free_mon_processor.cpp 68
-rw-r--r-- src/mongo/db/free_mon/free_mon_queue_test.cpp 14
-rw-r--r-- src/mongo/db/free_mon/free_mon_storage_test.cpp 22
-rw-r--r-- src/mongo/db/ftdc/compressor_test.cpp 14
-rw-r--r-- src/mongo/db/ftdc/file_writer.cpp 8
-rw-r--r-- src/mongo/db/index/index_build_interceptor.cpp 4
-rw-r--r-- src/mongo/db/index/skipped_record_tracker.cpp 4
-rw-r--r-- src/mongo/db/index_build_entry_helpers.cpp 2
-rw-r--r-- src/mongo/db/index_build_entry_helpers_test.cpp 4
-rw-r--r-- src/mongo/db/index_builds_coordinator.cpp 11
-rw-r--r-- src/mongo/db/index_builds_coordinator_mongod.cpp 14
-rw-r--r-- src/mongo/db/initialize_operation_session_info.cpp 4
-rw-r--r-- src/mongo/db/kill_sessions.cpp 6
-rw-r--r-- src/mongo/db/logical_session_id_helpers.cpp 14
-rw-r--r-- src/mongo/db/logical_time_validator.cpp 2
-rw-r--r-- src/mongo/db/matcher/expression.cpp 4
-rw-r--r-- src/mongo/db/matcher/schema/encrypt_schema_types_test.cpp 4
-rw-r--r-- src/mongo/db/op_observer/fcv_op_observer.cpp 4
-rw-r--r-- src/mongo/db/op_observer/op_observer_impl.cpp 22
-rw-r--r-- src/mongo/db/op_observer/op_observer_impl_test.cpp 34
-rw-r--r-- src/mongo/db/op_observer/op_observer_util.cpp 15
-rw-r--r-- src/mongo/db/ops/write_ops_retryability.cpp 6
-rw-r--r-- src/mongo/db/pipeline/abt/document_source_visitor.cpp 2
-rw-r--r-- src/mongo/db/pipeline/accumulator_rank.cpp 4
-rw-r--r-- src/mongo/db/pipeline/aggregation_request_test.cpp 2
-rw-r--r-- src/mongo/db/pipeline/document_source_bucket_auto.cpp 2
-rw-r--r-- src/mongo/db/pipeline/document_source_change_stream_check_invalidate.cpp 2
-rw-r--r-- src/mongo/db/pipeline/document_source_cursor.cpp 4
-rw-r--r-- src/mongo/db/pipeline/document_source_densify.cpp 4
-rw-r--r-- src/mongo/db/pipeline/document_source_fill.cpp 2
-rw-r--r-- src/mongo/db/pipeline/document_source_find_and_modify_image_lookup_test.cpp 12
-rw-r--r-- src/mongo/db/pipeline/document_source_graph_lookup.cpp 4
-rw-r--r-- src/mongo/db/pipeline/document_source_internal_unpack_bucket.cpp 22
-rw-r--r-- src/mongo/db/pipeline/document_source_list_local_sessions.cpp 4
-rw-r--r-- src/mongo/db/pipeline/document_source_list_sessions.cpp 2
-rw-r--r-- src/mongo/db/pipeline/document_source_lookup.cpp 4
-rw-r--r-- src/mongo/db/pipeline/document_source_sort.cpp 2
-rw-r--r-- src/mongo/db/pipeline/document_source_unwind.cpp 4
-rw-r--r-- src/mongo/db/pipeline/expression.cpp 6
-rw-r--r-- src/mongo/db/pipeline/expression_context.cpp 17
-rw-r--r-- src/mongo/db/pipeline/lite_parsed_document_source.cpp 2
-rw-r--r-- src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp 14
-rw-r--r-- src/mongo/db/pipeline/pipeline_test.cpp 6
-rw-r--r-- src/mongo/db/pipeline/resharding_initial_split_policy_test.cpp 36
-rw-r--r-- src/mongo/db/pipeline/sequential_document_cache_test.cpp 6
-rw-r--r-- src/mongo/db/pipeline/sharded_agg_helpers.cpp 2
-rw-r--r-- src/mongo/db/pipeline/window_function/window_function_exec_removable_document.cpp 4
-rw-r--r-- src/mongo/db/process_health/fault_manager.cpp 2
-rw-r--r-- src/mongo/db/query/count_command_as_aggregation_command.cpp 14
-rw-r--r-- src/mongo/db/query/count_command_test.cpp 14
-rw-r--r-- src/mongo/db/query/cursor_response.cpp 2
-rw-r--r-- src/mongo/db/query/cursor_response_test.cpp 4
-rw-r--r-- src/mongo/db/query/datetime/date_time_support.cpp 2
-rw-r--r-- src/mongo/db/query/fle/server_rewrite.cpp 22
-rw-r--r-- src/mongo/db/query/fle/server_rewrite_test.cpp 2
-rw-r--r-- src/mongo/db/query/get_executor.cpp 2
-rw-r--r-- src/mongo/db/query/parsed_distinct.cpp 4
-rw-r--r-- src/mongo/db/query/plan_cache_test.cpp 32
-rw-r--r-- src/mongo/db/query/plan_enumerator.cpp 2
-rw-r--r-- src/mongo/db/query/planner_access.cpp 4
-rw-r--r-- src/mongo/db/query/planner_analysis.cpp 6
-rw-r--r-- src/mongo/db/query/query_planner.cpp 2
-rw-r--r-- src/mongo/db/query/query_planner_options_test.cpp 2
-rw-r--r-- src/mongo/db/query/sbe_cached_solution_planner.cpp 2
-rw-r--r-- src/mongo/db/read_write_concern_defaults.cpp 16
-rw-r--r-- src/mongo/db/repl/collection_cloner.cpp 4
-rw-r--r-- src/mongo/db/repl/idempotency_test_fixture.cpp 2
-rw-r--r-- src/mongo/db/repl/initial_syncer.cpp 2
-rw-r--r-- src/mongo/db/repl/member_config.cpp 8
-rw-r--r-- src/mongo/db/repl/oplog.cpp 30
-rw-r--r-- src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp 2
-rw-r--r-- src/mongo/db/repl/oplog_applier_utils.cpp 4
-rw-r--r-- src/mongo/db/repl/oplog_entry.cpp 20
-rw-r--r-- src/mongo/db/repl/oplog_entry_or_grouped_inserts.cpp 2
-rw-r--r-- src/mongo/db/repl/primary_only_service.cpp 2
-rw-r--r-- src/mongo/db/repl/primary_only_service_test.cpp 26
-rw-r--r-- src/mongo/db/repl/read_concern_args.cpp 4
-rw-r--r-- src/mongo/db/repl/repl_set_commands.cpp 2
-rw-r--r-- src/mongo/db/repl/repl_set_test_egress.cpp 2
-rw-r--r-- src/mongo/db/repl/replication_consistency_markers_impl.cpp 12
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl.cpp 30
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp 36
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_test.cpp 16
-rw-r--r-- src/mongo/db/repl/replication_info.cpp 6
-rw-r--r-- src/mongo/db/repl/replication_recovery.cpp 12
-rw-r--r-- src/mongo/db/repl/rollback_impl.cpp 14
-rw-r--r-- src/mongo/db/repl/rollback_test_fixture.cpp 4
-rw-r--r-- src/mongo/db/repl/rs_rollback.cpp 4
-rw-r--r-- src/mongo/db/repl/storage_interface_impl.cpp 2
-rw-r--r-- src/mongo/db/repl/storage_timestamp_test.cpp 2
-rw-r--r-- src/mongo/db/repl/tenant_database_cloner_test.cpp 2
-rw-r--r-- src/mongo/db/repl/tenant_migration_access_blocker_util.cpp 24
-rw-r--r-- src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp 2
-rw-r--r-- src/mongo/db/repl/tenant_migration_donor_op_observer.cpp 13
-rw-r--r-- src/mongo/db/repl/tenant_migration_donor_service.cpp 8
-rw-r--r-- src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp 2
-rw-r--r-- src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp 8
-rw-r--r-- src/mongo/db/repl/tenant_migration_recipient_service.cpp 9
-rw-r--r-- src/mongo/db/repl/tenant_migration_recipient_service_test.cpp 14
-rw-r--r-- src/mongo/db/repl/transaction_oplog_application.cpp 4
-rw-r--r-- src/mongo/db/repl_index_build_state.cpp 6
-rw-r--r-- src/mongo/db/s/active_migrations_registry.cpp 4
-rw-r--r-- src/mongo/db/s/balancer/balancer.cpp 4
-rw-r--r-- src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp 10
-rw-r--r-- src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp 36
-rw-r--r-- src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp 12
-rw-r--r-- src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp 16
-rw-r--r-- src/mongo/db/s/balancer/type_migration.cpp 4
-rw-r--r-- src/mongo/db/s/collection_metadata.cpp 2
-rw-r--r-- src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp 10
-rw-r--r-- src/mongo/db/s/config/configsvr_collmod_command.cpp 4
-rw-r--r-- src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp 6
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager.cpp 4
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp 10
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp 4
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp 20
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp 32
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp 2
-rw-r--r-- src/mongo/db/s/create_collection_coordinator.cpp 2
-rw-r--r-- src/mongo/db/s/drop_collection_coordinator.cpp 2
-rw-r--r-- src/mongo/db/s/drop_database_coordinator.cpp 2
-rw-r--r-- src/mongo/db/s/forwardable_operation_metadata.cpp 4
-rw-r--r-- src/mongo/db/s/metadata_manager.cpp 6
-rw-r--r-- src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp 4
-rw-r--r-- src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp 2
-rw-r--r-- src/mongo/db/s/migration_destination_manager.cpp 8
-rw-r--r-- src/mongo/db/s/migration_source_manager.cpp 2
-rw-r--r-- src/mongo/db/s/rename_collection_coordinator.cpp 4
-rw-r--r-- src/mongo/db/s/rename_collection_participant_service.cpp 2
-rw-r--r-- src/mongo/db/s/reshard_collection_coordinator.cpp 4
-rw-r--r-- src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp 8
-rw-r--r-- src/mongo/db/s/resharding/resharding_coordinator_observer.cpp 2
-rw-r--r-- src/mongo/db/s/resharding/resharding_coordinator_service.cpp 13
-rw-r--r-- src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp 2
-rw-r--r-- src/mongo/db/s/resharding/resharding_coordinator_test.cpp 25
-rw-r--r-- src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp 4
-rw-r--r-- src/mongo/db/s/resharding/resharding_donor_service.cpp 4
-rw-r--r-- src/mongo/db/s/resharding/resharding_donor_service_test.cpp 2
-rw-r--r-- src/mongo/db/s/resharding/resharding_metrics.cpp 2
-rw-r--r-- src/mongo/db/s/resharding/resharding_op_observer.cpp 15
-rw-r--r-- src/mongo/db/s/resharding/resharding_oplog_application.cpp 2
-rw-r--r-- src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp 2
-rw-r--r-- src/mongo/db/s/resharding/resharding_recipient_service.cpp 4
-rw-r--r-- src/mongo/db/s/resharding/resharding_recipient_service_test.cpp 4
-rw-r--r-- src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp 8
-rw-r--r-- src/mongo/db/s/resharding/resharding_util.cpp 6
-rw-r--r-- src/mongo/db/s/session_catalog_migration_destination.cpp 2
-rw-r--r-- src/mongo/db/s/session_catalog_migration_source.cpp 8
-rw-r--r-- src/mongo/db/s/session_catalog_migration_source_test.cpp 6
-rw-r--r-- src/mongo/db/s/shard_key_index_util.cpp 2
-rw-r--r-- src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp 6
-rw-r--r-- src/mongo/db/s/sharding_ddl_coordinator.cpp 2
-rw-r--r-- src/mongo/db/s/sharding_initialization_mongod.cpp 2
-rw-r--r-- src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp 5
-rw-r--r-- src/mongo/db/s/split_vector.cpp 15
-rw-r--r-- src/mongo/db/s/transaction_coordinator_factory_mongod.cpp 2
-rw-r--r-- src/mongo/db/s/type_lockpings.cpp 4
-rw-r--r-- src/mongo/db/s/type_locks.cpp 12
-rw-r--r-- src/mongo/db/serverless/shard_split_commands.cpp 4
-rw-r--r-- src/mongo/db/serverless/shard_split_donor_op_observer.cpp 14
-rw-r--r-- src/mongo/db/serverless/shard_split_donor_service.cpp 6
-rw-r--r-- src/mongo/db/serverless/shard_split_donor_service_test.cpp 2
-rw-r--r-- src/mongo/db/service_entry_point_common.cpp 4
-rw-r--r-- src/mongo/db/session_killer.cpp 4
-rw-r--r-- src/mongo/db/sessions_collection.cpp 4
-rw-r--r-- src/mongo/db/stats/single_transaction_stats.cpp 6
-rw-r--r-- src/mongo/db/stats/storage_stats.cpp 2
-rw-r--r-- src/mongo/db/storage/backup_block.cpp 2
-rw-r--r-- src/mongo/db/storage/durable_catalog_impl.cpp 2
-rw-r--r-- src/mongo/db/storage/durable_history_pin.cpp 4
-rw-r--r-- src/mongo/db/storage/kv/durable_catalog_test.cpp 4
-rw-r--r-- src/mongo/db/storage/record_store_test_oplog.cpp 2
-rw-r--r-- src/mongo/db/storage/storage_engine_impl.cpp 8
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp 10
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp 4
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp 4
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp 2
-rw-r--r-- src/mongo/db/timeseries/bucket_catalog_test.cpp 4
-rw-r--r-- src/mongo/db/timeseries/timeseries_dotted_path_support_test.cpp 2
-rw-r--r-- src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.cpp 2
-rw-r--r-- src/mongo/db/transaction_api_test.cpp 6
-rw-r--r-- src/mongo/db/transaction_participant.cpp 4
-rw-r--r-- src/mongo/db/transaction_participant_test.cpp 16
-rw-r--r-- src/mongo/db/update/push_node.cpp 19
-rw-r--r-- src/mongo/db/update/update_driver_test.cpp 2
-rw-r--r-- src/mongo/db/views/view_catalog_helpers.cpp 4
-rw-r--r-- src/mongo/db/views/view_catalog_test.cpp 14
-rw-r--r-- src/mongo/dbtests/query_stage_cached_plan.cpp 12
-rw-r--r-- src/mongo/embedded/index_builds_coordinator_embedded.cpp 2
-rw-r--r-- src/mongo/executor/network_interface_tl.cpp 8
-rw-r--r-- src/mongo/executor/remote_command_request.cpp 4
-rw-r--r-- src/mongo/executor/remote_command_response.cpp 2
-rw-r--r-- src/mongo/executor/task_executor_cursor_test.cpp 56
-rw-r--r-- src/mongo/executor/thread_pool_task_executor.cpp 8
-rw-r--r-- src/mongo/idl/idl_test.cpp 52
-rw-r--r-- src/mongo/logv2/log_detail.cpp 3
-rw-r--r-- src/mongo/logv2/log_util.cpp 2
-rw-r--r-- src/mongo/platform/stack_locator_test.cpp 8
-rw-r--r-- src/mongo/rpc/metadata/client_metadata.cpp 4
-rw-r--r-- src/mongo/rpc/metadata/client_metadata_test.cpp 12
-rw-r--r-- src/mongo/rpc/metadata/impersonated_user_metadata.cpp 2
-rw-r--r-- src/mongo/rpc/metadata/security_token_metadata_test.cpp 2
-rw-r--r-- src/mongo/rpc/op_msg_integration_test.cpp 2
-rw-r--r-- src/mongo/rpc/write_concern_error_detail.cpp 2
-rw-r--r-- src/mongo/s/catalog/sharding_catalog_client_test.cpp 12
-rw-r--r-- src/mongo/s/catalog/type_changelog.cpp 12
-rw-r--r-- src/mongo/s/catalog/type_chunk.cpp 10
-rw-r--r-- src/mongo/s/catalog/type_config_version.cpp 6
-rw-r--r-- src/mongo/s/catalog/type_mongos.cpp 8
-rw-r--r-- src/mongo/s/catalog/type_shard.cpp 8
-rw-r--r-- src/mongo/s/catalog/type_tags.cpp 14
-rw-r--r-- src/mongo/s/catalog_cache.cpp 4
-rw-r--r-- src/mongo/s/catalog_cache_test.cpp 4
-rw-r--r-- src/mongo/s/chunk.cpp 2
-rw-r--r-- src/mongo/s/chunk_manager.cpp 14
-rw-r--r-- src/mongo/s/chunk_manager_targeter.cpp 16
-rw-r--r-- src/mongo/s/cluster_identity_loader_test.cpp 2
-rw-r--r-- src/mongo/s/commands/cluster_count_cmd.cpp 4
-rw-r--r-- src/mongo/s/commands/cluster_list_collections_cmd.cpp 2
-rw-r--r-- src/mongo/s/commands/cluster_list_databases_cmd.cpp 4
-rw-r--r-- src/mongo/s/commands/cluster_list_indexes_cmd.cpp 2
-rw-r--r-- src/mongo/s/commands/cluster_write_cmd.cpp 6
-rw-r--r-- src/mongo/s/commands/strategy.cpp 8
-rw-r--r-- src/mongo/s/mongos_main.cpp 2
-rw-r--r-- src/mongo/s/mongos_topology_coordinator.cpp 4
-rw-r--r-- src/mongo/s/query/cluster_aggregate.cpp 2
-rw-r--r-- src/mongo/s/query/cluster_cursor_manager.cpp 2
-rw-r--r-- src/mongo/s/query/cluster_exchange_test.cpp 30
-rw-r--r-- src/mongo/s/request_types/migration_secondary_throttle_options.cpp 2
-rw-r--r-- src/mongo/s/sharding_router_test_fixture.cpp 2
-rw-r--r-- src/mongo/s/transaction_router.cpp 4
-rw-r--r-- src/mongo/s/write_ops/batch_write_op.cpp 2
-rw-r--r-- src/mongo/s/write_ops/batched_command_request.cpp 4
-rw-r--r-- src/mongo/s/write_ops/batched_command_response.cpp 2
-rw-r--r-- src/mongo/scripting/mozjs/implscope.cpp 2
-rw-r--r-- src/mongo/scripting/mozjs/mongo.cpp 2
-rw-r--r-- src/mongo/shell/encrypted_dbclient_base.cpp 2
-rw-r--r-- src/mongo/shell/kms_aws.cpp 6
-rw-r--r-- src/mongo/shell/kms_azure.cpp 2
-rw-r--r-- src/mongo/shell/kms_gcp.cpp 2
-rw-r--r-- src/mongo/shell/mongo_main.cpp 6
-rw-r--r-- src/mongo/transport/session_asio.cpp 2
-rw-r--r-- src/mongo/transport/transport_layer_asio.cpp 2
-rw-r--r-- src/mongo/util/exit.cpp 6
-rw-r--r-- src/mongo/util/net/ssl_manager.cpp 4
-rw-r--r-- src/mongo/util/net/ssl_parameters.cpp 2
-rw-r--r-- src/mongo/util/testing_proctor.cpp 4
-rw-r--r-- src/mongo/util/tracing_support.cpp 2
-rw-r--r-- src/mongo/util/tracing_support_test.cpp 2
331 files changed, 1227 insertions, 1210 deletions
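
The hunks below apply that rename file by file. One nuance worth noting: boost::optional::value() is checked access and throws boost::bad_optional_access on a disengaged optional, whereas get() has an unchecked precondition (an assertion in debug builds, undefined behavior otherwise). A hypothetical sketch of that difference, again not code from this commit:

    #include <boost/optional.hpp>
    #include <iostream>

    int main() {
        boost::optional<int> empty;       // disengaged
        try {
            std::cout << empty.value();   // checked: throws on empty
        } catch (const boost::bad_optional_access&) {
            std::cout << "empty optional\n";
        }
        // empty.get() here would violate an unchecked precondition.
        return 0;
    }
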
diff --git a/src/mongo/bson/util/simple8b_type_util_test.cpp b/src/mongo/bson/util/simple8b_type_util_test.cpp
index bd2ed7e1038..c8775cdfd3b 100644
--- a/src/mongo/bson/util/simple8b_type_util_test.cpp
+++ b/src/mongo/bson/util/simple8b_type_util_test.cpp
@@ -144,11 +144,11 @@ TEST(Simple8bTypeUtil, DecimalPositiveValue) {
double val = 1.0;
boost::optional<uint8_t> scalar = Simple8bTypeUtil::calculateDecimalShiftMultiplier(val);
ASSERT_TRUE(scalar);
- ASSERT_EQUALS(scalar.get(), scaleIndexForMultiplier(1));
- boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.get());
+ ASSERT_EQUALS(scalar.value(), scaleIndexForMultiplier(1));
+ boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.value());
ASSERT_TRUE(encodeResult);
- ASSERT_EQUALS(encodeResult.get(), 1);
- double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.get(), scalar.get());
+ ASSERT_EQUALS(encodeResult.value(), 1);
+ double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.value(), scalar.value());
ASSERT_EQUALS(val, decodeResult);
}
@@ -156,11 +156,11 @@ TEST(Simple8bTypeUtil, EightDigitDecimalValue) {
double val = 1.12345678;
boost::optional<uint8_t> scalar = Simple8bTypeUtil::calculateDecimalShiftMultiplier(val);
ASSERT_TRUE(scalar);
- ASSERT_EQUALS(scalar.get(), scaleIndexForMultiplier(100000000));
- boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.get());
+ ASSERT_EQUALS(scalar.value(), scaleIndexForMultiplier(100000000));
+ boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.value());
ASSERT_TRUE(encodeResult);
- ASSERT_EQUALS(encodeResult.get(), 112345678);
- double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.get(), scalar.get());
+ ASSERT_EQUALS(encodeResult.value(), 112345678);
+ double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.value(), scalar.value());
ASSERT_EQUALS(val, decodeResult);
}
@@ -168,11 +168,11 @@ TEST(Simple8bTypeUtil, TwoDigitDecimalValue) {
double val = 1.12;
boost::optional<uint8_t> scalar = Simple8bTypeUtil::calculateDecimalShiftMultiplier(val);
ASSERT_TRUE(scalar);
- ASSERT_EQUALS(scalar.get(), scaleIndexForMultiplier(100));
- boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.get());
+ ASSERT_EQUALS(scalar.value(), scaleIndexForMultiplier(100));
+ boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.value());
ASSERT_TRUE(encodeResult);
- ASSERT_EQUALS(encodeResult.get(), 112);
- double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.get(), scalar.get());
+ ASSERT_EQUALS(encodeResult.value(), 112);
+ double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.value(), scalar.value());
ASSERT_EQUALS(val, decodeResult);
}
@@ -186,11 +186,11 @@ TEST(Simple8bTypeUtil, SparseDecimalValue) {
double val = 1.00000001;
boost::optional<uint8_t> scalar = Simple8bTypeUtil::calculateDecimalShiftMultiplier(val);
ASSERT_TRUE(scalar);
- ASSERT_EQUALS(scalar.get(), scaleIndexForMultiplier(100000000));
- boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.get());
+ ASSERT_EQUALS(scalar.value(), scaleIndexForMultiplier(100000000));
+ boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.value());
ASSERT_TRUE(encodeResult);
- ASSERT_EQUALS(encodeResult.get(), 100000001);
- double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.get(), scalar.get());
+ ASSERT_EQUALS(encodeResult.value(), 100000001);
+ double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.value(), scalar.value());
ASSERT_EQUALS(val, decodeResult);
}
@@ -198,11 +198,11 @@ TEST(Simple8bTypeUtil, RoundingDecimalValue) {
double val = 1.455454;
boost::optional<uint8_t> scalar = Simple8bTypeUtil::calculateDecimalShiftMultiplier(val);
ASSERT_TRUE(scalar);
- ASSERT_EQUALS(scalar.get(), scaleIndexForMultiplier(100000000));
- boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.get());
+ ASSERT_EQUALS(scalar.value(), scaleIndexForMultiplier(100000000));
+ boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.value());
ASSERT_TRUE(encodeResult);
- ASSERT_EQUALS(encodeResult.get(), 145545400);
- double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.get(), scalar.get());
+ ASSERT_EQUALS(encodeResult.value(), 145545400);
+ double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.value(), scalar.value());
ASSERT_EQUALS(val, decodeResult);
}
@@ -210,11 +210,11 @@ TEST(Simple8bTypeUtil, AllNines) {
double val = 1.99999999;
boost::optional<uint8_t> scalar = Simple8bTypeUtil::calculateDecimalShiftMultiplier(val);
ASSERT_TRUE(scalar);
- ASSERT_EQUALS(scalar.get(), scaleIndexForMultiplier(100000000));
- boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.get());
+ ASSERT_EQUALS(scalar.value(), scaleIndexForMultiplier(100000000));
+ boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.value());
ASSERT_TRUE(encodeResult);
- ASSERT_EQUALS(encodeResult.get(), 199999999);
- double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.get(), scalar.get());
+ ASSERT_EQUALS(encodeResult.value(), 199999999);
+ double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.value(), scalar.value());
ASSERT_EQUALS(val, decodeResult);
}
@@ -222,11 +222,11 @@ TEST(Simple8bTypeUtil, 3DigitValue) {
double val = 123.123;
boost::optional<uint8_t> scalar = Simple8bTypeUtil::calculateDecimalShiftMultiplier(val);
ASSERT_TRUE(scalar);
- ASSERT_EQUALS(scalar.get(), scaleIndexForMultiplier(10000));
- boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.get());
+ ASSERT_EQUALS(scalar.value(), scaleIndexForMultiplier(10000));
+ boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.value());
ASSERT_TRUE(encodeResult);
- ASSERT_EQUALS(encodeResult.get(), (1231230));
- double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.get(), scalar.get());
+ ASSERT_EQUALS(encodeResult.value(), (1231230));
+ double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.value(), scalar.value());
ASSERT_EQUALS(val, decodeResult);
}
@@ -301,12 +301,12 @@ TEST(Simple8bTypeUtil, TestMaxInt) {
double val = std::pow(2, 53);
boost::optional<uint8_t> scalar = Simple8bTypeUtil::calculateDecimalShiftMultiplier(val);
ASSERT_TRUE(scalar);
- ASSERT_EQUALS(scalar.get(), scaleIndexForMultiplier(1));
- boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.get());
+ ASSERT_EQUALS(scalar.value(), scaleIndexForMultiplier(1));
+ boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.value());
ASSERT_TRUE(encodeResult);
// Handle negative case
- ASSERT_EQUALS(encodeResult.get(), int64_t(val));
- double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.get(), scalar.get());
+ ASSERT_EQUALS(encodeResult.value(), int64_t(val));
+ double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.value(), scalar.value());
ASSERT_EQUALS(val, decodeResult);
}
@@ -314,12 +314,12 @@ TEST(Simple8bTypeUtil, NegativeValue) {
double val = -123.123;
boost::optional<uint8_t> scalar = Simple8bTypeUtil::calculateDecimalShiftMultiplier(val);
ASSERT_TRUE(scalar);
- ASSERT_EQUALS(scalar.get(), scaleIndexForMultiplier(10000));
- boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.get());
+ ASSERT_EQUALS(scalar.value(), scaleIndexForMultiplier(10000));
+ boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.value());
ASSERT_TRUE(encodeResult);
// Handle negative case
- ASSERT_EQUALS(encodeResult.get(), -1231230);
- double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.get(), scalar.get());
+ ASSERT_EQUALS(encodeResult.value(), -1231230);
+ double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.value(), scalar.value());
ASSERT_EQUALS(val, decodeResult);
}
@@ -327,12 +327,12 @@ TEST(Simple8bTypeUtil, NegativeSixDecimalValue) {
double val = -123.123456;
boost::optional<uint8_t> scalar = Simple8bTypeUtil::calculateDecimalShiftMultiplier(val);
ASSERT_TRUE(scalar);
- ASSERT_EQUALS(scalar.get(), scaleIndexForMultiplier(100000000));
- boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.get());
+ ASSERT_EQUALS(scalar.value(), scaleIndexForMultiplier(100000000));
+ boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.value());
ASSERT_TRUE(encodeResult);
// Handle negative case by subtracting 1
- ASSERT_EQUALS(encodeResult.get(), -12312345600);
- double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.get(), scalar.get());
+ ASSERT_EQUALS(encodeResult.value(), -12312345600);
+ double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.value(), scalar.value());
ASSERT_EQUALS(val, decodeResult);
}
@@ -341,12 +341,12 @@ TEST(Simple8bTypeUtil, TestMinInt) {
double val = -std::pow(2, 53);
boost::optional<std::uint8_t> scalar = Simple8bTypeUtil::calculateDecimalShiftMultiplier(val);
ASSERT_TRUE(scalar);
- ASSERT_EQUALS(scalar.get(), scaleIndexForMultiplier(1));
- boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.get());
+ ASSERT_EQUALS(scalar.value(), scaleIndexForMultiplier(1));
+ boost::optional<int64_t> encodeResult = Simple8bTypeUtil::encodeDouble(val, scalar.value());
ASSERT_TRUE(encodeResult);
// Handle negative case
- ASSERT_EQUALS(encodeResult.get(), int64_t(val));
- double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.get(), scalar.get());
+ ASSERT_EQUALS(encodeResult.value(), int64_t(val));
+ double decodeResult = Simple8bTypeUtil::decodeDouble(encodeResult.value(), scalar.value());
ASSERT_EQUALS(val, decodeResult);
}
diff --git a/src/mongo/client/authenticate.cpp b/src/mongo/client/authenticate.cpp
index 1e4dd8da074..a31dbb54672 100644
--- a/src/mongo/client/authenticate.cpp
+++ b/src/mongo/client/authenticate.cpp
@@ -378,7 +378,7 @@ SpeculativeAuthType speculateAuth(BSONObjBuilder* isMasterRequest,
return SpeculativeAuthType::kNone;
}
- auto params = std::move(optParams.get());
+ auto params = std::move(optParams.value());
auto ret = _speculateAuth(isMasterRequest,
mechanism,
diff --git a/src/mongo/client/connpool.cpp b/src/mongo/client/connpool.cpp
index 67e52f1a1b8..7ab1860e68f 100644
--- a/src/mongo/client/connpool.cpp
+++ b/src/mongo/client/connpool.cpp
@@ -436,7 +436,8 @@ DBClientBase* DBConnectionPool::get(const string& host, double socketTimeout) {
DBClientBase* DBConnectionPool::get(const MongoURI& uri, double socketTimeout) {
auto connect = [&] {
string errmsg;
- std::unique_ptr<DBClientBase> c(uri.connect(uri.getAppName().get(), errmsg, socketTimeout));
+ std::unique_ptr<DBClientBase> c(
+ uri.connect(uri.getAppName().value(), errmsg, socketTimeout));
uassert(40356, fmt::format("{}: connect failed {} : {}", _name, uri.toString(), errmsg), c);
return c.release();
};
diff --git a/src/mongo/client/internal_auth.cpp b/src/mongo/client/internal_auth.cpp
index 467fb219546..f016f3a4f06 100644
--- a/src/mongo/client/internal_auth.cpp
+++ b/src/mongo/client/internal_auth.cpp
@@ -78,7 +78,7 @@ BSONObj createInternalX509AuthDocument(boost::optional<StringData> userName) {
builder.append(saslCommandUserDBFieldName, "$external");
if (userName) {
- builder.append(saslCommandUserFieldName, userName.get());
+ builder.append(saslCommandUserFieldName, userName.value());
}
return builder.obj();
diff --git a/src/mongo/client/mongo_uri_connect.cpp b/src/mongo/client/mongo_uri_connect.cpp
index 331ec055119..1ac571a7e30 100644
--- a/src/mongo/client/mongo_uri_connect.cpp
+++ b/src/mongo/client/mongo_uri_connect.cpp
@@ -69,7 +69,7 @@ DBClientBase* MongoURI::connect(StringData applicationName,
auto optAuthObj = makeAuthObjFromOptions(connection->getMaxWireVersion(),
connection->getIsPrimarySaslMechanisms());
if (optAuthObj) {
- connection->auth(optAuthObj.get());
+ connection->auth(optAuthObj.value());
}
}
diff --git a/src/mongo/client/read_preference.cpp b/src/mongo/client/read_preference.cpp
index 124d7a0ac83..5fd56d8b2ce 100644
--- a/src/mongo/client/read_preference.cpp
+++ b/src/mongo/client/read_preference.cpp
@@ -255,7 +255,7 @@ void ReadPreferenceSetting::toInnerBSON(BSONObjBuilder* bob) const {
bob->append(kMaxStalenessSecondsFieldName, maxStalenessSeconds.count());
}
if (hedgingMode) {
- bob->append(kHedgeFieldName, hedgingMode.get().toBSON());
+ bob->append(kHedgeFieldName, hedgingMode.value().toBSON());
}
}
diff --git a/src/mongo/client/sasl_aws_client_protocol.cpp b/src/mongo/client/sasl_aws_client_protocol.cpp
index 60fb0250816..ae6e6a6596b 100644
--- a/src/mongo/client/sasl_aws_client_protocol.cpp
+++ b/src/mongo/client/sasl_aws_client_protocol.cpp
@@ -182,9 +182,9 @@ std::string generateClientSecond(StringData serverFirstBase64,
if (credentials.sessionToken) {
// TODO: move this into kms-message
uassertKmsRequest(kms_request_add_header_field(
- request.get(), "X-Amz-Security-Token", credentials.sessionToken.get().c_str()));
+ request.get(), "X-Amz-Security-Token", credentials.sessionToken.value().c_str()));
- second.setXAmzSecurityToken(boost::optional<StringData>(credentials.sessionToken.get()));
+ second.setXAmzSecurityToken(boost::optional<StringData>(credentials.sessionToken.value()));
}
UniqueKmsCharBuffer kmsSignature(kms_request_get_signature(request.get()));
diff --git a/src/mongo/client/sdam/server_selection_json_test_runner.cpp b/src/mongo/client/sdam/server_selection_json_test_runner.cpp
index 3c0cfed7532..42610c4366c 100644
--- a/src/mongo/client/sdam/server_selection_json_test_runner.cpp
+++ b/src/mongo/client/sdam/server_selection_json_test_runner.cpp
@@ -189,7 +189,7 @@ private:
return;
}
- auto newAvgRtt = duration_cast<Milliseconds>(newServerDescription->getRtt().get());
+ auto newAvgRtt = duration_cast<Milliseconds>(newServerDescription->getRtt().value());
if (newAvgRtt.compare(duration_cast<Milliseconds>(Milliseconds(_newAvgRtt))) != 0) {
std::stringstream errorMessage;
errorMessage << "new average RTT is incorrect, got '" << newAvgRtt
diff --git a/src/mongo/client/server_discovery_monitor_test.cpp b/src/mongo/client/server_discovery_monitor_test.cpp
index 0ab19dc9d2a..d82348a47d5 100644
--- a/src/mongo/client/server_discovery_monitor_test.cpp
+++ b/src/mongo/client/server_discovery_monitor_test.cpp
@@ -238,7 +238,7 @@ protected:
while (elapsed() < deadline) {
ASSERT_FALSE(hasReadyRequests());
if (hostAndPort) {
- ASSERT_FALSE(_topologyListener->hasIsMasterResponse(hostAndPort.get()));
+ ASSERT_FALSE(_topologyListener->hasIsMasterResponse(hostAndPort.value()));
}
advanceTime(Milliseconds(1));
}
diff --git a/src/mongo/crypto/fle_tags.cpp b/src/mongo/crypto/fle_tags.cpp
index 4737ff13144..abad71d2b71 100644
--- a/src/mongo/crypto/fle_tags.cpp
+++ b/src/mongo/crypto/fle_tags.cpp
@@ -135,7 +135,7 @@ std::vector<PrfBlock> readTagsWithContention(const FLEStateCollectionReader& esc
// n => n inserts for this field value pair.
// none => compaction => query ESC for null document to find # of inserts.
auto insertCounter = ESCCollection::emuBinary(esc, escTag, escVal);
- if (insertCounter && insertCounter.get() == 0) {
+ if (insertCounter && insertCounter.value() == 0) {
return std::move(binaryTags);
}
@@ -208,11 +208,11 @@ std::vector<PrfBlock> readTags(const FLEStateCollectionReader& esc,
// The output of readTags will be used as the argument to a $in expression, so make sure we
// don't exceed the configured memory limit.
auto limit = static_cast<size_t>(internalQueryFLERewriteMemoryLimit.load());
- if (!cm || cm.get() == 0) {
+ if (!cm || cm.value() == 0) {
auto binaryTags = readTagsWithContention(esc, ecc, s, c, d, 0, limit, {});
}
std::vector<PrfBlock> binaryTags;
- for (auto i = 0; i <= cm.get(); i++) {
+ for (auto i = 0; i <= cm.value(); i++) {
binaryTags = readTagsWithContention(esc, ecc, s, c, d, i, limit, std::move(binaryTags));
}
return binaryTags;
diff --git a/src/mongo/db/auth/address_restriction.cpp b/src/mongo/db/auth/address_restriction.cpp
index e8fd687bc75..c6aa462dcee 100644
--- a/src/mongo/db/auth/address_restriction.cpp
+++ b/src/mongo/db/auth/address_restriction.cpp
@@ -49,12 +49,12 @@ mongo::StatusWith<mongo::RestrictionSet<>> mongo::parseAddressRestrictionSet(
const boost::optional<std::vector<StringData>>& client = ar.getClientSource();
if (client) {
- vec.push_back(std::make_unique<ClientSourceRestriction>(client.get()));
+ vec.push_back(std::make_unique<ClientSourceRestriction>(client.value()));
}
const boost::optional<std::vector<StringData>>& server = ar.getServerAddress();
if (server) {
- vec.push_back(std::make_unique<ServerAddressRestriction>(server.get()));
+ vec.push_back(std::make_unique<ServerAddressRestriction>(server.value()));
}
if (vec.empty()) {
@@ -109,11 +109,11 @@ mongo::StatusWith<mongo::BSONArray> mongo::getRawAuthenticationRestrictions(
auto const ar = Address_restriction::parse(ctx, elem.Obj());
if (auto const&& client = ar.getClientSource()) {
// Validate
- ClientSourceRestriction(client.get());
+ ClientSourceRestriction(client.value());
}
if (auto const&& server = ar.getServerAddress()) {
// Validate
- ServerAddressRestriction(server.get());
+ ServerAddressRestriction(server.value());
}
if (!ar.getClientSource() && !ar.getServerAddress()) {
return Status(ErrorCodes::CollectionIsEmpty,
diff --git a/src/mongo/db/auth/auth_name.cpp b/src/mongo/db/auth/auth_name.cpp
index 4b8116b23e8..4eb73e327be 100644
--- a/src/mongo/db/auth/auth_name.cpp
+++ b/src/mongo/db/auth/auth_name.cpp
@@ -153,7 +153,7 @@ template <typename T>
void AuthName<T>::appendToBSON(BSONObjBuilder* bob, bool encodeTenant) const {
*bob << T::kFieldName << getName() << "db"_sd << getDB();
if (encodeTenant && _tenant) {
- *bob << kTenantFieldName << _tenant.get();
+ *bob << kTenantFieldName << _tenant.value();
}
}
diff --git a/src/mongo/db/auth/authorization_checks.cpp b/src/mongo/db/auth/authorization_checks.cpp
index fa3d947f3f1..6af135d0ee2 100644
--- a/src/mongo/db/auth/authorization_checks.cpp
+++ b/src/mongo/db/auth/authorization_checks.cpp
@@ -225,7 +225,7 @@ Status checkAuthForCreate(OperationContext* opCtx,
// Parse the viewOn namespace and the pipeline. If no pipeline was specified, use the empty
// pipeline.
- NamespaceString viewOnNs(ns.db(), optViewOn.get());
+ NamespaceString viewOnNs(ns.db(), optViewOn.value());
auto pipeline = cmd.getPipeline().get_value_or(std::vector<BSONObj>());
BSONArrayBuilder pipelineArray;
for (const auto& stage : pipeline) {
diff --git a/src/mongo/db/auth/authorization_manager_impl.cpp b/src/mongo/db/auth/authorization_manager_impl.cpp
index e82202e1f8f..cc86cf0fbc5 100644
--- a/src/mongo/db/auth/authorization_manager_impl.cpp
+++ b/src/mongo/db/auth/authorization_manager_impl.cpp
@@ -606,7 +606,7 @@ void AuthorizationManagerImpl::_pinnedUsersThreadRoutine() noexcept try {
_pinnedUsersCond, lk, timeout, [&] { return _usersToPin.has_value(); });
if (waitRes) {
- usersToPin = std::move(_usersToPin.get());
+ usersToPin = std::move(_usersToPin.value());
_usersToPin = boost::none;
}
lk.unlock();
diff --git a/src/mongo/db/auth/authorization_session_for_test.cpp b/src/mongo/db/auth/authorization_session_for_test.cpp
index 3b0d7f247db..620716c5e4f 100644
--- a/src/mongo/db/auth/authorization_session_for_test.cpp
+++ b/src/mongo/db/auth/authorization_session_for_test.cpp
@@ -49,7 +49,7 @@ void AuthorizationSessionForTest::assumePrivilegesForDB(Privilege privilege, Str
void AuthorizationSessionForTest::assumePrivilegesForDB(PrivilegeVector privileges,
StringData dbName) {
_authenticatedUser = UserHandle(User(UserName("authorizationSessionForTestUser", dbName)));
- _authenticatedUser.get()->addPrivileges(privileges);
+ _authenticatedUser.value()->addPrivileges(privileges);
_authenticationMode = AuthorizationSession::AuthenticationMode::kConnection;
_updateInternalAuthorizationState();
}
diff --git a/src/mongo/db/auth/authorization_session_impl.cpp b/src/mongo/db/auth/authorization_session_impl.cpp
index 5ebc878a07c..4f33b63ef66 100644
--- a/src/mongo/db/auth/authorization_session_impl.cpp
+++ b/src/mongo/db/auth/authorization_session_impl.cpp
@@ -184,7 +184,7 @@ void AuthorizationSessionImpl::startRequest(OperationContext* opCtx) {
3,
"security token based user still authenticated at start of request, "
"clearing from authentication state",
- "user"_attr = user.get()->getName().toBSON(true /* encode tenant */));
+ "user"_attr = user.value()->getName().toBSON(true /* encode tenant */));
_updateInternalAuthorizationState();
}
_authenticationMode = AuthenticationMode::kNone;
@@ -205,7 +205,7 @@ Status AuthorizationSessionImpl::addAndAuthorizeUser(OperationContext* opCtx,
// because only the Client thread can mutate _authenticatedUser.
if (_authenticatedUser) {
// Already logged in.
- auto previousUser = _authenticatedUser.get()->getName();
+ auto previousUser = _authenticatedUser.value()->getName();
if (previousUser == userName) {
// Allow reauthenticating as the same user, but warn.
LOGV2_WARNING(5626700,
@@ -274,7 +274,7 @@ Status AuthorizationSessionImpl::addAndAuthorizeUser(OperationContext* opCtx,
User* AuthorizationSessionImpl::lookupUser(const UserName& name) {
_contract.addAccessCheck(AccessCheckEnum::kLookupUser);
- if (!_authenticatedUser || (_authenticatedUser.get()->getName() != name)) {
+ if (!_authenticatedUser || (_authenticatedUser.value()->getName() != name)) {
return nullptr;
}
return _authenticatedUser->get();
@@ -297,7 +297,7 @@ void AuthorizationSessionImpl::logoutSecurityTokenUser(Client* client) {
LOGV2_DEBUG(6161506,
5,
"security token based user explicitly logged out",
- "user"_attr = user.get()->getName().toBSON(true /* encode tenant */));
+ "user"_attr = user.value()->getName().toBSON(true /* encode tenant */));
}
// Explicitly skip auditing the logout event,
@@ -318,7 +318,7 @@ void AuthorizationSessionImpl::logoutAllDatabases(Client* client, StringData rea
return;
}
- auto names = BSON_ARRAY(user.get()->getName().toBSON());
+ auto names = BSON_ARRAY(user.value()->getName().toBSON());
audit::logLogout(client, reason, names, BSONArray());
clearImpersonatedUserData();
@@ -335,11 +335,11 @@ void AuthorizationSessionImpl::logoutDatabase(Client* client,
"May not log out while using a security token based authentication",
_authenticationMode != AuthenticationMode::kSecurityToken);
- if (!_authenticatedUser || (_authenticatedUser.get()->getName().getDB() != dbname)) {
+ if (!_authenticatedUser || (_authenticatedUser.value()->getName().getDB() != dbname)) {
return;
}
- auto names = BSON_ARRAY(_authenticatedUser.get()->getName().toBSON());
+ auto names = BSON_ARRAY(_authenticatedUser.value()->getName().toBSON());
audit::logLogout(client, reason, names, BSONArray());
_authenticatedUser = boost::none;
@@ -351,7 +351,7 @@ boost::optional<UserName> AuthorizationSessionImpl::getAuthenticatedUserName() {
_contract.addAccessCheck(AccessCheckEnum::kGetAuthenticatedUserName);
if (_authenticatedUser) {
- return _authenticatedUser.get()->getName();
+ return _authenticatedUser.value()->getName();
} else {
return boost::none;
}
@@ -366,7 +366,7 @@ RoleNameIterator AuthorizationSessionImpl::getAuthenticatedRoleNames() {
void AuthorizationSessionImpl::grantInternalAuthorization(Client* client) {
stdx::lock_guard<Client> lk(*client);
if (MONGO_unlikely(_authenticatedUser != boost::none)) {
- auto previousUser = _authenticatedUser.get()->getName();
+ auto previousUser = _authenticatedUser.value()->getName();
uassert(ErrorCodes::Unauthorized,
str::stream() << "Unable to grant internal authorization, previously authorized as "
<< previousUser.getUnambiguousName(),
@@ -470,7 +470,7 @@ bool AuthorizationSessionImpl::isAuthorizedToCreateRole(const RoleName& roleName
// The user may create a role if the localhost exception is enabled, and they already own the
// role. This implies they have obtained the role through an external authorization mechanism.
if (_externalState->shouldAllowLocalhost()) {
- if (_authenticatedUser && _authenticatedUser.get()->hasRole(roleName)) {
+ if (_authenticatedUser && _authenticatedUser.value()->hasRole(roleName)) {
return true;
}
LOGV2(20241,
@@ -646,7 +646,7 @@ StatusWith<PrivilegeVector> AuthorizationSessionImpl::checkAuthorizedToListColle
bool AuthorizationSessionImpl::isAuthenticatedAsUserWithRole(const RoleName& roleName) {
_contract.addAccessCheck(AccessCheckEnum::kIsAuthenticatedAsUserWithRole);
- return (_authenticatedUser && _authenticatedUser.get()->hasRole(roleName));
+ return (_authenticatedUser && _authenticatedUser.value()->hasRole(roleName));
}
bool AuthorizationSessionImpl::shouldIgnoreAuthChecks() {
@@ -666,7 +666,7 @@ void AuthorizationSessionImpl::_refreshUserInfoAsNeeded(OperationContext* opCtx)
return;
}
- auto currentUser = _authenticatedUser.get();
+ auto currentUser = _authenticatedUser.value();
const auto& name = currentUser->getName();
const auto clearUser = [&] {
@@ -777,7 +777,7 @@ bool AuthorizationSessionImpl::isAuthorizedForAnyActionOnAnyResourceInDB(StringD
return false;
}
- const auto& user = _authenticatedUser.get();
+ const auto& user = _authenticatedUser.value();
// First lookup any Privileges on this database specifying Database resources
if (user->hasActionsForResource(ResourcePattern::forDatabaseName(db))) {
return true;
@@ -849,7 +849,7 @@ bool AuthorizationSessionImpl::isAuthorizedForAnyActionOnResource(const Resource
const int resourceSearchListLength =
buildResourceSearchList(resource, resourceSearchList.data());
- const auto& user = _authenticatedUser.get();
+ const auto& user = _authenticatedUser.value();
for (int i = 0; i < resourceSearchListLength; ++i) {
if (user->hasActionsForResource(resourceSearchList[i])) {
return true;
@@ -888,7 +888,7 @@ bool AuthorizationSessionImpl::_isAuthorizedForPrivilege(const Privilege& privil
return false;
}
- const auto& user = _authenticatedUser.get();
+ const auto& user = _authenticatedUser.value();
for (int i = 0; i < resourceSearchListLength; ++i) {
ActionSet userActions = user->getActionsForResource(resourceSearchList[i]);
unmetRequirements.removeAllActionsFromSet(userActions);
@@ -1063,7 +1063,7 @@ void AuthorizationSessionImpl::_updateInternalAuthorizationState() {
if (_authenticatedUser == boost::none) {
_authenticationMode = AuthenticationMode::kNone;
} else {
- RoleNameIterator roles = _authenticatedUser.get()->getIndirectRoles();
+ RoleNameIterator roles = _authenticatedUser.value()->getIndirectRoles();
while (roles.more()) {
RoleName roleName = roles.next();
_authenticatedRoleNames.push_back(RoleName(roleName.getRole(), roleName.getDB()));
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index c860cb839b2..9f98e5f7032 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -105,7 +105,7 @@ void serializeResolvedRoles(BSONObjBuilder* user,
const AuthzManagerExternalState::ResolvedRoleData& data,
boost::optional<const BSONObj&> roleDoc = boost::none) {
BSONArrayBuilder rolesBuilder(user->subarrayStart("inheritedRoles"));
- for (const auto& roleName : data.roles.get()) {
+ for (const auto& roleName : data.roles.value()) {
roleName.serializeToBSON(&rolesBuilder);
}
rolesBuilder.doneFast();
@@ -113,14 +113,14 @@ void serializeResolvedRoles(BSONObjBuilder* user,
if (data.privileges) {
BSONArrayBuilder privsBuilder(user->subarrayStart("inheritedPrivileges"));
if (roleDoc) {
- auto privs = roleDoc.get()["privileges"];
+ auto privs = roleDoc.value()["privileges"];
if (privs) {
for (const auto& privilege : privs.Obj()) {
privsBuilder.append(privilege);
}
}
}
- for (const auto& privilege : data.privileges.get()) {
+ for (const auto& privilege : data.privileges.value()) {
privsBuilder.append(privilege.toBSON());
}
privsBuilder.doneFast();
@@ -129,7 +129,7 @@ void serializeResolvedRoles(BSONObjBuilder* user,
if (data.restrictions) {
BSONArrayBuilder arBuilder(user->subarrayStart("inheritedAuthenticationRestrictions"));
if (roleDoc) {
- auto ar = roleDoc.get()["authenticationRestrictions"];
+ auto ar = roleDoc.value()["authenticationRestrictions"];
if ((ar.type() == Array) && (ar.Obj().nFields() > 0)) {
arBuilder.append(ar);
}
@@ -340,9 +340,9 @@ StatusWith<User> AuthzManagerExternalStateLocal::getUserObject(OperationContext*
auto data = uassertStatusOK(resolveRoles(opCtx, directRoles, ResolveRoleOption::kAll));
data.roles->insert(directRoles.cbegin(), directRoles.cend());
- user.setIndirectRoles(makeRoleNameIteratorForContainer(data.roles.get()));
- user.addPrivileges(data.privileges.get());
- user.setIndirectRestrictions(data.restrictions.get());
+ user.setIndirectRoles(makeRoleNameIteratorForContainer(data.roles.value()));
+ user.addPrivileges(data.privileges.value());
+ user.setIndirectRestrictions(data.restrictions.value());
LOGV2_DEBUG(5517200,
3,
@@ -814,7 +814,7 @@ void _invalidateUserCache(OperationContext* opCtx,
UserName userName(id.substr(splitPoint + 1), id.substr(0, splitPoint), coll.getTenant());
authzManager->invalidateUserByName(opCtx, userName);
} else if (const auto& tenant = coll.getTenant()) {
- authzManager->invalidateUsersByTenant(opCtx, tenant.get());
+ authzManager->invalidateUsersByTenant(opCtx, tenant.value());
} else {
authzManager->invalidateUserCache(opCtx);
}
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index de1a436a516..29260b921e3 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -104,7 +104,7 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam
opCtx->recoveryUnit()->onCommit([writableCollection](auto commitTime) {
// Ban reading from this collection on snapshots before now.
if (commitTime) {
- writableCollection->setMinimumVisibleSnapshot(commitTime.get());
+ writableCollection->setMinimumVisibleSnapshot(commitTime.value());
}
});
diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp
index eb05c05adab..6d653b29e36 100644
--- a/src/mongo/db/catalog/catalog_control.cpp
+++ b/src/mongo/db/catalog/catalog_control.cpp
@@ -75,9 +75,9 @@ void reopenAllDatabasesAndReloadCollectionCatalog(
23992, 1, "openCatalog: dbholder reopening database", "db"_attr = dbName);
auto db = databaseHolder->openDb(opCtx, dbName);
invariant(db, str::stream() << "failed to reopen database " << dbName.toString());
- for (auto&& collNss : catalogWriter.get()->getAllCollectionNamesFromDb(opCtx, dbName)) {
+ for (auto&& collNss : catalogWriter.value()->getAllCollectionNamesFromDb(opCtx, dbName)) {
// Note that the collection name already includes the database component.
- auto collection = catalogWriter.get()->lookupCollectionByNamespace(opCtx, collNss);
+ auto collection = catalogWriter.value()->lookupCollectionByNamespace(opCtx, collNss);
invariant(collection,
str::stream()
<< "failed to get valid collection pointer for namespace " << collNss);
@@ -95,8 +95,8 @@ void reopenAllDatabasesAndReloadCollectionCatalog(
auto minVisible = std::min(stableTimestamp,
minVisibleTimestampMap.find(collection->uuid())->second);
auto writableCollection =
- catalogWriter.get()->lookupCollectionByUUIDForMetadataWrite(opCtx,
- collection->uuid());
+ catalogWriter.value()->lookupCollectionByUUIDForMetadataWrite(
+ opCtx, collection->uuid());
writableCollection->setMinimumVisibleSnapshot(minVisible);
}
diff --git a/src/mongo/db/catalog/catalog_helper.cpp b/src/mongo/db/catalog/catalog_helper.cpp
index 02d1cb1d1c2..c8307dcdf7d 100644
--- a/src/mongo/db/catalog/catalog_helper.cpp
+++ b/src/mongo/db/catalog/catalog_helper.cpp
@@ -81,7 +81,7 @@ void assertIsPrimaryShardForDb(OperationContext* opCtx, const StringData& dbName
AutoGetDb autoDb(opCtx, dbName, MODE_IS);
invariant(autoDb.getDb());
- const auto primaryShardId = DatabaseHolder::get(opCtx)->getDbPrimary(opCtx, dbName).get();
+ const auto primaryShardId = DatabaseHolder::get(opCtx)->getDbPrimary(opCtx, dbName).value();
const auto thisShardId = ShardingState::get(opCtx)->shardId();
uassert(ErrorCodes::IllegalOperation,
str::stream() << "This is not the primary shard for the database " << dbName
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index dc2ef9b5763..6e8196aef0f 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -265,7 +265,7 @@ StatusWith<std::pair<ParsedCollModRequest, BSONObj>> parseCollModRequest(Operati
"for the collection's clusteredIndex",
indexSpec.getName());
- if ((!indexName.empty() && indexName == StringData(indexSpec.getName().get())) ||
+ if ((!indexName.empty() && indexName == StringData(indexSpec.getName().value())) ||
keyPattern.woCompare(indexSpec.getKey()) == 0) {
// The indexName or keyPattern match the collection's clusteredIndex.
return {ErrorCodes::Error(6011800),
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index c891700c0ec..85b7158f6c7 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -107,7 +107,7 @@ void CollectionPtr::restore() const {
const BSONObj& CollectionPtr::getShardKeyPattern() const {
dassert(_shardKeyPattern);
- return _shardKeyPattern.get();
+ return _shardKeyPattern.value();
}
// ----
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index 13e56007e84..60b4121b3e0 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -172,7 +172,7 @@ public:
// fine because the collection should not be visible in the catalog until we
// call setCommitted(true).
if (commitTime) {
- collPtr->setMinimumVisibleSnapshot(commitTime.get());
+ collPtr->setMinimumVisibleSnapshot(commitTime.value());
}
collPtr->setCommitted(true);
break;
@@ -180,7 +180,7 @@ public:
case UncommittedCatalogUpdates::Entry::Action::kReplacedViewsForDatabase: {
writeJobs.push_back(
[dbName = entry.nss.dbName(),
- &viewsForDb = entry.viewsForDb.get()](CollectionCatalog& catalog) {
+ &viewsForDb = entry.viewsForDb.value()](CollectionCatalog& catalog) {
catalog._replaceViewsForDatabase(dbName, std::move(viewsForDb));
});
break;
@@ -920,8 +920,8 @@ boost::optional<NamespaceString> CollectionCatalog::lookupNSSByUUID(OperationCon
auto foundIt = _catalog.find(uuid);
if (foundIt != _catalog.end()) {
boost::optional<NamespaceString> ns = foundIt->second->ns();
- invariant(!ns.get().isEmpty());
- return _collections.find(ns.get())->second->isCommitted() ? ns : boost::none;
+ invariant(!ns.value().isEmpty());
+ return _collections.find(ns.value())->second->isCommitted() ? ns : boost::none;
}
// Only in the case that the catalog is closed and a UUID is currently unknown, resolve it
diff --git a/src/mongo/db/catalog/collection_catalog_helper.cpp b/src/mongo/db/catalog/collection_catalog_helper.cpp
index a06a6f6566e..a8382878417 100644
--- a/src/mongo/db/catalog/collection_catalog_helper.cpp
+++ b/src/mongo/db/catalog/collection_catalog_helper.cpp
@@ -68,7 +68,7 @@ void forEachCollectionFromDb(OperationContext* opCtx,
auto catalogForIteration = CollectionCatalog::get(opCtx);
for (auto collectionIt = catalogForIteration->begin(opCtx, dbName);
collectionIt != catalogForIteration->end(opCtx);) {
- auto uuid = collectionIt.uuid().get();
+ auto uuid = collectionIt.uuid().value();
if (predicate && !catalogForIteration->checkIfCollectionSatisfiable(uuid, predicate)) {
++collectionIt;
continue;
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index 4787f99c1cd..f02a479e079 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -438,7 +438,7 @@ CollectionImpl::CollectionImpl(OperationContext* opCtx,
std::unique_ptr<RecordStore> recordStore)
: _ns(nss),
_catalogId(std::move(catalogId)),
- _uuid(options.uuid.get()),
+ _uuid(options.uuid.value()),
_shared(std::make_shared<SharedState>(this, std::move(recordStore), options)),
_indexCatalog(std::make_unique<IndexCatalogImpl>()) {}
@@ -1237,7 +1237,7 @@ void CollectionImpl::_cappedDeleteAsNeeded(OperationContext* opCtx,
}
void CollectionImpl::setMinimumVisibleSnapshot(Timestamp newMinimumVisibleSnapshot) {
- if (!_minVisibleSnapshot || (newMinimumVisibleSnapshot > _minVisibleSnapshot.get())) {
+ if (!_minVisibleSnapshot || (newMinimumVisibleSnapshot > _minVisibleSnapshot.value())) {
_minVisibleSnapshot = newMinimumVisibleSnapshot;
}
}
@@ -1346,7 +1346,7 @@ void CollectionImpl::deleteDocument(OperationContext* opCtx,
checkRecordId);
_shared->_recordStore->deleteRecord(opCtx, loc);
if (deletedDoc) {
- deleteArgs.deletedDoc = &(deletedDoc.get());
+ deleteArgs.deletedDoc = &(deletedDoc.value());
}
getGlobalServiceContext()->getOpObserver()->onDelete(opCtx, ns(), uuid(), stmtId, deleteArgs);
@@ -1606,7 +1606,7 @@ bool CollectionImpl::doesTimeseriesBucketsDocContainMixedSchemaData(
}
bool CollectionImpl::isClustered() const {
- return getClusteredInfo().is_initialized();
+ return getClusteredInfo().has_value();
}
boost::optional<ClusteredCollectionInfo> CollectionImpl::getClusteredInfo() const {
diff --git a/src/mongo/db/catalog/collection_options_test.cpp b/src/mongo/db/catalog/collection_options_test.cpp
index 6312989cb09..f723859b9c3 100644
--- a/src/mongo/db/catalog/collection_options_test.cpp
+++ b/src/mongo/db/catalog/collection_options_test.cpp
@@ -313,7 +313,7 @@ TEST(CollectionOptions, ParseUUID) {
// Check successful parse and roundtrip.
options =
assertGet(CollectionOptions::parse(uuid.toBSON(), CollectionOptions::parseForStorage));
- ASSERT(options.uuid.get() == uuid);
+ ASSERT(options.uuid.value() == uuid);
// Check that a collection options containing a UUID passes validation.
ASSERT_OK(options.validateForStorage());
diff --git a/src/mongo/db/catalog/collection_options_validation.cpp b/src/mongo/db/catalog/collection_options_validation.cpp
index 63f6e68a8f4..c9597f4671d 100644
--- a/src/mongo/db/catalog/collection_options_validation.cpp
+++ b/src/mongo/db/catalog/collection_options_validation.cpp
@@ -82,7 +82,7 @@ EncryptedFieldConfig processAndValidateEncryptedFields(EncryptedFieldConfig conf
fieldPaths.push_back(std::move(newPath));
if (field.getQueries().has_value()) {
- auto queriesVariant = field.getQueries().get();
+ auto queriesVariant = field.getQueries().value();
auto queries = stdx::get_if<std::vector<mongo::QueryTypeConfig>>(&queriesVariant);
if (queries) {
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index e9f07b88523..5b60b59147a 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -585,7 +585,7 @@ CollectionOptions clusterByDefaultIfNecessary(const NamespaceString& nss,
CollectionOptions collectionOptions,
const boost::optional<BSONObj>& idIndex) {
if (MONGO_unlikely(clusterAllCollectionsByDefault.shouldFail()) &&
- !collectionOptions.isView() && !collectionOptions.clusteredIndex.is_initialized() &&
+ !collectionOptions.isView() && !collectionOptions.clusteredIndex.has_value() &&
(!idIndex || idIndex->isEmpty()) && !collectionOptions.capped &&
!clustered_util::requiresLegacyFormat(nss) &&
feature_flags::gClusteredIndexes.isEnabled(serverGlobalParams.featureCompatibility)) {
@@ -698,7 +698,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx,
// create a database, which could result in createCollection failing if the database
// does not yet exist.
if (ui) {
- auto uuid = ui.get();
+ auto uuid = ui.value();
uassert(ErrorCodes::InvalidUUID,
"Invalid UUID in applyOps create command: " + uuid.toString(),
uuid.isRFC4122v4());
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 9d867c96e4c..442fb8f5051 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -675,7 +675,7 @@ Status DatabaseImpl::_finishDropCollection(OperationContext* opCtx,
return;
}
- HistoricalIdentTracker::get(opCtx).recordDrop(ident, nss, uuid, commitTime.get());
+ HistoricalIdentTracker::get(opCtx).recordDrop(ident, nss, uuid, commitTime.value());
});
CollectionCatalog::get(opCtx)->dropCollection(opCtx, collection);
@@ -737,16 +737,16 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx,
writableCollection->getSharedIdent()->getIdent(),
fromNss,
writableCollection->uuid(),
- commitTime.get());
+ commitTime.value());
const auto readyIndexes = writableCollection->getIndexCatalog()->getAllReadyEntriesShared();
for (const auto& readyIndex : readyIndexes) {
HistoricalIdentTracker::get(opCtx).recordRename(
- readyIndex->getIdent(), fromNss, writableCollection->uuid(), commitTime.get());
+ readyIndex->getIdent(), fromNss, writableCollection->uuid(), commitTime.value());
}
// Ban reading from this collection on committed reads on snapshots before now.
- writableCollection->setMinimumVisibleSnapshot(commitTime.get());
+ writableCollection->setMinimumVisibleSnapshot(commitTime.value());
});
return status;
@@ -874,7 +874,7 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx,
"createCollection",
"namespace"_attr = nss,
"uuidDisposition"_attr = (generatedUUID ? "generated" : "provided"),
- "uuid"_attr = optionsWithUUID.uuid.get(),
+ "uuid"_attr = optionsWithUUID.uuid.value(),
"options"_attr = options);
// Create Collection object
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index 57192461061..0e7a08a747e 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -94,7 +94,7 @@ void warnEncryptedCollectionsIfNeeded(OperationContext* opCtx, const CollectionP
}
auto catalog = CollectionCatalog::get(opCtx);
- auto efc = coll->getCollectionOptions().encryptedFieldConfig.get();
+ auto efc = coll->getCollectionOptions().encryptedFieldConfig.value();
std::vector<std::string> leaked;
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index cce44ba1869..3589c047bcc 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -160,20 +160,20 @@ bool containsClusteredIndex(const CollectionPtr& collection, const IndexArgument
// creation, it should always be filled in by default on the
// collection object.
auto clusteredIndexName = clusteredIndexSpec.getName();
- invariant(clusteredIndexName.is_initialized());
+ invariant(clusteredIndexName.has_value());
- return clusteredIndexName.get() == indexName;
+ return clusteredIndexName.value() == indexName;
},
[&](const std::vector<std::string>& indexNames) -> bool {
// While the clusteredIndex's name is optional during user
// creation, it should always be filled in by default on the
// collection object.
auto clusteredIndexName = clusteredIndexSpec.getName();
- invariant(clusteredIndexName.is_initialized());
+ invariant(clusteredIndexName.has_value());
return std::find(indexNames.begin(),
indexNames.end(),
- clusteredIndexName.get()) != indexNames.end();
+ clusteredIndexName.value()) != indexNames.end();
},
[&](const BSONObj& indexKey) -> bool {
return clusteredIndexSpec.getKey().woCompare(indexKey) == 0;
diff --git a/src/mongo/db/catalog/index_build_block.cpp b/src/mongo/db/catalog/index_build_block.cpp
index 6c3c5df5ae3..feaa50eacd4 100644
--- a/src/mongo/db/catalog/index_build_block.cpp
+++ b/src/mongo/db/catalog/index_build_block.cpp
@@ -179,7 +179,7 @@ Status IndexBuildBlock::init(OperationContext* opCtx, Collection* collection) {
[entry = indexCatalogEntry, coll = collection](boost::optional<Timestamp> commitTime) {
// This will prevent the unfinished index from being visible on index iterators.
if (commitTime) {
- entry->setMinimumVisibleSnapshot(commitTime.get());
+ entry->setMinimumVisibleSnapshot(commitTime.value());
}
});
}
@@ -270,7 +270,7 @@ void IndexBuildBlock::success(OperationContext* opCtx, Collection* collection) {
"commitTimestamp"_attr = commitTime);
if (commitTime) {
- entry->setMinimumVisibleSnapshot(commitTime.get());
+ entry->setMinimumVisibleSnapshot(commitTime.value());
}
// Add the index to the TTLCollectionCache upon successfully committing the index build.
diff --git a/src/mongo/db/catalog/index_build_entry_test.cpp b/src/mongo/db/catalog/index_build_entry_test.cpp
index 7ac4206c0d7..6aa53a8a2ba 100644
--- a/src/mongo/db/catalog/index_build_entry_test.cpp
+++ b/src/mongo/db/catalog/index_build_entry_test.cpp
@@ -74,8 +74,8 @@ void checkIfEqual(IndexBuildEntry lhs, IndexBuildEntry rhs) {
ASSERT_TRUE(std::equal(lhsIndexNames.begin(), lhsIndexNames.end(), rhsIndexNames.begin()));
if (lhs.getCommitReadyMembers() && rhs.getCommitReadyMembers()) {
- auto lhsMembers = lhs.getCommitReadyMembers().get();
- auto rhsMembers = rhs.getCommitReadyMembers().get();
+ auto lhsMembers = lhs.getCommitReadyMembers().value();
+ auto rhsMembers = rhs.getCommitReadyMembers().value();
ASSERT_TRUE(std::equal(lhsMembers.begin(), lhsMembers.end(), rhsMembers.begin()));
} else {
ASSERT_FALSE(lhs.getCommitReadyMembers());
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 394ad121ce2..cdf11a99dd2 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -72,7 +72,7 @@ const IndexCatalogEntry* ReadyIndexesIterator::_advance() {
_opCtx->recoveryUnit()->getPointInTimeReadTimestamp(_opCtx).get_value_or(
_opCtx->recoveryUnit()->getCatalogConflictingTimestamp());
- if (!mySnapshot.isNull() && mySnapshot < minSnapshot.get()) {
+ if (!mySnapshot.isNull() && mySnapshot < minSnapshot.value()) {
// This index isn't finished in my snapshot.
continue;
}
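[Note] The get_value_or() call left untouched in the context above is the older boost spelling of a defaulted read; boost::optional also offers value_or() with the same never-throwing behavior, but this patch only rewrites get() and is_initialized(). A two-line sketch of the equivalence (illustrative, not from the tree):

    boost::optional<int> n;          // empty
    int a = n.get_value_or(-1);      // older boost spelling, untouched by this patch
    int b = n.value_or(-1);          // equivalent spelling; a == b == -1, never throws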
diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
index 81d62cda8b1..4ef162247d4 100644
--- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp
@@ -172,7 +172,7 @@ MultikeyPaths IndexCatalogEntryImpl::getMultikeyPaths(OperationContext* opCtx,
// ---
void IndexCatalogEntryImpl::setMinimumVisibleSnapshot(Timestamp newMinimumVisibleSnapshot) {
- if (!_minVisibleSnapshot || (newMinimumVisibleSnapshot > _minVisibleSnapshot.get())) {
+ if (!_minVisibleSnapshot || (newMinimumVisibleSnapshot > _minVisibleSnapshot.value())) {
_minVisibleSnapshot = newMinimumVisibleSnapshot;
}
}
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 906909149c3..837fe9f52e3 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -129,8 +129,7 @@ Status isSpecOKClusteredIndexCheck(const BSONObj& indexSpec,
}
auto name = indexSpec.getStringField("name");
- bool namesMatch =
- !collInfo.is_initialized() || collInfo->getIndexSpec().getName().get() == name;
+ bool namesMatch = !collInfo.has_value() || collInfo->getIndexSpec().getName().value() == name;
if (!keysMatch && !namesMatch) {
@@ -248,7 +247,7 @@ Status IndexCatalogImpl::init(OperationContext* opCtx, Collection* collection) {
// to non _id indexes to the recovery timestamp. The _id index is left visible. It's
// assumed if the collection is visible, its _id is valid to be used.
if (recoveryTs && !entry->descriptor()->isIdIndex()) {
- entry->setMinimumVisibleSnapshot(recoveryTs.get());
+ entry->setMinimumVisibleSnapshot(recoveryTs.value());
}
}
}
@@ -1292,7 +1291,7 @@ public:
void commit(boost::optional<Timestamp> commitTime) final {
if (commitTime) {
HistoricalIdentTracker::get(_opCtx).recordDrop(
- _entry->getIdent(), _nss, _uuid, commitTime.get());
+ _entry->getIdent(), _nss, _uuid, commitTime.value());
}
_entry->setDropped();
diff --git a/src/mongo/db/catalog/list_indexes.cpp b/src/mongo/db/catalog/list_indexes.cpp
index c0d9817eaf5..378ce1006a1 100644
--- a/src/mongo/db/catalog/list_indexes.cpp
+++ b/src/mongo/db/catalog/list_indexes.cpp
@@ -87,7 +87,7 @@ std::list<BSONObj> listIndexesInLock(OperationContext* opCtx,
collation = collator->getSpec().toBSON();
}
auto clusteredSpec = clustered_util::formatClusterKeyForListIndexes(
- collection->getClusteredInfo().get(), collation);
+ collection->getClusteredInfo().value(), collation);
if (additionalInclude == ListIndexesInclude::IndexBuildInfo) {
indexSpecs.push_back(BSON("spec"_sd << clusteredSpec));
} else {
diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp
index 3c693d92d3f..03c41a60991 100644
--- a/src/mongo/db/catalog/multi_index_block.cpp
+++ b/src/mongo/db/catalog/multi_index_block.cpp
@@ -130,7 +130,7 @@ void MultiIndexBlock::abortIndexBuild(OperationContext* opCtx,
if (_collectionUUID) {
// init() was previously called with a collection pointer, so ensure that the same
// collection is being provided for clean up and the interface is not being abused.
- invariant(_collectionUUID.get() == collection->uuid());
+ invariant(_collectionUUID.value() == collection->uuid());
}
if (_buildIsCleanedUp) {
@@ -404,7 +404,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(
// UUIDs are not guaranteed during startup because the check happens after indexes are rebuilt.
if (_collectionUUID) {
- invariant(_collectionUUID.get() == collection->uuid());
+ invariant(_collectionUUID.value() == collection->uuid());
}
// Refrain from persisting any multikey updates as a result from building the index. Instead,
@@ -852,7 +852,7 @@ Status MultiIndexBlock::drainBackgroundWrites(
ReadSourceScope readSourceScope(opCtx, readSource);
const CollectionPtr& coll =
- CollectionCatalog::get(opCtx)->lookupCollectionByUUID(opCtx, _collectionUUID.get());
+ CollectionCatalog::get(opCtx)->lookupCollectionByUUID(opCtx, _collectionUUID.value());
// Drain side-writes table for each index. This only drains what is visible. Assuming intent
// locks are held on the user collection, more writes can come in after this drain completes.
@@ -925,7 +925,7 @@ Status MultiIndexBlock::commit(OperationContext* opCtx,
// UUIDs are not guaranteed during startup because the check happens after indexes are rebuilt.
if (_collectionUUID) {
- invariant(_collectionUUID.get() == collection->uuid());
+ invariant(_collectionUUID.value() == collection->uuid());
}
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
@@ -964,12 +964,12 @@ Status MultiIndexBlock::commit(OperationContext* opCtx,
if (interceptor) {
auto multikeyPaths = interceptor->getMultikeyPaths();
if (multikeyPaths) {
- indexCatalogEntry->setMultikey(opCtx, collection, {}, multikeyPaths.get());
+ indexCatalogEntry->setMultikey(opCtx, collection, {}, multikeyPaths.value());
}
multikeyPaths = interceptor->getSkippedRecordTracker()->getMultikeyPaths();
if (multikeyPaths) {
- indexCatalogEntry->setMultikey(opCtx, collection, {}, multikeyPaths.get());
+ indexCatalogEntry->setMultikey(opCtx, collection, {}, multikeyPaths.value());
}
}
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 0fd96248734..a1ba01cd4eb 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -414,7 +414,7 @@ Status renameCollectionWithinDBForApplyOps(OperationContext* opCtx,
// dropping the wrong collection.
if (!targetColl && uuidToDrop) {
invariant(options.dropTarget);
- auto collToDropBasedOnUUID = getNamespaceFromUUID(opCtx, uuidToDrop.get());
+ auto collToDropBasedOnUUID = getNamespaceFromUUID(opCtx, uuidToDrop.value());
if (collToDropBasedOnUUID && !collToDropBasedOnUUID->isDropPendingNamespace()) {
invariant(collToDropBasedOnUUID->db() == target.db());
targetColl = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(
@@ -924,7 +924,7 @@ Status renameCollectionForApplyOps(OperationContext* opCtx,
NamespaceString sourceNss(sourceNsElt.valueStringData());
NamespaceString targetNss(targetNsElt.valueStringData());
if (uuidToRename) {
- auto nss = CollectionCatalog::get(opCtx)->lookupNSSByUUID(opCtx, uuidToRename.get());
+ auto nss = CollectionCatalog::get(opCtx)->lookupNSSByUUID(opCtx, uuidToRename.value());
if (nss)
sourceNss = *nss;
}
@@ -965,7 +965,7 @@ Status renameCollectionForApplyOps(OperationContext* opCtx,
dropTargetNss = targetNss;
if (uuidToDrop)
- dropTargetNss = getNamespaceFromUUID(opCtx, uuidToDrop.get());
+ dropTargetNss = getNamespaceFromUUID(opCtx, uuidToDrop.value());
// Downgrade renameCollection to dropCollection.
if (dropTargetNss) {
diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp
index 66e175b30ce..f28c5c25eb5 100644
--- a/src/mongo/db/catalog/rename_collection_test.cpp
+++ b/src/mongo/db/catalog/rename_collection_test.cpp
@@ -413,7 +413,7 @@ CollectionOptions _makeCollectionOptionsWithUuid() {
UUID _createCollectionWithUUID(OperationContext* opCtx, const NamespaceString& nss) {
const auto options = _makeCollectionOptionsWithUuid();
_createCollection(opCtx, nss, options);
- return options.uuid.get();
+ return options.uuid.value();
}
/**
diff --git a/src/mongo/db/catalog/validate_adaptor.cpp b/src/mongo/db/catalog/validate_adaptor.cpp
index 20ca1036537..282a28fdab6 100644
--- a/src/mongo/db/catalog/validate_adaptor.cpp
+++ b/src/mongo/db/catalog/validate_adaptor.cpp
@@ -251,7 +251,7 @@ Status _validateTimeseriesMinMax(const BSONObj& recordBson, const CollectionPtr&
// timestamp granularity into account.
auto checkMinAndMaxMatch = [&]() {
// Needed for granularity, which determines how the min timestamp is rounded down.
- const auto options = coll->getTimeseriesOptions().get();
+ const auto options = coll->getTimeseriesOptions().value();
if (fieldName == options.getTimeField()) {
return controlFieldMin.Date() ==
timeseries::roundTimestampToGranularity(min.getField(fieldName).Date(),
diff --git a/src/mongo/db/catalog/validate_results.cpp b/src/mongo/db/catalog/validate_results.cpp
index 32b66f2bf92..b1ce131a2de 100644
--- a/src/mongo/db/catalog/validate_results.cpp
+++ b/src/mongo/db/catalog/validate_results.cpp
@@ -35,7 +35,7 @@ void ValidateResults::appendToResultObj(BSONObjBuilder* resultObj, bool debuggin
resultObj->appendBool("valid", valid);
resultObj->appendBool("repaired", repaired);
if (readTimestamp) {
- resultObj->append("readTimestamp", readTimestamp.get());
+ resultObj->append("readTimestamp", readTimestamp.value());
}
static constexpr std::size_t kMaxErrorWarningSizeBytes = 2 * 1024 * 1024;
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index 56ee3846b63..d5725462252 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -97,7 +97,7 @@ void verifyDbAndCollection(OperationContext* opCtx,
"changes; please retry the operation. Snapshot timestamp is "
<< mySnapshot.toString() << ". Collection minimum is "
<< minSnapshot->toString(),
- mySnapshot.isNull() || mySnapshot >= minSnapshot.get());
+ mySnapshot.isNull() || mySnapshot >= minSnapshot.value());
}
}
}
diff --git a/src/mongo/db/coll_mod_reply_validation.cpp b/src/mongo/db/coll_mod_reply_validation.cpp
index 07de476ddde..c760eb40c82 100644
--- a/src/mongo/db/coll_mod_reply_validation.cpp
+++ b/src/mongo/db/coll_mod_reply_validation.cpp
@@ -31,8 +31,8 @@
namespace mongo::coll_mod_reply_validation {
void validateReply(const CollModReply& reply) {
- auto hidden_new = reply.getHidden_new().is_initialized();
- auto hidden_old = reply.getHidden_old().is_initialized();
+ auto hidden_new = reply.getHidden_new().has_value();
+ auto hidden_old = reply.getHidden_old().has_value();
if ((!hidden_new && hidden_old) || (hidden_new && !hidden_old)) {
uassert(ErrorCodes::CommandResultSchemaViolation,
@@ -41,8 +41,8 @@ void validateReply(const CollModReply& reply) {
false);
}
- auto prepareUnique_new = reply.getPrepareUnique_new().is_initialized();
- auto prepareUnique_old = reply.getPrepareUnique_old().is_initialized();
+ auto prepareUnique_new = reply.getPrepareUnique_new().has_value();
+ auto prepareUnique_old = reply.getPrepareUnique_old().has_value();
if ((!prepareUnique_new && prepareUnique_old) || (prepareUnique_new && !prepareUnique_old)) {
uassert(ErrorCodes::CommandResultSchemaViolation,
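[Note] Both checks in this file enforce that the *_new and *_old reply fields are either both present or both absent; once the two has_value() results are in hand, each condition reduces to an inequality test, as in this equivalent fragment (accessors as in the patch above):

    const bool hidden_new = reply.getHidden_new().has_value();
    const bool hidden_old = reply.getHidden_old().has_value();
    if (hidden_new != hidden_old) {
        // exactly one of the pair was set: CommandResultSchemaViolation
    }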
diff --git a/src/mongo/db/commands/command_mirroring_test.cpp b/src/mongo/db/commands/command_mirroring_test.cpp
index 286d20813a2..377234fc9a6 100644
--- a/src/mongo/db/commands/command_mirroring_test.cpp
+++ b/src/mongo/db/commands/command_mirroring_test.cpp
@@ -117,7 +117,7 @@ public:
OpMsgRequest makeCommand(std::string coll, std::vector<BSONObj> updates) override {
std::vector<BSONObj> args;
if (shardVersion) {
- args.push_back(shardVersion.get());
+ args.push_back(shardVersion.value());
}
auto request = CommandMirroringTest::makeCommand(coll, args);
diff --git a/src/mongo/db/commands/connection_status.cpp b/src/mongo/db/commands/connection_status.cpp
index 903fc7fce38..8e32d25f87e 100644
--- a/src/mongo/db/commands/connection_status.cpp
+++ b/src/mongo/db/commands/connection_status.cpp
@@ -52,7 +52,7 @@ public:
ConnectionStatusReplyAuthInfo info;
std::vector<UserName> userNames;
if (auto userName = as->getAuthenticatedUserName()) {
- userNames.push_back(std::move(userName.get()));
+ userNames.push_back(std::move(userName.value()));
}
info.setAuthenticatedUsers(std::move(userNames));
info.setAuthenticatedUserRoles(
@@ -82,7 +82,7 @@ public:
User::ResourcePrivilegeMap unified;
if (auto authUser = as->getAuthenticatedUser()) {
- for (const auto& privIter : authUser.get()->getPrivileges()) {
+ for (const auto& privIter : authUser.value()->getPrivileges()) {
auto it = unified.find(privIter.first);
if (it == unified.end()) {
unified[privIter.first] = privIter.second;
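[Note] The std::move(userName.value()) rewrite above preserves the old behavior: on an lvalue optional, both get() and value() return a reference to the contained object, so wrapping either in std::move moves the contained value out rather than copying it. A standalone illustration:

    #include <boost/optional.hpp>
    #include <string>
    #include <utility>

    int main() {
        boost::optional<std::string> name = std::string("admin");
        std::string taken = std::move(name.value());  // value() yields std::string&, so this moves
        // name still has_value(), but its contained string is in a moved-from state
        return 0;
    }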
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index dfb1c8f0db6..4cf9bc4880b 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -477,7 +477,7 @@ CreateIndexesReply runCreateIndexesWithCoordinator(OperationContext* opCtx,
: IndexBuildProtocol::kSinglePhase;
auto commitQuorum = parseAndGetCommitQuorum(opCtx, protocol, cmd);
if (commitQuorum) {
- uassertStatusOK(replCoord->checkIfCommitQuorumCanBeSatisfied(commitQuorum.get()));
+ uassertStatusOK(replCoord->checkIfCommitQuorumCanBeSatisfied(commitQuorum.value()));
}
validateTTLOptions(opCtx, ns, cmd);
@@ -753,7 +753,7 @@ public:
opCtx, origCmd.getNamespace(), !isCommandOnTimeseriesBucketNamespace)) {
timeseriesCmdOwnership =
timeseries::makeTimeseriesCreateIndexesCommand(opCtx, origCmd, *options);
- cmd = &timeseriesCmdOwnership.get();
+ cmd = &timeseriesCmdOwnership.value();
}
// If we encounter an IndexBuildAlreadyInProgress error for any of the requested index
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index 0961660ded5..ee65c3df6be 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -278,7 +278,7 @@ void FeatureCompatibilityVersion::validateSetFeatureCompatibilityVersionRequest(
auto fcvObj = findFeatureCompatibilityVersionDocument(opCtx);
auto fcvDoc = FeatureCompatibilityVersionDocument::parse(
- IDLParserContext("featureCompatibilityVersionDocument"), fcvObj.get());
+ IDLParserContext("featureCompatibilityVersionDocument"), fcvObj.value());
auto previousTimestamp = fcvDoc.getChangeTimestamp();
if (setFCVPhase == SetFCVPhaseEnum::kStart) {
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 40139d4ee17..690e53ca44e 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -89,7 +89,7 @@ std::unique_ptr<FindCommandRequest> parseCmdObjectToFindCommandRequest(Operation
// Rewrite any FLE find payloads that exist in the query if this is a FLE 2 query.
if (shouldDoFLERewrite(findCommand)) {
invariant(findCommand->getNamespaceOrUUID().nss());
- processFLEFindD(opCtx, findCommand->getNamespaceOrUUID().nss().get(), findCommand.get());
+ processFLEFindD(opCtx, findCommand->getNamespaceOrUUID().nss().value(), findCommand.get());
}
if (findCommand->getMirrored().value_or(false)) {
@@ -464,7 +464,7 @@ public:
const auto& nss = ctx->getNss();
uassert(ErrorCodes::NamespaceNotFound,
- str::stream() << "UUID " << findCommand->getNamespaceOrUUID().uuid().get()
+ str::stream() << "UUID " << findCommand->getNamespaceOrUUID().uuid().value()
<< " specified in query request not found",
ctx || !findCommand->getNamespaceOrUUID().uuid());
diff --git a/src/mongo/db/commands/generic.cpp b/src/mongo/db/commands/generic.cpp
index c6b9154aeec..ef4a557d38b 100644
--- a/src/mongo/db/commands/generic.cpp
+++ b/src/mongo/db/commands/generic.cpp
@@ -236,7 +236,7 @@ public:
3,
"Non-debug severity levels must not pass 'debugLevel'",
"severity"_attr = obj[Request::kSeverityFieldName].valueStringData(),
- "debugLevel"_attr = optDebugLevel.get());
+ "debugLevel"_attr = optDebugLevel.value());
}
switch (severity) {
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 3b34751014f..2bc6b7d9471 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -358,7 +358,7 @@ public:
uassertStatusOK(auth::checkAuthForGetMore(AuthorizationSession::get(opCtx->getClient()),
ns(),
_cmd.getCommandParameter(),
- _cmd.getTerm().is_initialized()));
+ _cmd.getTerm().has_value()));
}
/**
@@ -639,7 +639,7 @@ public:
lastKnownCommittedOpTime = cursorPin->getLastKnownCommittedOpTime();
}
if (lastKnownCommittedOpTime) {
- clientsLastKnownCommittedOpTime(opCtx) = lastKnownCommittedOpTime.get();
+ clientsLastKnownCommittedOpTime(opCtx) = lastKnownCommittedOpTime.value();
}
awaitDataState(opCtx).shouldWaitForInserts = true;
diff --git a/src/mongo/db/commands/http_client.cpp b/src/mongo/db/commands/http_client.cpp
index 1bdc6116fd9..7ff14d644c9 100644
--- a/src/mongo/db/commands/http_client.cpp
+++ b/src/mongo/db/commands/http_client.cpp
@@ -107,7 +107,7 @@ public:
client->allowInsecureHTTP(isLocalhost);
auto timeoutSecs = cmd.getTimeoutSecs();
if (timeoutSecs) {
- client->setTimeout(Seconds(timeoutSecs.get()));
+ client->setTimeout(Seconds(timeoutSecs.value()));
}
auto ret = client->request(HttpClient::HttpMethod::kGET, uri, {nullptr, 0});
diff --git a/src/mongo/db/commands/kill_sessions_command.cpp b/src/mongo/db/commands/kill_sessions_command.cpp
index 410b636f901..572f98d204e 100644
--- a/src/mongo/db/commands/kill_sessions_command.cpp
+++ b/src/mongo/db/commands/kill_sessions_command.cpp
@@ -61,7 +61,7 @@ KillAllSessionsByPatternSet patternsForLoggedInUser(OperationContext* opCtx) {
auto* as = AuthorizationSession::get(client);
if (auto user = as->getAuthenticatedUser()) {
auto item = makeKillAllSessionsByPattern(opCtx);
- item.pattern.setUid(user.get()->getDigest());
+ item.pattern.setUid(user.value()->getDigest());
patterns.emplace(std::move(item));
}
} else {
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 05c130c74f7..d454817654c 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -102,8 +102,8 @@ public:
if (authDB) {
uassert(ErrorCodes::Unauthorized,
"Insufficient permissions to list all databases",
- authDB.get() || mayListAllDatabases);
- return authDB.get();
+ authDB.value() || mayListAllDatabases);
+ return authDB.value();
}
// By default, list all databases if we can, otherwise
@@ -119,7 +119,7 @@ public:
auto expCtx = make_intrusive<ExpressionContext>(
opCtx, std::unique_ptr<CollatorInterface>(nullptr), ns());
auto matcher = uassertStatusOK(
- MatchExpressionParser::parse(filterObj.get(), std::move(expCtx)));
+ MatchExpressionParser::parse(filterObj.value(), std::move(expCtx)));
filter = std::move(matcher);
}
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index 94ebd846c71..117f46a8a5a 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -229,7 +229,7 @@ public:
return NamespaceString(request().getDbName(), "");
}
invariant(nss.nss());
- return nss.nss().get();
+ return nss.nss().value();
}
void doCheckAuthorization(OperationContext* opCtx) const final {
diff --git a/src/mongo/db/commands/mr_common.cpp b/src/mongo/db/commands/mr_common.cpp
index 3bef28fae6b..0cc2b224bf2 100644
--- a/src/mongo/db/commands/mr_common.cpp
+++ b/src/mongo/db/commands/mr_common.cpp
@@ -206,7 +206,7 @@ auto translateOutReduce(boost::intrusive_ptr<ExpressionContext> expCtx,
if (finalizeCode && finalizeCode->hasCode()) {
auto finalizeObj = BSON("args" << BSON_ARRAY("$_id"
<< "$value")
- << "body" << finalizeCode->getCode().get() << "lang"
+ << "body" << finalizeCode->getCode().value() << "lang"
<< ExpressionFunction::kJavaScript);
auto finalizeSpec =
BSON(DocumentSourceProject::kStageName
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 029fce709f4..36a23574a71 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -512,7 +512,7 @@ std::vector<std::unique_ptr<Pipeline, PipelineDeleter>> createExchangePipelinesI
if (request.getExchange() && !expCtx->explain) {
boost::intrusive_ptr<Exchange> exchange =
- new Exchange(request.getExchange().get(), std::move(pipeline));
+ new Exchange(request.getExchange().value(), std::move(pipeline));
for (size_t idx = 0; idx < exchange->getConsumers(); ++idx) {
// For every new pipeline we have create a new ExpressionContext as the context
@@ -956,7 +956,7 @@ Status runAggregate(OperationContext* opCtx,
if (shouldDoFLERewrite(request)) {
// After this rewriting, the encryption info does not need to be kept around.
pipeline = processFLEPipelineD(
- opCtx, nss, request.getEncryptionInformation().get(), std::move(pipeline));
+ opCtx, nss, request.getEncryptionInformation().value(), std::move(pipeline));
request.setEncryptionInformation(boost::none);
}
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 59310fdf720..dbea7398a5a 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -302,7 +302,7 @@ public:
auto fcvObj =
FeatureCompatibilityVersion::findFeatureCompatibilityVersionDocument(opCtx);
auto fcvDoc = FeatureCompatibilityVersionDocument::parse(
- IDLParserContext("featureCompatibilityVersionDocument"), fcvObj.get());
+ IDLParserContext("featureCompatibilityVersionDocument"), fcvObj.value());
changeTimestamp = fcvDoc.getChangeTimestamp();
uassert(5722800,
"The 'changeTimestamp' field is missing in the FCV document persisted by "
diff --git a/src/mongo/db/commands/tenant_migration_donor_cmds.cpp b/src/mongo/db/commands/tenant_migration_donor_cmds.cpp
index 51d89bbea00..0beb7da1998 100644
--- a/src/mongo/db/commands/tenant_migration_donor_cmds.cpp
+++ b/src/mongo/db/commands/tenant_migration_donor_cmds.cpp
@@ -191,7 +191,7 @@ public:
optionalDonor);
// Retrieve the shared_ptr from boost::optional to improve readability
- auto donorPtr = optionalDonor.get();
+ auto donorPtr = optionalDonor.value();
// always ensure we wait for the initial state document to be inserted.
donorPtr->getInitialStateDocumentDurableFuture().get(opCtx);
@@ -281,7 +281,7 @@ public:
}
// Retrieve the shared_ptr from boost::optional to improve readability
- auto donorPtr = optionalDonor.get();
+ auto donorPtr = optionalDonor.value();
donorPtr->onReceiveDonorAbortMigration();
donorPtr->getDecisionFuture().get(opCtx);
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 413607a19a1..f075e237878 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -314,7 +314,7 @@ boost::optional<Timestamp> TestingDurableHistoryPin::calculatePin(OperationConte
Timestamp ret = Timestamp::max();
auto cursor = autoColl->getCursor(opCtx);
for (auto doc = cursor->next(); doc; doc = cursor->next()) {
- const BSONObj obj = doc.get().data.toBson();
+ const BSONObj obj = doc.value().data.toBson();
const Timestamp ts = obj["pinTs"].timestamp();
ret = std::min(ret, ts);
}
diff --git a/src/mongo/db/commands/txn_cmds.cpp b/src/mongo/db/commands/txn_cmds.cpp
index 0e6b43fae65..483095e4795 100644
--- a/src/mongo/db/commands/txn_cmds.cpp
+++ b/src/mongo/db/commands/txn_cmds.cpp
@@ -142,7 +142,8 @@ public:
auto optionalCommitTimestamp = request().getCommitTimestamp();
if (optionalCommitTimestamp) {
// commitPreparedTransaction will throw if the transaction is not prepared.
- txnParticipant.commitPreparedTransaction(opCtx, optionalCommitTimestamp.get(), {});
+ txnParticipant.commitPreparedTransaction(
+ opCtx, optionalCommitTimestamp.value(), {});
} else {
if (ShardingState::get(opCtx)->canAcceptShardedCommands().isOK() ||
serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index fb96d4bbd7b..6bcbf3c07ed 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -2514,7 +2514,7 @@ void CmdMergeAuthzCollections::Invocation::typedRun(OperationContext* opCtx) {
uassert(ErrorCodes::BadValue,
"Must provide at least one of \"tempUsersCollection\" and \"tempRolescollection\"",
- !tempUsersColl.empty() | !tempRolesColl.empty());
+ !tempUsersColl.empty() || !tempRolesColl.empty());
auto* svcCtx = opCtx->getClient()->getServiceContext();
auto* authzManager = AuthorizationManager::get(svcCtx);
diff --git a/src/mongo/db/commands/user_management_commands_common.cpp b/src/mongo/db/commands/user_management_commands_common.cpp
index f798b291324..6373e3269eb 100644
--- a/src/mongo/db/commands/user_management_commands_common.cpp
+++ b/src/mongo/db/commands/user_management_commands_common.cpp
@@ -226,7 +226,7 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const UpdateUserCommand&
as->isAuthorizedForActionsOnResource(ResourcePattern::forAnyNormalResource(),
ActionType::revokeRole));
- auto resolvedRoles = resolveRoleNames(possibleRoles.get(), dbname);
+ auto resolvedRoles = resolveRoleNames(possibleRoles.value(), dbname);
uassertStatusOK(checkAuthorizedToGrantRoles(as, resolvedRoles));
}
@@ -267,11 +267,11 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const UpdateRoleCommand&
ActionType::revokeRole));
if (auto roles = request.getRoles()) {
- auto resolvedRoles = resolveRoleNames(roles.get(), dbname);
+ auto resolvedRoles = resolveRoleNames(roles.value(), dbname);
uassertStatusOK(checkAuthorizedToGrantRoles(as, resolvedRoles));
}
if (auto privs = request.getPrivileges()) {
- uassertStatusOK(checkAuthorizedToGrantPrivileges(as, privs.get()));
+ uassertStatusOK(checkAuthorizedToGrantPrivileges(as, privs.value()));
}
uassertStatusOK(checkAuthorizedToSetRestrictions(
as, request.getAuthenticationRestrictions() != boost::none, dbname));
diff --git a/src/mongo/db/commands/validate_db_metadata_cmd.cpp b/src/mongo/db/commands/validate_db_metadata_cmd.cpp
index 3ba6108f237..3ffeae5e66d 100644
--- a/src/mongo/db/commands/validate_db_metadata_cmd.cpp
+++ b/src/mongo/db/commands/validate_db_metadata_cmd.cpp
@@ -156,7 +156,8 @@ public:
++collIt) {
if (!_validateNamespace(
opCtx,
- collectionCatalog->lookupNSSByUUID(opCtx, collIt.uuid().get()).get())) {
+ collectionCatalog->lookupNSSByUUID(opCtx, collIt.uuid().value())
+ .value())) {
return;
}
}
diff --git a/src/mongo/db/commands/write_commands.cpp b/src/mongo/db/commands/write_commands.cpp
index a2c3e378d38..e203f9b15e8 100644
--- a/src/mongo/db/commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands.cpp
@@ -1485,7 +1485,7 @@ public:
OperationSource source = OperationSource::kStandard;
if (request().getEncryptionInformation().has_value() &&
- !request().getEncryptionInformation().get().getCrudProcessed()) {
+ !request().getEncryptionInformation().value().getCrudProcessed()) {
return processFLEUpdate(opCtx, request());
}
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index af469dd95f3..e30a068c6ca 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -620,7 +620,7 @@ BSONObj CurOp::truncateAndSerializeGenericCursor(GenericCursor* cursor,
if (maxQuerySize) {
BSONObjBuilder tempObj;
appendAsObjOrString(
- "truncatedObj", cursor->getOriginatingCommand().get(), maxQuerySize, &tempObj);
+ "truncatedObj", cursor->getOriginatingCommand().value(), maxQuerySize, &tempObj);
auto originatingCommand = tempObj.done().getObjectField("truncatedObj");
cursor->setOriginatingCommand(originatingCommand.getOwned());
}
@@ -878,7 +878,7 @@ void OpDebug::report(OperationContext* opCtx,
}
if (classicEngineUsed) {
- pAttrs->add("queryExecutionEngine", classicEngineUsed.get() ? "classic" : "sbe");
+ pAttrs->add("queryExecutionEngine", classicEngineUsed.value() ? "classic" : "sbe");
}
if (!errInfo.isOK()) {
@@ -1047,7 +1047,7 @@ void OpDebug::append(OperationContext* opCtx,
}
if (classicEngineUsed) {
- b.append("queryExecutionEngine", classicEngineUsed.get() ? "classic" : "sbe");
+ b.append("queryExecutionEngine", classicEngineUsed.value() ? "classic" : "sbe");
}
{
@@ -1313,7 +1313,7 @@ std::function<BSONObj(ProfileFilter::Args)> OpDebug::appendStaged(StringSet requ
addIfNeeded("queryExecutionEngine", [](auto field, auto args, auto& b) {
if (args.op.classicEngineUsed) {
- b.append("queryExecutionEngine", args.op.classicEngineUsed.get() ? "classic" : "sbe");
+ b.append("queryExecutionEngine", args.op.classicEngineUsed.value() ? "classic" : "sbe");
}
});
@@ -1474,9 +1474,9 @@ BSONObj OpDebug::makeFlowControlObject(FlowControlTicketholder::CurOp stats) {
BSONObj OpDebug::makeMongotDebugStatsObject() const {
BSONObjBuilder cursorBuilder;
invariant(mongotCursorId);
- cursorBuilder.append("cursorid", mongotCursorId.get());
+ cursorBuilder.append("cursorid", mongotCursorId.value());
if (msWaitingForMongot) {
- cursorBuilder.append("timeWaitingMillis", msWaitingForMongot.get());
+ cursorBuilder.append("timeWaitingMillis", msWaitingForMongot.value());
}
cursorBuilder.append("batchNum", mongotBatchNum);
return cursorBuilder.obj();
diff --git a/src/mongo/db/curop_failpoint_helpers.cpp b/src/mongo/db/curop_failpoint_helpers.cpp
index 0f2ca39f371..d09c1e488bf 100644
--- a/src/mongo/db/curop_failpoint_helpers.cpp
+++ b/src/mongo/db/curop_failpoint_helpers.cpp
@@ -79,7 +79,7 @@ void CurOpFailpointHelpers::waitWhileFailPointEnabled(FailPoint* failPoint,
},
[&](const BSONObj& data) {
StringData fpNss = data.getStringField("nss");
- if (nss && !fpNss.empty() && fpNss != nss.get().toString()) {
+ if (nss && !fpNss.empty() && fpNss != nss.value().toString()) {
return false;
}
return true;
diff --git a/src/mongo/db/cursor_manager.cpp b/src/mongo/db/cursor_manager.cpp
index ce81235bdd0..4ba7f271c6a 100644
--- a/src/mongo/db/cursor_manager.cpp
+++ b/src/mongo/db/cursor_manager.cpp
@@ -237,7 +237,7 @@ StatusWith<ClientCursorPin> CursorManager::pinCursor(
// we pass down to the logical session cache and vivify the record (updating last use).
if (cursor->getSessionId()) {
auto vivifyCursorStatus =
- LogicalSessionCache::get(opCtx)->vivify(opCtx, cursor->getSessionId().get());
+ LogicalSessionCache::get(opCtx)->vivify(opCtx, cursor->getSessionId().value());
if (!vivifyCursorStatus.isOK()) {
return vivifyCursorStatus;
}
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index bf35c7558d5..eb7d3c2e3d6 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -853,7 +853,7 @@ AutoGetCollectionForReadCommandLockFree::AutoGetCollectionForReadCommandLockFree
while (_autoCollForReadCommandBase->getCollection() &&
_autoCollForReadCommandBase->getCollection().isSharded() && receivedShardVersion &&
- receivedShardVersion.get() == ChunkVersion::UNSHARDED()) {
+ receivedShardVersion.value() == ChunkVersion::UNSHARDED()) {
reachedAutoGetLockFreeShardConsistencyRetry.executeIf(
[&](auto&) { reachedAutoGetLockFreeShardConsistencyRetry.pauseWhileSet(opCtx); },
[&](const BSONObj& data) {
diff --git a/src/mongo/db/dbdirectclient_test.cpp b/src/mongo/db/dbdirectclient_test.cpp
index ab15c3cd7fb..b26f8b5d1d9 100644
--- a/src/mongo/db/dbdirectclient_test.cpp
+++ b/src/mongo/db/dbdirectclient_test.cpp
@@ -75,7 +75,7 @@ TEST_F(DBDirectClientTest, InsertDuplicateDocumentDoesNotThrow) {
insertOp.setDocuments({BSON("_id" << 1), BSON("_id" << 1)});
auto insertReply = client.insert(insertOp);
ASSERT_EQ(insertReply.getN(), 1);
- auto writeErrors = insertReply.getWriteErrors().get();
+ auto writeErrors = insertReply.getWriteErrors().value();
ASSERT_EQ(writeErrors.size(), 1);
ASSERT_EQ(writeErrors[0].getStatus(), ErrorCodes::DuplicateKey);
}
@@ -111,7 +111,7 @@ TEST_F(DBDirectClientTest, UpdateDuplicateImmutableFieldDoesNotThrow) {
auto updateReply = client.update(updateOp);
ASSERT_EQ(updateReply.getN(), 0);
ASSERT_EQ(updateReply.getNModified(), 0);
- auto writeErrors = updateReply.getWriteErrors().get();
+ auto writeErrors = updateReply.getWriteErrors().value();
ASSERT_EQ(writeErrors.size(), 1);
ASSERT_EQ(writeErrors[0].getStatus(), ErrorCodes::ImmutableField);
}
@@ -152,7 +152,7 @@ TEST_F(DBDirectClientTest, DeleteDocumentIncorrectHintDoesNotThrow) {
}()});
auto deleteReply = client.remove(deleteOp);
ASSERT_EQ(deleteReply.getN(), 0);
- auto writeErrors = deleteReply.getWriteErrors().get();
+ auto writeErrors = deleteReply.getWriteErrors().value();
ASSERT_EQ(writeErrors.size(), 1);
ASSERT_EQ(writeErrors[0].getStatus(), ErrorCodes::BadValue);
}
diff --git a/src/mongo/db/error_labels.cpp b/src/mongo/db/error_labels.cpp
index c0b67fa6991..96e1b05ede6 100644
--- a/src/mongo/db/error_labels.cpp
+++ b/src/mongo/db/error_labels.cpp
@@ -55,7 +55,8 @@ bool ErrorLabelBuilder::isTransientTransactionError() const {
// we have already tried to abort it. An error code for which isTransientTransactionError()
// is true indicates a transaction failure with no persistent side effects.
return _code && _sessionOptions.getTxnNumber() && _sessionOptions.getAutocommit() &&
- mongo::isTransientTransactionError(_code.get(), _wcCode != boost::none, _isCommitOrAbort());
+ mongo::isTransientTransactionError(
+ _code.value(), _wcCode != boost::none, _isCommitOrAbort());
}
bool ErrorLabelBuilder::isRetryableWriteError() const {
@@ -77,8 +78,8 @@ bool ErrorLabelBuilder::isRetryableWriteError() const {
// transactions commit/abort.
if (isRetryableWrite() || isTransactionCommitOrAbort()) {
bool isShutDownCode = _code &&
- (ErrorCodes::isShutdownError(_code.get()) ||
- _code.get() == ErrorCodes::CallbackCanceled);
+ (ErrorCodes::isShutdownError(_code.value()) ||
+ _code.value() == ErrorCodes::CallbackCanceled);
if (isShutDownCode &&
(globalInShutdownDeprecated() ||
MONGO_unlikely(errorLabelBuilderMockShutdown.shouldFail()))) {
@@ -88,14 +89,14 @@ bool ErrorLabelBuilder::isRetryableWriteError() const {
// mongos should not attach RetryableWriteError label to retryable errors thrown by the
// config server or targeted shards.
return !_isMongos &&
- ((_code && ErrorCodes::isRetriableError(_code.get())) ||
- (_wcCode && ErrorCodes::isRetriableError(_wcCode.get())));
+ ((_code && ErrorCodes::isRetriableError(_code.value())) ||
+ (_wcCode && ErrorCodes::isRetriableError(_wcCode.value())));
}
return false;
}
bool ErrorLabelBuilder::isNonResumableChangeStreamError() const {
- return _code && ErrorCodes::isNonResumableChangeStreamError(_code.get());
+ return _code && ErrorCodes::isNonResumableChangeStreamError(_code.value());
}
bool ErrorLabelBuilder::isResumableChangeStreamError() const {
@@ -198,10 +199,10 @@ BSONObj getErrorLabels(OperationContext* opCtx,
// This command was failed by a failCommand failpoint. Thus, we return the errorLabels
// specified in the failpoint to suppress any other error labels that would otherwise be
// returned by the ErrorLabelBuilder.
- if (errorLabelsOverride(opCtx).get().isEmpty()) {
+ if (errorLabelsOverride(opCtx).value().isEmpty()) {
return BSONObj();
} else {
- return BSON(kErrorLabelsFieldName << errorLabelsOverride(opCtx).get());
+ return BSON(kErrorLabelsFieldName << errorLabelsOverride(opCtx).value());
}
}
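[Note] The `_code && ErrorCodes::isRetriableError(_code.value())` shape above is the guard-then-unwrap idiom used throughout this patch: boost::optional's explicit operator bool answers the same question as has_value() inside an if or && expression, but the spelled-out has_value() is required wherever a plain bool must be produced, as in the checkAuthForGetMore(..., _cmd.getTerm().has_value()) call earlier in this diff. A compact sketch:

    boost::optional<int> code;                 // hypothetical error code
    if (code && code.value() > 0) { /* ... */ }  // ok: explicit operator bool in a condition
    // bool hasCode = code;                    // ill-formed: the bool conversion is explicit
    bool hasCode = code.has_value();           // required when an actual bool is needed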
diff --git a/src/mongo/db/exec/bucket_unpacker.cpp b/src/mongo/db/exec/bucket_unpacker.cpp
index 43ccca4a13a..37847fc80c4 100644
--- a/src/mongo/db/exec/bucket_unpacker.cpp
+++ b/src/mongo/db/exec/bucket_unpacker.cpp
@@ -242,8 +242,8 @@ std::unique_ptr<MatchExpression> createComparisonPredicate(
// This function only handles time and measurement predicates--not metadata.
if (bucketSpec.metaField() &&
- (matchExprPath == bucketSpec.metaField().get() ||
- expression::isPathPrefixOf(bucketSpec.metaField().get(), matchExprPath))) {
+ (matchExprPath == bucketSpec.metaField().value() ||
+ expression::isPathPrefixOf(bucketSpec.metaField().value(), matchExprPath))) {
tasserted(
6707200,
str::stream() << "createComparisonPredicate() does not handle metadata predicates: "
@@ -452,8 +452,8 @@ std::unique_ptr<MatchExpression> BucketSpec::createPredicatesOnBucketLevelField(
// handle it here.
const auto matchExprPath = matchExpr->path();
if (!matchExprPath.empty() && bucketSpec.metaField() &&
- (matchExprPath == bucketSpec.metaField().get() ||
- expression::isPathPrefixOf(bucketSpec.metaField().get(), matchExprPath))) {
+ (matchExprPath == bucketSpec.metaField().value() ||
+ expression::isPathPrefixOf(bucketSpec.metaField().value(), matchExprPath))) {
if (haveComputedMetaField)
return handleIneligible(policy, matchExpr, "can't handle a computed meta field");
@@ -464,7 +464,7 @@ std::unique_ptr<MatchExpression> BucketSpec::createPredicatesOnBucketLevelField(
auto result = matchExpr->shallowClone();
expression::applyRenamesToExpression(
result.get(),
- {{bucketSpec.metaField().get(), timeseries::kBucketMetaFieldName.toString()}});
+ {{bucketSpec.metaField().value(), timeseries::kBucketMetaFieldName.toString()}});
return result;
}
diff --git a/src/mongo/db/exec/sbe/abt/abt_lower.cpp b/src/mongo/db/exec/sbe/abt/abt_lower.cpp
index adb44ec5574..18ecc6236bf 100644
--- a/src/mongo/db/exec/sbe/abt/abt_lower.cpp
+++ b/src/mongo/db/exec/sbe/abt/abt_lower.cpp
@@ -875,7 +875,7 @@ std::unique_ptr<sbe::PlanStage> SBENodeLowering::lowerScanNode(
sbe::ScanCallbacks callbacks({}, {}, {});
if (useParallelScan) {
- return sbe::makeS<sbe::ParallelScanStage>(nss.uuid().get(),
+ return sbe::makeS<sbe::ParallelScanStage>(nss.uuid().value(),
rootSlot,
ridSlot,
boost::none,
@@ -888,7 +888,7 @@ std::unique_ptr<sbe::PlanStage> SBENodeLowering::lowerScanNode(
planNodeId,
callbacks);
} else {
- return sbe::makeS<sbe::ScanStage>(nss.uuid().get(),
+ return sbe::makeS<sbe::ScanStage>(nss.uuid().value(),
rootSlot,
ridSlot,
boost::none,
@@ -1014,7 +1014,7 @@ std::unique_ptr<sbe::PlanStage> SBENodeLowering::walk(const IndexScanNode& n, co
// Unused.
boost::optional<sbe::value::SlotId> resultSlot;
- return sbe::makeS<sbe::IndexScanStage>(nss.uuid().get(),
+ return sbe::makeS<sbe::IndexScanStage>(nss.uuid().value(),
indexDefName,
!indexSpec.isReverseOrder(),
resultSlot,
@@ -1048,7 +1048,7 @@ std::unique_ptr<sbe::PlanStage> SBENodeLowering::walk(const SeekNode& n,
sbe::ScanCallbacks callbacks({}, {}, {});
const PlanNodeId planNodeId = _nodeToGroupPropsMap.at(&n)._planNodeId;
- return sbe::makeS<sbe::ScanStage>(nss.uuid().get(),
+ return sbe::makeS<sbe::ScanStage>(nss.uuid().value(),
rootSlot,
ridSlot,
boost::none,
diff --git a/src/mongo/db/exec/sbe/sbe_hash_lookup_test.cpp b/src/mongo/db/exec/sbe/sbe_hash_lookup_test.cpp
index 46dba4ce176..c3de8c88f49 100644
--- a/src/mongo/db/exec/sbe/sbe_hash_lookup_test.cpp
+++ b/src/mongo/db/exec/sbe/sbe_hash_lookup_test.cpp
@@ -90,7 +90,7 @@ public:
if (collator) {
// Setup collator and insert it into the ctx.
collatorSlot = generateSlotId();
- ctx->pushCorrelated(collatorSlot.get(), &collatorAccessor);
+ ctx->pushCorrelated(collatorSlot.value(), &collatorAccessor);
collatorAccessor.reset(value::TypeTags::collator,
value::bitcastFrom<CollatorInterface*>(collator));
}
diff --git a/src/mongo/db/exec/sbe/stages/bson_scan.cpp b/src/mongo/db/exec/sbe/stages/bson_scan.cpp
index 3a4c3b50512..432f999e61a 100644
--- a/src/mongo/db/exec/sbe/stages/bson_scan.cpp
+++ b/src/mongo/db/exec/sbe/stages/bson_scan.cpp
@@ -171,7 +171,7 @@ std::vector<DebugPrinter::Block> BSONScanStage::debugPrint() const {
auto ret = PlanStage::debugPrint();
if (_recordSlot) {
- DebugPrinter::addIdentifier(ret, _recordSlot.get());
+ DebugPrinter::addIdentifier(ret, _recordSlot.value());
}
ret.emplace_back(DebugPrinter::Block("[`"));
diff --git a/src/mongo/db/exec/sbe/stages/column_scan.cpp b/src/mongo/db/exec/sbe/stages/column_scan.cpp
index b60f9aca5af..a003515427f 100644
--- a/src/mongo/db/exec/sbe/stages/column_scan.cpp
+++ b/src/mongo/db/exec/sbe/stages/column_scan.cpp
@@ -485,13 +485,13 @@ std::vector<DebugPrinter::Block> ColumnScanStage::debugPrint() const {
auto ret = PlanStage::debugPrint();
if (_reconstructedRecordSlot) {
- DebugPrinter::addIdentifier(ret, _reconstructedRecordSlot.get());
+ DebugPrinter::addIdentifier(ret, _reconstructedRecordSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_recordIdSlot) {
- DebugPrinter::addIdentifier(ret, _recordIdSlot.get());
+ DebugPrinter::addIdentifier(ret, _recordIdSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
diff --git a/src/mongo/db/exec/sbe/stages/ix_scan.cpp b/src/mongo/db/exec/sbe/stages/ix_scan.cpp
index 21b2a06cf12..92175f96f71 100644
--- a/src/mongo/db/exec/sbe/stages/ix_scan.cpp
+++ b/src/mongo/db/exec/sbe/stages/ix_scan.cpp
@@ -462,19 +462,19 @@ std::vector<DebugPrinter::Block> IndexScanStage::debugPrint() const {
}
if (_recordSlot) {
- DebugPrinter::addIdentifier(ret, _recordSlot.get());
+ DebugPrinter::addIdentifier(ret, _recordSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_recordIdSlot) {
- DebugPrinter::addIdentifier(ret, _recordIdSlot.get());
+ DebugPrinter::addIdentifier(ret, _recordIdSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_snapshotIdSlot) {
- DebugPrinter::addIdentifier(ret, _snapshotIdSlot.get());
+ DebugPrinter::addIdentifier(ret, _snapshotIdSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
diff --git a/src/mongo/db/exec/sbe/stages/scan.cpp b/src/mongo/db/exec/sbe/stages/scan.cpp
index 0f6d514c139..57c8c594a84 100644
--- a/src/mongo/db/exec/sbe/stages/scan.cpp
+++ b/src/mongo/db/exec/sbe/stages/scan.cpp
@@ -511,41 +511,41 @@ std::vector<DebugPrinter::Block> ScanStage::debugPrint() const {
auto ret = PlanStage::debugPrint();
if (_seekKeySlot) {
- DebugPrinter::addIdentifier(ret, _seekKeySlot.get());
+ DebugPrinter::addIdentifier(ret, _seekKeySlot.value());
}
if (_recordSlot) {
- DebugPrinter::addIdentifier(ret, _recordSlot.get());
+ DebugPrinter::addIdentifier(ret, _recordSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_recordIdSlot) {
- DebugPrinter::addIdentifier(ret, _recordIdSlot.get());
+ DebugPrinter::addIdentifier(ret, _recordIdSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_snapshotIdSlot) {
- DebugPrinter::addIdentifier(ret, _snapshotIdSlot.get());
+ DebugPrinter::addIdentifier(ret, _snapshotIdSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_indexIdSlot) {
- DebugPrinter::addIdentifier(ret, _indexIdSlot.get());
+ DebugPrinter::addIdentifier(ret, _indexIdSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_indexKeySlot) {
- DebugPrinter::addIdentifier(ret, _indexKeySlot.get());
+ DebugPrinter::addIdentifier(ret, _indexKeySlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_indexKeyPatternSlot) {
- DebugPrinter::addIdentifier(ret, _indexKeyPatternSlot.get());
+ DebugPrinter::addIdentifier(ret, _indexKeyPatternSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
@@ -981,37 +981,37 @@ std::vector<DebugPrinter::Block> ParallelScanStage::debugPrint() const {
auto ret = PlanStage::debugPrint();
if (_recordSlot) {
- DebugPrinter::addIdentifier(ret, _recordSlot.get());
+ DebugPrinter::addIdentifier(ret, _recordSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_recordIdSlot) {
- DebugPrinter::addIdentifier(ret, _recordIdSlot.get());
+ DebugPrinter::addIdentifier(ret, _recordIdSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_snapshotIdSlot) {
- DebugPrinter::addIdentifier(ret, _snapshotIdSlot.get());
+ DebugPrinter::addIdentifier(ret, _snapshotIdSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_indexIdSlot) {
- DebugPrinter::addIdentifier(ret, _indexIdSlot.get());
+ DebugPrinter::addIdentifier(ret, _indexIdSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_indexKeySlot) {
- DebugPrinter::addIdentifier(ret, _indexKeySlot.get());
+ DebugPrinter::addIdentifier(ret, _indexKeySlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
if (_indexKeyPatternSlot) {
- DebugPrinter::addIdentifier(ret, _indexKeyPatternSlot.get());
+ DebugPrinter::addIdentifier(ret, _indexKeyPatternSlot.value());
} else {
DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
}
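[Note] The six optional-slot branches above, and the matching six in ParallelScanStage, all follow the same print-the-slot-or-the-none-keyword shape; a small helper along these lines could collapse them. Sketched as a hypothetical (the two addIdentifier overloads and kNoneKeyword are taken from the calls visible in the diff; the helper itself is not in the tree):

    // Hypothetical helper: print a slot id, or the 'none' keyword if the slot is unset.
    void addOptionalIdentifier(std::vector<DebugPrinter::Block>& ret,
                               const boost::optional<sbe::value::SlotId>& slot) {
        if (slot) {
            DebugPrinter::addIdentifier(ret, slot.value());
        } else {
            DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword);
        }
    }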
diff --git a/src/mongo/db/exec/working_set.cpp b/src/mongo/db/exec/working_set.cpp
index 854e5b48768..ccb5b010819 100644
--- a/src/mongo/db/exec/working_set.cpp
+++ b/src/mongo/db/exec/working_set.cpp
@@ -184,7 +184,7 @@ bool WorkingSetMember::getFieldDotted(const string& field, BSONElement* out) con
// Our state should be such that we have index data/are covered.
if (auto outOpt = IndexKeyDatum::getFieldDotted(keyData, field)) {
- *out = outOpt.get();
+ *out = outOpt.value();
return true;
} else {
return false;
diff --git a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
index faa05c2b63e..ca248706cf8 100644
--- a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
+++ b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
@@ -179,7 +179,7 @@ auto startExhaustQuery(
if (findCmd.getTailable() && findCmd.getAwaitData()) {
queryCursor->setAwaitDataTimeoutMS(awaitDataTimeoutMS);
if (lastKnownCommittedOpTime) {
- auto term = lastKnownCommittedOpTime.get().getTerm();
+ auto term = lastKnownCommittedOpTime.value().getTerm();
queryCursor->setCurrentTermAndLastCommittedOpTime(term,
lastKnownCommittedOpTime);
}
diff --git a/src/mongo/db/fle_crud.cpp b/src/mongo/db/fle_crud.cpp
index 0277b0c649f..2d7e59abb2e 100644
--- a/src/mongo/db/fle_crud.cpp
+++ b/src/mongo/db/fle_crud.cpp
@@ -171,7 +171,7 @@ boost::intrusive_ptr<ExpressionContext> makeExpCtx(OperationContext* opCtx,
std::unique_ptr<CollatorInterface> collator;
if (op.getCollation()) {
auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
- ->makeFromBSON(op.getCollation().get());
+ ->makeFromBSON(op.getCollation().value());
uassertStatusOK(statusWithCollator.getStatus());
collator = std::move(statusWithCollator.getValue());
@@ -193,7 +193,7 @@ std::pair<FLEBatchResult, write_ops::InsertCommandReply> processInsert(
GetTxnCallback getTxns) {
auto edcNss = insertRequest.getNamespace();
- auto ei = insertRequest.getEncryptionInformation().get();
+ auto ei = insertRequest.getEncryptionInformation().value();
bool bypassDocumentValidation =
insertRequest.getWriteCommandRequestBase().getBypassDocumentValidation();
@@ -458,7 +458,7 @@ void processFieldsForInsert(FLEQueryInterface* queryImpl,
int32_t* pStmtId,
bool bypassDocumentValidation) {
- NamespaceString nssEsc(edcNss.db(), efc.getEscCollection().get());
+ NamespaceString nssEsc(edcNss.db(), efc.getEscCollection().value());
auto docCount = queryImpl->countDocuments(nssEsc);
@@ -516,7 +516,7 @@ void processFieldsForInsert(FLEQueryInterface* queryImpl,
checkWriteErrors(escInsertReply);
- NamespaceString nssEcoc(edcNss.db(), efc.getEcocCollection().get());
+ NamespaceString nssEcoc(edcNss.db(), efc.getEcocCollection().value());
// TODO - should we make this a batch of ECOC updates?
auto ecocInsertReply = uassertStatusOK(queryImpl->insertDocument(
@@ -537,7 +537,7 @@ void processRemovedFields(FLEQueryInterface* queryImpl,
const std::vector<EDCIndexedFields>& deletedFields,
int32_t* pStmtId) {
- NamespaceString nssEcc(edcNss.db(), efc.getEccCollection().get());
+ NamespaceString nssEcc(edcNss.db(), efc.getEccCollection().value());
auto docCount = queryImpl->countDocuments(nssEcc);
@@ -609,7 +609,7 @@ void processRemovedFields(FLEQueryInterface* queryImpl,
true));
checkWriteErrors(eccInsertReply);
- NamespaceString nssEcoc(edcNss.db(), efc.getEcocCollection().get());
+ NamespaceString nssEcoc(edcNss.db(), efc.getEcocCollection().value());
// TODO - make this a batch of ECOC updates?
EncryptedStateCollectionTokens tokens(plainTextField.esc, plainTextField.ecc);
@@ -751,7 +751,7 @@ write_ops::DeleteCommandReply processDelete(FLEQueryInterface* queryImpl,
const write_ops::DeleteCommandRequest& deleteRequest) {
auto edcNss = deleteRequest.getNamespace();
- auto ei = deleteRequest.getEncryptionInformation().get();
+ auto ei = deleteRequest.getEncryptionInformation().value();
auto efc = EncryptionInformationHelpers::getAndValidateSchema(edcNss, ei);
auto tokenMap = EncryptionInformationHelpers::getDeleteTokens(edcNss, ei);
@@ -804,7 +804,7 @@ write_ops::UpdateCommandReply processUpdate(FLEQueryInterface* queryImpl,
const write_ops::UpdateCommandRequest& updateRequest) {
auto edcNss = updateRequest.getNamespace();
- auto ei = updateRequest.getEncryptionInformation().get();
+ auto ei = updateRequest.getEncryptionInformation().value();
auto efc = EncryptionInformationHelpers::getAndValidateSchema(edcNss, ei);
auto tokenMap = EncryptionInformationHelpers::getDeleteTokens(edcNss, ei);
@@ -1002,7 +1002,7 @@ std::unique_ptr<BatchedCommandRequest> processFLEBatchExplain(
newDeleteOp.setQ(fle::rewriteQuery(opCtx,
getExpCtx(newDeleteOp),
request.getNS(),
- deleteRequest.getEncryptionInformation().get(),
+ deleteRequest.getEncryptionInformation().value(),
newDeleteOp.getQ(),
&getTransactionWithRetriesForMongoS,
fle::HighCardinalityModeAllowed::kAllow));
@@ -1019,7 +1019,7 @@ std::unique_ptr<BatchedCommandRequest> processFLEBatchExplain(
newUpdateOp.setQ(fle::rewriteQuery(opCtx,
getExpCtx(newUpdateOp),
request.getNS(),
- updateRequest.getEncryptionInformation().get(),
+ updateRequest.getEncryptionInformation().value(),
newUpdateOp.getQ(),
&getTransactionWithRetriesForMongoS,
highCardinalityModeAllowed));
@@ -1037,7 +1037,7 @@ write_ops::FindAndModifyCommandReply processFindAndModify(
const write_ops::FindAndModifyCommandRequest& findAndModifyRequest) {
auto edcNss = findAndModifyRequest.getNamespace();
- auto ei = findAndModifyRequest.getEncryptionInformation().get();
+ auto ei = findAndModifyRequest.getEncryptionInformation().value();
auto efc = EncryptionInformationHelpers::getAndValidateSchema(edcNss, ei);
auto tokenMap = EncryptionInformationHelpers::getDeleteTokens(edcNss, ei);
@@ -1182,7 +1182,7 @@ write_ops::FindAndModifyCommandRequest processFindAndModifyExplain(
const write_ops::FindAndModifyCommandRequest& findAndModifyRequest) {
auto edcNss = findAndModifyRequest.getNamespace();
- auto ei = findAndModifyRequest.getEncryptionInformation().get();
+ auto ei = findAndModifyRequest.getEncryptionInformation().value();
auto efc = EncryptionInformationHelpers::getAndValidateSchema(edcNss, ei);
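
A hedged sketch (names illustrative, not the FLE API) of the check-then-unwrap shape the fle_crud.cpp hunks rely on: the optional is tested before value() is called, so the throwing accessor cannot fire on the taken branch.

#include <boost/optional.hpp>
#include <string>

std::string collectionNameOrDefault(const boost::optional<std::string>& name) {
    if (name) {               // contextual bool conversion, same as has_value()
        return name.value();  // safe: guarded by the check above
    }
    return "default.collection";
}
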
diff --git a/src/mongo/db/free_mon/free_mon_controller_test.cpp b/src/mongo/db/free_mon/free_mon_controller_test.cpp
index c5cd4a76d68..cd8740054c2 100644
--- a/src/mongo/db/free_mon/free_mon_controller_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_controller_test.cpp
@@ -955,7 +955,7 @@ TEST_F(FreeMonControllerTest, TestRegister) {
controller->turnCrankForTest(Turner().registerCommand());
- ASSERT_TRUE(!FreeMonStorage::read(_opCtx.get()).get().getRegistrationId().empty());
+ ASSERT_TRUE(!FreeMonStorage::read(_opCtx.get()).value().getRegistrationId().empty());
ASSERT_EQ(controller.registerCollector->count(), 1UL);
ASSERT_GTE(controller.metricsCollector->count(), 0UL);
@@ -976,7 +976,7 @@ TEST_F(FreeMonControllerTest, TestRegisterTimeout) {
ASSERT_OK(*optionalStatus);
controller->turnCrankForTest(Turner().registerCommand(2));
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::pending);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() == StorageStateEnum::pending);
ASSERT_GTE(controller.network->getRegistersCalls(), 2);
ASSERT_GTE(controller.registerCollector->count(), 2UL);
}
@@ -995,7 +995,8 @@ TEST_F(FreeMonControllerTest, TestRegisterFail) {
ASSERT_OK(*optionalStatus);
controller->turnCrankForTest(Turner().registerCommand(1));
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::disabled);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() ==
+ StorageStateEnum::disabled);
ASSERT_EQ(controller.network->getRegistersCalls(), 1);
ASSERT_EQ(controller.registerCollector->count(), 1UL);
@@ -1015,7 +1016,8 @@ TEST_F(FreeMonControllerTest, TestRegisterHalts) {
ASSERT_OK(*optionalStatus);
controller->turnCrankForTest(Turner().registerCommand());
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::disabled);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() ==
+ StorageStateEnum::disabled);
ASSERT_EQ(controller.network->getRegistersCalls(), 1);
ASSERT_EQ(controller.registerCollector->count(), 1UL);
@@ -1030,7 +1032,7 @@ TEST_F(FreeMonControllerTest, TestMetrics) {
controller->turnCrankForTest(
Turner().registerServer().registerCommand().collect(2).metricsSend());
- ASSERT_TRUE(!FreeMonStorage::read(_opCtx.get()).get().getRegistrationId().empty());
+ ASSERT_TRUE(!FreeMonStorage::read(_opCtx.get()).value().getRegistrationId().empty());
ASSERT_GTE(controller.network->getRegistersCalls(), 1);
ASSERT_GTE(controller.network->getMetricsCalls(), 1);
@@ -1076,7 +1078,7 @@ TEST_F(FreeMonControllerTest, TestMetricsWithEnabledStorage) {
controller->turnCrankForTest(
Turner().registerServer().registerCommand().collect(2).metricsSend());
- ASSERT_TRUE(!FreeMonStorage::read(_opCtx.get()).get().getRegistrationId().empty());
+ ASSERT_TRUE(!FreeMonStorage::read(_opCtx.get()).value().getRegistrationId().empty());
ASSERT_GTE(controller.network->getRegistersCalls(), 1);
ASSERT_GTE(controller.network->getMetricsCalls(), 1);
@@ -1179,7 +1181,7 @@ TEST_F(FreeMonControllerTest, TestMetricsUnregisterCancelsRegister) {
ASSERT_OK(*optionalStatus);
controller->turnCrankForTest(Turner().registerCommand(2));
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::pending);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() == StorageStateEnum::pending);
ASSERT_GTE(controller.network->getRegistersCalls(), 2);
ASSERT_GTE(controller.registerCollector->count(), 2UL);
@@ -1190,7 +1192,8 @@ TEST_F(FreeMonControllerTest, TestMetricsUnregisterCancelsRegister) {
controller->turnCrankForTest(Turner().unRegisterCommand());
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::disabled);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() ==
+ StorageStateEnum::disabled);
ASSERT_GTE(controller.network->getRegistersCalls(), 2);
ASSERT_GTE(controller.registerCollector->count(), 2UL);
@@ -1207,8 +1210,9 @@ TEST_F(FreeMonControllerTest, TestMetricsHalt) {
controller->turnCrankForTest(
Turner().registerServer().registerCommand().metricsSend().collect(4).metricsSend());
- ASSERT_TRUE(!FreeMonStorage::read(_opCtx.get()).get().getRegistrationId().empty());
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::disabled);
+ ASSERT_TRUE(!FreeMonStorage::read(_opCtx.get()).value().getRegistrationId().empty());
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() ==
+ StorageStateEnum::disabled);
ASSERT_GTE(controller.network->getRegistersCalls(), 1);
ASSERT_GTE(controller.network->getMetricsCalls(), 1);
@@ -1229,7 +1233,7 @@ TEST_F(FreeMonControllerTest, TestMetricsPermanentlyDelete) {
controller->turnCrankForTest(
Turner().registerServer().registerCommand().collect(5).metricsSend(4));
- ASSERT_FALSE(FreeMonStorage::read(_opCtx.get()).is_initialized());
+ ASSERT_FALSE(FreeMonStorage::read(_opCtx.get()).has_value());
ASSERT_GTE(controller.network->getRegistersCalls(), 1);
ASSERT_GTE(controller.network->getMetricsCalls(), 3);
@@ -1301,7 +1305,7 @@ TEST_F(FreeMonControllerTest, TestResendRegistration) {
controller->turnCrankForTest(Turner().registerServer().registerCommand().collect(2));
- ASSERT_TRUE(!FreeMonStorage::read(_opCtx.get()).get().getRegistrationId().empty());
+ ASSERT_TRUE(!FreeMonStorage::read(_opCtx.get()).value().getRegistrationId().empty());
controller->turnCrankForTest(
Turner().metricsSend(3).collect(3).registerCommand().metricsSend(1));
@@ -1372,7 +1376,7 @@ TEST_F(FreeMonControllerRSTest, TransitionToPrimary) {
controller->turnCrankForTest(Turner().onTransitionToPrimary().registerCommand());
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).is_initialized());
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).has_value());
ASSERT_EQ(controller.registerCollector->count(), 1UL);
ASSERT_GTE(controller.metricsCollector->count(), 2UL);
@@ -1391,7 +1395,7 @@ TEST_F(FreeMonControllerRSTest, StartupOnSecondary) {
controller->turnCrankForTest(Turner().registerServer().registerCommand().collect());
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).is_initialized());
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).has_value());
// Validate the new registration id was not written
ASSERT_EQ(FreeMonStorage::read(_opCtx.get())->getRegistrationId(), "Foo");
@@ -1415,7 +1419,7 @@ TEST_F(FreeMonControllerRSTest, SecondaryStartOnInsert) {
controller->turnCrankForTest(Turner().notifyUpsert().registerCommand().collect());
- ASSERT_FALSE(FreeMonStorage::read(_opCtx.get()).is_initialized());
+ ASSERT_FALSE(FreeMonStorage::read(_opCtx.get()).has_value());
ASSERT_EQ(controller.registerCollector->count(), 1UL);
ASSERT_GTE(controller.metricsCollector->count(), 2UL);
@@ -1439,7 +1443,7 @@ TEST_F(FreeMonControllerRSTest, SecondaryStartOnUpdate) {
controller->turnCrankForTest(Turner().notifyUpsert().registerCommand().collect());
// Since there is no local write, it remains pending
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::pending);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() == StorageStateEnum::pending);
ASSERT_EQ(controller.registerCollector->count(), 1UL);
ASSERT_GTE(controller.metricsCollector->count(), 2UL);
@@ -1464,10 +1468,10 @@ TEST_F(FreeMonControllerRSTest, SecondaryStopOnDeRegister) {
controller->turnCrankForTest(Turner().notifyUpsert().collect().metricsSend());
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).is_initialized());
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).has_value());
// Since there is no local write, it remains enabled
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::enabled);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() == StorageStateEnum::enabled);
ASSERT_EQ(controller.registerCollector->count(), 1UL);
ASSERT_EQ(controller.metricsCollector->count(), 2UL);
@@ -1485,7 +1489,7 @@ TEST_F(FreeMonControllerRSTest, StepdownDuringRegistration) {
controller->turnCrankForTest(Turner().registerServer() + 1);
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::pending);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() == StorageStateEnum::pending);
// Now become a secondary
ASSERT_OK(_getReplCoord()->setFollowerMode(repl::MemberState::RS_SECONDARY));
@@ -1495,7 +1499,7 @@ TEST_F(FreeMonControllerRSTest, StepdownDuringRegistration) {
controller->turnCrankForTest(Turner().metricsSend().collect(2));
// Registration cannot write back to the local store so remain in pending
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::pending);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() == StorageStateEnum::pending);
ASSERT_EQ(controller.registerCollector->count(), 1UL);
ASSERT_EQ(controller.metricsCollector->count(), 2UL);
@@ -1546,10 +1550,10 @@ TEST_F(FreeMonControllerRSTest, SecondaryStopOnDocumentDrop) {
// There is a race condition where sometimes metrics send sneaks in
controller->turnCrankForTest(Turner().notifyDelete().collect(3));
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).is_initialized());
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).has_value());
// Since there is no local write, it remains enabled
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::enabled);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() == StorageStateEnum::enabled);
ASSERT_EQ(controller.registerCollector->count(), 1UL);
ASSERT_GTE(controller.metricsCollector->count(), 2UL);
@@ -1586,10 +1590,10 @@ TEST_F(FreeMonControllerRSTest, SecondaryStopOnDocumentDropDuringCollect) {
controller->turnCrankForTest(Turner().metricsSend().collect(2));
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).is_initialized());
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).has_value());
// Since there is no local write, it remains enabled
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::enabled);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() == StorageStateEnum::enabled);
BSONObjBuilder builder;
controller->getServerStatus(_opCtx.get(), &builder);
@@ -1622,7 +1626,7 @@ TEST_F(FreeMonControllerRSTest, SecondaryStartOnBadUpdate) {
controller->turnCrankForTest(Turner().notifyUpsert());
// Since there is no local write, it remains enabled
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::enabled);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() == StorageStateEnum::enabled);
ASSERT_EQ(controller.registerCollector->count(), 1UL);
ASSERT_EQ(controller.metricsCollector->count(), 2UL);
@@ -1655,7 +1659,7 @@ TEST_F(FreeMonControllerRSTest, SecondaryRollbackStopMetrics) {
Turner().notifyOnRollback().registerCommand().metricsSend().collect(2).metricsSend());
// Since there is no local write, it remains enabled
- ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).get().getState() == StorageStateEnum::enabled);
+ ASSERT_TRUE(FreeMonStorage::read(_opCtx.get()).value().getState() == StorageStateEnum::enabled);
ASSERT_EQ(controller.registerCollector->count(), 1UL);
ASSERT_EQ(controller.metricsCollector->count(), 4UL);
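
Several of the hunks above change only layout: .value() is two characters longer than .get(), so a few ASSERT lines are re-wrapped, presumably to stay within the formatter's column limit. The assertion shape itself, reduced to plain assert() over stand-in types:

#include <boost/optional.hpp>
#include <cassert>

enum class StorageState { pending, enabled, disabled };

// Stand-in for FreeMonStorage::read(): returns the persisted state, if any.
boost::optional<StorageState> readState() {
    return StorageState::pending;
}

int main() {
    auto doc = readState();
    assert(doc.has_value());                       // was: is_initialized()
    assert(doc.value() == StorageState::pending);  // was: get()
    return 0;
}
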
diff --git a/src/mongo/db/free_mon/free_mon_processor.cpp b/src/mongo/db/free_mon/free_mon_processor.cpp
index b230d9431cc..d42dc473d14 100644
--- a/src/mongo/db/free_mon/free_mon_processor.cpp
+++ b/src/mongo/db/free_mon/free_mon_processor.cpp
@@ -167,12 +167,12 @@ void FreeMonProcessor::run() {
while (true) {
auto item = _queue.dequeue(client->getServiceContext()->getPreciseClockSource());
- if (!item.is_initialized()) {
+ if (!item.has_value()) {
// Shutdown was triggered
return;
}
- auto msg = item.get();
+ auto msg = item.value();
// Do work here
switch (msg->getType()) {
@@ -279,13 +279,13 @@ void FreeMonProcessor::readState(OperationContext* opCtx, bool updateInMemory) {
_lastReadState = state;
- if (state.is_initialized()) {
- invariant(state.get().getVersion() == kStorageVersion);
+ if (state.has_value()) {
+ invariant(state.value().getVersion() == kStorageVersion);
if (updateInMemory) {
- _state = state.get();
+ _state = state.value();
}
- } else if (!state.is_initialized()) {
+ } else if (!state.has_value()) {
// Default the state
auto state = _state.synchronize();
state->setVersion(kStorageVersion);
@@ -355,12 +355,12 @@ void FreeMonProcessor::doServerRegister(
// record the registration id until after becoming primary
// 2. a standalone which has never been registered
//
- if (!state.is_initialized()) {
+ if (!state.has_value()) {
_registerOnTransitionToPrimary = regType;
} else {
// We are standalone or secondary, if we have a registration id, then send a
// registration notification, else wait for the user to register us.
- if (state.get().getState() == StorageStateEnum::enabled) {
+ if (state.value().getState() == StorageStateEnum::enabled) {
enqueue(FreeMonRegisterCommandMessage::createNow(
{msg->getPayload().second, boost::none}));
}
@@ -426,7 +426,7 @@ void FreeMonProcessor::doCommandRegister(Client* client,
FreeMonRegistrationRequest req;
if (msg->getPayload().second) {
- req.setId(StringData(msg->getPayload().second.get()));
+ req.setId(StringData(msg->getPayload().second.value()));
} else {
auto regid = _state->getRegistrationId();
if (!regid.empty()) {
@@ -504,10 +504,10 @@ Status FreeMonProcessor::validateRegistrationResponse(const FreeMonRegistrationR
<< kInformationalMessageMaxLength << "'");
}
- if (resp.getUserReminder().is_initialized() &&
- resp.getUserReminder().get().size() >= kUserReminderMaxLength) {
+ if (resp.getUserReminder().has_value() &&
+ resp.getUserReminder().value().size() >= kUserReminderMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
- str::stream() << "UserReminder is '" << resp.getUserReminder().get().size()
+ str::stream() << "UserReminder is '" << resp.getUserReminder().value().size()
<< "' bytes in length, maximum allowed length is '"
<< kUserReminderMaxLength << "'");
}
@@ -547,34 +547,34 @@ Status FreeMonProcessor::validateMetricsResponse(const FreeMonMetricsResponse& r
<< "), received '" << resp.getVersion() << "'");
}
- if (resp.getId().is_initialized() && resp.getId().get().size() >= kRegistrationIdMaxLength) {
+ if (resp.getId().has_value() && resp.getId().value().size() >= kRegistrationIdMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
- str::stream() << "Id is '" << resp.getId().get().size()
+ str::stream() << "Id is '" << resp.getId().value().size()
<< "' bytes in length, maximum allowed length is '"
<< kRegistrationIdMaxLength << "'");
}
- if (resp.getInformationalURL().is_initialized() &&
- resp.getInformationalURL().get().size() >= kInformationalURLMaxLength) {
+ if (resp.getInformationalURL().has_value() &&
+ resp.getInformationalURL().value().size() >= kInformationalURLMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
str::stream()
- << "InformationURL is '" << resp.getInformationalURL().get().size()
+ << "InformationURL is '" << resp.getInformationalURL().value().size()
<< "' bytes in length, maximum allowed length is '"
<< kInformationalURLMaxLength << "'");
}
- if (resp.getMessage().is_initialized() &&
- resp.getMessage().get().size() >= kInformationalMessageMaxLength) {
+ if (resp.getMessage().has_value() &&
+ resp.getMessage().value().size() >= kInformationalMessageMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
- str::stream() << "Message is '" << resp.getMessage().get().size()
+ str::stream() << "Message is '" << resp.getMessage().value().size()
<< "' bytes in length, maximum allowed length is '"
<< kInformationalMessageMaxLength << "'");
}
- if (resp.getUserReminder().is_initialized() &&
- resp.getUserReminder().get().size() >= kUserReminderMaxLength) {
+ if (resp.getUserReminder().has_value() &&
+ resp.getUserReminder().value().size() >= kUserReminderMaxLength) {
return Status(ErrorCodes::FreeMonHttpPermanentFailure,
- str::stream() << "UserReminder is '" << resp.getUserReminder().get().size()
+ str::stream() << "UserReminder is '" << resp.getUserReminder().value().size()
<< "' bytes in length, maximum allowed length is '"
<< kUserReminderMaxLength << "'");
}
@@ -640,8 +640,8 @@ void FreeMonProcessor::doAsyncRegisterComplete(
auto state = _state.synchronize();
state->setRegistrationId(resp.getId());
- if (resp.getUserReminder().is_initialized()) {
- state->setUserReminder(resp.getUserReminder().get());
+ if (resp.getUserReminder().has_value()) {
+ state->setUserReminder(resp.getUserReminder().value());
} else {
state->setUserReminder("");
}
@@ -846,20 +846,20 @@ void FreeMonProcessor::doAsyncMetricsComplete(
{
auto state = _state.synchronize();
- if (resp.getId().is_initialized()) {
- state->setRegistrationId(resp.getId().get());
+ if (resp.getId().has_value()) {
+ state->setRegistrationId(resp.getId().value());
}
- if (resp.getUserReminder().is_initialized()) {
- state->setUserReminder(resp.getUserReminder().get());
+ if (resp.getUserReminder().has_value()) {
+ state->setUserReminder(resp.getUserReminder().value());
}
- if (resp.getInformationalURL().is_initialized()) {
- state->setInformationalURL(resp.getInformationalURL().get());
+ if (resp.getInformationalURL().has_value()) {
+ state->setInformationalURL(resp.getInformationalURL().value());
}
- if (resp.getMessage().is_initialized()) {
- state->setMessage(resp.getMessage().get());
+ if (resp.getMessage().has_value()) {
+ state->setMessage(resp.getMessage().value());
}
}
@@ -871,7 +871,7 @@ void FreeMonProcessor::doAsyncMetricsComplete(
_metricsRetry->setMin(Seconds(resp.getReportingInterval()));
_metricsRetry->reset();
- if (resp.getResendRegistration().is_initialized() && resp.getResendRegistration()) {
+ if (resp.getResendRegistration().has_value() && resp.getResendRegistration()) {
enqueue(FreeMonRegisterCommandMessage::createNow({_tags, boost::none}));
} else {
// Enqueue next metrics upload
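
The validateMetricsResponse hunks repeat one guard per optional field: present-and-too-long is rejected. A minimal sketch of that predicate (kMaxLen is a stand-in, not the server's constant):

#include <boost/optional.hpp>
#include <cstddef>
#include <string>

constexpr std::size_t kMaxLen = 4096;  // stand-in limit

// True when the optional field is present but exceeds the length limit;
// short-circuit evaluation makes the value() access safe.
bool fieldTooLong(const boost::optional<std::string>& field) {
    return field.has_value() && field.value().size() >= kMaxLen;
}
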
diff --git a/src/mongo/db/free_mon/free_mon_queue_test.cpp b/src/mongo/db/free_mon/free_mon_queue_test.cpp
index ad6104c5126..275743a0fda 100644
--- a/src/mongo/db/free_mon/free_mon_queue_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_queue_test.cpp
@@ -91,7 +91,7 @@ TEST_F(FreeMonQueueTest, TestBasic) {
auto item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource());
- ASSERT(item.get()->getType() == FreeMonMessageType::RegisterServer);
+ ASSERT(item.value()->getType() == FreeMonMessageType::RegisterServer);
}
Date_t fromNow(int millis) {
@@ -107,10 +107,10 @@ TEST_F(FreeMonQueueTest, TestDeadlinePriority) {
queue.enqueue(
FreeMonMessage::createWithDeadline(FreeMonMessageType::RegisterCommand, fromNow(50)));
- auto item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource()).get();
+ auto item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource()).value();
ASSERT(item->getType() == FreeMonMessageType::RegisterCommand);
- item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource()).get();
+ item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource()).value();
ASSERT(item->getType() == FreeMonMessageType::RegisterServer);
}
@@ -124,13 +124,13 @@ TEST_F(FreeMonQueueTest, TestFIFO) {
queue.enqueue(
FreeMonMessage::createWithDeadline(FreeMonMessageType::RegisterCommand, Date_t()));
- auto item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource()).get();
+ auto item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource()).value();
ASSERT(item->getType() == FreeMonMessageType::RegisterServer);
- item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource()).get();
+ item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource()).value();
ASSERT(item->getType() == FreeMonMessageType::AsyncRegisterComplete);
- item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource()).get();
+ item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource()).value();
ASSERT(item->getType() == FreeMonMessageType::RegisterCommand);
}
@@ -150,7 +150,7 @@ TEST_F(FreeMonQueueTest, TestQueueStop) {
// Try to dequeue from a stopped task queue
auto item = queue.dequeue(_opCtx.get()->getServiceContext()->getPreciseClockSource());
- ASSERT_FALSE(item.is_initialized());
+ ASSERT_FALSE(item.has_value());
});
ASSERT_OK(swSchedule.getStatus());
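
The queue tests unwrap the returned optional in a single expression. A toy dequeue() showing the same chaining (FreeMonMessageQueue itself is not reproduced here):

#include <boost/optional.hpp>
#include <queue>

boost::optional<int> dequeue(std::queue<int>& q) {
    if (q.empty())
        return boost::none;  // models the stopped/empty-queue case
    int front = q.front();
    q.pop();
    return front;
}

int main() {
    std::queue<int> q;
    q.push(7);
    // .value() on the returned temporary, as in the tests above; it would
    // throw boost::bad_optional_access if the queue were empty.
    int item = dequeue(q).value();
    return item == 7 ? 0 : 1;
}
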
diff --git a/src/mongo/db/free_mon/free_mon_storage_test.cpp b/src/mongo/db/free_mon/free_mon_storage_test.cpp
index 97369228f6d..89073a30b0e 100644
--- a/src/mongo/db/free_mon/free_mon_storage_test.cpp
+++ b/src/mongo/db/free_mon/free_mon_storage_test.cpp
@@ -109,7 +109,7 @@ TEST_F(FreeMonStorageTest, TestStorage) {
// Validate no collection works
{
auto emptyDoc = FreeMonStorage::read(_opCtx.get());
- ASSERT_FALSE(emptyDoc.is_initialized());
+ ASSERT_FALSE(emptyDoc.has_value());
}
// Create collection with one document.
@@ -135,7 +135,7 @@ TEST_F(FreeMonStorageTest, TestStorage) {
{
auto emptyDoc = FreeMonStorage::read(_opCtx.get());
- ASSERT_FALSE(emptyDoc.is_initialized());
+ ASSERT_FALSE(emptyDoc.has_value());
}
FreeMonStorage::replace(_opCtx.get(), initialState);
@@ -143,7 +143,7 @@ TEST_F(FreeMonStorageTest, TestStorage) {
{
auto persistedDoc = FreeMonStorage::read(_opCtx.get());
- ASSERT_TRUE(persistedDoc.is_initialized());
+ ASSERT_TRUE(persistedDoc.has_value());
ASSERT_TRUE(persistedDoc == initialState);
}
@@ -152,7 +152,7 @@ TEST_F(FreeMonStorageTest, TestStorage) {
{
auto emptyDoc = FreeMonStorage::read(_opCtx.get());
- ASSERT_FALSE(emptyDoc.is_initialized());
+ ASSERT_FALSE(emptyDoc.has_value());
}
// Verify delete of nothing succeeds

@@ -189,7 +189,7 @@ TEST_F(FreeMonStorageTest, TestSecondary) {
{
auto persistedDoc = FreeMonStorage::read(_opCtx.get());
- ASSERT_TRUE(persistedDoc.is_initialized());
+ ASSERT_TRUE(persistedDoc.has_value());
ASSERT_TRUE(persistedDoc == initialState);
}
@@ -214,7 +214,7 @@ TEST_F(FreeMonStorageTest, TestSecondary) {
{
auto persistedDoc = FreeMonStorage::read(_opCtx.get());
- ASSERT_TRUE(persistedDoc.is_initialized());
+ ASSERT_TRUE(persistedDoc.has_value());
ASSERT_TRUE(persistedDoc == initialState);
}
@@ -223,7 +223,7 @@ TEST_F(FreeMonStorageTest, TestSecondary) {
{
auto persistedDoc = FreeMonStorage::read(_opCtx.get());
- ASSERT_TRUE(persistedDoc.is_initialized());
+ ASSERT_TRUE(persistedDoc.has_value());
}
// Verify delete of nothing succeeds
@@ -247,7 +247,7 @@ TEST_F(FreeMonStorageTest, TestClusterManagerStorage) {
const NamespaceString localClusterManagerNss("local.clustermanager");
// Verify read of non-existent collection works
- ASSERT_FALSE(FreeMonStorage::readClusterManagerState(_opCtx.get()).is_initialized());
+ ASSERT_FALSE(FreeMonStorage::readClusterManagerState(_opCtx.get()).has_value());
CollectionOptions collectionOptions;
collectionOptions.uuid = UUID::gen();
@@ -256,17 +256,17 @@ TEST_F(FreeMonStorageTest, TestClusterManagerStorage) {
ASSERT_OK(statusCC);
// Verify read of empty collection works
- ASSERT_FALSE(FreeMonStorage::readClusterManagerState(_opCtx.get()).is_initialized());
+ ASSERT_FALSE(FreeMonStorage::readClusterManagerState(_opCtx.get()).has_value());
insertDoc(_opCtx.get(), localClusterManagerNss, "foo1");
// Verify read of singleton collection works
- ASSERT_TRUE(FreeMonStorage::readClusterManagerState(_opCtx.get()).is_initialized());
+ ASSERT_TRUE(FreeMonStorage::readClusterManagerState(_opCtx.get()).has_value());
insertDoc(_opCtx.get(), localClusterManagerNss, "bar1");
// Verify read of two doc collection fails
- ASSERT_FALSE(FreeMonStorage::readClusterManagerState(_opCtx.get()).is_initialized());
+ ASSERT_FALSE(FreeMonStorage::readClusterManagerState(_opCtx.get()).has_value());
}
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/ftdc/compressor_test.cpp b/src/mongo/db/ftdc/compressor_test.cpp
index f8dee52f889..5009f22760f 100644
--- a/src/mongo/db/ftdc/compressor_test.cpp
+++ b/src/mongo/db/ftdc/compressor_test.cpp
@@ -133,17 +133,17 @@ public:
addSample(const BSONObj& sample) {
auto st = _compressor.addSample(sample, Date_t());
- if (!st.getValue().is_initialized()) {
+ if (!st.getValue().has_value()) {
_docs.emplace_back(sample);
- } else if (std::get<1>(st.getValue().get()) ==
+ } else if (std::get<1>(st.getValue().value()) ==
FTDCCompressor::CompressorState::kSchemaChanged) {
- validate(std::get<0>(st.getValue().get()));
+ validate(std::get<0>(st.getValue().value()));
_docs.clear();
_docs.emplace_back(sample);
- } else if (std::get<1>(st.getValue().get()) ==
+ } else if (std::get<1>(st.getValue().value()) ==
FTDCCompressor::CompressorState::kCompressorFull) {
_docs.emplace_back(sample);
- validate(std::get<0>(st.getValue().get()));
+ validate(std::get<0>(st.getValue().value()));
_docs.clear();
} else {
MONGO_UNREACHABLE;
@@ -154,8 +154,8 @@ public:
void validate(boost::optional<ConstDataRange> cdr) {
std::vector<BSONObj> list;
- if (cdr.is_initialized()) {
- auto sw = _decompressor.uncompress(cdr.get());
+ if (cdr.has_value()) {
+ auto sw = _decompressor.uncompress(cdr.value());
ASSERT_TRUE(sw.isOK());
list = sw.getValue();
} else {
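
In compressor_test.cpp the engaged optional carries a tuple, so the rewrite composes std::get<> with value(). The reduced shape, with stand-in types for the compressor's result:

#include <boost/optional.hpp>
#include <string>
#include <tuple>

enum class CompressorState { kSchemaChanged, kCompressorFull };
using AddSampleResult = boost::optional<std::tuple<std::string, CompressorState>>;

// Precondition: r.has_value(); mirrors std::get<1>(st.getValue().value()).
CompressorState stateOf(const AddSampleResult& r) {
    return std::get<1>(r.value());
}
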
diff --git a/src/mongo/db/ftdc/file_writer.cpp b/src/mongo/db/ftdc/file_writer.cpp
index 6eaa88ee872..b0c0f4c0007 100644
--- a/src/mongo/db/ftdc/file_writer.cpp
+++ b/src/mongo/db/ftdc/file_writer.cpp
@@ -156,8 +156,8 @@ Status FTDCFileWriter::writeSample(const BSONObj& sample, Date_t date) {
return ret.getStatus();
}
- if (ret.getValue().is_initialized()) {
- return flush(std::get<0>(ret.getValue().get()), std::get<2>(ret.getValue().get()));
+ if (ret.getValue().has_value()) {
+ return flush(std::get<0>(ret.getValue().value()), std::get<2>(ret.getValue().value()));
}
if (_compressor.getSampleCount() != 0 &&
@@ -177,7 +177,7 @@ Status FTDCFileWriter::writeSample(const BSONObj& sample, Date_t date) {
}
Status FTDCFileWriter::flush(const boost::optional<ConstDataRange>& range, Date_t date) {
- if (!range.is_initialized()) {
+ if (!range.has_value()) {
if (_compressor.hasDataToFlush()) {
auto swBuf = _compressor.getCompressedSamples();
@@ -194,7 +194,7 @@ Status FTDCFileWriter::flush(const boost::optional<ConstDataRange>& range, Date_
}
}
} else {
- BSONObj o = FTDCBSONUtil::createBSONMetricChunkDocument(range.get(), date);
+ BSONObj o = FTDCBSONUtil::createBSONMetricChunkDocument(range.value(), date);
Status s = writeArchiveFileBuffer({o.objdata(), static_cast<size_t>(o.objsize())});
if (!s.isOK()) {
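
flush() above branches on whether a range was handed in: empty means drain the compressor's buffer, engaged means write the supplied chunk. A sketch with std::string standing in for ConstDataRange and the write path:

#include <boost/optional.hpp>
#include <string>

std::string flush(const boost::optional<std::string>& range) {
    if (!range.has_value()) {
        // No explicit range: flush whatever the compressor buffered.
        return "<buffered samples>";
    }
    // Engaged: write the caller-supplied chunk.
    return range.value();
}
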
diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp
index b27c7169d1a..59f8c7d1d6f 100644
--- a/src/mongo/db/index/index_build_interceptor.cpp
+++ b/src/mongo/db/index/index_build_interceptor.cpp
@@ -90,7 +90,7 @@ IndexBuildInterceptor::IndexBuildInterceptor(OperationContext* opCtx,
entry->descriptor()->unique() == dupKeyTrackerIdentExists);
if (duplicateKeyTrackerIdent) {
_duplicateKeyTracker =
- std::make_unique<DuplicateKeyTracker>(opCtx, entry, duplicateKeyTrackerIdent.get());
+ std::make_unique<DuplicateKeyTracker>(opCtx, entry, duplicateKeyTrackerIdent.value());
}
}
@@ -445,7 +445,7 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx,
// expectations.
stdx::unique_lock<Latch> lk(_multikeyPathMutex);
if (_multikeyPaths) {
- MultikeyPathTracker::mergeMultikeyPaths(&_multikeyPaths.get(), multikeyPaths);
+ MultikeyPathTracker::mergeMultikeyPaths(&_multikeyPaths.value(), multikeyPaths);
} else {
// `mergeMultikeyPaths` is sensitive to the two inputs having the same multikey
// "shape". Initialize `_multikeyPaths` with the right shape from the first result.
diff --git a/src/mongo/db/index/skipped_record_tracker.cpp b/src/mongo/db/index/skipped_record_tracker.cpp
index 2b82f1603b7..f5cf3e44e79 100644
--- a/src/mongo/db/index/skipped_record_tracker.cpp
+++ b/src/mongo/db/index/skipped_record_tracker.cpp
@@ -61,7 +61,7 @@ SkippedRecordTracker::SkippedRecordTracker(OperationContext* opCtx,
// lazily initialize table when we record the first document.
_skippedRecordsTable =
opCtx->getServiceContext()->getStorageEngine()->makeTemporaryRecordStoreFromExistingIdent(
- opCtx, ident.get());
+ opCtx, ident.value());
}
void SkippedRecordTracker::keepTemporaryTable() {
@@ -202,7 +202,7 @@ Status SkippedRecordTracker::retrySkippedRecords(OperationContext* opCtx,
_multikeyPaths = *multikeyPaths;
}
- MultikeyPathTracker::mergeMultikeyPaths(&_multikeyPaths.get(), *multikeyPaths);
+ MultikeyPathTracker::mergeMultikeyPaths(&_multikeyPaths.value(), *multikeyPaths);
}
}
diff --git a/src/mongo/db/index_build_entry_helpers.cpp b/src/mongo/db/index_build_entry_helpers.cpp
index 901c37a98f5..e5f83f1918b 100644
--- a/src/mongo/db/index_build_entry_helpers.cpp
+++ b/src/mongo/db/index_build_entry_helpers.cpp
@@ -101,7 +101,7 @@ std::pair<const BSONObj, const BSONObj> buildIndexBuildEntryFilterAndUpdate(
// '$addToSet' to prevent any duplicate entries written to "commitReadyMembers" field.
if (auto commitReadyMembers = indexBuildEntry.getCommitReadyMembers()) {
BSONArrayBuilder arrayBuilder;
- for (const auto& item : commitReadyMembers.get()) {
+ for (const auto& item : commitReadyMembers.value()) {
arrayBuilder.append(item.toString());
}
const auto commitReadyMemberList = BSON(IndexBuildEntry::kCommitReadyMembersFieldName
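
commitReadyMembers is an optional sequence, so the loop above ranges over value(). Minimal iterate-if-present form (vector<string> stands in for the member list):

#include <boost/optional.hpp>
#include <string>
#include <vector>

std::string joinMembers(const boost::optional<std::vector<std::string>>& members) {
    std::string out;
    if (members) {
        for (const auto& item : members.value()) {  // ranges over the contained vector
            out += item + ",";
        }
    }
    return out;
}
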
diff --git a/src/mongo/db/index_build_entry_helpers_test.cpp b/src/mongo/db/index_build_entry_helpers_test.cpp
index 9450bc58037..9c14ee9ebaa 100644
--- a/src/mongo/db/index_build_entry_helpers_test.cpp
+++ b/src/mongo/db/index_build_entry_helpers_test.cpp
@@ -81,8 +81,8 @@ void checkIfEqual(IndexBuildEntry lhs, IndexBuildEntry rhs) {
ASSERT_TRUE(std::equal(lhsIndexNames.begin(), lhsIndexNames.end(), rhsIndexNames.begin()));
if (lhs.getCommitReadyMembers() && rhs.getCommitReadyMembers()) {
- auto lhsMembers = lhs.getCommitReadyMembers().get();
- auto rhsMembers = rhs.getCommitReadyMembers().get();
+ auto lhsMembers = lhs.getCommitReadyMembers().value();
+ auto rhsMembers = rhs.getCommitReadyMembers().value();
ASSERT_TRUE(std::equal(lhsMembers.begin(), lhsMembers.end(), rhsMembers.begin()));
} else {
ASSERT_FALSE(lhs.getCommitReadyMembers());
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 0c3cea6fc00..3a631c57988 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -589,8 +589,7 @@ Status IndexBuildsCoordinator::_startIndexBuildForRecovery(OperationContext* opC
const auto durableBuildUUID = collection->getIndexBuildUUID(indexNames[i]);
// A build UUID is present if and only if we are rebuilding a two-phase build.
- invariant((protocol == IndexBuildProtocol::kTwoPhase) ==
- durableBuildUUID.is_initialized());
+ invariant((protocol == IndexBuildProtocol::kTwoPhase) == durableBuildUUID.has_value());
// When a buildUUID is present, it must match the build UUID parameter to this
// function.
invariant(!durableBuildUUID || *durableBuildUUID == buildUUID,
@@ -1574,7 +1573,7 @@ void IndexBuildsCoordinator::restartIndexBuildsForRecovery(
"Index build: resuming",
"buildUUID"_attr = buildUUID,
"collectionUUID"_attr = collUUID,
- logAttrs(nss.get()),
+ logAttrs(nss.value()),
"details"_attr = resumeInfo.toBSON());
try {
@@ -1634,7 +1633,7 @@ void IndexBuildsCoordinator::restartIndexBuildsForRecovery(
"Index build: restarting",
"buildUUID"_attr = buildUUID,
"collectionUUID"_attr = build.collUUID,
- logAttrs(nss.get()));
+ logAttrs(nss.value()));
IndexBuildsCoordinator::IndexBuildOptions indexBuildOptions;
// Indicate that the initialization should not generate oplog entries or timestamps for the
// first catalog write, and that the original durable catalog entries should be dropped and
@@ -2045,7 +2044,7 @@ IndexBuildsCoordinator::PostSetupAction IndexBuildsCoordinator::_setUpIndexBuild
// Persist the commit quorum value in the config.system.indexBuilds collection.
IndexBuildEntry indexBuildEntry(replState->buildUUID,
replState->collectionUUID,
- indexBuildOptions.commitQuorum.get(),
+ indexBuildOptions.commitQuorum.value(),
replState->indexNames);
uassertStatusOK(indexbuildentryhelpers::addIndexBuildEntry(opCtx, indexBuildEntry));
@@ -2328,7 +2327,7 @@ void IndexBuildsCoordinator::_runIndexBuildInner(
PrepareConflictBehavior::kIgnoreConflictsAllowWrites);
if (resumeInfo) {
- _resumeIndexBuildFromPhase(opCtx, replState, indexBuildOptions, resumeInfo.get());
+ _resumeIndexBuildFromPhase(opCtx, replState, indexBuildOptions, resumeInfo.value());
} else {
_buildIndex(opCtx, replState, indexBuildOptions);
}
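
The invariant in _startIndexBuildForRecovery states an iff as boolean equality: a durable build UUID is present exactly when the protocol is two-phase. Stand-alone form with assert() and stand-in types:

#include <boost/optional.hpp>
#include <cassert>

enum class IndexBuildProtocol { kSinglePhase, kTwoPhase };

void checkRecoveryInvariant(IndexBuildProtocol protocol,
                            const boost::optional<int>& durableBuildUUID) {
    // Present <=> two-phase, written as equality of two bools.
    assert((protocol == IndexBuildProtocol::kTwoPhase) == durableBuildUUID.has_value());
}
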
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 0aa1661bed2..8f54e8fd9be 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -248,7 +248,7 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx,
auto status = Status::OK();
if (resumeInfo) {
status = _setUpResumeIndexBuild(
- opCtx, dbName, collectionUUID, specs, buildUUID, resumeInfo.get());
+ opCtx, dbName, collectionUUID, specs, buildUUID, resumeInfo.value());
} else {
status = _setUpIndexBuildForTwoPhaseRecovery(
opCtx, dbName, collectionUUID, specs, buildUUID);
@@ -267,7 +267,7 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx,
invariant(statusWithOptionalResult.getValue()->isReady());
// The requested index (specs) are already built or are being built. Return success
// early (this is v4.0 behavior compatible).
- return statusWithOptionalResult.getValue().get();
+ return statusWithOptionalResult.getValue().value();
}
if (opCtx->getClient()->isFromUserConnection()) {
@@ -457,7 +457,7 @@ Status IndexBuildsCoordinatorMongod::voteCommitIndexBuild(OperationContext* opCt
// commit quorum is disabled, do not record their entry into the commit ready nodes.
// If we fail to retrieve the persisted commit quorum, the index build might be in the
// middle of tearing down.
- Lock::SharedLock commitQuorumLk(opCtx->lockState(), replState->commitQuorumLock.get());
+ Lock::SharedLock commitQuorumLk(opCtx->lockState(), replState->commitQuorumLock.value());
auto commitQuorum =
uassertStatusOK(indexbuildentryhelpers::getCommitQuorum(opCtx, buildUUID));
if (commitQuorum.numNodes == CommitQuorumOptions::kDisabled) {
@@ -499,7 +499,7 @@ void IndexBuildsCoordinatorMongod::_signalIfCommitQuorumIsSatisfied(
// Acquire the commitQuorumLk in shared mode to make sure commit quorum value did not change
// after reading it from config.system.indexBuilds collection.
- Lock::SharedLock commitQuorumLk(opCtx->lockState(), replState->commitQuorumLock.get());
+ Lock::SharedLock commitQuorumLk(opCtx->lockState(), replState->commitQuorumLock.value());
// Read the index builds entry from config.system.indexBuilds collection.
auto swIndexBuildEntry =
@@ -513,7 +513,7 @@ void IndexBuildsCoordinatorMongod::_signalIfCommitQuorumIsSatisfied(
return;
bool commitQuorumSatisfied = repl::ReplicationCoordinator::get(opCtx)->isCommitQuorumSatisfied(
- indexBuildEntry.getCommitQuorum(), voteMemberList.get());
+ indexBuildEntry.getCommitQuorum(), voteMemberList.value());
if (!commitQuorumSatisfied)
return;
@@ -546,7 +546,7 @@ bool IndexBuildsCoordinatorMongod::_signalIfCommitQuorumNotEnabled(
// Acquire the commitQuorumLk in shared mode to make sure commit quorum value did not change
// after reading it from config.system.indexBuilds collection.
- Lock::SharedLock commitQuorumLk(opCtx->lockState(), replState->commitQuorumLock.get());
+ Lock::SharedLock commitQuorumLk(opCtx->lockState(), replState->commitQuorumLock.value());
// Read the commit quorum value from config.system.indexBuilds collection.
auto commitQuorum = uassertStatusOKWithContext(
@@ -874,7 +874,7 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx,
// About to update the commit quorum value on-disk. So, take the lock in exclusive mode to
// prevent readers from reading the commit quorum value and making decision on commit quorum
// satisfied with the stale read commit quorum value.
- Lock::ExclusiveLock commitQuorumLk(opCtx->lockState(), replState->commitQuorumLock.get());
+ Lock::ExclusiveLock commitQuorumLk(opCtx->lockState(), replState->commitQuorumLock.value());
{
if (auto action = replState->getNextActionNoWait()) {
return Status(ErrorCodes::CommandFailed,
diff --git a/src/mongo/db/initialize_operation_session_info.cpp b/src/mongo/db/initialize_operation_session_info.cpp
index a3b75ec443d..0ef4d2f2cdd 100644
--- a/src/mongo/db/initialize_operation_session_info.cpp
+++ b/src/mongo/db/initialize_operation_session_info.cpp
@@ -93,7 +93,7 @@ OperationSessionInfoFromClient initializeOperationSessionInfo(OperationContext*
// If osi lsid includes the uid, makeLogicalSessionId will also verify that the hash
// matches with the current user logged in.
- auto lsid = makeLogicalSessionId(osi.getSessionId().get(), opCtx);
+ auto lsid = makeLogicalSessionId(osi.getSessionId().value(), opCtx);
if (!attachToOpCtx) {
return {};
@@ -109,7 +109,7 @@ OperationSessionInfoFromClient initializeOperationSessionInfo(OperationContext*
}
opCtx->setLogicalSessionId(std::move(lsid));
- uassertStatusOK(lsc->vivify(opCtx, opCtx->getLogicalSessionId().get()));
+ uassertStatusOK(lsc->vivify(opCtx, opCtx->getLogicalSessionId().value()));
} else {
uassert(ErrorCodes::InvalidOptions,
"Transaction number requires a session ID to also be specified",
diff --git a/src/mongo/db/kill_sessions.cpp b/src/mongo/db/kill_sessions.cpp
index a34c1b6b4cb..f2c3bc62c51 100644
--- a/src/mongo/db/kill_sessions.cpp
+++ b/src/mongo/db/kill_sessions.cpp
@@ -82,14 +82,14 @@ getKillAllSessionsByPatternImpersonateData(const KillAllSessionsByPattern& patte
uassert(ErrorCodes::BadValue,
"Too many users in impersonation data",
pattern.getUsers()->size() <= 1);
- const auto& impUser = pattern.getUsers().get()[0];
+ const auto& impUser = pattern.getUsers().value()[0];
user = UserName(impUser.getUser(), impUser.getDb());
}
if (pattern.getRoles()) {
roles.reserve(pattern.getRoles()->size());
- for (auto&& role : pattern.getRoles().get()) {
+ for (auto&& role : pattern.getRoles().value()) {
roles.emplace_back(role.getRole(), role.getDb());
}
}
@@ -124,7 +124,7 @@ KillAllSessionsByPatternSet makeSessionFilterForAuthenticatedUsers(OperationCont
if (auto user = as->getAuthenticatedUser()) {
KillAllSessionsByPattern pattern;
- pattern.setUid(user.get()->getDigest());
+ pattern.setUid(user.value()->getDigest());
KillAllSessionsByPatternItem item{std::move(pattern), APIParameters::get(opCtx)};
patterns.emplace(std::move(item));
}
diff --git a/src/mongo/db/logical_session_id_helpers.cpp b/src/mongo/db/logical_session_id_helpers.cpp
index 65641e2b925..20d4659d77f 100644
--- a/src/mongo/db/logical_session_id_helpers.cpp
+++ b/src/mongo/db/logical_session_id_helpers.cpp
@@ -53,7 +53,7 @@ boost::optional<UserHandle> getAuthenticatedUser(Client* client) {
auto optUser = AuthorizationSession::get(client)->getAuthenticatedUser();
uassert(ErrorCodes::Unauthorized, "Logical sessions require authentication", optUser);
- return optUser.get();
+ return optUser.value();
}
} // namespace
@@ -67,9 +67,9 @@ SHA256Block getLogicalSessionUserDigestForLoggedInUser(const OperationContext* o
if (auto user = getAuthenticatedUser(opCtx->getClient())) {
uassert(ErrorCodes::BadValue,
"Username too long to use with logical sessions",
- user.get()->getName().getDisplayNameLength() <
+ user.value()->getName().getDisplayNameLength() <
kMaximumUserNameLengthForLogicalSessions);
- return user.get()->getDigest();
+ return user.value()->getDigest();
} else {
return kNoAuthDigest;
}
@@ -192,8 +192,8 @@ LogicalSessionRecord makeLogicalSessionRecord(OperationContext* opCtx, Date_t la
LogicalSessionRecord lsr{};
if (auto user = getAuthenticatedUser(opCtx->getClient())) {
- id.setUid(user.get()->getDigest());
- lsr.setUser(StringData(user.get()->getName().getDisplayName()));
+ id.setUid(user.value()->getDigest());
+ lsr.setUser(StringData(user.value()->getName().getDisplayName()));
} else {
id.setUid(kNoAuthDigest);
}
@@ -225,8 +225,8 @@ LogicalSessionRecord makeLogicalSessionRecord(OperationContext* opCtx,
auto lsr = makeLogicalSessionRecord(lsid, lastUse);
if (auto user = getAuthenticatedUser(opCtx->getClient())) {
- if (user.get()->getDigest() == lsid.getUid()) {
- lsr.setUser(StringData(user.get()->getName().getDisplayName()));
+ if (user.value()->getDigest() == lsid.getUid()) {
+ lsr.setUser(StringData(user.value()->getName().getDisplayName()));
}
}
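
logical_session_id_helpers.cpp repeatedly declares the optional inside the if-condition and then unwraps it. A sketch of that idiom (UserHandle is modeled as a shared_ptr here, purely for illustration):

#include <boost/optional.hpp>
#include <memory>
#include <string>

struct User {
    std::string digest;
};
using UserHandle = std::shared_ptr<User>;

std::string digestOrEmpty(const boost::optional<UserHandle>& maybeUser) {
    if (auto user = maybeUser) {      // optional declared in the condition
        return user.value()->digest;  // unwrap the optional, then deref the handle
    }
    return {};
}
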
diff --git a/src/mongo/db/logical_time_validator.cpp b/src/mongo/db/logical_time_validator.cpp
index d09867f6151..24e04282e8b 100644
--- a/src/mongo/db/logical_time_validator.cpp
+++ b/src/mongo/db/logical_time_validator.cpp
@@ -186,7 +186,7 @@ Status LogicalTimeValidator::validate(OperationContext* opCtx, const SignedLogic
auto firstError = Status::OK();
for (const auto& key : keys) {
auto proofStatus =
- _timeProofService.checkProof(newTime.getTime(), newProof.get(), key.getKey());
+ _timeProofService.checkProof(newTime.getTime(), newProof.value(), key.getKey());
if (proofStatus.isOK()) {
return Status::OK();
} else if (firstError.isOK()) {
diff --git a/src/mongo/db/matcher/expression.cpp b/src/mongo/db/matcher/expression.cpp
index bb13e28a9e0..2674a095c39 100644
--- a/src/mongo/db/matcher/expression.cpp
+++ b/src/mongo/db/matcher/expression.cpp
@@ -191,11 +191,11 @@ MatchExpression::ErrorAnnotation::SchemaAnnotations::SchemaAnnotations(
void MatchExpression::ErrorAnnotation::SchemaAnnotations::appendElements(
BSONObjBuilder& builder) const {
if (title) {
- builder << JSONSchemaParser::kSchemaTitleKeyword << title.get();
+ builder << JSONSchemaParser::kSchemaTitleKeyword << title.value();
}
if (description) {
- builder << JSONSchemaParser::kSchemaDescriptionKeyword << description.get();
+ builder << JSONSchemaParser::kSchemaDescriptionKeyword << description.value();
}
}
} // namespace mongo
diff --git a/src/mongo/db/matcher/schema/encrypt_schema_types_test.cpp b/src/mongo/db/matcher/schema/encrypt_schema_types_test.cpp
index 211c894d4b6..6e7b84ffe0b 100644
--- a/src/mongo/db/matcher/schema/encrypt_schema_types_test.cpp
+++ b/src/mongo/db/matcher/schema/encrypt_schema_types_test.cpp
@@ -102,8 +102,8 @@ TEST(EncryptSchemaTest, ParseFullEncryptObjectFromBSON) {
MatcherTypeSet resultMatcherSet;
resultMatcherSet.bsonTypes.insert(BSONType::NumberInt);
ASSERT_TRUE(encryptInfo.getBsonType() == BSONTypeSet(resultMatcherSet));
- ASSERT_TRUE(encryptInfo.getAlgorithm().get() == FleAlgorithmEnum::kDeterministic);
- EncryptSchemaKeyId keyid = encryptInfo.getKeyId().get();
+ ASSERT_TRUE(encryptInfo.getAlgorithm().value() == FleAlgorithmEnum::kDeterministic);
+ EncryptSchemaKeyId keyid = encryptInfo.getKeyId().value();
ASSERT_TRUE(keyid.type() == EncryptSchemaKeyId::Type::kJSONPointer);
ASSERT_EQ(keyid.jsonPointer().toString(), "/pointer");
}
diff --git a/src/mongo/db/op_observer/fcv_op_observer.cpp b/src/mongo/db/op_observer/fcv_op_observer.cpp
index 110415de3fd..5563d522960 100644
--- a/src/mongo/db/op_observer/fcv_op_observer.cpp
+++ b/src/mongo/db/op_observer/fcv_op_observer.cpp
@@ -110,7 +110,7 @@ void FcvOpObserver::_setVersion(OperationContext* opCtx,
// (Generic FCV reference): This FCV check should exist across LTS binary versions.
const auto shouldIncrementTopologyVersion = newVersion == multiversion::GenericFCV::kLastLTS ||
(prevVersion &&
- prevVersion.get() == multiversion::GenericFCV::kDowngradingFromLatestToLastContinuous) ||
+ prevVersion.value() == multiversion::GenericFCV::kDowngradingFromLatestToLastContinuous) ||
newVersion == multiversion::GenericFCV::kUpgradingFromLastLTSToLatest ||
newVersion == multiversion::GenericFCV::kUpgradingFromLastContinuousToLatest ||
newVersion == multiversion::GenericFCV::kUpgradingFromLastLTSToLastContinuous;
@@ -179,7 +179,7 @@ void FcvOpObserver::onDelete(OperationContext* opCtx,
auto optDocKey = repl::documentKeyDecoration(opCtx);
invariant(optDocKey, nss.ns());
if (nss.isServerConfigurationCollection()) {
- auto id = optDocKey.get().getId().firstElement();
+ auto id = optDocKey.value().getId().firstElement();
if (id.type() == BSONType::String && id.String() == multiversion::kParameterName) {
uasserted(40670, "removing FeatureCompatibilityVersion document is not allowed");
}
diff --git a/src/mongo/db/op_observer/op_observer_impl.cpp b/src/mongo/db/op_observer/op_observer_impl.cpp
index b4a48472b8c..44e9e812afd 100644
--- a/src/mongo/db/op_observer/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer/op_observer_impl.cpp
@@ -295,7 +295,7 @@ OpTimeBundle replLogDelete(OperationContext* opCtx,
}
oplogEntry->setOpType(repl::OpTypeEnum::kDelete);
- oplogEntry->setObject(repl::documentKeyDecoration(opCtx).get().getShardKeyAndId());
+ oplogEntry->setObject(repl::documentKeyDecoration(opCtx).value().getShardKeyAndId());
oplogEntry->setFromMigrateIfTrue(fromMigrate);
// oplogLink could have been changed to include preImageOpTime by the previous no-op write.
oplogWriter->appendOplogEntryChainInfo(opCtx, oplogEntry, &oplogLink, {stmtId});
@@ -312,7 +312,7 @@ void writeToImageCollection(OperationContext* opCtx,
const BSONObj& dataImage) {
repl::ImageEntry imageEntry;
imageEntry.set_id(sessionId);
- imageEntry.setTxnNumber(opCtx->getTxnNumber().get());
+ imageEntry.setTxnNumber(opCtx->getTxnNumber().value());
imageEntry.setTs(timestamp);
imageEntry.setImageKind(imageKind);
imageEntry.setImage(dataImage);
@@ -820,8 +820,8 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
if (oplogEntry.getNeedsRetryImage()) {
// If the oplog entry has `needsRetryImage`, copy the image into image collection.
const BSONObj& dataImage = [&]() {
- if (oplogEntry.getNeedsRetryImage().get() == repl::RetryImageEnum::kPreImage) {
- return args.updateArgs->preImageDoc.get();
+ if (oplogEntry.getNeedsRetryImage().value() == repl::RetryImageEnum::kPreImage) {
+ return args.updateArgs->preImageDoc.value();
} else {
return args.updateArgs->updatedDoc;
}
@@ -829,7 +829,7 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
writeToImageCollection(opCtx,
*opCtx->getLogicalSessionId(),
opTime.writeOpTime.getTimestamp(),
- oplogEntry.getNeedsRetryImage().get(),
+ oplogEntry.getNeedsRetryImage().value(),
dataImage);
}
@@ -850,10 +850,10 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
args.updateArgs->source != OperationSource::kFromMigrate &&
!args.nss.isTemporaryReshardingCollection()) {
const auto& preImageDoc = args.updateArgs->preImageDoc;
- tassert(5868600, "PreImage must be set", preImageDoc && !preImageDoc.get().isEmpty());
+ tassert(5868600, "PreImage must be set", preImageDoc && !preImageDoc.value().isEmpty());
ChangeStreamPreImageId id(args.uuid, opTime.writeOpTime.getTimestamp(), 0);
- ChangeStreamPreImage preImage(id, opTime.wallClockTime, preImageDoc.get());
+ ChangeStreamPreImage preImage(id, opTime.wallClockTime, preImageDoc.value());
// TODO SERVER-66643 Pass tenant id to the pre-images collection if running in the
// serverless.
@@ -926,7 +926,7 @@ void OpObserverImpl::onDelete(OperationContext* opCtx,
const OplogDeleteEntryArgs& args) {
auto optDocKey = repl::documentKeyDecoration(opCtx);
invariant(optDocKey, nss.ns());
- auto& documentKey = optDocKey.get();
+ auto& documentKey = optDocKey.value();
auto txnParticipant = TransactionParticipant::get(opCtx);
const bool inMultiDocumentTransaction =
@@ -1179,15 +1179,15 @@ void OpObserverImpl::onCollMod(OperationContext* opCtx,
BSONObjBuilder oldIndexOptions;
if (indexInfo->oldExpireAfterSeconds) {
auto oldExpireAfterSeconds =
- durationCount<Seconds>(indexInfo->oldExpireAfterSeconds.get());
+ durationCount<Seconds>(indexInfo->oldExpireAfterSeconds.value());
oldIndexOptions.append("expireAfterSeconds", oldExpireAfterSeconds);
}
if (indexInfo->oldHidden) {
- auto oldHidden = indexInfo->oldHidden.get();
+ auto oldHidden = indexInfo->oldHidden.value();
oldIndexOptions.append("hidden", oldHidden);
}
if (indexInfo->oldPrepareUnique) {
- auto oldPrepareUnique = indexInfo->oldPrepareUnique.get();
+ auto oldPrepareUnique = indexInfo->oldPrepareUnique.value();
oldIndexOptions.append("prepareUnique", oldPrepareUnique);
}
o2Builder.append("indexOptions_old", oldIndexOptions.obj());
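
The onUpdate hunk above selects the retry image through an immediately invoked lambda. Its shape, with std::string standing in for BSONObj and the caller guaranteeing the pre-image is engaged:

#include <boost/optional.hpp>
#include <string>

enum class RetryImage { kPreImage, kPostImage };

std::string selectImage(RetryImage kind,
                        const boost::optional<std::string>& preImageDoc,
                        const std::string& updatedDoc) {
    // Immediately invoked lambda: pick the pre-image when requested,
    // else the updated document.
    const std::string image = [&]() {
        if (kind == RetryImage::kPreImage) {
            return preImageDoc.value();
        }
        return updatedDoc;
    }();
    return image;
}
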
diff --git a/src/mongo/db/op_observer/op_observer_impl_test.cpp b/src/mongo/db/op_observer/op_observer_impl_test.cpp
index e9d4b02b946..6d356530cc8 100644
--- a/src/mongo/db/op_observer/op_observer_impl_test.cpp
+++ b/src/mongo/db/op_observer/op_observer_impl_test.cpp
@@ -511,7 +511,7 @@ TEST_F(OpObserverTest, CollModWithCollectionOptionsAndTTLInfo) {
<< "warn"
<< "index"
<< BSON("name" << indexInfo.indexName << "expireAfterSeconds"
- << durationCount<Seconds>(indexInfo.expireAfterSeconds.get())));
+ << durationCount<Seconds>(indexInfo.expireAfterSeconds.value())));
ASSERT_BSONOBJ_EQ(oExpected, o);
// Ensure that the old collection metadata was saved.
@@ -523,7 +523,7 @@ TEST_F(OpObserverTest, CollModWithCollectionOptionsAndTTLInfo) {
<< ValidationAction_serializer(*oldCollOpts.validationAction))
<< "indexOptions_old"
<< BSON("expireAfterSeconds" << durationCount<Seconds>(
- indexInfo.oldExpireAfterSeconds.get())));
+ indexInfo.oldExpireAfterSeconds.value())));
ASSERT_BSONOBJ_EQ(o2Expected, o2);
}
@@ -1752,28 +1752,28 @@ TEST_F(OpObserverTransactionTest, TransactionalInsertTestIncludesTenantId) {
auto oExpected =
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "tid" << nss1.tenantId().get() << "ns"
+ << "tid" << nss1.tenantId().value() << "ns"
<< nss1.toString() << "ui" << uuid1 << "o"
<< BSON("_id" << 0 << "data"
<< "x")
<< "o2" << BSON("_id" << 0))
<< BSON("op"
<< "i"
- << "tid" << nss1.tenantId().get() << "ns"
+ << "tid" << nss1.tenantId().value() << "ns"
<< nss1.toString() << "ui" << uuid1 << "o"
<< BSON("_id" << 1 << "data"
<< "y")
<< "o2" << BSON("_id" << 1))
<< BSON("op"
<< "i"
- << "tid" << nss2.tenantId().get() << "ns"
+ << "tid" << nss2.tenantId().value() << "ns"
<< nss2.toString() << "ui" << uuid2 << "o"
<< BSON("_id" << 2 << "data"
<< "z")
<< "o2" << BSON("_id" << 2))
<< BSON("op"
<< "i"
- << "tid" << nss2.tenantId().get() << "ns"
+ << "tid" << nss2.tenantId().value() << "ns"
<< nss2.toString() << "ui" << uuid2 << "o"
<< BSON("_id" << 3 << "data"
<< "w")
@@ -1877,14 +1877,14 @@ TEST_F(OpObserverTransactionTest, TransactionalUpdateTestIncludesTenantId) {
auto oExpected =
BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "u"
- << "tid" << nss1.tenantId().get() << "ns"
+ << "tid" << nss1.tenantId().value() << "ns"
<< nss1.toString() << "ui" << uuid1 << "o"
<< BSON("$set" << BSON("data"
<< "x"))
<< "o2" << BSON("_id" << 0))
<< BSON("op"
<< "u"
- << "tid" << nss2.tenantId().get() << "ns"
+ << "tid" << nss2.tenantId().value() << "ns"
<< nss2.toString() << "ui" << uuid2 << "o"
<< BSON("$set" << BSON("data"
<< "y"))
@@ -1969,11 +1969,11 @@ TEST_F(OpObserverTransactionTest, TransactionalDeleteTestIncludesTenantId) {
auto oExpected = BSON("applyOps" << BSON_ARRAY(
BSON("op"
<< "d"
- << "tid" << nss1.tenantId().get() << "ns" << nss1.toString()
+ << "tid" << nss1.tenantId().value() << "ns" << nss1.toString()
<< "ui" << uuid1 << "o" << BSON("_id" << 0))
<< BSON("op"
<< "d"
- << "tid" << nss2.tenantId().get() << "ns" << nss2.toString()
+ << "tid" << nss2.tenantId().value() << "ns" << nss2.toString()
<< "ui" << uuid2 << "o" << BSON("_id" << 1))));
ASSERT_BSONOBJ_EQ(oExpected, o);
ASSERT_FALSE(oplogEntry.hasField("prepare"));
@@ -2391,7 +2391,7 @@ protected:
const Timestamp preImageOpTime = updateOplogEntry.getPreImageOpTime()->getTimestamp();
ASSERT_FALSE(preImageOpTime.isNull());
OplogEntry preImage = *findByTimestamp(oplogs, preImageOpTime);
- ASSERT_BSONOBJ_EQ(update.updateArgs->preImageDoc.get(), preImage.getObject());
+ ASSERT_BSONOBJ_EQ(update.updateArgs->preImageDoc.value(), preImage.getObject());
if (updateOplogEntry.getSessionId()) {
ASSERT_EQ(*updateOplogEntry.getSessionId(), *preImage.getSessionId());
}
@@ -2468,7 +2468,7 @@ protected:
repl::ImageEntry imageEntry =
getImageEntryFromSideCollection(opCtx, *updateOplogEntry.getSessionId());
const BSONObj& expectedImage = testCase.imageType == StoreDocOption::PreImage
- ? update.updateArgs->preImageDoc.get()
+ ? update.updateArgs->preImageDoc.value()
: update.updateArgs->updatedDoc;
ASSERT_BSONOBJ_EQ(expectedImage, imageEntry.getImage());
ASSERT(imageEntry.getImageKind() == updateOplogEntry.getNeedsRetryImage());
@@ -2506,7 +2506,7 @@ protected:
ChangeStreamPreImageId preImageId(
_uuid, updateOplogEntry.getOpTime().getTimestamp(), 0);
ChangeStreamPreImage preImage = getChangeStreamPreImage(opCtx, preImageId, &container);
- const BSONObj& expectedImage = update.updateArgs->preImageDoc.get();
+ const BSONObj& expectedImage = update.updateArgs->preImageDoc.value();
ASSERT_BSONOBJ_EQ(expectedImage, preImage.getPreImage());
ASSERT_EQ(updateOplogEntry.getWallClockTime(), preImage.getOperationTime());
}
@@ -2747,8 +2747,8 @@ TEST_F(OpObserverTest, TestFundamentalOnInsertsOutputs) {
}
// Only for retryable writes:
- ASSERT_EQ(opCtx->getLogicalSessionId().get(), entry.getSessionId().get());
- ASSERT_EQ(opCtx->getTxnNumber().get(), entry.getTxnNumber().get());
+ ASSERT_EQ(opCtx->getLogicalSessionId().value(), entry.getSessionId().value());
+ ASSERT_EQ(opCtx->getTxnNumber().value(), entry.getTxnNumber().value());
ASSERT_EQ(1, entry.getStatementIds().size());
ASSERT_EQ(StmtId(opIdx), entry.getStatementIds()[0]);
// When we insert multiple documents in retryable writes, each insert will "link" back
@@ -2760,7 +2760,7 @@ TEST_F(OpObserverTest, TestFundamentalOnInsertsOutputs) {
oplogs[opIdx - 1][repl::OplogEntryBase::kTimestampFieldName].timestamp();
}
ASSERT_EQ(expectedPrevWriteOpTime,
- entry.getPrevWriteOpTimeInTransaction().get().getTimestamp());
+ entry.getPrevWriteOpTimeInTransaction().value().getTimestamp());
}
if (testCase.isRetryableWrite) {
@@ -3275,7 +3275,7 @@ TEST_F(AtomicApplyOpsOutputsTest, InsertInNestedApplyOpsReturnsSuccess) {
<< "ns" << _nss.ns() << "o"
<< BSON("_id"
<< "a")
- << "ui" << options.uuid.get());
+ << "ui" << options.uuid.value());
auto innerApplyOpsObj = BSON("op"
<< "c"
<< "ns" << _nss.getCommandNS().ns() << "o"
diff --git a/src/mongo/db/op_observer/op_observer_util.cpp b/src/mongo/db/op_observer/op_observer_util.cpp
index d833dcee1c0..98b015dc0b3 100644
--- a/src/mongo/db/op_observer/op_observer_util.cpp
+++ b/src/mongo/db/op_observer/op_observer_util.cpp
@@ -59,19 +59,20 @@ BSONObj makeCollModCmdObj(const BSONObj& collModCmd,
BSONObjBuilder indexObjBuilder;
indexObjBuilder.append("name", indexInfo->indexName);
if (indexInfo->expireAfterSeconds)
- indexObjBuilder.append("expireAfterSeconds",
- durationCount<Seconds>(indexInfo->expireAfterSeconds.get()));
+ indexObjBuilder.append(
+ "expireAfterSeconds",
+ durationCount<Seconds>(indexInfo->expireAfterSeconds.value()));
if (indexInfo->hidden)
- indexObjBuilder.append("hidden", indexInfo->hidden.get());
+ indexObjBuilder.append("hidden", indexInfo->hidden.value());
if (indexInfo->unique)
- indexObjBuilder.append("unique", indexInfo->unique.get());
+ indexObjBuilder.append("unique", indexInfo->unique.value());
if (indexInfo->prepareUnique)
- indexObjBuilder.append("prepareUnique", indexInfo->prepareUnique.get());
+ indexObjBuilder.append("prepareUnique", indexInfo->prepareUnique.value());
if (indexInfo->forceNonUnique)
- indexObjBuilder.append("forceNonUnique", indexInfo->forceNonUnique.get());
+ indexObjBuilder.append("forceNonUnique", indexInfo->forceNonUnique.value());
cmdObjBuilder.append(indexFieldName, indexObjBuilder.obj());
} else {
@@ -88,7 +89,7 @@ BSONObj DocumentKey::getId() const {
BSONObj DocumentKey::getShardKeyAndId() const {
if (_shardKey) {
- BSONObjBuilder builder(_shardKey.get());
+ BSONObjBuilder builder(_shardKey.value());
builder.appendElementsUnique(_id);
return builder.obj();
}
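
The guarded appends above test the optional and only then call value(). A minimal standalone sketch of the semantics this rewrite leans on, assuming only Boost is available (no MongoDB types): get() is an unchecked dereference of boost::optional, while value() throws boost::bad_optional_access when the optional is empty, so a broken guard fails loudly instead of hitting undefined behavior.

    #include <boost/optional.hpp>
    #include <boost/optional/bad_optional_access.hpp>
    #include <iostream>

    int main() {
        boost::optional<bool> hidden;         // empty, like an unset collMod option
        boost::optional<bool> unique = true;  // engaged

        if (unique)                           // guard first, then checked access
            std::cout << "unique=" << unique.value() << '\n';

        try {
            std::cout << hidden.value();      // empty: throws
        } catch (const boost::bad_optional_access&) {
            std::cout << "value() threw on the empty optional\n";
        }
        // hidden.get() here would be an unchecked dereference instead.
        return 0;
    }
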
diff --git a/src/mongo/db/ops/write_ops_retryability.cpp b/src/mongo/db/ops/write_ops_retryability.cpp
index 4f0189e6572..9c0239a7555 100644
--- a/src/mongo/db/ops/write_ops_retryability.cpp
+++ b/src/mongo/db/ops/write_ops_retryability.cpp
@@ -58,7 +58,7 @@ void validateFindAndModifyRetryability(const write_ops::FindAndModifyCommandRequ
const repl::OplogEntry& oplogWithCorrectLinks) {
auto opType = oplogEntry.getOpType();
auto ts = oplogEntry.getTimestamp();
- const bool needsRetryImage = oplogEntry.getNeedsRetryImage().is_initialized();
+ const bool needsRetryImage = oplogEntry.getNeedsRetryImage().has_value();
if (opType == repl::OpTypeEnum::kDelete) {
uassert(
@@ -119,8 +119,8 @@ BSONObj extractPreOrPostImage(OperationContext* opCtx, const repl::OplogEntry& o
DBDirectClient client(opCtx);
if (oplog.getNeedsRetryImage()) {
// Extract image from side collection.
- LogicalSessionId sessionId = oplog.getSessionId().get();
- TxnNumber txnNumber = oplog.getTxnNumber().get();
+ LogicalSessionId sessionId = oplog.getSessionId().value();
+ TxnNumber txnNumber = oplog.getTxnNumber().value();
Timestamp ts = oplog.getTimestamp();
auto curOp = CurOp::get(opCtx);
const std::string existingNS = curOp->getNS();
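
is_initialized() and has_value() read the same engaged flag; has_value() is simply the spelling shared with std::optional. A dependency-free sketch (std::optional here; boost::optional exposes the identical member, and the field name is an illustrative stand-in):

    #include <cassert>
    #include <optional>

    int main() {
        std::optional<int> needsRetryImage;    // not set on the oplog entry
        assert(!needsRetryImage.has_value());  // was: is_initialized()

        needsRetryImage = 1;                   // set
        assert(needsRetryImage.has_value() && *needsRetryImage == 1);
        return 0;
    }
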
diff --git a/src/mongo/db/pipeline/abt/document_source_visitor.cpp b/src/mongo/db/pipeline/abt/document_source_visitor.cpp
index 4a799f75a74..af9b741b345 100644
--- a/src/mongo/db/pipeline/abt/document_source_visitor.cpp
+++ b/src/mongo/db/pipeline/abt/document_source_visitor.cpp
@@ -872,7 +872,7 @@ public:
std::move(entry._node));
if (source->indexPath().has_value()) {
- const FieldPath indexFieldPath = source->indexPath().get();
+ const FieldPath indexFieldPath = source->indexPath().value();
if (indexFieldPath.getPathLength() > 0) {
ABT indexPath = translateFieldPath(
indexFieldPath,
diff --git a/src/mongo/db/pipeline/accumulator_rank.cpp b/src/mongo/db/pipeline/accumulator_rank.cpp
index b0d006c0df7..7df6895a2ed 100644
--- a/src/mongo/db/pipeline/accumulator_rank.cpp
+++ b/src/mongo/db/pipeline/accumulator_rank.cpp
@@ -56,7 +56,7 @@ REGISTER_STABLE_WINDOW_FUNCTION(
void AccumulatorRank::processInternal(const Value& input, bool merging) {
tassert(5417001, "$rank can't be merged", !merging);
if (!_lastInput ||
- getExpressionContext()->getValueComparator().compare(_lastInput.get(), input) != 0) {
+ getExpressionContext()->getValueComparator().compare(_lastInput.value(), input) != 0) {
_lastRank += _numSameRank;
_numSameRank = 1;
_lastInput = input;
@@ -75,7 +75,7 @@ void AccumulatorDocumentNumber::processInternal(const Value& input, bool merging
void AccumulatorDenseRank::processInternal(const Value& input, bool merging) {
tassert(5417003, "$denseRank can't be merged", !merging);
if (!_lastInput ||
- getExpressionContext()->getValueComparator().compare(_lastInput.get(), input) != 0) {
+ getExpressionContext()->getValueComparator().compare(_lastInput.value(), input) != 0) {
++_lastRank;
_lastInput = input;
_memUsageBytes = sizeof(*this) + _lastInput->getApproximateSize() - sizeof(Value);
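
The two accumulators above advance their rank only when the incoming value differs from an optional "last seen" input. A self-contained sketch of that bookkeeping with std::optional, using plain != in place of the expression context's ValueComparator:

    #include <iostream>
    #include <optional>
    #include <vector>

    int main() {
        std::vector<int> inputs{10, 10, 20, 30, 30, 30};
        std::optional<int> lastInput;          // empty until the first document
        long long lastRank = 0, numSameRank = 1;

        for (int in : inputs) {
            if (!lastInput || lastInput.value() != in) {
                lastRank += numSameRank;       // new value: rank jumps past the ties
                numSameRank = 1;
                lastInput = in;
            } else {
                ++numSameRank;                 // tie: keep the current rank
            }
            std::cout << in << " -> rank " << lastRank << '\n';
        }
        return 0;
    }
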
diff --git a/src/mongo/db/pipeline/aggregation_request_test.cpp b/src/mongo/db/pipeline/aggregation_request_test.cpp
index be336da50fd..c0167bef26d 100644
--- a/src/mongo/db/pipeline/aggregation_request_test.cpp
+++ b/src/mongo/db/pipeline/aggregation_request_test.cpp
@@ -94,7 +94,7 @@ TEST(AggregationRequestTest, ShouldParseAllKnownOptions) {
ASSERT_BSONOBJ_EQ(request.getUnwrappedReadPref().value_or(BSONObj()),
BSON("$readPreference"
<< "nearest"));
- ASSERT_TRUE(request.getExchange().is_initialized());
+ ASSERT_TRUE(request.getExchange().has_value());
ASSERT_TRUE(request.getIsMapReduceCommand());
ASSERT_EQ(*request.getCollectionUUID(), uuid);
}
diff --git a/src/mongo/db/pipeline/document_source_bucket_auto.cpp b/src/mongo/db/pipeline/document_source_bucket_auto.cpp
index bb16662791f..09d92f4aa71 100644
--- a/src/mongo/db/pipeline/document_source_bucket_auto.cpp
+++ b/src/mongo/db/pipeline/document_source_bucket_auto.cpp
@@ -519,7 +519,7 @@ intrusive_ptr<DocumentSource> DocumentSourceBucketAuto::createFromBson(
groupByExpression && numBuckets);
return DocumentSourceBucketAuto::create(
- pExpCtx, groupByExpression, numBuckets.get(), accumulationStatements, granularityRounder);
+ pExpCtx, groupByExpression, numBuckets.value(), accumulationStatements, granularityRounder);
}
} // namespace mongo
diff --git a/src/mongo/db/pipeline/document_source_change_stream_check_invalidate.cpp b/src/mongo/db/pipeline/document_source_change_stream_check_invalidate.cpp
index 0732278536b..0842398e770 100644
--- a/src/mongo/db/pipeline/document_source_change_stream_check_invalidate.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream_check_invalidate.cpp
@@ -98,7 +98,7 @@ DocumentSource::GetNextResult DocumentSourceChangeStreamCheckInvalidate::doGetNe
// then throws a 'ChangeStreamInvalidated' exception on the next call to this method.
if (_queuedInvalidate) {
- const auto res = DocumentSource::GetNextResult(std::move(_queuedInvalidate.get()));
+ const auto res = DocumentSource::GetNextResult(std::move(_queuedInvalidate.value()));
_queuedInvalidate.reset();
return res;
}
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index c992288a0e4..084274467d9 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -240,7 +240,7 @@ Value DocumentSourceCursor::serialize(boost::optional<ExplainOptions::Verbosity>
Explain::explainStages(_exec.get(),
collections,
- verbosity.get(),
+ verbosity.value(),
_execStatus,
_winningPlanTrialStats,
BSONObj(),
@@ -252,7 +252,7 @@ Value DocumentSourceCursor::serialize(boost::optional<ExplainOptions::Verbosity>
invariant(explainStats["queryPlanner"]);
out["queryPlanner"] = Value(explainStats["queryPlanner"]);
- if (verbosity.get() >= ExplainOptions::Verbosity::kExecStats) {
+ if (verbosity.value() >= ExplainOptions::Verbosity::kExecStats) {
invariant(explainStats["executionStats"]);
out["executionStats"] = Value(explainStats["executionStats"]);
}
diff --git a/src/mongo/db/pipeline/document_source_densify.cpp b/src/mongo/db/pipeline/document_source_densify.cpp
index d72e17571fe..2fd25d5c3ca 100644
--- a/src/mongo/db/pipeline/document_source_densify.cpp
+++ b/src/mongo/db/pipeline/document_source_densify.cpp
@@ -60,7 +60,7 @@ RangeStatement RangeStatement::parse(RangeSpec spec) {
"The step parameter in a range statement must be a whole number when "
"densifying a date range",
step.integral64Bit());
- return optional<TimeUnit>(parseTimeUnit(unit.get()));
+ return optional<TimeUnit>(parseTimeUnit(unit.value()));
} else {
return optional<TimeUnit>(boost::none);
}
@@ -292,7 +292,7 @@ Document DocumentSourceInternalDensify::DocGenerator::getNextDocument() {
_state = GeneratorState::kDone;
// If _finalDoc is boost::none we can't be in this state.
tassert(5832800, "DocGenerator expected _finalDoc, found boost::none", _finalDoc);
- return _finalDoc.get();
+ return _finalDoc.value();
}
// Assume all types have been checked at this point and we are in a valid state.
DensifyValue valueToAdd = _min;
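
getNextDocument() above asserts the optional is engaged and then calls value(), so the checked accessor acts as a backstop for the invariant even if the assertion is compiled out. Where an empty optional has a natural default, value_or() (used elsewhere in this change, e.g. maxCounter.value_or(0LL)) removes the branch entirely. A sketch of both idioms; names are illustrative:

    #include <cassert>
    #include <iostream>
    #include <optional>
    #include <string>

    std::string takeFinalDoc(const std::optional<std::string>& finalDoc) {
        assert(finalDoc.has_value());  // invariant: the generator set it
        return finalDoc.value();       // throws, rather than UB, if it did not
    }

    int main() {
        std::optional<std::string> finalDoc{"{_id: 1}"};
        std::cout << takeFinalDoc(finalDoc) << '\n';

        std::optional<long long> maxCounter;            // unset
        std::cout << maxCounter.value_or(0LL) << '\n';  // default without a branch
        return 0;
    }
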
diff --git a/src/mongo/db/pipeline/document_source_fill.cpp b/src/mongo/db/pipeline/document_source_fill.cpp
index 66a061e3576..e4ad71920d8 100644
--- a/src/mongo/db/pipeline/document_source_fill.cpp
+++ b/src/mongo/db/pipeline/document_source_fill.cpp
@@ -116,7 +116,7 @@ std::list<boost::intrusive_ptr<DocumentSource>> createFromBson(
uassert(6050204,
"Maximum one of 'partitionBy' and 'partitionByFields can be specified in '$fill'",
!spec.getPartitionByFields());
- auto partitionByField = partitionByUnparsedExpr.get();
+ auto partitionByField = partitionByUnparsedExpr.value();
if (std::string* partitionByString = stdx::get_if<std::string>(&partitionByField)) {
setWindowFieldsSpec.append("partitionBy", *partitionByString);
} else
diff --git a/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup_test.cpp b/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup_test.cpp
index 9068a61ff05..3c21e19a3f7 100644
--- a/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup_test.cpp
+++ b/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup_test.cpp
@@ -294,14 +294,14 @@ TEST_F(FindAndModifyImageLookupTest, ShouldForgeImageEntryWhenMatchingImageDocIs
ASSERT_BSONOBJ_EQ(prePostImage, forgedImageEntry.getObject());
ASSERT_EQUALS(nss, forgedImageEntry.getNss());
ASSERT_EQUALS(uuid, *forgedImageEntry.getUuid());
- ASSERT_EQUALS(txnNum, forgedImageEntry.getTxnNumber().get());
- ASSERT_EQUALS(sessionId, forgedImageEntry.getSessionId().get());
+ ASSERT_EQUALS(txnNum, forgedImageEntry.getTxnNumber().value());
+ ASSERT_EQUALS(sessionId, forgedImageEntry.getSessionId().value());
ASSERT_EQUALS("n", repl::OpType_serializer(forgedImageEntry.getOpType()));
const auto stmtIds = forgedImageEntry.getStatementIds();
ASSERT_EQUALS(1U, stmtIds.size());
ASSERT_EQUALS(stmtId, stmtIds.front());
ASSERT_EQUALS(ts - 1, forgedImageEntry.getTimestamp());
- ASSERT_EQUALS(1, forgedImageEntry.getTerm().get());
+ ASSERT_EQUALS(1, forgedImageEntry.getTerm().value());
// The next doc should be the doc for the original findAndModify oplog entry with the
// 'needsRetryImage' field removed and 'preImageOpTime'/'postImageOpTime' field appended.
@@ -394,14 +394,14 @@ TEST_F(FindAndModifyImageLookupTest, ShouldForgeImageEntryWhenMatchingImageDocIs
ASSERT_BSONOBJ_EQ(prePostImage, forgedImageEntry.getObject());
ASSERT_EQUALS(nss, forgedImageEntry.getNss());
ASSERT_EQUALS(uuid, *forgedImageEntry.getUuid());
- ASSERT_EQUALS(txnNum, forgedImageEntry.getTxnNumber().get());
- ASSERT_EQUALS(sessionId, forgedImageEntry.getSessionId().get());
+ ASSERT_EQUALS(txnNum, forgedImageEntry.getTxnNumber().value());
+ ASSERT_EQUALS(sessionId, forgedImageEntry.getSessionId().value());
ASSERT_EQUALS("n", repl::OpType_serializer(forgedImageEntry.getOpType()));
const auto stmtIds = forgedImageEntry.getStatementIds();
ASSERT_EQUALS(1U, stmtIds.size());
ASSERT_EQUALS(stmtId, stmtIds.front());
ASSERT_EQUALS(applyOpsTs - 1, forgedImageEntry.getTimestamp());
- ASSERT_EQUALS(1, forgedImageEntry.getTerm().get());
+ ASSERT_EQUALS(1, forgedImageEntry.getTerm().value());
// The next doc should be the doc for the original applyOps oplog entry, but the
// findAndModify/update operation entry should have the 'needsRetryImage' field removed and
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.cpp b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
index 4d78c926b5f..e42c6cbe027 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
@@ -505,7 +505,7 @@ void DocumentSourceGraphLookUp::performSearch() {
DocumentSource::GetModPathsReturn DocumentSourceGraphLookUp::getModifiedPaths() const {
OrderedPathSet modifiedPaths{_as.fullPath()};
if (_unwind) {
- auto pathsModifiedByUnwind = _unwind.get()->getModifiedPaths();
+ auto pathsModifiedByUnwind = _unwind.value()->getModifiedPaths();
invariant(pathsModifiedByUnwind.type == GetModPathsReturn::Type::kFiniteSet);
modifiedPaths.insert(pathsModifiedByUnwind.paths.begin(),
pathsModifiedByUnwind.paths.end());
@@ -688,7 +688,7 @@ DocumentSourceGraphLookUp::DocumentSourceGraphLookUp(
_variables(original._variables),
_variablesParseState(original._variablesParseState.copyWith(_variables.useIdGenerator())) {
if (original._unwind) {
- _unwind = static_cast<DocumentSourceUnwind*>(original._unwind.get()->clone().get());
+ _unwind = static_cast<DocumentSourceUnwind*>(original._unwind.value()->clone().get());
}
}
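
_unwind above is an optional holding a pointer, so access is two-level: value() yields the pointer and -> then reaches the stage. A dependency-free sketch with std::optional over std::shared_ptr (the Stage type is a stand-in for the intrusive_ptr'd document source):

    #include <iostream>
    #include <memory>
    #include <optional>
    #include <string>

    struct Stage {
        std::string modifiedPath() const { return "as"; }
    };

    int main() {
        std::optional<std::shared_ptr<Stage>> unwind;
        unwind = std::make_shared<Stage>();

        if (unwind)  // engaged: value() is the pointer, -> dereferences the pointee
            std::cout << unwind.value()->modifiedPath() << '\n';
        return 0;
    }
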
diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket.cpp
index 3c7a73f8fc8..cfa58471fda 100644
--- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket.cpp
+++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket.cpp
@@ -133,7 +133,7 @@ bool checkMetadataSortReorder(
return false;
}
if (sortKey.fieldPath->getFieldName(0) != metaFieldStr) {
- if (lastpointTimeField && sortKey.fieldPath->fullPath() == lastpointTimeField.get()) {
+ if (lastpointTimeField && sortKey.fieldPath->fullPath() == lastpointTimeField.value()) {
// If we are checking the sort pattern for the lastpoint case, 'time' is allowed.
timeFound = true;
continue;
@@ -169,7 +169,7 @@ boost::intrusive_ptr<DocumentSourceSort> createMetadataSortForReorder(
std::vector<SortPattern::SortPatternPart> updatedPattern;
if (groupIdField) {
- auto groupId = FieldPath(groupIdField.get());
+ auto groupId = FieldPath(groupIdField.value());
SortPattern::SortPatternPart patternPart;
patternPart.isAscending = !flipSort;
patternPart.fieldPath = groupId;
@@ -180,16 +180,16 @@ boost::intrusive_ptr<DocumentSourceSort> createMetadataSortForReorder(
for (const auto& entry : sortPattern) {
updatedPattern.push_back(entry);
- if (lastpointTimeField && entry.fieldPath->fullPath() == lastpointTimeField.get()) {
+ if (lastpointTimeField && entry.fieldPath->fullPath() == lastpointTimeField.value()) {
updatedPattern.back().fieldPath =
FieldPath((entry.isAscending ? timeseries::kControlMinFieldNamePrefix
: timeseries::kControlMaxFieldNamePrefix) +
- lastpointTimeField.get());
+ lastpointTimeField.value());
updatedPattern.push_back(SortPattern::SortPatternPart{
entry.isAscending,
FieldPath((entry.isAscending ? timeseries::kControlMaxFieldNamePrefix
: timeseries::kControlMinFieldNamePrefix) +
- lastpointTimeField.get()),
+ lastpointTimeField.value()),
nullptr});
} else {
auto updated = FieldPath(timeseries::kBucketMetaFieldName);
@@ -523,7 +523,7 @@ bool DocumentSourceInternalUnpackBucket::pushDownComputedMetaProjection(
(nextTransform->getType() == TransformerInterface::TransformerType::kInclusionProjection ||
nextTransform->getType() == TransformerInterface::TransformerType::kComputedProjection)) {
- auto& metaName = _bucketUnpacker.bucketSpec().metaField().get();
+ auto& metaName = _bucketUnpacker.bucketSpec().metaField().value();
auto [addFieldsSpec, deleteStage] =
nextTransform->extractComputedProjections(metaName,
timeseries::kBucketMetaFieldName.toString(),
@@ -625,7 +625,7 @@ std::pair<BSONObj, bool> DocumentSourceInternalUnpackBucket::extractProjectForPu
_bucketUnpacker.bucketSpec().metaField() && nextProject &&
nextProject->getType() == TransformerInterface::TransformerType::kExclusionProjection) {
return nextProject->extractProjectOnFieldAndRename(
- _bucketUnpacker.bucketSpec().metaField().get(), timeseries::kBucketMetaFieldName);
+ _bucketUnpacker.bucketSpec().metaField().value(), timeseries::kBucketMetaFieldName);
}
return {BSONObj{}, false};
@@ -652,7 +652,7 @@ DocumentSourceInternalUnpackBucket::rewriteGroupByMinMax(Pipeline::SourceContain
const auto& idPath = exprIdPath->getFieldPath();
if (idPath.getPathLength() < 2 ||
- idPath.getFieldName(1) != _bucketUnpacker.bucketSpec().metaField().get()) {
+ idPath.getFieldName(1) != _bucketUnpacker.bucketSpec().metaField().value()) {
return {};
}
@@ -738,7 +738,7 @@ DocumentSourceInternalUnpackBucket::rewriteGroupByMinMax(Pipeline::SourceContain
bool DocumentSourceInternalUnpackBucket::haveComputedMetaField() const {
return _bucketUnpacker.bucketSpec().metaField() &&
_bucketUnpacker.bucketSpec().fieldIsComputed(
- _bucketUnpacker.bucketSpec().metaField().get());
+ _bucketUnpacker.bucketSpec().metaField().value());
}
template <TopBottomSense sense, bool single>
@@ -894,7 +894,7 @@ bool DocumentSourceInternalUnpackBucket::optimizeLastpoint(Pipeline::SourceConta
return false;
}
- auto metaField = maybeMetaField.get();
+ auto metaField = maybeMetaField.value();
if (!checkMetadataSortReorder(sortStage->getSortKeyPattern(), metaField, timeField)) {
return false;
}
@@ -1008,7 +1008,7 @@ Pipeline::SourceContainer::iterator DocumentSourceInternalUnpackBucket::doOptimi
if (auto sortPtr = dynamic_cast<DocumentSourceSort*>(std::next(itr)->get())) {
if (auto metaField = _bucketUnpacker.bucketSpec().metaField();
metaField && !haveComputedMetaField) {
- if (checkMetadataSortReorder(sortPtr->getSortKeyPattern(), metaField.get())) {
+ if (checkMetadataSortReorder(sortPtr->getSortKeyPattern(), metaField.value())) {
// We have a sort on the metadata field following this stage. Reorder the two stages
// and return a pointer to the preceding stage.
auto sortForReorder = createMetadataSortForReorder(*sortPtr);
diff --git a/src/mongo/db/pipeline/document_source_list_local_sessions.cpp b/src/mongo/db/pipeline/document_source_list_local_sessions.cpp
index 91d57031a02..95bc338659e 100644
--- a/src/mongo/db/pipeline/document_source_list_local_sessions.cpp
+++ b/src/mongo/db/pipeline/document_source_list_local_sessions.cpp
@@ -78,7 +78,7 @@ DocumentSourceListLocalSessions::DocumentSourceListLocalSessions(
invariant(!_spec.getUsers() || _spec.getUsers()->empty());
_ids = _cache->listIds();
} else {
- _ids = _cache->listIds(listSessionsUsersToDigests(_spec.getUsers().get()));
+ _ids = _cache->listIds(listSessionsUsersToDigests(_spec.getUsers().value()));
}
}
@@ -122,7 +122,7 @@ mongo::PrivilegeVector mongo::listSessionsRequiredPrivileges(const ListSessionsS
const auto& myName =
getUserNameForLoggedInUser(Client::getCurrent()->getOperationContext());
- const auto& users = spec.getUsers().get();
+ const auto& users = spec.getUsers().value();
return !std::all_of(
users.cbegin(), users.cend(), [myName](const auto& name) { return myName == name; });
})();
diff --git a/src/mongo/db/pipeline/document_source_list_sessions.cpp b/src/mongo/db/pipeline/document_source_list_sessions.cpp
index 3c19beb4d5f..1ee6a2eccaa 100644
--- a/src/mongo/db/pipeline/document_source_list_sessions.cpp
+++ b/src/mongo/db/pipeline/document_source_list_sessions.cpp
@@ -66,7 +66,7 @@ boost::intrusive_ptr<DocumentSource> DocumentSourceListSessions::createFromBson(
invariant(spec.getUsers() && !spec.getUsers()->empty());
BSONArrayBuilder builder;
- for (const auto& uid : listSessionsUsersToDigests(spec.getUsers().get())) {
+ for (const auto& uid : listSessionsUsersToDigests(spec.getUsers().value())) {
ConstDataRange cdr = uid.toCDR();
builder.append(BSONBinData(cdr.data(), cdr.length(), BinDataGeneral));
}
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp
index 6d256cef471..e712caa02a7 100644
--- a/src/mongo/db/pipeline/document_source_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup.cpp
@@ -136,7 +136,7 @@ DocumentSourceLookUp::DocumentSourceLookUp(
_fromExpCtx = expCtx->copyForSubPipeline(resolvedNamespace.ns, resolvedNamespace.uuid);
_fromExpCtx->inLookup = true;
if (fromCollator) {
- _fromExpCtx->setCollator(std::move(fromCollator.get()));
+ _fromExpCtx->setCollator(std::move(fromCollator.value()));
_hasExplicitCollation = true;
}
}
@@ -1034,7 +1034,7 @@ void DocumentSourceLookUp::serializeToArray(
<< (indexPath ? Value(indexPath->fullPath()) : Value())));
}
- if (explain.get() >= ExplainOptions::Verbosity::kExecStats) {
+ if (explain.value() >= ExplainOptions::Verbosity::kExecStats) {
appendSpecificExecStats(output);
}
diff --git a/src/mongo/db/pipeline/document_source_sort.cpp b/src/mongo/db/pipeline/document_source_sort.cpp
index ce416165e66..ea21c75682b 100644
--- a/src/mongo/db/pipeline/document_source_sort.cpp
+++ b/src/mongo/db/pipeline/document_source_sort.cpp
@@ -436,7 +436,7 @@ intrusive_ptr<DocumentSourceSort> DocumentSourceSort::createBoundedSort(
}
if (limit) {
- opts.Limit(limit.get());
+ opts.Limit(limit.value());
}
if (boundBase == kMin) {
diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp
index 2161b0b9bbb..6f4599ffa59 100644
--- a/src/mongo/db/pipeline/document_source_unwind.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind.cpp
@@ -253,7 +253,7 @@ bool DocumentSourceUnwind::canPushLimitBack(const DocumentSourceLimit* limit) co
// If _smallestLimitPushedDown is boost::none, then we have not yet pushed a limit down. So no
// matter what the limit is, we should duplicate and push down. Otherwise we should only push
// the limit down if it is smaller than the smallest limit we have pushed down so far.
- return !_smallestLimitPushedDown || limit->getLimit() < _smallestLimitPushedDown.get();
+ return !_smallestLimitPushedDown || limit->getLimit() < _smallestLimitPushedDown.value();
}
Pipeline::SourceContainer::iterator DocumentSourceUnwind::doOptimizeAt(
@@ -273,7 +273,7 @@ Pipeline::SourceContainer::iterator DocumentSourceUnwind::doOptimizeAt(
if (nextSort->hasLimit()) {
container->insert(
std::next(next),
- DocumentSourceLimit::create(nextSort->getContext(), nextSort->getLimit().get()));
+ DocumentSourceLimit::create(nextSort->getContext(), nextSort->getLimit().value()));
}
std::swap(*itr, *next);
return itr == container->begin() ? itr : std::prev(itr);
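
canPushLimitBack() above reads an empty _smallestLimitPushedDown as "nothing pushed down yet", so any limit qualifies; otherwise only a strictly smaller one does. A compact sketch of that guard (the actual update of the member happens elsewhere in the stage; it is folded in here for brevity):

    #include <iostream>
    #include <optional>

    bool shouldPushDown(std::optional<long long>& smallest, long long candidate) {
        if (!smallest || candidate < smallest.value()) {
            smallest = candidate;  // remember the new smallest limit
            return true;
        }
        return false;
    }

    int main() {
        std::optional<long long> smallest;
        std::cout << shouldPushDown(smallest, 10)  // 1: nothing pushed yet
                  << shouldPushDown(smallest, 20)  // 0: not smaller
                  << shouldPushDown(smallest, 5)   // 1: smaller
                  << '\n';
        return 0;
    }
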
diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp
index 5007dacda7f..a373c23ccbe 100644
--- a/src/mongo/db/pipeline/expression.cpp
+++ b/src/mongo/db/pipeline/expression.cpp
@@ -1723,11 +1723,11 @@ Value ExpressionDateFromString::evaluate(const Document& root, Variables* variab
}
return Value(getExpressionContext()->timeZoneDatabase->fromString(
- dateTimeString, timeZone.get(), formatValue.getStringData()));
+ dateTimeString, timeZone.value(), formatValue.getStringData()));
}
return Value(
- getExpressionContext()->timeZoneDatabase->fromString(dateTimeString, timeZone.get()));
+ getExpressionContext()->timeZoneDatabase->fromString(dateTimeString, timeZone.value()));
} catch (const ExceptionFor<ErrorCodes::ConversionFailure>&) {
if (_onError) {
return _onError->evaluate(root, variables);
@@ -7544,7 +7544,7 @@ Value ExpressionDateArithmetics::evaluate(const Document& root, Variables* varia
amount.integral64Bit());
return evaluateDateArithmetics(
- startDate.coerceToDate(), unit, amount.coerceToLong(), timezone.get());
+ startDate.coerceToDate(), unit, amount.coerceToLong(), timezone.value());
}
/* ----------------------- ExpressionDateAdd ---------------------------- */
diff --git a/src/mongo/db/pipeline/expression_context.cpp b/src/mongo/db/pipeline/expression_context.cpp
index 7258456a1ac..c79434f4bcb 100644
--- a/src/mongo/db/pipeline/expression_context.cpp
+++ b/src/mongo/db/pipeline/expression_context.cpp
@@ -241,37 +241,38 @@ void ExpressionContext::startExpressionCounters() {
void ExpressionContext::incrementMatchExprCounter(StringData name) {
if (enabledCounters && _expressionCounters) {
- ++_expressionCounters.get().matchExprCountersMap[name];
+ ++_expressionCounters.value().matchExprCountersMap[name];
}
}
void ExpressionContext::incrementAggExprCounter(StringData name) {
if (enabledCounters && _expressionCounters) {
- ++_expressionCounters.get().aggExprCountersMap[name];
+ ++_expressionCounters.value().aggExprCountersMap[name];
}
}
void ExpressionContext::incrementGroupAccumulatorExprCounter(StringData name) {
if (enabledCounters && _expressionCounters) {
- ++_expressionCounters.get().groupAccumulatorExprCountersMap[name];
+ ++_expressionCounters.value().groupAccumulatorExprCountersMap[name];
}
}
void ExpressionContext::incrementWindowAccumulatorExprCounter(StringData name) {
if (enabledCounters && _expressionCounters) {
- ++_expressionCounters.get().windowAccumulatorExprCountersMap[name];
+ ++_expressionCounters.value().windowAccumulatorExprCountersMap[name];
}
}
void ExpressionContext::stopExpressionCounters() {
if (enabledCounters && _expressionCounters) {
operatorCountersMatchExpressions.mergeCounters(
- _expressionCounters.get().matchExprCountersMap);
- operatorCountersAggExpressions.mergeCounters(_expressionCounters.get().aggExprCountersMap);
+ _expressionCounters.value().matchExprCountersMap);
+ operatorCountersAggExpressions.mergeCounters(
+ _expressionCounters.value().aggExprCountersMap);
operatorCountersGroupAccumulatorExpressions.mergeCounters(
- _expressionCounters.get().groupAccumulatorExprCountersMap);
+ _expressionCounters.value().groupAccumulatorExprCountersMap);
operatorCountersWindowAccumulatorExpressions.mergeCounters(
- _expressionCounters.get().windowAccumulatorExprCountersMap);
+ _expressionCounters.value().windowAccumulatorExprCountersMap);
}
_expressionCounters = boost::none;
}
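
The increments above compile because value() returns a reference to the contained object, so ++_expressionCounters.value().map[name] mutates in place exactly as the operator-> spelling would. A sketch with a stand-in counters struct:

    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>

    struct ExpressionCounters {
        std::map<std::string, long long> matchExprCountersMap;
    };

    int main() {
        std::optional<ExpressionCounters> counters{ExpressionCounters{}};

        ++counters.value().matchExprCountersMap["$eq"];  // value() is a reference
        ++counters->matchExprCountersMap["$eq"];         // equivalent spelling

        std::cout << counters->matchExprCountersMap["$eq"] << '\n';  // 2

        counters = std::nullopt;  // reset, as stopExpressionCounters() does
        return 0;
    }
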
diff --git a/src/mongo/db/pipeline/lite_parsed_document_source.cpp b/src/mongo/db/pipeline/lite_parsed_document_source.cpp
index f770f986e33..7c96d56865c 100644
--- a/src/mongo/db/pipeline/lite_parsed_document_source.cpp
+++ b/src/mongo/db/pipeline/lite_parsed_document_source.cpp
@@ -101,7 +101,7 @@ LiteParsedDocumentSourceNestedPipelines::LiteParsedDocumentSourceNestedPipelines
: LiteParsedDocumentSourceNestedPipelines(
std::move(parseTimeName), std::move(foreignNss), std::vector<LiteParsedPipeline>{}) {
if (pipeline)
- _pipelines.emplace_back(std::move(pipeline.get()));
+ _pipelines.emplace_back(std::move(pipeline.value()));
}
stdx::unordered_set<NamespaceString>
diff --git a/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp b/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp
index b34a1cfc2c3..07a0016f119 100644
--- a/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp
+++ b/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp
@@ -119,7 +119,7 @@ TEST_F(PipelineMetadataTreeTest, LinearPipelinesConstructProperTrees) {
return makeTree<TestThing>(
{{NamespaceString("test.collection"), initial}}, *pipePtr, ignoreDocumentSourceAddOne);
}()
- .first.get() == Stage(TestThing{23}, {}, {}));
+ .first.value() == Stage(TestThing{23}, {}, {}));
ASSERT([&]() {
auto pipePtr = jsonToPipeline(
@@ -128,7 +128,7 @@ TEST_F(PipelineMetadataTreeTest, LinearPipelinesConstructProperTrees) {
return makeTree<TestThing>(
{{NamespaceString("test.collection"), initial}}, *pipePtr, ignoreDocumentSourceAddOne);
}()
- .first.get() == Stage(TestThing{24}, makeUniqueStage(TestThing{23}, {}, {}), {}));
+ .first.value() == Stage(TestThing{24}, makeUniqueStage(TestThing{23}, {}, {}), {}));
ASSERT([&]() {
auto pipePtr = jsonToPipeline(
@@ -141,7 +141,7 @@ TEST_F(PipelineMetadataTreeTest, LinearPipelinesConstructProperTrees) {
return makeTree<TestThing>(
{{NamespaceString("test.collection"), initial}}, *pipePtr, ignoreDocumentSourceAddOne);
}()
- .first.get() ==
+ .first.value() ==
Stage(TestThing{28},
makeUniqueStage(
TestThing{27},
@@ -240,7 +240,7 @@ TEST_F(PipelineMetadataTreeTest, BranchingPipelinesConstructProperTrees) {
*pipePtr,
buildRepresentativeString);
}()
- .first.get() ==
+ .first.value() ==
Stage(TestThing{"1mpxul[2m]ulu"},
makeUniqueStage(
TestThing{"1mpxul[2m]ul"},
@@ -277,7 +277,7 @@ TEST_F(PipelineMetadataTreeTest, BranchingPipelinesConstructProperTrees) {
return makeTree<TestThing>(
{{NamespaceString("test.collection"), {""}}}, *pipePtr, buildRepresentativeString);
}()
- .first.get() ==
+ .first.value() ==
Stage(TestThing{"f[tugs, tmgs, tb]"},
makeUniqueStage(
TestThing{""},
@@ -356,7 +356,7 @@ TEST_F(PipelineMetadataTreeTest, ZipWalksAPipelineAndTreeInTandemAndInOrder) {
*pipePtr,
takeTypeInfo)
.first;
- zip<TestThing>(&tree.get(), &*pipePtr, tookTypeInfoOrThrow);
+ zip<TestThing>(&tree.value(), &*pipePtr, tookTypeInfoOrThrow);
previousStack.pop();
}());
@@ -374,7 +374,7 @@ TEST_F(PipelineMetadataTreeTest, ZipWalksAPipelineAndTreeInTandemAndInOrder) {
*pipePtr,
takeTypeInfo)
.first;
- zip<TestThing>(&tree.get(), &*pipePtr, tookTypeInfoOrThrow);
+ zip<TestThing>(&tree.value(), &*pipePtr, tookTypeInfoOrThrow);
previousStack.pop();
}());
}
diff --git a/src/mongo/db/pipeline/pipeline_test.cpp b/src/mongo/db/pipeline/pipeline_test.cpp
index 446973aa1ec..3f5e62651dd 100644
--- a/src/mongo/db/pipeline/pipeline_test.cpp
+++ b/src/mongo/db/pipeline/pipeline_test.cpp
@@ -4190,7 +4190,7 @@ TEST_F(PipelineDeferredMergeSortTest, StageWithDeferredSortDoesNotSplit) {
// Verify the sort is correct.
ASSERT(splitPipeline.shardCursorsSortSpec);
- ASSERT_BSONOBJ_EQ(splitPipeline.shardCursorsSortSpec.get(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(splitPipeline.shardCursorsSortSpec.value(), BSON("a" << 1));
}
TEST_F(PipelineDeferredMergeSortTest, EarliestSortIsSelectedIfDeferred) {
@@ -4214,7 +4214,7 @@ TEST_F(PipelineDeferredMergeSortTest, EarliestSortIsSelectedIfDeferred) {
// Verify the sort is correct.
ASSERT(splitPipeline.shardCursorsSortSpec);
- ASSERT_BSONOBJ_EQ(splitPipeline.shardCursorsSortSpec.get(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(splitPipeline.shardCursorsSortSpec.value(), BSON("a" << 1));
}
TEST_F(PipelineDeferredMergeSortTest, StageThatCantSwapGoesToMergingHalf) {
@@ -4237,7 +4237,7 @@ TEST_F(PipelineDeferredMergeSortTest, StageThatCantSwapGoesToMergingHalf) {
// Verify the sort is correct.
ASSERT(splitPipeline.shardCursorsSortSpec);
- ASSERT_BSONOBJ_EQ(splitPipeline.shardCursorsSortSpec.get(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(splitPipeline.shardCursorsSortSpec.value(), BSON("a" << 1));
}
} // namespace DeferredSort
} // namespace Sharded
diff --git a/src/mongo/db/pipeline/resharding_initial_split_policy_test.cpp b/src/mongo/db/pipeline/resharding_initial_split_policy_test.cpp
index 2df79a991d4..1296191ad5d 100644
--- a/src/mongo/db/pipeline/resharding_initial_split_policy_test.cpp
+++ b/src/mongo/db/pipeline/resharding_initial_split_policy_test.cpp
@@ -57,11 +57,11 @@ TEST_F(ReshardingSplitPolicyTest, ShardKeyWithNonDottedFieldAndIdIsNotProjectedS
// We sample all of the documents since numSplitPoints(1) * samplingRatio (2) = 2 and the
// document source has 2 chunks. So we can assert on the returned values.
auto next = pipeline->getNext();
- ASSERT_EQUALS(next.get().getField("a").getInt(), 5);
- ASSERT(next.get().getField("_id").missing());
+ ASSERT_EQUALS(next.value().getField("a").getInt(), 5);
+ ASSERT(next.value().getField("_id").missing());
next = pipeline->getNext();
- ASSERT_EQUALS(next.get().getField("a").getInt(), 15);
- ASSERT(next.get().getField("_id").missing());
+ ASSERT_EQUALS(next.value().getField("a").getInt(), 15);
+ ASSERT(next.value().getField("_id").missing());
ASSERT(!pipeline->getNext());
}
@@ -79,11 +79,11 @@ TEST_F(ReshardingSplitPolicyTest, ShardKeyWithIdFieldIsProjectedSucceeds) {
// We sample all of the documents since numSplitPoints(1) * samplingRatio (2) = 2 and the
// document source has 2 chunks. So we can assert on the returned values.
auto next = pipeline->getNext();
- ASSERT_EQUALS(next.get().getField("_id").getInt(), 3);
- ASSERT(next.get().getField("a").missing());
+ ASSERT_EQUALS(next.value().getField("_id").getInt(), 3);
+ ASSERT(next.value().getField("a").missing());
next = pipeline->getNext();
- ASSERT_EQUALS(next.get().getField("_id").getInt(), 10);
- ASSERT(next.get().getField("a").missing());
+ ASSERT_EQUALS(next.value().getField("_id").getInt(), 10);
+ ASSERT(next.value().getField("a").missing());
ASSERT(!pipeline->getNext());
}
@@ -102,13 +102,13 @@ TEST_F(ReshardingSplitPolicyTest, CompoundShardKeyWithNonDottedHashedFieldSuccee
// We sample all of the documents since numSplitPoints(1) * samplingRatio (2) = 2 and the
// document source has 2 chunks. So we can assert on the returned values.
auto next = pipeline->getNext();
- ASSERT_EQUALS(next.get().getField("a").getInt(), 5);
- ASSERT_EQUALS(next.get().getField("b").getLong(), -6548868637522515075LL);
- ASSERT(next.get().getField("x").missing());
+ ASSERT_EQUALS(next.value().getField("a").getInt(), 5);
+ ASSERT_EQUALS(next.value().getField("b").getLong(), -6548868637522515075LL);
+ ASSERT(next.value().getField("x").missing());
next = pipeline->getNext();
- ASSERT_EQUALS(next.get().getField("a").getInt(), 15);
- ASSERT_EQUALS(next.get().getField("b").getLong(), 2598032665634823220LL);
- ASSERT(next.get().getField("x").missing());
+ ASSERT_EQUALS(next.value().getField("a").getInt(), 15);
+ ASSERT_EQUALS(next.value().getField("b").getLong(), 2598032665634823220LL);
+ ASSERT(next.value().getField("x").missing());
ASSERT(!pipeline->getNext());
}
@@ -126,9 +126,9 @@ TEST_F(ReshardingSplitPolicyTest, CompoundShardKeyWithDottedFieldSucceeds) {
// We sample all of the documents since numSplitPoints(1) * samplingRatio (2) = 2 and the
// document source has 2 chunks. So we can assert on the returned values.
auto next = pipeline->getNext();
- ASSERT_BSONOBJ_EQ(next.get().toBson(), BSON("a" << BSON("b" << 10) << "c" << 5));
+ ASSERT_BSONOBJ_EQ(next.value().toBson(), BSON("a" << BSON("b" << 10) << "c" << 5));
next = pipeline->getNext();
- ASSERT_BSONOBJ_EQ(next.get().toBson(), BSON("a" << BSON("b" << 20) << "c" << 1));
+ ASSERT_BSONOBJ_EQ(next.value().toBson(), BSON("a" << BSON("b" << 20) << "c" << 1));
ASSERT(!pipeline->getNext());
}
@@ -147,10 +147,10 @@ TEST_F(ReshardingSplitPolicyTest, CompoundShardKeyWithDottedHashedFieldSucceeds)
// We sample all of the documents since numSplitPoints(1) * samplingRatio (2) = 2 and the
// document source has 2 chunks. So we can assert on the returned values.
auto next = pipeline->getNext();
- ASSERT_BSONOBJ_EQ(next.get().toBson(),
+ ASSERT_BSONOBJ_EQ(next.value().toBson(),
BSON("a" << BSON("b" << 10 << "c" << -6548868637522515075LL) << "c" << 5));
next = pipeline->getNext();
- ASSERT_BSONOBJ_EQ(next.get().toBson(),
+ ASSERT_BSONOBJ_EQ(next.value().toBson(),
BSON("a" << BSON("b" << 20 << "c" << 2598032665634823220LL) << "c" << 1));
ASSERT(!pipeline->getNext());
}
diff --git a/src/mongo/db/pipeline/sequential_document_cache_test.cpp b/src/mongo/db/pipeline/sequential_document_cache_test.cpp
index 39e8e24abc0..2fa47818b0f 100644
--- a/src/mongo/db/pipeline/sequential_document_cache_test.cpp
+++ b/src/mongo/db/pipeline/sequential_document_cache_test.cpp
@@ -82,7 +82,7 @@ TEST(SequentialDocumentCacheTest, CanIterateCacheAfterFreezing) {
ASSERT_DOCUMENT_EQ(*cache.getNext(), DOC("_id" << 0));
ASSERT_DOCUMENT_EQ(*cache.getNext(), DOC("_id" << 1));
- ASSERT_FALSE(cache.getNext().is_initialized());
+ ASSERT_FALSE(cache.getNext().has_value());
}
TEST(SequentialDocumentCacheTest, CanRestartCacheIterationAfterFreezing) {
@@ -98,13 +98,13 @@ TEST(SequentialDocumentCacheTest, CanRestartCacheIterationAfterFreezing) {
ASSERT_DOCUMENT_EQ(*cache.getNext(), DOC("_id" << 0));
ASSERT_DOCUMENT_EQ(*cache.getNext(), DOC("_id" << 1));
- ASSERT_FALSE(cache.getNext().is_initialized());
+ ASSERT_FALSE(cache.getNext().has_value());
cache.restartIteration();
ASSERT_DOCUMENT_EQ(*cache.getNext(), DOC("_id" << 0));
ASSERT_DOCUMENT_EQ(*cache.getNext(), DOC("_id" << 1));
- ASSERT_FALSE(cache.getNext().is_initialized());
+ ASSERT_FALSE(cache.getNext().has_value());
}
DEATH_TEST(SequentialDocumentCacheTest, CannotAddDocumentsToCacheAfterFreezing, "invariant") {
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index f9e966c4733..0ec9f8571c1 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -1287,7 +1287,7 @@ partitionCursors(std::vector<OwnedRemoteCursor> ownedCursors) {
untypedCursors.push_back(std::move(ownedCursor));
} else {
auto cursorType = CursorType_parse(IDLParserContext("ShardedAggHelperCursorType"),
- maybeCursorType.get());
+ maybeCursorType.value());
if (cursorType == CursorTypeEnum::DocumentResult) {
resultsCursors.push_back(std::move(ownedCursor));
} else if (cursorType == CursorTypeEnum::SearchMetaResult) {
diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_removable_document.cpp b/src/mongo/db/pipeline/window_function/window_function_exec_removable_document.cpp
index 625b15aac09..50c45f015bf 100644
--- a/src/mongo/db/pipeline/window_function/window_function_exec_removable_document.cpp
+++ b/src/mongo/db/pipeline/window_function/window_function_exec_removable_document.cpp
@@ -73,7 +73,7 @@ void WindowFunctionExecRemovableDocument::initialize() {
int lowerBoundForInit = _lowerBound > 0 ? _lowerBound : 0;
// Run the loop until we hit the out-of-partition break (right unbounded) or we hit the upper
// bound.
- for (int i = lowerBoundForInit; !_upperBound || i <= _upperBound.get(); ++i) {
+ for (int i = lowerBoundForInit; !_upperBound || i <= _upperBound.value(); ++i) {
// If this is false, we're over the end of the partition.
if (auto doc = (this->_iter)[i]) {
addValue(_input->evaluate(*doc, &_input->getExpressionContext()->variables));
@@ -93,7 +93,7 @@ void WindowFunctionExecRemovableDocument::update() {
// If there is no upper bound, the whole partition is loaded by initialize.
if (_upperBound) {
// If this is false, we're over the end of the partition.
- if (auto doc = (this->_iter)[_upperBound.get()]) {
+ if (auto doc = (this->_iter)[_upperBound.value()]) {
addValue(_input->evaluate(*doc, &_input->getExpressionContext()->variables));
}
}
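
Both loops above use an empty _upperBound to mean "right-unbounded": the condition !_upperBound || i <= _upperBound.value() only ever consults the bound when one exists. A sketch with the partition simulated by a vector:

    #include <iostream>
    #include <optional>
    #include <vector>

    int main() {
        std::vector<int> partition{1, 2, 3, 4, 5};
        std::optional<int> upperBound = 2;  // set to std::nullopt for unbounded

        for (int i = 0; !upperBound || i <= upperBound.value(); ++i) {
            if (i >= static_cast<int>(partition.size()))
                break;                      // over the end of the partition
            std::cout << partition[i] << ' ';
        }
        std::cout << '\n';                  // prints: 1 2 3
        return 0;
    }
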
diff --git a/src/mongo/db/process_health/fault_manager.cpp b/src/mongo/db/process_health/fault_manager.cpp
index 1b8a31a0bda..b314635917c 100644
--- a/src/mongo/db/process_health/fault_manager.cpp
+++ b/src/mongo/db/process_health/fault_manager.cpp
@@ -450,7 +450,7 @@ FaultManager::~FaultManager() {
for (auto& pair : _healthCheckContexts) {
auto cbHandle = pair.second.callbackHandle;
if (cbHandle) {
- _taskExecutor->cancel(cbHandle.get());
+ _taskExecutor->cancel(cbHandle.value());
}
}
}
diff --git a/src/mongo/db/query/count_command_as_aggregation_command.cpp b/src/mongo/db/query/count_command_as_aggregation_command.cpp
index b719ab77c86..aa3d761626b 100644
--- a/src/mongo/db/query/count_command_as_aggregation_command.cpp
+++ b/src/mongo/db/query/count_command_as_aggregation_command.cpp
@@ -65,13 +65,13 @@ StatusWith<BSONObj> countCommandAsAggregationCommand(const CountCommandRequest&
if (auto skip = cmd.getSkip()) {
BSONObjBuilder skipBuilder(pipelineBuilder.subobjStart());
- skipBuilder.append("$skip", skip.get());
+ skipBuilder.append("$skip", skip.value());
skipBuilder.doneFast();
}
if (auto limit = cmd.getLimit()) {
BSONObjBuilder limitBuilder(pipelineBuilder.subobjStart());
- limitBuilder.append("$limit", limit.get());
+ limitBuilder.append("$limit", limit.value());
limitBuilder.doneFast();
}
@@ -82,27 +82,27 @@ StatusWith<BSONObj> countCommandAsAggregationCommand(const CountCommandRequest&
// Complete the command by appending the other options to the aggregate command.
if (auto collation = cmd.getCollation()) {
- aggregationBuilder.append(kCollationField, collation.get());
+ aggregationBuilder.append(kCollationField, collation.value());
}
aggregationBuilder.append(kHintField, cmd.getHint());
if (auto maxTime = cmd.getMaxTimeMS()) {
- if (maxTime.get() > 0) {
- aggregationBuilder.append(kMaxTimeMSField, maxTime.get());
+ if (maxTime.value() > 0) {
+ aggregationBuilder.append(kMaxTimeMSField, maxTime.value());
}
}
if (auto readConcern = cmd.getReadConcern()) {
if (!readConcern->isEmpty()) {
- aggregationBuilder.append(kReadConcernField, readConcern.get());
+ aggregationBuilder.append(kReadConcernField, readConcern.value());
}
}
if (auto unwrapped = cmd.getQueryOptions()) {
if (!unwrapped->isEmpty()) {
aggregationBuilder.append(query_request_helper::kUnwrappedReadPrefField,
- unwrapped.get());
+ unwrapped.value());
}
}
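
Each block above copies the optional into a local with if (auto skip = cmd.getSkip()), so the guard and the value() access share one scope and cannot drift apart. A sketch of the idiom with stand-in request and getter names:

    #include <iostream>
    #include <optional>

    struct CountRequest {
        std::optional<long long> skip;
        std::optional<long long> limit;
        std::optional<long long> getSkip() const { return skip; }
        std::optional<long long> getLimit() const { return limit; }
    };

    int main() {
        CountRequest cmd{std::nullopt, 100};

        if (auto skip = cmd.getSkip())    // empty: block is skipped
            std::cout << "$skip " << skip.value() << '\n';

        if (auto limit = cmd.getLimit())  // engaged: appended to the pipeline
            std::cout << "$limit " << limit.value() << '\n';
        return 0;
    }
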
diff --git a/src/mongo/db/query/count_command_test.cpp b/src/mongo/db/query/count_command_test.cpp
index f5409b0bc80..2414e1330ce 100644
--- a/src/mongo/db/query/count_command_test.cpp
+++ b/src/mongo/db/query/count_command_test.cpp
@@ -84,13 +84,13 @@ TEST(CountCommandTest, ParserParsesCommandWithAllFieldsCorrectly) {
const auto countCmd = CountCommandRequest::parse(ctxt, commandObj);
ASSERT_BSONOBJ_EQ(countCmd.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
- ASSERT_EQ(countCmd.getLimit().get(), 100);
- ASSERT_EQ(countCmd.getSkip().get(), 1000);
- ASSERT_EQ(countCmd.getMaxTimeMS().get(), 10000u);
+ ASSERT_EQ(countCmd.getLimit().value(), 100);
+ ASSERT_EQ(countCmd.getSkip().value(), 1000);
+ ASSERT_EQ(countCmd.getMaxTimeMS().value(), 10000u);
ASSERT_BSONOBJ_EQ(countCmd.getHint(), fromjson("{ b : 5 }"));
- ASSERT_BSONOBJ_EQ(countCmd.getCollation().get(), fromjson("{ locale : 'en_US' }"));
- ASSERT_BSONOBJ_EQ(countCmd.getReadConcern().get(), fromjson("{ level: 'linearizable' }"));
- ASSERT_BSONOBJ_EQ(countCmd.getQueryOptions().get(),
+ ASSERT_BSONOBJ_EQ(countCmd.getCollation().value(), fromjson("{ locale : 'en_US' }"));
+ ASSERT_BSONOBJ_EQ(countCmd.getReadConcern().value(), fromjson("{ level: 'linearizable' }"));
+ ASSERT_BSONOBJ_EQ(countCmd.getQueryOptions().value(),
fromjson("{ $readPreference: 'secondary' }"));
}
@@ -102,7 +102,7 @@ TEST(CountCommandTest, ParsingNegativeLimitGivesPositiveLimit) {
<< "limit" << -100);
const auto countCmd = CountCommandRequest::parse(ctxt, commandObj);
- ASSERT_EQ(countCmd.getLimit().get(), 100);
+ ASSERT_EQ(countCmd.getLimit().value(), 100);
}
TEST(CountCommandTest, LimitCannotBeMinLong) {
diff --git a/src/mongo/db/query/cursor_response.cpp b/src/mongo/db/query/cursor_response.cpp
index e6ea4ae6ea3..c3ccdc1d812 100644
--- a/src/mongo/db/query/cursor_response.cpp
+++ b/src/mongo/db/query/cursor_response.cpp
@@ -114,7 +114,7 @@ void appendCursorResponseObject(long long cursorId,
cursorObj.append(kNsField, cursorNamespace);
cursorObj.append(kBatchFieldInitial, firstBatch);
if (cursorType) {
- cursorObj.append(kTypeField, cursorType.get());
+ cursorObj.append(kTypeField, cursorType.value());
}
cursorObj.done();
}
diff --git a/src/mongo/db/query/cursor_response_test.cpp b/src/mongo/db/query/cursor_response_test.cpp
index 56498b71024..dfd9f788290 100644
--- a/src/mongo/db/query/cursor_response_test.cpp
+++ b/src/mongo/db/query/cursor_response_test.cpp
@@ -227,7 +227,7 @@ TEST(CursorResponseTest, parseFromBSONVarsFieldCorrect) {
ASSERT_BSONOBJ_EQ(response.getBatch()[0], BSON("_id" << 1));
ASSERT_BSONOBJ_EQ(response.getBatch()[1], BSON("_id" << 2));
ASSERT_TRUE(response.getVarsField());
- ASSERT_BSONOBJ_EQ(response.getVarsField().get(), varsContents);
+ ASSERT_BSONOBJ_EQ(response.getVarsField().value(), varsContents);
}
TEST(CursorResponseTest, parseFromBSONVarsFieldWrongType) {
@@ -255,7 +255,7 @@ TEST(CursorResponseTest, parseFromBSONMultipleVars) {
ASSERT_BSONOBJ_EQ(response.getBatch()[0], BSON("_id" << 1));
ASSERT_BSONOBJ_EQ(response.getBatch()[1], BSON("_id" << 2));
ASSERT_TRUE(response.getVarsField());
- ASSERT_BSONOBJ_EQ(response.getVarsField().get(), varsContents);
+ ASSERT_BSONOBJ_EQ(response.getVarsField().value(), varsContents);
}
TEST(CursorResponseTest, roundTripThroughCursorResponseBuilderWithPartialResultsReturned) {
diff --git a/src/mongo/db/query/datetime/date_time_support.cpp b/src/mongo/db/query/datetime/date_time_support.cpp
index 439c1f028d2..7ba592a515f 100644
--- a/src/mongo/db/query/datetime/date_time_support.cpp
+++ b/src/mongo/db/query/datetime/date_time_support.cpp
@@ -1202,7 +1202,7 @@ Date_t dateAdd(Date_t date, TimeUnit unit, long long amount, const TimeZone& tim
auto intervalInDays = daysToAdd(localTime.get(), unit, amount);
if (intervalInDays) {
unit = TimeUnit::day;
- amount = intervalInDays.get();
+ amount = intervalInDays.value();
}
auto interval = getTimelibRelTime(unit, amount);
diff --git a/src/mongo/db/query/fle/server_rewrite.cpp b/src/mongo/db/query/fle/server_rewrite.cpp
index 2aeb99a4061..3b35678eee7 100644
--- a/src/mongo/db/query/fle/server_rewrite.cpp
+++ b/src/mongo/db/query/fle/server_rewrite.cpp
@@ -89,7 +89,7 @@ boost::intrusive_ptr<ExpressionInternalFLEEqual> generateFleEqualMatch(StringDat
expCtx,
ExpressionFieldPath::createPathFromString(
expCtx, path.toString(), expCtx->variablesParseState),
- tokens.serverToken.get().data,
+ tokens.serverToken.value().data,
tokens.maxCounter.value_or(0LL),
tokens.edcToken.data);
}
@@ -110,7 +110,7 @@ std::unique_ptr<ExpressionInternalFLEEqual> generateFleEqualMatchUnique(StringDa
expCtx,
ExpressionFieldPath::createPathFromString(
expCtx, path.toString(), expCtx->variablesParseState),
- tokens.serverToken.get().data,
+ tokens.serverToken.value().data,
tokens.maxCounter.value_or(0LL),
tokens.edcToken.data);
}
@@ -144,20 +144,20 @@ static stdx::unordered_map<std::type_index, std::function<void(FLEQueryRewriter*
void rewriteMatch(FLEQueryRewriter* rewriter, DocumentSourceMatch* source) {
if (auto rewritten = rewriter->rewriteMatchExpression(source->getQuery())) {
- source->rebuild(rewritten.get());
+ source->rebuild(rewritten.value());
}
}
void rewriteGeoNear(FLEQueryRewriter* rewriter, DocumentSourceGeoNear* source) {
if (auto rewritten = rewriter->rewriteMatchExpression(source->getQuery())) {
- source->setQuery(rewritten.get());
+ source->setQuery(rewritten.value());
}
}
void rewriteGraphLookUp(FLEQueryRewriter* rewriter, DocumentSourceGraphLookUp* source) {
if (auto filter = source->getAdditionalFilter()) {
- if (auto rewritten = rewriter->rewriteMatchExpression(filter.get())) {
- source->setAdditionalFilter(rewritten.get());
+ if (auto rewritten = rewriter->rewriteMatchExpression(filter.value())) {
+ source->setAdditionalFilter(rewritten.value());
}
}
@@ -396,7 +396,7 @@ BSONObj rewriteEncryptedFilter(const FLEStateCollectionReader& escReader,
if (auto rewritten =
FLEQueryRewriter(expCtx, escReader, eccReader, mode).rewriteMatchExpression(filter)) {
- return rewritten.get();
+ return rewritten.value();
}
return filter;
@@ -514,8 +514,8 @@ BSONObj rewriteEncryptedFilterInsideTxn(FLEQueryInterface* queryImpl,
auto docCount = queryImpl->countDocuments(nss);
return TxnCollectionReader(docCount, queryImpl, nss);
};
- auto escReader = makeCollectionReader(queryImpl, efc.getEscCollection().get());
- auto eccReader = makeCollectionReader(queryImpl, efc.getEccCollection().get());
+ auto escReader = makeCollectionReader(queryImpl, efc.getEscCollection().value());
+ auto eccReader = makeCollectionReader(queryImpl, efc.getEccCollection().value());
return rewriteEncryptedFilter(escReader, eccReader, expCtx, filter, mode);
}
@@ -549,7 +549,7 @@ void processFindCommand(OperationContext* opCtx,
findCommand->setFilter(rewriteQuery(opCtx,
expCtx,
nss,
- findCommand->getEncryptionInformation().get(),
+ findCommand->getEncryptionInformation().value(),
findCommand->getFilter().getOwned(),
getTransaction,
HighCardinalityModeAllowed::kAllow));
@@ -574,7 +574,7 @@ void processCountCommand(OperationContext* opCtx,
countCommand->setQuery(rewriteQuery(opCtx,
expCtx,
nss,
- countCommand->getEncryptionInformation().get(),
+ countCommand->getEncryptionInformation().value(),
countCommand->getQuery().getOwned(),
getTxn,
HighCardinalityModeAllowed::kAllow));
diff --git a/src/mongo/db/query/fle/server_rewrite_test.cpp b/src/mongo/db/query/fle/server_rewrite_test.cpp
index 034de8f0aa9..0e16375c665 100644
--- a/src/mongo/db/query/fle/server_rewrite_test.cpp
+++ b/src/mongo/db/query/fle/server_rewrite_test.cpp
@@ -50,7 +50,7 @@ public:
BSONObj rewriteMatchExpressionForTest(const BSONObj& obj) {
auto res = rewriteMatchExpression(obj);
- return res ? res.get() : obj;
+ return res ? res.value() : obj;
}
};
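
rewriteMatchExpressionForTest() above returns the rewritten object when the rewrite produced one and the input unchanged otherwise; the ternary over value() is behaviorally the same as value_or(obj). A sketch of both spellings, with an invented rewrite rule standing in for the FLE rewriter:

    #include <iostream>
    #include <optional>
    #include <string>

    // Stand-in: "rewrites" only filters mentioning an encrypted field.
    std::optional<std::string> rewrite(const std::string& obj) {
        if (obj.find("ssn") != std::string::npos)
            return std::string{"{__safeContent__: ...}"};
        return std::nullopt;
    }

    std::string rewriteForTest(const std::string& obj) {
        auto res = rewrite(obj);
        return res ? res.value() : obj;  // equivalently: res.value_or(obj)
    }

    int main() {
        std::cout << rewriteForTest("{ssn: 5}") << '\n';  // rewritten
        std::cout << rewriteForTest("{a: 1}") << '\n';    // passthrough
        return 0;
    }
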
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 65f6deb4151..b2292c0dc44 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -934,7 +934,7 @@ protected:
_ws,
_cq,
_plannerParams,
- cs->decisionWorks.get(),
+ cs->decisionWorks.value(),
std::move(root)),
std::move(querySolution));
return result;
diff --git a/src/mongo/db/query/parsed_distinct.cpp b/src/mongo/db/query/parsed_distinct.cpp
index b3e3e92a024..61562191b6c 100644
--- a/src/mongo/db/query/parsed_distinct.cpp
+++ b/src/mongo/db/query/parsed_distinct.cpp
@@ -266,11 +266,11 @@ StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* opCtx,
findCommand->setProjection(getDistinctProjection(std::string(parsedDistinct.getKey())));
if (auto query = parsedDistinct.getQuery()) {
- findCommand->setFilter(query.get().getOwned());
+ findCommand->setFilter(query.value().getOwned());
}
if (auto collation = parsedDistinct.getCollation()) {
- findCommand->setCollation(collation.get().getOwned());
+ findCommand->setCollation(collation.value().getOwned());
}
// The IDL parser above does not handle generic command arguments. Since the underlying query
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index fbef2c6907e..4897bea2115 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -480,7 +480,7 @@ TEST(PlanCacheTest, WorksValueIncreases) {
ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
auto entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 10U);
+ ASSERT_EQ(entry->works.value(), 10U);
ASSERT_FALSE(entry->isActive);
decision = createDecision(1U, 50);
@@ -495,7 +495,7 @@ TEST(PlanCacheTest, WorksValueIncreases) {
entry = assertGet(planCache.getEntry(key));
ASSERT_FALSE(entry->isActive);
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 20U);
+ ASSERT_EQ(entry->works.value(), 20U);
decision = createDecision(1U, 30);
auto callbacks2 = createCallback(*cq, *decision);
@@ -509,7 +509,7 @@ TEST(PlanCacheTest, WorksValueIncreases) {
entry = assertGet(planCache.getEntry(key));
ASSERT_FALSE(entry->isActive);
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 40U);
+ ASSERT_EQ(entry->works.value(), 40U);
decision = createDecision(1U, 25);
auto callbacks3 = createCallback(*cq, *decision);
@@ -528,7 +528,7 @@ TEST(PlanCacheTest, WorksValueIncreases) {
auto&& decision1 = entry->debugInfo->decision;
ASSERT_EQ(decision1->getStats<PlanStageStats>().candidatePlanStats[0]->common.works, 25U);
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 25U);
+ ASSERT_EQ(entry->works.value(), 25U);
ASSERT_EQUALS(planCache.size(), 1U);
@@ -557,7 +557,7 @@ TEST(PlanCacheTest, WorksValueIncreasesByAtLeastOne) {
ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
auto entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 3U);
+ ASSERT_EQ(entry->works.value(), 3U);
ASSERT_FALSE(entry->isActive);
decision = createDecision(1U, 50);
@@ -574,7 +574,7 @@ TEST(PlanCacheTest, WorksValueIncreasesByAtLeastOne) {
entry = assertGet(planCache.getEntry(key));
ASSERT_FALSE(entry->isActive);
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 4U);
+ ASSERT_EQ(entry->works.value(), 4U);
// Clear the plan cache. The inactive entry should now be removed.
planCache.clear();
@@ -598,7 +598,7 @@ TEST(PlanCacheTest, SetIsNoopWhenNewEntryIsWorse) {
ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
auto entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 50U);
+ ASSERT_EQ(entry->works.value(), 50U);
ASSERT_FALSE(entry->isActive);
decision = createDecision(1U, 20);
@@ -611,7 +611,7 @@ TEST(PlanCacheTest, SetIsNoopWhenNewEntryIsWorse) {
entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->isActive);
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 20U);
+ ASSERT_EQ(entry->works.value(), 20U);
decision = createDecision(1U, 100);
auto callbacks2 = createCallback(*cq, *decision);
@@ -623,7 +623,7 @@ TEST(PlanCacheTest, SetIsNoopWhenNewEntryIsWorse) {
entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->isActive);
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 20U);
+ ASSERT_EQ(entry->works.value(), 20U);
}
TEST(PlanCacheTest, SetOverwritesWhenNewEntryIsBetter) {
@@ -641,7 +641,7 @@ TEST(PlanCacheTest, SetOverwritesWhenNewEntryIsBetter) {
// After add, the planCache should have an inactive entry.
auto entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 50U);
+ ASSERT_EQ(entry->works.value(), 50U);
ASSERT_FALSE(entry->isActive);
decision = createDecision(1U, 20);
@@ -654,7 +654,7 @@ TEST(PlanCacheTest, SetOverwritesWhenNewEntryIsBetter) {
entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->isActive);
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 20U);
+ ASSERT_EQ(entry->works.value(), 20U);
decision = createDecision(1U, 10);
auto callbacks2 = createCallback(*cq, *decision);
@@ -666,7 +666,7 @@ TEST(PlanCacheTest, SetOverwritesWhenNewEntryIsBetter) {
entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->isActive);
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 10U);
+ ASSERT_EQ(entry->works.value(), 10U);
}
TEST(PlanCacheTest, DeactivateCacheEntry) {
@@ -684,7 +684,7 @@ TEST(PlanCacheTest, DeactivateCacheEntry) {
// After add, the planCache should have an inactive entry.
auto entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 50U);
+ ASSERT_EQ(entry->works.value(), 50U);
ASSERT_FALSE(entry->isActive);
decision = createDecision(1U, 20);
@@ -697,7 +697,7 @@ TEST(PlanCacheTest, DeactivateCacheEntry) {
entry = assertGet(planCache.getEntry(key));
ASSERT_TRUE(entry->isActive);
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 20U);
+ ASSERT_EQ(entry->works.value(), 20U);
planCache.deactivate(key);
ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kPresentInactive);
@@ -706,7 +706,7 @@ TEST(PlanCacheTest, DeactivateCacheEntry) {
entry = assertGet(planCache.getEntry(key));
ASSERT_FALSE(entry->isActive);
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 20U);
+ ASSERT_EQ(entry->works.value(), 20U);
}
TEST(PlanCacheTest, GetMatchingStatsMatchesAndSerializesCorrectly) {
@@ -738,7 +738,7 @@ TEST(PlanCacheTest, GetMatchingStatsMatchesAndSerializesCorrectly) {
// Define a serialization function which just serializes the number of works.
const auto serializer = [](const PlanCacheEntry& entry) {
ASSERT_TRUE(entry.works);
- return BSON("works" << static_cast<int>(entry.works.get()));
+ return BSON("works" << static_cast<int>(entry.works.value()));
};
// Define a matcher which matches if the number of works exceeds 4.
diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp
index a822ad81c6f..16134503159 100644
--- a/src/mongo/db/query/plan_enumerator.cpp
+++ b/src/mongo/db/query/plan_enumerator.cpp
@@ -1702,7 +1702,7 @@ bool PlanEnumerator::LockstepOrAssignment::shouldResetBeforeProceeding(
if (!subnode.maxIterCount) {
return false; // Haven't yet looped over this child entirely, not ready yet.
}
- totalPossibleEnumerations *= subnode.maxIterCount.get();
+ totalPossibleEnumerations *= subnode.maxIterCount.value();
}
// If we're able to compute a total number of expected enumerations, we must have already cycled
diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp
index c226061c03b..6ebd8d0d8b2 100644
--- a/src/mongo/db/query/planner_access.cpp
+++ b/src/mongo/db/query/planner_access.cpp
@@ -156,10 +156,10 @@ std::pair<boost::optional<Timestamp>, boost::optional<Timestamp>> extractTsRange
boost::optional<Timestamp> childMin;
boost::optional<Timestamp> childMax;
std::tie(childMin, childMax) = extractTsRange(me->getChild(i), false);
- if (childMin && (!min || childMin.get() > min.get())) {
+ if (childMin && (!min || childMin.value() > min.value())) {
min = childMin;
}
- if (childMax && (!max || childMax.get() < max.get())) {
+ if (childMax && (!max || childMax.value() < max.value())) {
max = childMax;
}
}
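
extractTsRange() above keeps the largest child minimum and the smallest child maximum, i.e. the intersection of the children's timestamp ranges, and each comparison fires only when both optionals are engaged. A sketch over plain ints:

    #include <iostream>
    #include <optional>
    #include <utility>
    #include <vector>

    int main() {
        std::vector<std::pair<std::optional<int>, std::optional<int>>> children{
            {5, 10}, {std::nullopt, 7}, {8, std::nullopt}};

        std::optional<int> min, max;
        for (const auto& [childMin, childMax] : children) {
            if (childMin && (!min || childMin.value() > min.value()))
                min = childMin;  // tightest lower bound so far
            if (childMax && (!max || childMax.value() < max.value()))
                max = childMax;  // tightest upper bound so far
        }
        std::cout << min.value() << ' ' << max.value() << '\n';  // prints: 8 7
        return 0;
    }
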
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index d05078423b7..a4ef137a7e5 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -630,9 +630,9 @@ void removeInclusionProjectionBelowGroupRecursive(QuerySolutionNode* solnRoot) {
// Multiple $group stages may be pushed down. So, if the child is a GROUP, then recurse.
return removeInclusionProjectionBelowGroupRecursive(projectNodeCandidate);
} else if (auto projection = attemptToGetProjectionFromQuerySolution(*projectNodeCandidate);
- projection && projection.get()->isInclusionOnly()) {
+ projection && projection.value()->isInclusionOnly()) {
// Check to see if the projectNode's field set is a superset of the groupNode's.
- if (!isSubset(groupNode->requiredFields, projection.get()->getRequiredFields())) {
+ if (!isSubset(groupNode->requiredFields, projection.value()->getRequiredFields())) {
// The dependency set of the GROUP stage is wider than the projectNode field set.
return;
}
@@ -988,7 +988,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAnalysis::analyzeSort(
solnSortPattern = providedSorts.getBaseSortPattern();
}
- if (sortMatchesTraversalPreference(params.traversalPreference.get(), solnSortPattern) &&
+ if (sortMatchesTraversalPreference(params.traversalPreference.value(), solnSortPattern) &&
QueryPlannerCommon::scanDirectionsEqual(solnRoot.get(),
-params.traversalPreference->direction)) {
QueryPlannerCommon::reverseScans(solnRoot.get(), true);
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index b7df376979a..6f37288e733 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -165,7 +165,7 @@ bool hintMatchesClusterKey(const boost::optional<ClusteredCollectionInfo>& clust
clusteredIndexSpec.getName());
auto hintName = firstHintElt.valueStringData();
- return hintName == clusteredIndexSpec.getName().get();
+ return hintName == clusteredIndexSpec.getName().value();
}
// An index spec is provided by the hint.
diff --git a/src/mongo/db/query/query_planner_options_test.cpp b/src/mongo/db/query/query_planner_options_test.cpp
index 12aeeb3d850..7471af9e7a0 100644
--- a/src/mongo/db/query/query_planner_options_test.cpp
+++ b/src/mongo/db/query/query_planner_options_test.cpp
@@ -855,7 +855,7 @@ TEST_F(QueryPlannerTest, DollarResumeAfterFieldPropagatedFromQueryRequestToStage
const auto* node = solns.front()->root();
const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(node);
- ASSERT_EQUALS(RecordId(42LL), csn->resumeAfterRecordId.get());
+ ASSERT_EQUALS(RecordId(42LL), csn->resumeAfterRecordId.value());
}
TEST_F(QueryPlannerTest, PreserveRecordIdOptionPrecludesSimpleSort) {
diff --git a/src/mongo/db/query/sbe_cached_solution_planner.cpp b/src/mongo/db/query/sbe_cached_solution_planner.cpp
index 2300460d032..0cbf5623a5f 100644
--- a/src/mongo/db/query/sbe_cached_solution_planner.cpp
+++ b/src/mongo/db/query/sbe_cached_solution_planner.cpp
@@ -114,7 +114,7 @@ CandidatePlans CachedSolutionPlanner::plan(
0};
}
- const size_t maxReadsBeforeReplan = internalQueryCacheEvictionRatio * _decisionReads.get();
+ const size_t maxReadsBeforeReplan = internalQueryCacheEvictionRatio * _decisionReads.value();
// In cached solution planning we collect execution stats with an upper bound on reads allowed
// per trial run computed based on previous decision reads. If the trial run ends before
diff --git a/src/mongo/db/read_write_concern_defaults.cpp b/src/mongo/db/read_write_concern_defaults.cpp
index 93b0754f2f3..cc383779dd5 100644
--- a/src/mongo/db/read_write_concern_defaults.cpp
+++ b/src/mongo/db/read_write_concern_defaults.cpp
@@ -207,8 +207,8 @@ void ReadWriteConcernDefaults::refreshIfNecessary(OperationContext* opCtx) {
// Log only if we updated the read- or write-concern defaults themselves.
if (defaultsBefore.getDefaultWriteConcern() != defaultsAfter.getDefaultWriteConcern() ||
(defaultsBefore.getDefaultReadConcern() && defaultsAfter.getDefaultReadConcern() &&
- (defaultsBefore.getDefaultReadConcern().get().getLevel() !=
- defaultsAfter.getDefaultReadConcern().get().getLevel()))) {
+ (defaultsBefore.getDefaultReadConcern().value().getLevel() !=
+ defaultsAfter.getDefaultReadConcern().value().getLevel()))) {
LOGV2(20997, "Refreshed RWC defaults", "newDefaults"_attr = possibleNewDefaultsBSON);
}
}
@@ -238,7 +238,7 @@ ReadWriteConcernDefaults::RWConcernDefaultAndTime ReadWriteConcernDefaults::getD
// Only overwrite the default read concern and its source if it has not already been set on mongos.
if (!cached.getDefaultReadConcernSource()) {
- if (!cached.getDefaultReadConcern() || cached.getDefaultReadConcern().get().isEmpty()) {
+ if (!cached.getDefaultReadConcern() || cached.getDefaultReadConcern().value().isEmpty()) {
auto rcDefault = getImplicitDefaultReadConcern();
cached.setDefaultReadConcern(rcDefault);
cached.setDefaultReadConcernSource(DefaultReadConcernSourceEnum::kImplicit);
@@ -255,13 +255,13 @@ ReadWriteConcernDefaults::RWConcernDefaultAndTime ReadWriteConcernDefaults::getD
// already been set through the config server.
if (!cached.getDefaultWriteConcernSource()) {
const bool isCWWCSet = cached.getDefaultWriteConcern() &&
- !cached.getDefaultWriteConcern().get().usedDefaultConstructedWC;
+ !cached.getDefaultWriteConcern().value().usedDefaultConstructedWC;
if (isCWWCSet) {
cached.setDefaultWriteConcernSource(DefaultWriteConcernSourceEnum::kGlobal);
} else {
cached.setDefaultWriteConcernSource(DefaultWriteConcernSourceEnum::kImplicit);
if (_implicitDefaultWriteConcernMajority &&
- _implicitDefaultWriteConcernMajority.get()) {
+ _implicitDefaultWriteConcernMajority.value()) {
cached.setDefaultWriteConcern(
WriteConcernOptions(WriteConcernOptions::kMajority,
WriteConcernOptions::SyncMode::UNSET,
@@ -299,9 +299,9 @@ ReadWriteConcernDefaults::getDefaultWriteConcern(OperationContext* opCtx) {
boost::optional<ReadWriteConcernDefaults::WriteConcern> ReadWriteConcernDefaults::getCWWC(
OperationContext* opCtx) {
auto cached = _getDefaultCWRWCFromDisk(opCtx);
- if (cached && cached.get().getDefaultWriteConcern() &&
- !cached.get().getDefaultWriteConcern().get().usedDefaultConstructedWC) {
- return cached.get().getDefaultWriteConcern().get();
+ if (cached && cached.value().getDefaultWriteConcern() &&
+ !cached.value().getDefaultWriteConcern().value().usedDefaultConstructedWC) {
+ return cached.value().getDefaultWriteConcern().value();
}
return boost::none;
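A standalone sketch of the nested optional access in getCWWC above, with stand-in types; every layer is tested for engagement before value() is called, so the rename cannot surface a bad_optional_access here:

// Sketch with illustrative types: an optional document holding an optional
// write concern, read only when both layers are engaged.
#include <boost/optional.hpp>

struct WC {
    bool usedDefaultConstructedWC = false;
};
struct Defaults {
    boost::optional<WC> wc;
};

boost::optional<WC> getCWWC(const boost::optional<Defaults>& cached) {
    if (cached && cached.value().wc && !cached.value().wc.value().usedDefaultConstructedWC) {
        return cached.value().wc.value();
    }
    return boost::none;
}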
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 8a141ac0214..01d2e450a7a 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -214,7 +214,7 @@ BaseCloner::AfterStageBehavior CollectionCloner::listIndexesStage() {
invariant(_collectionOptions.clusteredIndex);
invariant(spec.getBoolField("clustered") == true);
invariant(clustered_util::formatClusterKeyForListIndexes(
- _collectionOptions.clusteredIndex.get(), _collectionOptions.collation)
+ _collectionOptions.clusteredIndex.value(), _collectionOptions.collation)
.woCompare(spec) == 0);
// Skip if the spec is for the collection's clusteredIndex.
} else if (spec.hasField("buildUUID")) {
@@ -323,7 +323,7 @@ void CollectionCloner::runQuery() {
// Resume the query from where we left off.
LOGV2_DEBUG(21133, 1, "Collection cloner will resume the last successful query");
findCmd.setRequestResumeToken(true);
- findCmd.setResumeAfter(_resumeToken.get());
+ findCmd.setResumeAfter(_resumeToken.value());
} else {
// New attempt at a resumable query.
LOGV2_DEBUG(21134, 1, "Collection cloner will run a new query");
diff --git a/src/mongo/db/repl/idempotency_test_fixture.cpp b/src/mongo/db/repl/idempotency_test_fixture.cpp
index d74b3171b67..18f920b1ffd 100644
--- a/src/mongo/db/repl/idempotency_test_fixture.cpp
+++ b/src/mongo/db/repl/idempotency_test_fixture.cpp
@@ -382,7 +382,7 @@ CollectionState IdempotencyTest::validate(const NamespaceString& nss) {
if (collUUID) {
// Allow in-progress indexes to complete before validating collection contents.
IndexBuildsCoordinator::get(_opCtx.get())
- ->awaitNoIndexBuildInProgressForCollection(_opCtx.get(), collUUID.get());
+ ->awaitNoIndexBuildInProgressForCollection(_opCtx.get(), collUUID.value());
}
{
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index a25ff3b148f..5001927d8dc 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -993,7 +993,7 @@ void InitialSyncer::_getBeginFetchingOpTimeCallback(
IDLParserContext("oldest active transaction optime for initial sync"), docs.front());
auto optime = entry.getStartOpTime();
if (optime) {
- beginFetchingOpTime = optime.get();
+ beginFetchingOpTime = optime.value();
}
}
diff --git a/src/mongo/db/repl/member_config.cpp b/src/mongo/db/repl/member_config.cpp
index 36f682c1b88..107e5ade678 100644
--- a/src/mongo/db/repl/member_config.cpp
+++ b/src/mongo/db/repl/member_config.cpp
@@ -116,7 +116,7 @@ void MemberConfig::addTagInfo(ReplSetTagConfig* tagConfig) {
// Parse "tags" field.
//
if (getTags()) {
- for (auto&& tag : getTags().get()) {
+ for (auto&& tag : getTags().value()) {
if (tag.type() != String) {
uasserted(ErrorCodes::TypeMismatch,
str::stream()
@@ -198,8 +198,8 @@ BSONObj MemberConfig::toBSON(bool omitNewlyAddedField) const {
if (!omitNewlyAddedField && getNewlyAdded()) {
// We should never have _newlyAdded if automatic reconfigs aren't enabled.
- invariant(getNewlyAdded().get());
- configBuilder.append(kNewlyAddedFieldName, getNewlyAdded().get());
+ invariant(getNewlyAdded().value());
+ configBuilder.append(kNewlyAddedFieldName, getNewlyAdded().value());
}
configBuilder.append(kBuildIndexesFieldName, getBuildIndexes());
@@ -212,7 +212,7 @@ BSONObj MemberConfig::toBSON(bool omitNewlyAddedField) const {
_splitHorizon.toBSON(configBuilder);
if (getSecondaryDelaySecs()) {
- configBuilder.append(kSecondaryDelaySecsFieldName, getSecondaryDelaySecs().get());
+ configBuilder.append(kSecondaryDelaySecsFieldName, getSecondaryDelaySecs().value());
}
configBuilder.append(kVotesFieldName, MemberConfigBase::getVotes() ? 1 : 0);
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index c3391e63f92..82796ee6c44 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -794,7 +794,7 @@ NamespaceString extractNsFromUUIDorNs(OperationContext* opCtx,
const NamespaceString& ns,
const boost::optional<UUID>& ui,
const BSONObj& cmd) {
- return ui ? extractNsFromUUID(opCtx, ui.get()) : extractNs(ns.db(), cmd);
+ return ui ? extractNsFromUUID(opCtx, ui.value()) : extractNs(ns.db(), cmd);
}
using OpApplyFn = std::function<Status(
@@ -1001,28 +1001,28 @@ const StringMap<ApplyOpMetadata> kOpsMap = {
{[](OperationContext* opCtx, const OplogEntry& entry, OplogApplication::Mode mode) -> Status {
const auto& cmd = entry.getObject();
return dropIndexesForApplyOps(
- opCtx, extractNsFromUUID(opCtx, entry.getUuid().get()), cmd);
+ opCtx, extractNsFromUUID(opCtx, entry.getUuid().value()), cmd);
},
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"deleteIndexes",
{[](OperationContext* opCtx, const OplogEntry& entry, OplogApplication::Mode mode) -> Status {
const auto& cmd = entry.getObject();
return dropIndexesForApplyOps(
- opCtx, extractNsFromUUID(opCtx, entry.getUuid().get()), cmd);
+ opCtx, extractNsFromUUID(opCtx, entry.getUuid().value()), cmd);
},
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"dropIndex",
{[](OperationContext* opCtx, const OplogEntry& entry, OplogApplication::Mode mode) -> Status {
const auto& cmd = entry.getObject();
return dropIndexesForApplyOps(
- opCtx, extractNsFromUUID(opCtx, entry.getUuid().get()), cmd);
+ opCtx, extractNsFromUUID(opCtx, entry.getUuid().value()), cmd);
},
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"dropIndexes",
{[](OperationContext* opCtx, const OplogEntry& entry, OplogApplication::Mode mode) -> Status {
const auto& cmd = entry.getObject();
return dropIndexesForApplyOps(
- opCtx, extractNsFromUUID(opCtx, entry.getUuid().get()), cmd);
+ opCtx, extractNsFromUUID(opCtx, entry.getUuid().value()), cmd);
},
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"renameCollection",
@@ -1177,10 +1177,10 @@ Status applyOperation_inlock(OperationContext* opCtx,
CollectionPtr collection = nullptr;
if (auto uuid = op.getUuid()) {
auto catalog = CollectionCatalog::get(opCtx);
- collection = catalog->lookupCollectionByUUID(opCtx, uuid.get());
+ collection = catalog->lookupCollectionByUUID(opCtx, uuid.value());
uassert(ErrorCodes::NamespaceNotFound,
str::stream() << "Failed to apply operation due to missing collection ("
- << uuid.get() << "): " << redact(opOrGroupedInserts.toBSON()),
+ << uuid.value() << "): " << redact(opOrGroupedInserts.toBSON()),
collection);
requestNss = collection->ns();
dassert(opCtx->lockState()->isCollectionLockedForMode(requestNss, MODE_IX));
@@ -1210,7 +1210,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
BSONObj o2;
if (op.getObject2())
- o2 = op.getObject2().get();
+ o2 = op.getObject2().value();
const IndexCatalog* indexCatalog =
collection == nullptr ? nullptr : collection->getIndexCatalog();
@@ -1292,7 +1292,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
for (const auto iOp : insertOps) {
invariant(iOp->getTerm());
insertObjs.emplace_back(
- iOp->getObject(), iOp->getTimestamp(), iOp->getTerm().get());
+ iOp->getObject(), iOp->getTimestamp(), iOp->getTerm().value());
}
} else {
// Applying grouped inserts on the primary as part of a tenant migration.
@@ -1380,7 +1380,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
InsertStatement insertStmt(o);
if (assignOperationTimestamp) {
invariant(op.getTerm());
- insertStmt.oplogSlot = OpTime(op.getTimestamp(), op.getTerm().get());
+ insertStmt.oplogSlot = OpTime(op.getTimestamp(), op.getTerm().value());
} else if (!repl::ReplicationCoordinator::get(opCtx)->isOplogDisabledFor(
opCtx, collection->ns())) {
// Primaries processing inserts always pre-allocate timestamps. For parity,
@@ -1638,10 +1638,10 @@ Status applyOperation_inlock(OperationContext* opCtx,
if (op.getNeedsRetryImage()) {
writeToImageCollection(opCtx,
- op.getSessionId().get(),
- op.getTxnNumber().get(),
+ op.getSessionId().value(),
+ op.getTxnNumber().value(),
op.getApplyOpsTimestamp().value_or(op.getTimestamp()),
- op.getNeedsRetryImage().get(),
+ op.getNeedsRetryImage().value(),
// If we did not request an image because we're in
// initial sync, the value passed in here is conveniently
// the empty BSONObj.
@@ -1733,8 +1733,8 @@ Status applyOperation_inlock(OperationContext* opCtx,
// is responsible for whether to retry. The motivation here is to simply reduce
// the number of states that related documents in the two collections can be in.
writeToImageCollection(opCtx,
- op.getSessionId().get(),
- op.getTxnNumber().get(),
+ op.getSessionId().value(),
+ op.getTxnNumber().value(),
op.getApplyOpsTimestamp().value_or(op.getTimestamp()),
repl::RetryImageEnum::kPreImage,
result.requestedPreImage.value_or(BSONObj()),
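The writeToImageCollection calls above mix value() with value_or(); a minimal sketch of the difference, with an illustrative variable name. value_or() never throws: it returns the supplied fallback when the optional is empty.

// Sketch: value_or() as the non-throwing counterpart to value().
#include <boost/optional.hpp>
#include <iostream>

int main() {
    boost::optional<int> applyOpsTs;              // empty
    std::cout << applyOpsTs.value_or(7) << '\n';  // prints the fallback: 7
    applyOpsTs = 3;
    std::cout << applyOpsTs.value_or(7) << '\n';  // prints the engaged value: 3
    return 0;
}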
diff --git a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
index d83795d16d5..af1ae775c8d 100644
--- a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
@@ -478,7 +478,7 @@ UUID createCollectionWithUuid(OperationContext* opCtx, const NamespaceString& ns
CollectionOptions options;
options.uuid = UUID::gen();
createCollection(opCtx, nss, options);
- return options.uuid.get();
+ return options.uuid.value();
}
void createDatabase(OperationContext* opCtx, StringData dbName) {
diff --git a/src/mongo/db/repl/oplog_applier_utils.cpp b/src/mongo/db/repl/oplog_applier_utils.cpp
index 0d2e5504b89..cd7e0b62839 100644
--- a/src/mongo/db/repl/oplog_applier_utils.cpp
+++ b/src/mongo/db/repl/oplog_applier_utils.cpp
@@ -175,7 +175,7 @@ NamespaceString OplogApplierUtils::parseUUIDOrNs(OperationContext* opCtx,
return oplogEntry.getNss();
}
- const auto& uuid = optionalUuid.get();
+ const auto& uuid = optionalUuid.value();
auto catalog = CollectionCatalog::get(opCtx);
auto nss = catalog->lookupNSSByUUID(opCtx, uuid);
uassert(ErrorCodes::NamespaceNotFound,
@@ -187,7 +187,7 @@ NamespaceString OplogApplierUtils::parseUUIDOrNs(OperationContext* opCtx,
NamespaceStringOrUUID OplogApplierUtils::getNsOrUUID(const NamespaceString& nss,
const OplogEntry& op) {
if (auto ui = op.getUuid()) {
- return {nss.db().toString(), ui.get()};
+ return {nss.db().toString(), ui.value()};
}
return nss;
}
diff --git a/src/mongo/db/repl/oplog_entry.cpp b/src/mongo/db/repl/oplog_entry.cpp
index 60de088b83a..644818274cb 100644
--- a/src/mongo/db/repl/oplog_entry.cpp
+++ b/src/mongo/db/repl/oplog_entry.cpp
@@ -85,21 +85,21 @@ BSONObj makeOplogEntryDoc(OpTime opTime,
builder.append(OplogEntryBase::kNssFieldName, nss.toString());
builder.append(OplogEntryBase::kWallClockTimeFieldName, wallClockTime);
if (hash) {
- builder.append(OplogEntryBase::kHashFieldName, hash.get());
+ builder.append(OplogEntryBase::kHashFieldName, hash.value());
}
if (uuid) {
uuid->appendToBuilder(&builder, OplogEntryBase::kUuidFieldName);
}
if (fromMigrate) {
- builder.append(OplogEntryBase::kFromMigrateFieldName, fromMigrate.get());
+ builder.append(OplogEntryBase::kFromMigrateFieldName, fromMigrate.value());
}
builder.append(OplogEntryBase::kObjectFieldName, oField);
if (o2Field) {
- builder.append(OplogEntryBase::kObject2FieldName, o2Field.get());
+ builder.append(OplogEntryBase::kObject2FieldName, o2Field.value());
}
if (isUpsert) {
invariant(o2Field);
- builder.append(OplogEntryBase::kUpsertFieldName, isUpsert.get());
+ builder.append(OplogEntryBase::kUpsertFieldName, isUpsert.value());
}
if (statementIds.size() == 1) {
builder.append(OplogEntryBase::kStatementIdsFieldName, statementIds.front());
@@ -107,26 +107,26 @@ BSONObj makeOplogEntryDoc(OpTime opTime,
builder.append(OplogEntryBase::kStatementIdsFieldName, statementIds);
}
if (prevWriteOpTimeInTransaction) {
- const BSONObj localObject = prevWriteOpTimeInTransaction.get().toBSON();
+ const BSONObj localObject = prevWriteOpTimeInTransaction.value().toBSON();
builder.append(OplogEntryBase::kPrevWriteOpTimeInTransactionFieldName, localObject);
}
if (preImageOpTime) {
- const BSONObj localObject = preImageOpTime.get().toBSON();
+ const BSONObj localObject = preImageOpTime.value().toBSON();
builder.append(OplogEntryBase::kPreImageOpTimeFieldName, localObject);
}
if (postImageOpTime) {
- const BSONObj localObject = postImageOpTime.get().toBSON();
+ const BSONObj localObject = postImageOpTime.value().toBSON();
builder.append(OplogEntryBase::kPostImageOpTimeFieldName, localObject);
}
if (destinedRecipient) {
builder.append(OplogEntryBase::kDestinedRecipientFieldName,
- destinedRecipient.get().toString());
+ destinedRecipient.value().toString());
}
if (needsRetryImage) {
builder.append(OplogEntryBase::kNeedsRetryImageFieldName,
- RetryImage_serializer(needsRetryImage.get()));
+ RetryImage_serializer(needsRetryImage.value()));
}
return builder.obj();
}
@@ -298,7 +298,7 @@ void MutableOplogEntry::setOpTime(const OpTime& opTime) & {
OpTime MutableOplogEntry::getOpTime() const {
long long term = OpTime::kUninitializedTerm;
if (getTerm()) {
- term = getTerm().get();
+ term = getTerm().value();
}
return OpTime(getTimestamp(), term);
}
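A hedged sketch of the conditional-append pattern in makeOplogEntryDoc above, using a string stream as an illustrative stand-in for BSONObjBuilder; optional fields are appended only when engaged, so each value() call sits behind a truthiness check:

// Sketch: append optional oplog fields only when they are set.
#include <boost/optional.hpp>
#include <sstream>
#include <string>

std::string makeDoc(boost::optional<long long> hash, boost::optional<bool> fromMigrate) {
    std::ostringstream builder;
    if (hash) {
        builder << "h:" << hash.value() << ' ';
    }
    if (fromMigrate) {
        builder << "fromMigrate:" << fromMigrate.value();
    }
    return builder.str();
}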
diff --git a/src/mongo/db/repl/oplog_entry_or_grouped_inserts.cpp b/src/mongo/db/repl/oplog_entry_or_grouped_inserts.cpp
index be04c9a8463..6173db95e00 100644
--- a/src/mongo/db/repl/oplog_entry_or_grouped_inserts.cpp
+++ b/src/mongo/db/repl/oplog_entry_or_grouped_inserts.cpp
@@ -65,7 +65,7 @@ BSONObj OplogEntryOrGroupedInserts::toBSON() const {
long long term = OpTime::kUninitializedTerm;
auto parsedTerm = op->getTerm();
if (parsedTerm)
- term = parsedTerm.get();
+ term = parsedTerm.value();
tArrayBuilder.append(term);
}
}
diff --git a/src/mongo/db/repl/primary_only_service.cpp b/src/mongo/db/repl/primary_only_service.cpp
index c39fb47ce9b..1dd6d6ebd6d 100644
--- a/src/mongo/db/repl/primary_only_service.cpp
+++ b/src/mongo/db/repl/primary_only_service.cpp
@@ -252,7 +252,7 @@ void PrimaryOnlyService::reportInstanceInfoForCurrentOp(
for (auto& [_, instance] : _activeInstances) {
auto op = instance.getInstance()->reportForCurrentOp(connMode, sessionMode);
if (op.has_value()) {
- ops->push_back(std::move(op.get()));
+ ops->push_back(std::move(op.value()));
}
}
}
diff --git a/src/mongo/db/repl/primary_only_service_test.cpp b/src/mongo/db/repl/primary_only_service_test.cpp
index 7b3e3e0a549..08db89a738b 100644
--- a/src/mongo/db/repl/primary_only_service_test.cpp
+++ b/src/mongo/db/repl/primary_only_service_test.cpp
@@ -528,7 +528,7 @@ TEST_F(PrimaryOnlyServiceTest, LookupInstance) {
ASSERT(instance.get());
ASSERT_EQ(0, instance->getID());
- auto instance2 = TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).get();
+ auto instance2 = TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).value();
ASSERT_EQ(instance.get(), instance2.get());
@@ -537,7 +537,7 @@ TEST_F(PrimaryOnlyServiceTest, LookupInstance) {
// Shouldn't be able to look up instance after it has completed running.
auto instance3 = TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0));
- ASSERT_FALSE(instance3.is_initialized());
+ ASSERT_FALSE(instance3.has_value());
}
TEST_F(PrimaryOnlyServiceTest, LookupInstanceInterruptible) {
@@ -573,7 +573,7 @@ TEST_F(PrimaryOnlyServiceTest, LookupInstanceHoldingISLock) {
ASSERT_FALSE(opCtx->lockState()->wasGlobalLockTakenInModeConflictingWithWrites());
auto instance2 =
- TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).get();
+ TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).value();
ASSERT_EQ(instance.get(), instance2.get());
}
@@ -595,7 +595,7 @@ TEST_F(PrimaryOnlyServiceTest, LookupInstanceHoldingIXLock) {
Lock::GlobalLock lk(opCtx.get(), MODE_IX);
ASSERT_FALSE(opCtx->shouldAlwaysInterruptAtStepDownOrUp());
auto instance2 =
- TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).get();
+ TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).value();
ASSERT_EQ(instance.get(), instance2.get());
}
@@ -847,7 +847,7 @@ TEST_F(PrimaryOnlyServiceTest, StepDownBeforePersisted) {
auto opCtx = makeOperationContext();
// Since the Instance never wrote its state document, it shouldn't be recreated on stepUp.
auto recreatedInstance = TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0));
- ASSERT(!recreatedInstance.is_initialized());
+ ASSERT(!recreatedInstance.has_value());
}
TEST_F(PrimaryOnlyServiceTest, RecreateInstanceOnStepUp) {
@@ -874,7 +874,7 @@ TEST_F(PrimaryOnlyServiceTest, RecreateInstanceOnStepUp) {
{
auto opCtx = makeOperationContext();
auto recreatedInstance =
- TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).get();
+ TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).value();
ASSERT_EQ(TestService::State::kOne, recreatedInstance->getInitialState());
TestServiceHangDuringStateTwo.waitForTimesEntered(++stateTwoFPTimesEntered);
ASSERT_EQ(TestService::State::kTwo, recreatedInstance->getState());
@@ -892,7 +892,7 @@ TEST_F(PrimaryOnlyServiceTest, RecreateInstanceOnStepUp) {
{
auto opCtx = makeOperationContext();
auto recreatedInstance =
- TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).get();
+ TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).value();
ASSERT_EQ(TestService::State::kTwo, recreatedInstance->getInitialState());
TestServiceHangDuringStateOne.setMode(FailPoint::off);
recreatedInstance->getCompletionFuture().get();
@@ -901,7 +901,7 @@ TEST_F(PrimaryOnlyServiceTest, RecreateInstanceOnStepUp) {
auto nonExistentInstance =
TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0));
- ASSERT(!nonExistentInstance.is_initialized());
+ ASSERT(!nonExistentInstance.has_value());
}
stepDown();
@@ -913,7 +913,7 @@ TEST_F(PrimaryOnlyServiceTest, RecreateInstanceOnStepUp) {
// its state document.
auto nonExistentInstance =
TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0));
- ASSERT(!nonExistentInstance.is_initialized());
+ ASSERT(!nonExistentInstance.has_value());
}
}
@@ -969,7 +969,7 @@ TEST_F(PrimaryOnlyServiceTest, StepDownBeforeRebuildingInstances) {
TestServiceHangDuringStateOne.waitForTimesEntered(++stateOneFPTimesEntered);
auto opCtx = makeOperationContext();
- auto instance = TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).get();
+ auto instance = TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).value();
ASSERT_EQ(TestService::State::kOne, instance->getInitialState());
ASSERT_EQ(TestService::State::kOne, instance->getState());
@@ -1019,8 +1019,8 @@ TEST_F(PrimaryOnlyServiceTest, RecreateInstancesFails) {
// After stepping down we are in a consistent state again, but cannot create or lookup
// instances because we are not primary.
auto opCtx = makeOperationContext();
- ASSERT_FALSE(TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0))
- .is_initialized());
+ ASSERT_FALSE(
+ TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).has_value());
ASSERT_THROWS_CODE(TestService::Instance::getOrCreate(
opCtx.get(), _service, BSON("_id" << 0 << "state" << 0)),
DBException,
@@ -1038,7 +1038,7 @@ TEST_F(PrimaryOnlyServiceTest, RecreateInstancesFails) {
// Instance should be recreated successfully.
auto opCtx = makeOperationContext();
auto instance =
- TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).get();
+ TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)).value();
ASSERT_EQ(TestService::State::kOne, instance->getInitialState());
ASSERT_EQ(TestService::State::kOne, instance->getState());
TestServiceHangDuringStateOne.setMode(FailPoint::off);
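Note that the .get() calls these tests keep (for example instance.get() in the ASSERT_EQ lines) are std::shared_ptr::get(), not optional accessors, which is why the patch leaves them untouched. For the optional side, a minimal standalone sketch of the second rename, is_initialized() to has_value(); both are equivalent to the optional's implicit bool conversion:

// Sketch: the emptiness query before and after the rename.
#include <boost/optional.hpp>
#include <cassert>

int main() {
    boost::optional<int> instance;
    assert(!instance.is_initialized());  // old spelling, removed by this patch
    assert(!instance.has_value());       // new spelling, identical meaning
    instance = 42;
    assert(instance.has_value() && static_cast<bool>(instance));
    return 0;
}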
diff --git a/src/mongo/db/repl/read_concern_args.cpp b/src/mongo/db/repl/read_concern_args.cpp
index 93ebe67cbd7..5906aad2b51 100644
--- a/src/mongo/db/repl/read_concern_args.cpp
+++ b/src/mongo/db/repl/read_concern_args.cpp
@@ -116,7 +116,7 @@ ReadConcernLevel ReadConcernArgs::getLevel() const {
}
bool ReadConcernArgs::hasLevel() const {
- return _level.is_initialized();
+ return _level.has_value();
}
boost::optional<OpTime> ReadConcernArgs::getArgsOpTime() const {
@@ -299,7 +299,7 @@ bool ReadConcernArgs::isSpeculativeMajority() const {
void ReadConcernArgs::_appendInfoInner(BSONObjBuilder* builder) const {
if (_level) {
- builder->append(kLevelFieldName, readConcernLevels::toString(_level.get()));
+ builder->append(kLevelFieldName, readConcernLevels::toString(_level.value()));
}
if (_opTime) {
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index e50c9a48f16..49351129c12 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -152,7 +152,7 @@ public:
StorageInterface::get(getGlobalServiceContext())
->getLastStableRecoveryTimestamp(getGlobalServiceContext());
if (ts) {
- result.append("lastStableRecoveryTimestamp", ts.get());
+ result.append("lastStableRecoveryTimestamp", ts.value());
}
} else {
LOGV2_WARNING(6100700,
diff --git a/src/mongo/db/repl/repl_set_test_egress.cpp b/src/mongo/db/repl/repl_set_test_egress.cpp
index c5842a52baa..c3c5b9861eb 100644
--- a/src/mongo/db/repl/repl_set_test_egress.cpp
+++ b/src/mongo/db/repl/repl_set_test_egress.cpp
@@ -105,7 +105,7 @@ public:
HostAndPort target;
if (auto optTarget = cmd.getTarget()) {
- target = validateTarget(opCtx, optTarget.get());
+ target = validateTarget(opCtx, optTarget.value());
} else {
target = selectTarget(opCtx);
}
diff --git a/src/mongo/db/repl/replication_consistency_markers_impl.cpp b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
index f76c8217cf4..9f41df15ce4 100644
--- a/src/mongo/db/repl/replication_consistency_markers_impl.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
@@ -136,8 +136,8 @@ bool ReplicationConsistencyMarkersImpl::getInitialSyncFlag(OperationContext* opC
3,
"returning initial sync flag value of {flag}",
"Returning initial sync flag value",
- "flag"_attr = flag.get());
- return flag.get();
+ "flag"_attr = flag.value());
+ return flag.value();
}
void ReplicationConsistencyMarkersImpl::setInitialSyncFlag(OperationContext* opCtx) {
@@ -254,7 +254,7 @@ OpTime ReplicationConsistencyMarkersImpl::getAppliedThrough(OperationContext* op
"appliedThroughString"_attr = appliedThrough->toString(),
"appliedThroughBSON"_attr = appliedThrough->toBSON());
- return appliedThrough.get();
+ return appliedThrough.value();
}
void ReplicationConsistencyMarkersImpl::ensureFastCountOnOplogTruncateAfterPoint(
@@ -481,17 +481,17 @@ ReplicationConsistencyMarkersImpl::refreshOplogTruncateAfterPointIfPrimary(
// entry (it can be momentarily between oplog entry timestamps), _lastNoHolesOplogTimestamp
// tracks the oplog entry so as to ensure we send out all updates before desisting until new
// operations occur.
- OpTime opTime = fassert(4455502, OpTime::parseFromOplogEntry(truncateOplogEntryBSON.get()));
+ OpTime opTime = fassert(4455502, OpTime::parseFromOplogEntry(truncateOplogEntryBSON.value()));
_lastNoHolesOplogTimestamp = opTime.getTimestamp();
_lastNoHolesOplogOpTimeAndWallTime = fassert(
4455501,
- OpTimeAndWallTime::parseOpTimeAndWallTimeFromOplogEntry(truncateOplogEntryBSON.get()));
+ OpTimeAndWallTime::parseOpTimeAndWallTimeFromOplogEntry(truncateOplogEntryBSON.value()));
// Pass the _lastNoHolesOplogTimestamp timestamp down to the storage layer to prevent oplog
// history lte to oplogTruncateAfterPoint from being entirely deleted. There should always be a
// single oplog entry lte to the oplogTruncateAfterPoint. Otherwise there will not be a valid
// oplog entry with which to update the caller.
- _storageInterface->setPinnedOplogTimestamp(opCtx, _lastNoHolesOplogTimestamp.get());
+ _storageInterface->setPinnedOplogTimestamp(opCtx, _lastNoHolesOplogTimestamp.value());
return _lastNoHolesOplogOpTimeAndWallTime;
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 20e6e2392a4..e1a937160db 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -2390,10 +2390,10 @@ std::shared_ptr<const HelloResponse> ReplicationCoordinatorImpl::awaitHelloRespo
"Waiting for a hello response from a topology change or until deadline: "
"{deadline}. Current TopologyVersion counter is {currentTopologyVersionCounter}",
"Waiting for a hello response from a topology change or until deadline",
- "deadline"_attr = deadline.get(),
+ "deadline"_attr = deadline.value(),
"currentTopologyVersionCounter"_attr = topologyVersion.getCounter());
auto statusWithHello =
- futureGetNoThrowWithDeadline(opCtx, future, deadline.get(), opCtx->getTimeoutError());
+ futureGetNoThrowWithDeadline(opCtx, future, deadline.value(), opCtx->getTimeoutError());
auto status = statusWithHello.getStatus();
if (MONGO_unlikely(hangAfterWaitingForTopologyChangeTimesOut.shouldFail())) {
@@ -3796,7 +3796,7 @@ Status ReplicationCoordinatorImpl::_doReplSetReconfig(OperationContext* opCtx,
ReadWriteConcernDefaults::get(opCtx->getServiceContext()).getDefault(opCtx);
const auto wcDefault = rwcDefaults.getDefaultWriteConcern();
if (wcDefault) {
- auto validateWCStatus = newConfig.validateWriteConcern(wcDefault.get());
+ auto validateWCStatus = newConfig.validateWriteConcern(wcDefault.value());
if (!validateWCStatus.isOK()) {
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream() << "May not remove custom write concern "
@@ -4987,16 +4987,16 @@ ReplicationCoordinatorImpl::_setCurrentRSConfig(WithLock lk,
}
// Wake up writeConcern waiters that are no longer satisfiable due to the rsConfig change.
- _replicationWaiterList.setValueIf_inlock(
- [this](const OpTime& opTime, const SharedWaiterHandle& waiter) {
- invariant(waiter->writeConcern);
- // This throws if a waiter's writeConcern is no longer satisfiable, in which case
- // setValueIf_inlock will fulfill the waiter's promise with the error status.
- uassertStatusOK(_checkIfWriteConcernCanBeSatisfied_inlock(waiter->writeConcern.get()));
- // Return false meaning that the waiter is still satisfiable and thus can remain in the
- // waiter list.
- return false;
- });
+ _replicationWaiterList.setValueIf_inlock([this](const OpTime& opTime,
+ const SharedWaiterHandle& waiter) {
+ invariant(waiter->writeConcern);
+ // This throws if a waiter's writeConcern is no longer satisfiable, in which case
+ // setValueIf_inlock will fulfill the waiter's promise with the error status.
+ uassertStatusOK(_checkIfWriteConcernCanBeSatisfied_inlock(waiter->writeConcern.value()));
+ // Return false meaning that the waiter is still satisfiable and thus can remain in the
+ // waiter list.
+ return false;
+ });
_cancelCatchupTakeover_inlock();
_cancelPriorityTakeover_inlock();
@@ -5038,7 +5038,7 @@ void ReplicationCoordinatorImpl::_wakeReadyWaiters(WithLock lk, boost::optional<
_replicationWaiterList.setValueIf_inlock(
[this](const OpTime& opTime, const SharedWaiterHandle& waiter) {
invariant(waiter->writeConcern);
- return _doneWaitingForReplication_inlock(opTime, waiter->writeConcern.get());
+ return _doneWaitingForReplication_inlock(opTime, waiter->writeConcern.value());
},
opTime);
}
@@ -6241,7 +6241,7 @@ void ReplicationCoordinatorImpl::_validateDefaultWriteConcernOnShardStartup(With
// flag is set as we record it during sharding initialization phase, as on restarting a
// shard node for upgrading or any other reason, sharding initialization happens before
// config initialization.
- if (_wasCWWCSetOnConfigServerOnStartup && !_wasCWWCSetOnConfigServerOnStartup.get() &&
+ if (_wasCWWCSetOnConfigServerOnStartup && !_wasCWWCSetOnConfigServerOnStartup.value() &&
!_rsConfig.isImplicitDefaultWriteConcernMajority()) {
auto msg =
"Cannot start shard because the implicit default write concern on this shard is "
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index 842826924b4..2f0e89b2cc9 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -1317,7 +1317,7 @@ TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfNodeIsFresherThanCurrentPrimary)
// Make sure that the catchup takeover has actually been scheduled and at the
// correct time.
ASSERT(replCoord->getCatchupTakeover_forTest());
- auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().get();
+ auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().value();
Milliseconds catchupTakeoverDelay = catchupTakeoverTime - now;
ASSERT_EQUALS(config.getCatchUpTakeoverDelay(), catchupTakeoverDelay);
}
@@ -1364,7 +1364,7 @@ TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfBothTakeoversAnOption) {
// correct time and that a priority takeover has not been scheduled.
ASSERT(replCoord->getCatchupTakeover_forTest());
ASSERT_FALSE(replCoord->getPriorityTakeover_forTest());
- auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().get();
+ auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().value();
Milliseconds catchupTakeoverDelay = catchupTakeoverTime - now;
ASSERT_EQUALS(config.getCatchUpTakeoverDelay(), catchupTakeoverDelay);
}
@@ -1463,7 +1463,7 @@ TEST_F(TakeoverTest, CatchupTakeoverNotScheduledTwice) {
ASSERT(replCoord->getCatchupTakeover_forTest());
executor::TaskExecutor::CallbackHandle catchupTakeoverCbh =
replCoord->getCatchupTakeoverCbh_forTest();
- auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().get();
+ auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().value();
Milliseconds catchupTakeoverDelay = catchupTakeoverTime - now;
ASSERT_EQUALS(config.getCatchUpTakeoverDelay(), catchupTakeoverDelay);
@@ -1472,7 +1472,7 @@ TEST_F(TakeoverTest, CatchupTakeoverNotScheduledTwice) {
config, now + config.getHeartbeatInterval(), HostAndPort("node2", 12345), behindOptime);
// Make sure another catchup takeover wasn't scheduled
- ASSERT_EQUALS(catchupTakeoverTime, replCoord->getCatchupTakeover_forTest().get());
+ ASSERT_EQUALS(catchupTakeoverTime, replCoord->getCatchupTakeover_forTest().value());
ASSERT_TRUE(catchupTakeoverCbh == replCoord->getCatchupTakeoverCbh_forTest());
}
@@ -1521,7 +1521,7 @@ TEST_F(TakeoverTest, CatchupAndPriorityTakeoverNotScheduledAtSameTime) {
// Make sure that the catchup takeover has actually been scheduled and at the
// correct time.
ASSERT(replCoord->getCatchupTakeover_forTest());
- auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().get();
+ auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().value();
Milliseconds catchupTakeoverDelay = catchupTakeoverTime - now;
ASSERT_EQUALS(config.getCatchUpTakeoverDelay(), catchupTakeoverDelay);
@@ -1581,7 +1581,7 @@ TEST_F(TakeoverTest, CatchupTakeoverCallbackCanceledIfElectionTimeoutRuns) {
// Make sure that the catchup takeover has actually been scheduled and at the
// correct time.
ASSERT(replCoord->getCatchupTakeover_forTest());
- auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().get();
+ auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().value();
Milliseconds catchupTakeoverDelay = catchupTakeoverTime - now;
ASSERT_EQUALS(config.getCatchUpTakeoverDelay(), catchupTakeoverDelay);
@@ -1653,7 +1653,7 @@ TEST_F(TakeoverTest, CatchupTakeoverCanceledIfTransitionToRollback) {
// Make sure that the catchup takeover has actually been scheduled and at the
// correct time.
ASSERT(replCoord->getCatchupTakeover_forTest());
- auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().get();
+ auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().value();
Milliseconds catchupTakeoverDelay = catchupTakeoverTime - now;
ASSERT_EQUALS(config.getCatchUpTakeoverDelay(), catchupTakeoverDelay);
@@ -1714,7 +1714,7 @@ TEST_F(TakeoverTest, SuccessfulCatchupTakeover) {
// Make sure that the catchup takeover has actually been scheduled and at the
// correct time.
ASSERT(replCoord->getCatchupTakeover_forTest());
- auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().get();
+ auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().value();
Milliseconds catchupTakeoverDelay = catchupTakeoverTime - now;
ASSERT_EQUALS(config.getCatchUpTakeoverDelay(), catchupTakeoverDelay);
@@ -1798,7 +1798,7 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) {
// Make sure that the catchup takeover has actually been scheduled and at the
// correct time.
ASSERT(replCoord->getCatchupTakeover_forTest());
- auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().get();
+ auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().value();
Milliseconds catchupTakeoverDelay = catchupTakeoverTime - now;
ASSERT_EQUALS(config.getCatchUpTakeoverDelay(), catchupTakeoverDelay);
@@ -1907,7 +1907,7 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeCatchupTakeover) {
// Make sure that the catchup takeover has actually been scheduled and at the
// correct time.
ASSERT(replCoord->getCatchupTakeover_forTest());
- auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().get();
+ auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().value();
Milliseconds catchupTakeoverDelay = catchupTakeoverTime - now;
ASSERT_EQUALS(config.getCatchUpTakeoverDelay(), catchupTakeoverDelay);
@@ -1968,7 +1968,7 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeHighPriorityNodeCatchupTakeover) {
// Make sure that the catchup takeover has actually been scheduled and at the
// correct time.
ASSERT(replCoord->getCatchupTakeover_forTest());
- auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().get();
+ auto catchupTakeoverTime = replCoord->getCatchupTakeover_forTest().value();
Milliseconds catchupTakeoverDelay = catchupTakeoverTime - now;
ASSERT_EQUALS(config.getCatchUpTakeoverDelay(), catchupTakeoverDelay);
@@ -1988,7 +1988,7 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeHighPriorityNodeCatchupTakeover) {
// Make sure that the priority takeover has now been scheduled and at the
// correct time.
ASSERT(replCoord->getPriorityTakeover_forTest());
- auto priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().get();
+ auto priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().value();
assertValidPriorityTakeoverDelay(config, now, priorityTakeoverTime, 0);
// Node 1 schedules the priority takeover, and since it has the second highest
@@ -2039,7 +2039,7 @@ TEST_F(TakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurrent
// Make sure that the priority takeover has actually been scheduled and at the
// correct time.
ASSERT(replCoord->getPriorityTakeover_forTest());
- auto priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().get();
+ auto priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().value();
assertValidPriorityTakeoverDelay(config, now, priorityTakeoverTime, 0);
// Also make sure that updating the term cancels the scheduled priority takeover.
@@ -2082,7 +2082,7 @@ TEST_F(TakeoverTest, SuccessfulPriorityTakeover) {
// Make sure that the priority takeover has actually been scheduled and at the
// correct time.
ASSERT(replCoord->getPriorityTakeover_forTest());
- auto priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().get();
+ auto priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().value();
assertValidPriorityTakeoverDelay(config, now, priorityTakeoverTime, 0);
// The priority takeover might be scheduled at a time later than one election
@@ -2150,7 +2150,7 @@ TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedSameSecond) {
// Make sure that the priority takeover has actually been scheduled and at the
// correct time.
ASSERT(replCoord->getPriorityTakeover_forTest());
- auto priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().get();
+ auto priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().value();
assertValidPriorityTakeoverDelay(config, now, priorityTakeoverTime, 0);
// At this point the other nodes are all ahead of the current node, so it can't call for
@@ -2181,7 +2181,7 @@ TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedSameSecond) {
// Make sure that a new priority takeover has been scheduled and at the
// correct time.
ASSERT(replCoord->getPriorityTakeover_forTest());
- priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().get();
+ priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().value();
assertValidPriorityTakeoverDelay(config, now, priorityTakeoverTime, 0);
// Now make us caught up enough to call for priority takeover to succeed.
@@ -2238,7 +2238,7 @@ TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedDifferentSecond) {
// Make sure that the priority takeover has actually been scheduled and at the
// correct time.
ASSERT(replCoord->getPriorityTakeover_forTest());
- auto priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().get();
+ auto priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().value();
assertValidPriorityTakeoverDelay(config, now, priorityTakeoverTime, 0);
// At this point the other nodes are all ahead of the current node, so it can't call for
@@ -2269,7 +2269,7 @@ TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedDifferentSecond) {
// Make sure that a new priority takeover has been scheduled and at the
// correct time.
ASSERT(replCoord->getPriorityTakeover_forTest());
- priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().get();
+ priorityTakeoverTime = replCoord->getPriorityTakeover_forTest().value();
assertValidPriorityTakeoverDelay(config, now, priorityTakeoverTime, 0);
// Now make us caught up enough to call for priority takeover to succeed.
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 521521e59b2..590edbc292d 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -1179,7 +1179,7 @@ TEST_F(ReplCoordTest, NodeCalculatesDefaultWriteConcernOnStartupExistingLocalCon
HostAndPort("node1", 12345));
auto& rwcDefaults = ReadWriteConcernDefaults::get(getServiceContext());
ASSERT(rwcDefaults.getImplicitDefaultWriteConcernMajority_forTest());
- ASSERT(rwcDefaults.getImplicitDefaultWriteConcernMajority_forTest().get());
+ ASSERT(rwcDefaults.getImplicitDefaultWriteConcernMajority_forTest().value());
}
@@ -1200,7 +1200,7 @@ TEST_F(ReplCoordTest,
HostAndPort("node1", 12345));
auto& rwcDefaults = ReadWriteConcernDefaults::get(getServiceContext());
ASSERT(rwcDefaults.getImplicitDefaultWriteConcernMajority_forTest());
- ASSERT_FALSE(rwcDefaults.getImplicitDefaultWriteConcernMajority_forTest().get());
+ ASSERT_FALSE(rwcDefaults.getImplicitDefaultWriteConcernMajority_forTest().value());
}
@@ -1261,7 +1261,7 @@ TEST_F(ReplCoordTest, NodeCalculatesDefaultWriteConcernOnStartupNewConfigMajorit
auto& rwcDefaults = ReadWriteConcernDefaults::get(getServiceContext());
ASSERT(rwcDefaults.getImplicitDefaultWriteConcernMajority_forTest());
- ASSERT(rwcDefaults.getImplicitDefaultWriteConcernMajority_forTest().get());
+ ASSERT(rwcDefaults.getImplicitDefaultWriteConcernMajority_forTest().value());
}
@@ -1322,7 +1322,7 @@ TEST_F(ReplCoordTest, NodeCalculatesDefaultWriteConcernOnStartupNewConfigNoMajor
auto& rwcDefaults = ReadWriteConcernDefaults::get(getServiceContext());
ASSERT(rwcDefaults.getImplicitDefaultWriteConcernMajority_forTest());
- ASSERT_FALSE(rwcDefaults.getImplicitDefaultWriteConcernMajority_forTest().get());
+ ASSERT_FALSE(rwcDefaults.getImplicitDefaultWriteConcernMajority_forTest().value());
}
@@ -3622,7 +3622,7 @@ TEST_F(ReplCoordTest, AwaitHelloResponseReturnsOnStepDown) {
expectedCounter = topologyVersionAfterDisablingWrites->getCounter() + 1;
deadline = getNet()->now() + maxAwaitTime;
const auto responseStepdownComplete = awaitHelloWithNewOpCtx(
- getReplCoord(), topologyVersionAfterDisablingWrites.get(), {}, deadline);
+ getReplCoord(), topologyVersionAfterDisablingWrites.value(), {}, deadline);
const auto topologyVersionStepDownComplete = responseStepdownComplete->getTopologyVersion();
ASSERT_EQUALS(topologyVersionStepDownComplete->getCounter(), expectedCounter);
ASSERT_EQUALS(topologyVersionStepDownComplete->getProcessId(), expectedProcessId);
@@ -4981,7 +4981,7 @@ TEST_F(ReplCoordTest, AwaitHelloResponseReturnsOnElectionWin) {
// The server TopologyVersion will increment again once we exit drain mode.
expectedCounter = topologyVersionAfterElection->getCounter() + 1;
const auto responseAfterDrainComplete = awaitHelloWithNewOpCtx(
- getReplCoord(), topologyVersionAfterElection.get(), {}, deadline);
+ getReplCoord(), topologyVersionAfterElection.value(), {}, deadline);
const auto topologyVersionAfterDrainComplete =
responseAfterDrainComplete->getTopologyVersion();
ASSERT_EQUALS(topologyVersionAfterDrainComplete->getCounter(), expectedCounter);
@@ -5075,7 +5075,7 @@ TEST_F(ReplCoordTest, AwaitHelloResponseReturnsOnElectionWinWithReconfig) {
// The server TopologyVersion will increment once we finish reconfig.
expectedCounter = topologyVersionAfterElection->getCounter() + 1;
const auto responseAfterReconfig = awaitHelloWithNewOpCtx(
- getReplCoord(), topologyVersionAfterElection.get(), {}, deadline);
+ getReplCoord(), topologyVersionAfterElection.value(), {}, deadline);
const auto topologyVersionAfterReconfig = responseAfterReconfig->getTopologyVersion();
ASSERT_EQUALS(topologyVersionAfterReconfig->getCounter(), expectedCounter);
ASSERT_EQUALS(topologyVersionAfterReconfig->getProcessId(), expectedProcessId);
@@ -5090,7 +5090,7 @@ TEST_F(ReplCoordTest, AwaitHelloResponseReturnsOnElectionWinWithReconfig) {
// The server TopologyVersion will increment again once we exit drain mode.
expectedCounter = topologyVersionAfterReconfig->getCounter() + 1;
const auto responseAfterDrainComplete = awaitHelloWithNewOpCtx(
- getReplCoord(), topologyVersionAfterReconfig.get(), {}, deadline);
+ getReplCoord(), topologyVersionAfterReconfig.value(), {}, deadline);
const auto topologyVersionAfterDrainComplete =
responseAfterDrainComplete->getTopologyVersion();
ASSERT_EQUALS(topologyVersionAfterDrainComplete->getCounter(), expectedCounter);
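These test hunks all follow the same two-step idiom: assert engagement, then read with value(). A hedged sketch using plain assert() as a stand-in for the MongoDB test macros; checking engagement first turns an empty optional into a clean test failure rather than an uncaught bad_optional_access:

// Sketch: assert the optional is engaged before calling value().
#include <boost/optional.hpp>
#include <cassert>

int main() {
    boost::optional<int> takeoverTime = 5;
    assert(takeoverTime);                  // fails cleanly if disengaged
    int delay = takeoverTime.value() - 3;  // safe: engagement just checked
    assert(delay == 2);
    return 0;
}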
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 4544d37ec4d..d04d0b8d736 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -134,11 +134,11 @@ TopologyVersion appendReplicationInfo(OperationContext* opCtx,
auto cwwc = ReadWriteConcernDefaults::get(opCtx).getCWWC(opCtx);
if (cwwc) {
- result->append(HelloCommandReply::kCwwcFieldName, cwwc.get().toBSON());
+ result->append(HelloCommandReply::kCwwcFieldName, cwwc.value().toBSON());
}
}
- return helloResponse->getTopologyVersion().get();
+ return helloResponse->getTopologyVersion().value();
}
auto currentTopologyVersion = replCoord->getTopologyVersion();
@@ -428,7 +428,7 @@ public:
LOGV2_DEBUG(23904,
3,
"Using maxAwaitTimeMS for awaitable hello protocol",
- "maxAwaitTimeMS"_attr = maxAwaitTimeMS.get());
+ "maxAwaitTimeMS"_attr = maxAwaitTimeMS.value());
curOp->pauseTimer();
timerGuard.emplace([curOp]() { curOp->resumeTimer(); });
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index a8cd23cd89b..6e6fcb0dfd5 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -396,7 +396,7 @@ void ReplicationRecoveryImpl::recoverFromOplogUpTo(OperationContext* opCtx, Time
fassert(31436, "No recovery timestamp, cannot recover from the oplog");
}
- startPoint = _adjustStartPointIfNecessary(opCtx, startPoint.get());
+ startPoint = _adjustStartPointIfNecessary(opCtx, startPoint.value());
invariant(!endPoint.isNull());
@@ -832,14 +832,14 @@ void ReplicationRecoveryImpl::_truncateOplogTo(OperationContext* opCtx,
// Parse the response.
auto truncateAfterOpTime =
- fassert(51766, repl::OpTime::parseFromOplogEntry(truncateAfterOplogEntryBSON.get()));
+ fassert(51766, repl::OpTime::parseFromOplogEntry(truncateAfterOplogEntryBSON.value()));
auto truncateAfterOplogEntryTs = truncateAfterOpTime.getTimestamp();
auto truncateAfterRecordId = RecordId(truncateAfterOplogEntryTs.asULL());
invariant(truncateAfterRecordId <= RecordId(truncateAfterTimestamp.asULL()),
str::stream() << "Should have found a oplog entry timestamp lte to "
<< truncateAfterTimestamp.toString() << ", but instead found "
- << redact(truncateAfterOplogEntryBSON.get()) << " with timestamp "
+ << redact(truncateAfterOplogEntryBSON.value()) << " with timestamp "
<< Timestamp(truncateAfterRecordId.getLong()).toString());
// Truncate the oplog AFTER the oplog entry found to be <= truncateAfterTimestamp.
@@ -903,9 +903,9 @@ void ReplicationRecoveryImpl::_truncateOplogIfNeededAndThenClearOplogTruncateAft
"The oplog truncation point is equal to or earlier than the stable timestamp, so "
"truncating after the stable timestamp instead",
"truncatePoint"_attr = truncatePoint,
- "stableTimestamp"_attr = (*stableTimestamp).get());
+ "stableTimestamp"_attr = (*stableTimestamp).value());
- truncatePoint = (*stableTimestamp).get();
+ truncatePoint = (*stableTimestamp).value();
}
LOGV2(21557,
@@ -944,7 +944,7 @@ Timestamp ReplicationRecoveryImpl::_adjustStartPointIfNecessary(OperationContext
}
auto adjustmentOpTime =
- fassert(5466602, OpTime::parseFromOplogEntry(adjustmentOplogEntryBSON.get()));
+ fassert(5466602, OpTime::parseFromOplogEntry(adjustmentOplogEntryBSON.value()));
auto adjustmentTimestamp = adjustmentOpTime.getTimestamp();
if (startPoint != adjustmentTimestamp) {
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index b63f5a704a6..e0dc5f6a020 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -926,7 +926,7 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr
// We call BSONElement::wrap() on each _id element to create a new BSONObj with an owned
// buffer, as the underlying storage may be gone when we access this map to write
// rollback files.
- _observerInfo.rollbackDeletedIdsMap[uuid.get()].insert(idElem.wrap());
+ _observerInfo.rollbackDeletedIdsMap[uuid.value()].insert(idElem.wrap());
const auto cmdName = opType == OpTypeEnum::kInsert ? kInsertCmdName : kUpdateCmdName;
++_observerInfo.rollbackCommandCounts[cmdName];
}
@@ -955,16 +955,16 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr
}
// Rolling back an insert must decrement the count by 1.
- _countDiffs[oplogEntry.getUuid().get()] -= 1;
+ _countDiffs[oplogEntry.getUuid().value()] -= 1;
} else if (opType == OpTypeEnum::kDelete) {
// Rolling back a delete must increment the count by 1.
- _countDiffs[oplogEntry.getUuid().get()] += 1;
+ _countDiffs[oplogEntry.getUuid().value()] += 1;
} else if (opType == OpTypeEnum::kCommand) {
if (oplogEntry.getCommandType() == OplogEntry::CommandType::kCreate) {
// If we roll back a create, then we do not need to change the size of that uuid.
- _countDiffs.erase(oplogEntry.getUuid().get());
- _pendingDrops.erase(oplogEntry.getUuid().get());
- _newCounts.erase(oplogEntry.getUuid().get());
+ _countDiffs.erase(oplogEntry.getUuid().value());
+ _pendingDrops.erase(oplogEntry.getUuid().value());
+ _newCounts.erase(oplogEntry.getUuid().value());
} else if (oplogEntry.getCommandType() == OplogEntry::CommandType::kImportCollection) {
auto importEntry = mongo::ImportCollectionOplogEntry::parse(
IDLParserContext("importCollectionOplogEntry"), oplogEntry.getObject());
@@ -988,7 +988,7 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr
// collection is managed by the storage engine and is not accessible through the UUID
// catalog.
// Adding a _newCounts entry ensures that the count will be set after the rollback.
- const auto uuid = oplogEntry.getUuid().get();
+ const auto uuid = oplogEntry.getUuid().value();
invariant(_countDiffs.find(uuid) == _countDiffs.end(),
str::stream() << "Unexpected existing count diff for " << uuid.toString()
<< " op: " << redact(oplogEntry.toBSONForLogging()));
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index 7d296aa3208..6944d6a14b4 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -176,7 +176,7 @@ std::pair<BSONObj, RecordId> RollbackTest::makeCommandOp(Timestamp ts,
bob.append("ts", ts);
bob.append("op", "c");
if (uuid) { // Not all ops have UUID fields.
- uuid.get().appendToBuilder(&bob, "ui");
+ uuid.value().appendToBuilder(&bob, "ui");
}
bob.append("ns", nss);
bob.append("o", cmdObj);
@@ -196,7 +196,7 @@ std::pair<BSONObj, RecordId> RollbackTest::makeCommandOpForApplyOps(boost::optio
BSONObjBuilder bob;
bob.append("op", "c");
if (uuid) { // Not all ops have UUID fields.
- uuid.get().appendToBuilder(&bob, "ui");
+ uuid.value().appendToBuilder(&bob, "ui");
}
bob.append("ns", nss);
bob.append("o", cmdObj);
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 66c05c2ef0f..10f4e85f720 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -309,7 +309,7 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
txnBob.append("_id", sessionId->toBSON());
auto txnObj = txnBob.obj();
- DocID txnDoc(txnObj, txnObj.firstElement(), transactionTableUUID.get());
+ DocID txnDoc(txnObj, txnObj.firstElement(), transactionTableUUID.value());
txnDoc.ns = NamespaceString::kSessionTransactionsTableNamespace.ns();
fixUpInfo.docsToRefetch.insert(txnDoc);
@@ -400,7 +400,7 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o
"Missing index name in dropIndexes operation on rollback.");
}
- BSONObj obj2 = oplogEntry.getObject2().get().getOwned();
+ BSONObj obj2 = oplogEntry.getObject2().value().getOwned();
// Inserts the index name and the index spec of the index to be created into the map
// of index name and index specs that need to be created for the given collection.
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 384226d7675..045bf46d458 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -354,7 +354,7 @@ Status insertDocumentsSingleBatch(OperationContext* opCtx,
} else {
autoColl.emplace(opCtx, nsOrUUID, MODE_IX);
auto collectionResult = getCollection(
- autoColl.get(), nsOrUUID, "The collection must exist before inserting documents.");
+ autoColl.value(), nsOrUUID, "The collection must exist before inserting documents.");
if (!collectionResult.isOK()) {
return collectionResult.getStatus();
}
diff --git a/src/mongo/db/repl/storage_timestamp_test.cpp b/src/mongo/db/repl/storage_timestamp_test.cpp
index 359cb986fb9..3dc86806abe 100644
--- a/src/mongo/db/repl/storage_timestamp_test.cpp
+++ b/src/mongo/db/repl/storage_timestamp_test.cpp
@@ -431,7 +431,7 @@ public:
printStackTrace();
FAIL("Did not find any documents.");
}
- return optRecord.get().data.toBson();
+ return optRecord.value().data.toBson();
}
std::shared_ptr<BSONCollectionCatalogEntry::MetaData> getMetaDataAtTime(
diff --git a/src/mongo/db/repl/tenant_database_cloner_test.cpp b/src/mongo/db/repl/tenant_database_cloner_test.cpp
index 4b089101465..5e59d0fcc1e 100644
--- a/src/mongo/db/repl/tenant_database_cloner_test.cpp
+++ b/src/mongo/db/repl/tenant_database_cloner_test.cpp
@@ -72,7 +72,7 @@ protected:
_storageInterface.insertDocumentsFn = [this](OperationContext* opCtx,
const NamespaceStringOrUUID& nsOrUUID,
const std::vector<InsertStatement>& ops) {
- const auto collInfo = &_collections[nsOrUUID.nss().get()];
+ const auto collInfo = &_collections[nsOrUUID.nss().value()];
collInfo->numDocsInserted += ops.size();
return Status::OK();
};
diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp b/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp
index 87c380f153f..0f30ffe4ac5 100644
--- a/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp
+++ b/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp
@@ -373,20 +373,20 @@ void recoverTenantMigrationAccessBlockers(OperationContext* opCtx) {
case TenantMigrationDonorStateEnum::kBlocking:
invariant(doc.getBlockTimestamp());
mtab->startBlockingWrites();
- mtab->startBlockingReadsAfter(doc.getBlockTimestamp().get());
+ mtab->startBlockingReadsAfter(doc.getBlockTimestamp().value());
break;
case TenantMigrationDonorStateEnum::kCommitted:
invariant(doc.getBlockTimestamp());
mtab->startBlockingWrites();
- mtab->startBlockingReadsAfter(doc.getBlockTimestamp().get());
- mtab->setCommitOpTime(opCtx, doc.getCommitOrAbortOpTime().get());
+ mtab->startBlockingReadsAfter(doc.getBlockTimestamp().value());
+ mtab->setCommitOpTime(opCtx, doc.getCommitOrAbortOpTime().value());
break;
case TenantMigrationDonorStateEnum::kAborted:
if (doc.getBlockTimestamp()) {
mtab->startBlockingWrites();
- mtab->startBlockingReadsAfter(doc.getBlockTimestamp().get());
+ mtab->startBlockingReadsAfter(doc.getBlockTimestamp().value());
}
- mtab->setAbortOpTime(opCtx, doc.getCommitOrAbortOpTime().get());
+ mtab->setAbortOpTime(opCtx, doc.getCommitOrAbortOpTime().value());
break;
case TenantMigrationDonorStateEnum::kUninitialized:
MONGO_UNREACHABLE;
@@ -426,7 +426,7 @@ void recoverTenantMigrationAccessBlockers(OperationContext* opCtx) {
case TenantMigrationRecipientStateEnum::kConsistent:
case TenantMigrationRecipientStateEnum::kDone:
if (doc.getRejectReadsBeforeTimestamp()) {
- mtab->startRejectingReadsBefore(doc.getRejectReadsBeforeTimestamp().get());
+ mtab->startRejectingReadsBefore(doc.getRejectReadsBeforeTimestamp().value());
}
break;
case TenantMigrationRecipientStateEnum::kUninitialized:
@@ -450,7 +450,7 @@ void recoverTenantMigrationAccessBlockers(OperationContext* opCtx) {
auto optionalTenants = doc.getTenantIds();
invariant(optionalTenants);
- for (const auto& tenantId : optionalTenants.get()) {
+ for (const auto& tenantId : optionalTenants.value()) {
invariant(doc.getRecipientConnectionString());
auto mtab = std::make_shared<TenantMigrationDonorAccessBlocker>(
opCtx->getServiceContext(),
@@ -467,20 +467,20 @@ void recoverTenantMigrationAccessBlockers(OperationContext* opCtx) {
case ShardSplitDonorStateEnum::kBlocking:
invariant(doc.getBlockTimestamp());
mtab->startBlockingWrites();
- mtab->startBlockingReadsAfter(doc.getBlockTimestamp().get());
+ mtab->startBlockingReadsAfter(doc.getBlockTimestamp().value());
break;
case ShardSplitDonorStateEnum::kCommitted:
invariant(doc.getBlockTimestamp());
mtab->startBlockingWrites();
- mtab->startBlockingReadsAfter(doc.getBlockTimestamp().get());
- mtab->setCommitOpTime(opCtx, doc.getCommitOrAbortOpTime().get());
+ mtab->startBlockingReadsAfter(doc.getBlockTimestamp().value());
+ mtab->setCommitOpTime(opCtx, doc.getCommitOrAbortOpTime().value());
break;
case ShardSplitDonorStateEnum::kAborted:
if (doc.getBlockTimestamp()) {
mtab->startBlockingWrites();
- mtab->startBlockingReadsAfter(doc.getBlockTimestamp().get());
+ mtab->startBlockingReadsAfter(doc.getBlockTimestamp().value());
}
- mtab->setAbortOpTime(opCtx, doc.getCommitOrAbortOpTime().get());
+ mtab->setAbortOpTime(opCtx, doc.getCommitOrAbortOpTime().value());
break;
case ShardSplitDonorStateEnum::kUninitialized:
MONGO_UNREACHABLE;
diff --git a/src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp b/src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp
index c282a86c15b..8ccc3270c27 100644
--- a/src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp
+++ b/src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp
@@ -408,7 +408,7 @@ void TenantMigrationDonorAccessBlocker::appendInfoForServerStatus(BSONObjBuilder
builder->append("state", _state.toString());
if (_blockTimestamp) {
- builder->append("blockTimestamp", _blockTimestamp.get());
+ builder->append("blockTimestamp", _blockTimestamp.value());
}
if (_commitOpTime) {
builder->append("commitOpTime", _commitOpTime->toBSON());
diff --git a/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp b/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp
index a96ce884255..ba4e5ddb3ab 100644
--- a/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp
+++ b/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp
@@ -124,7 +124,7 @@ void onTransitionToBlocking(OperationContext* opCtx,
// Both primaries and secondaries call startBlockingReadsAfter in the op observer, since
// startBlockingReadsAfter just needs to be called before the "start blocking" write's oplog
// hole is filled.
- mtab->startBlockingReadsAfter(donorStateDoc.getBlockTimestamp().get());
+ mtab->startBlockingReadsAfter(donorStateDoc.getBlockTimestamp().value());
}
/**
@@ -139,7 +139,7 @@ void onTransitionToCommitted(OperationContext* opCtx,
opCtx->getServiceContext(), donorStateDoc.getTenantId());
invariant(mtab);
- mtab->setCommitOpTime(opCtx, donorStateDoc.getCommitOrAbortOpTime().get());
+ mtab->setCommitOpTime(opCtx, donorStateDoc.getCommitOrAbortOpTime().value());
}
/**
@@ -153,7 +153,7 @@ void onTransitionToAborted(OperationContext* opCtx,
auto mtab = tenant_migration_access_blocker::getTenantMigrationDonorAccessBlocker(
opCtx->getServiceContext(), donorStateDoc.getTenantId());
invariant(mtab);
- mtab->setAbortOpTime(opCtx, donorStateDoc.getCommitOrAbortOpTime().get());
+ mtab->setAbortOpTime(opCtx, donorStateDoc.getCommitOrAbortOpTime().value());
}
/**
@@ -189,7 +189,7 @@ public:
// here that the commit or abort opTime has been majority committed (guaranteed
// to be true since by design the donor never marks its state doc as garbage
// collectable before the migration decision is majority committed).
- mtab->onMajorityCommitPointUpdate(_donorStateDoc.getCommitOrAbortOpTime().get());
+ mtab->onMajorityCommitPointUpdate(_donorStateDoc.getCommitOrAbortOpTime().value());
}
if (_donorStateDoc.getState() == TenantMigrationDonorStateEnum::kAborted) {
@@ -340,7 +340,7 @@ void TenantMigrationDonorOpObserver::onDelete(OperationContext* opCtx,
if (tenantIdToDeleteDecoration(opCtx)) {
opCtx->recoveryUnit()->onCommit([opCtx](boost::optional<Timestamp>) {
TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext())
- .remove(tenantIdToDeleteDecoration(opCtx).get(),
+ .remove(tenantIdToDeleteDecoration(opCtx).value(),
TenantMigrationAccessBlocker::BlockerType::kDonor);
});
}
@@ -348,7 +348,8 @@ void TenantMigrationDonorOpObserver::onDelete(OperationContext* opCtx,
if (migrationIdToDeleteDecoration(opCtx)) {
opCtx->recoveryUnit()->onCommit([opCtx](boost::optional<Timestamp>) {
TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext())
- .removeShardMergeDonorAccessBlocker(migrationIdToDeleteDecoration(opCtx).get());
+ .removeShardMergeDonorAccessBlocker(
+ migrationIdToDeleteDecoration(opCtx).value());
});
}
}
diff --git a/src/mongo/db/repl/tenant_migration_donor_service.cpp b/src/mongo/db/repl/tenant_migration_donor_service.cpp
index 77f8de0fe8f..e3b6b38bba4 100644
--- a/src/mongo/db/repl/tenant_migration_donor_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_donor_service.cpp
@@ -298,7 +298,7 @@ TenantMigrationDonorService::Instance::Instance(ServiceContext* const serviceCon
// The migration was resumed on stepup.
if (_stateDoc.getAbortReason()) {
- auto abortReasonBson = _stateDoc.getAbortReason().get();
+ auto abortReasonBson = _stateDoc.getAbortReason().value();
auto code = abortReasonBson["code"].Int();
auto errmsg = abortReasonBson["errmsg"].String();
_abortReason = Status(ErrorCodes::Error(code), errmsg);
@@ -403,7 +403,7 @@ boost::optional<BSONObj> TenantMigrationDonorService::Instance::reportForCurrent
bob.append("readPreference", _readPreference.toInnerBSON());
bob.append("receivedCancellation", _abortRequested);
if (_durableState) {
- bob.append("lastDurableState", _durableState.get().state);
+ bob.append("lastDurableState", _durableState.value().state);
} else {
bob.appendUndefined("lastDurableState");
}
@@ -589,7 +589,7 @@ ExecutorFuture<repl::OpTime> TenantMigrationDonorService::Instance::_updateState
invariant(_abortReason);
BSONObjBuilder bob;
- _abortReason.get().serializeErrorToBSON(&bob);
+ _abortReason.value().serializeErrorToBSON(&bob);
_stateDoc.setAbortReason(bob.obj());
break;
}
@@ -626,7 +626,7 @@ ExecutorFuture<repl::OpTime> TenantMigrationDonorService::Instance::_updateState
});
invariant(updateOpTime);
- return updateOpTime.get();
+ return updateOpTime.value();
})
.until([](StatusWith<repl::OpTime> swOpTime) { return swOpTime.getStatus().isOK(); })
.withBackoffBetweenIterations(kExponentialBackoff)
diff --git a/src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp b/src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp
index 15e1c997f26..165fd0178ed 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp
@@ -201,7 +201,7 @@ void TenantMigrationRecipientAccessBlocker::appendInfoForServerStatus(
builder->append("state", _state.toString());
if (_rejectBeforeTimestamp) {
- builder->append("rejectBeforeTimestamp", _rejectBeforeTimestamp.get());
+ builder->append("rejectBeforeTimestamp", _rejectBeforeTimestamp.value());
}
builder->append("ttlIsBlocked", _ttlIsBlocked);
if (_protocol == MigrationProtocolEnum::kMultitenantMigrations) {
diff --git a/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp b/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp
index 77b16a52436..9a7445bee1e 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp
@@ -94,12 +94,12 @@ void onSetRejectReadsBeforeTimestamp(OperationContext* opCtx,
auto mtab = tenant_migration_access_blocker::getTenantMigrationRecipientAccessBlocker(
opCtx->getServiceContext(), recipientStateDoc.getTenantId());
invariant(mtab);
- mtab->startRejectingReadsBefore(recipientStateDoc.getRejectReadsBeforeTimestamp().get());
+ mtab->startRejectingReadsBefore(recipientStateDoc.getRejectReadsBeforeTimestamp().value());
} else {
tenant_migration_access_blocker::startRejectingReadsBefore(
opCtx,
recipientStateDoc.getId(),
- recipientStateDoc.getRejectReadsBeforeTimestamp().get());
+ recipientStateDoc.getRejectReadsBeforeTimestamp().value());
}
}
} // namespace
@@ -285,7 +285,7 @@ void TenantMigrationRecipientOpObserver::onDelete(OperationContext* opCtx,
if (nss == NamespaceString::kTenantMigrationRecipientsNamespace &&
!tenant_migration_access_blocker::inRecoveryMode(opCtx)) {
if (tenantIdToDeleteDecoration(opCtx)) {
- auto tenantId = tenantIdToDeleteDecoration(opCtx).get();
+ auto tenantId = tenantIdToDeleteDecoration(opCtx).value();
LOGV2_INFO(8423337,
"Removing expired 'multitenant migration' migration",
"tenantId"_attr = tenantId);
@@ -296,7 +296,7 @@ void TenantMigrationRecipientOpObserver::onDelete(OperationContext* opCtx,
}
if (migrationIdToDeleteDecoration(opCtx)) {
- auto migrationId = migrationIdToDeleteDecoration(opCtx).get();
+ auto migrationId = migrationIdToDeleteDecoration(opCtx).value();
LOGV2_INFO(6114101,
"Removing expired 'shard merge' migration",
"migrationId"_attr = migrationId);
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.cpp b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
index 699428f1599..c84ba067ef8 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
@@ -609,7 +609,7 @@ std::unique_ptr<DBClientConnection> TenantMigrationRecipientService::Instance::_
0 /* socketTimeout */,
nullptr /* uri */,
nullptr /* apiParameters */,
- _transientSSLParams ? &_transientSSLParams.get() : nullptr);
+ _transientSSLParams ? &_transientSSLParams.value() : nullptr);
if (!swClientBase.isOK()) {
LOGV2_ERROR(4880400,
"Failed to connect to migration donor",
@@ -1546,7 +1546,7 @@ TenantMigrationRecipientService::Instance::_fetchRetryableWritesOplogBeforeStart
{
stdx::lock_guard lk(_mutex);
invariant(_stateDoc.getStartFetchingDonorOpTime());
- startFetchingTimestamp = _stateDoc.getStartFetchingDonorOpTime().get().getTimestamp();
+ startFetchingTimestamp = _stateDoc.getStartFetchingDonorOpTime().value().getTimestamp();
}
LOGV2_DEBUG(5535300,
@@ -1686,7 +1686,8 @@ void TenantMigrationRecipientService::Instance::_startOplogFetcher() {
// If the oplog buffer already contains fetched documents, we must be resuming a
// migration.
if (auto topOfOplogBuffer = _donorOplogBuffer->lastObjectPushed(opCtx.get())) {
- startFetchOpTime = uassertStatusOK(OpTime::parseFromOplogEntry(topOfOplogBuffer.get()));
+ startFetchOpTime =
+ uassertStatusOK(OpTime::parseFromOplogEntry(topOfOplogBuffer.value()));
resumingFromOplogBuffer = true;
}
}
@@ -2032,7 +2033,7 @@ TenantMigrationRecipientService::Instance::_waitForDataToBecomeConsistent() {
}
return _tenantOplogApplier->getNotificationForOpTime(
- _stateDoc.getDataConsistentStopDonorOpTime().get());
+ _stateDoc.getDataConsistentStopDonorOpTime().value());
}
SemiFuture<void> TenantMigrationRecipientService::Instance::_persistConsistentState() {
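A minimal sketch of the `_transientSSLParams ? &_transientSSLParams.value() : nullptr` pattern rewritten above (SSLParams and connect() are hypothetical stand-ins): value() returns a reference into the optional, so taking its address yields a pointer to the contained object, and the conditional operator only evaluates value() when the optional is engaged.

#include <boost/optional.hpp>

struct SSLParams {
    int version = 3;
};

void connect(const SSLParams* params) {
    (void)params;  // a real implementation would use params when non-null
}

int main() {
    boost::optional<SSLParams> engaged = SSLParams{};
    boost::optional<SSLParams> empty;
    connect(engaged ? &engaged.value() : nullptr);  // valid pointer into `engaged`
    connect(empty ? &empty.value() : nullptr);      // nullptr; value() is not called
    return 0;
}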
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
index 9c9213acefa..47b79d53319 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
@@ -124,7 +124,7 @@ MutableOplogEntry makeNoOpOplogEntry(OpTime opTime,
oplogEntry.setObject2(o);
oplogEntry.setWallClockTime(Date_t::now());
if (migrationUUID) {
- oplogEntry.setFromTenantMigration(migrationUUID.get());
+ oplogEntry.setFromTenantMigration(migrationUUID.value());
}
return oplogEntry;
}
@@ -2603,7 +2603,7 @@ TEST_F(TenantMigrationRecipientServiceTest, TenantMigrationRecipientAddResumeTok
OplogEntry noopEntry(noopDoc);
ASSERT_TRUE(noopEntry.getOpType() == OpTypeEnum::kNoop);
ASSERT_EQUALS(noopEntry.getTimestamp(), resumeToken2);
- ASSERT_EQUALS(noopEntry.getTerm().get(), -1);
+ ASSERT_EQUALS(noopEntry.getTerm().value(), -1);
ASSERT_EQUALS(noopEntry.getNss(), NamespaceString(""));
}
@@ -2755,7 +2755,8 @@ TEST_F(TenantMigrationRecipientServiceTest, RecipientForgetMigration_WaitUntilSt
ASSERT_TRUE(doc.getReadPreference().equals(ReadPreferenceSetting(ReadPreference::PrimaryOnly)));
ASSERT_TRUE(doc.getState() == TenantMigrationRecipientStateEnum::kDone);
ASSERT_TRUE(doc.getExpireAt() != boost::none);
- ASSERT_TRUE(doc.getExpireAt().get() > opCtx->getServiceContext()->getFastClockSource()->now());
+ ASSERT_TRUE(doc.getExpireAt().value() >
+ opCtx->getServiceContext()->getFastClockSource()->now());
ASSERT_TRUE(doc.getStartApplyingDonorOpTime() == boost::none);
ASSERT_TRUE(doc.getStartFetchingDonorOpTime() == boost::none);
ASSERT_TRUE(doc.getDataConsistentStopDonorOpTime() == boost::none);
@@ -2820,7 +2821,8 @@ TEST_F(TenantMigrationRecipientServiceTest, RecipientForgetMigration_AfterStartO
ASSERT_TRUE(doc.getReadPreference().equals(ReadPreferenceSetting(ReadPreference::PrimaryOnly)));
ASSERT_TRUE(doc.getState() == TenantMigrationRecipientStateEnum::kDone);
ASSERT_TRUE(doc.getExpireAt() != boost::none);
- ASSERT_TRUE(doc.getExpireAt().get() > opCtx->getServiceContext()->getFastClockSource()->now());
+ ASSERT_TRUE(doc.getExpireAt().value() >
+ opCtx->getServiceContext()->getFastClockSource()->now());
checkStateDocPersisted(opCtx.get(), instance.get());
}
@@ -2913,7 +2915,7 @@ TEST_F(TenantMigrationRecipientServiceTest, RecipientForgetMigration_AfterConsis
doc.getReadPreference().equals(ReadPreferenceSetting(ReadPreference::PrimaryOnly)));
ASSERT_TRUE(doc.getState() == TenantMigrationRecipientStateEnum::kDone);
ASSERT_TRUE(doc.getExpireAt() != boost::none);
- ASSERT_TRUE(doc.getExpireAt().get() >
+ ASSERT_TRUE(doc.getExpireAt().value() >
opCtx->getServiceContext()->getFastClockSource()->now());
checkStateDocPersisted(opCtx.get(), instance.get());
}
@@ -2998,7 +3000,7 @@ TEST_F(TenantMigrationRecipientServiceTest, RecipientForgetMigration_AfterFail)
doc.getReadPreference().equals(ReadPreferenceSetting(ReadPreference::PrimaryOnly)));
ASSERT_TRUE(doc.getState() == TenantMigrationRecipientStateEnum::kDone);
ASSERT_TRUE(doc.getExpireAt() != boost::none);
- ASSERT_TRUE(doc.getExpireAt().get() >
+ ASSERT_TRUE(doc.getExpireAt().value() >
opCtx->getServiceContext()->getFastClockSource()->now());
checkStateDocPersisted(opCtx.get(), instance.get());
}
diff --git a/src/mongo/db/repl/transaction_oplog_application.cpp b/src/mongo/db/repl/transaction_oplog_application.cpp
index 7500dac2d55..dbe6dd12b74 100644
--- a/src/mongo/db/repl/transaction_oplog_application.cpp
+++ b/src/mongo/db/repl/transaction_oplog_application.cpp
@@ -173,7 +173,7 @@ Status _applyTransactionFromOplogChain(OperationContext* opCtx,
repl::OplogEntry getPreviousOplogEntry(OperationContext* opCtx, const repl::OplogEntry& entry) {
const auto prevOpTime = entry.getPrevWriteOpTimeInTransaction();
invariant(prevOpTime);
- TransactionHistoryIterator iter(prevOpTime.get());
+ TransactionHistoryIterator iter(prevOpTime.value());
invariant(iter.hasNext());
const auto prevOplogEntry = iter.next(opCtx);
@@ -299,7 +299,7 @@ std::pair<std::vector<OplogEntry>, bool> _readTransactionOperationsFromOplogChai
const auto lastEntryWrittenToOplogOpTime = oldestEntryInBatch.getPrevWriteOpTimeInTransaction();
invariant(lastEntryWrittenToOplogOpTime < lastEntryInTxn.getOpTime());
- TransactionHistoryIterator iter(lastEntryWrittenToOplogOpTime.get());
+ TransactionHistoryIterator iter(lastEntryWrittenToOplogOpTime.value());
// If we started with a prepared commit, we want to forget about that operation and move onto
// the prepare.
diff --git a/src/mongo/db/repl_index_build_state.cpp b/src/mongo/db/repl_index_build_state.cpp
index 1c04166ba4e..0a22f8a48f3 100644
--- a/src/mongo/db/repl_index_build_state.cpp
+++ b/src/mongo/db/repl_index_build_state.cpp
@@ -202,8 +202,8 @@ void ReplIndexBuildState::onOplogAbort(OperationContext* opCtx, const NamespaceS
LOGV2(3856206,
"Aborting index build from oplog entry",
"buildUUID"_attr = buildUUID,
- "abortTimestamp"_attr = _indexBuildState.getTimestamp().get(),
- "abortReason"_attr = _indexBuildState.getAbortReason().get(),
+ "abortTimestamp"_attr = _indexBuildState.getTimestamp().value(),
+ "abortReason"_attr = _indexBuildState.getAbortReason().value(),
"collectionUUID"_attr = collectionUUID);
}
@@ -436,7 +436,7 @@ Status ReplIndexBuildState::onConflictWithNewIndexBuild(const ReplIndexBuildStat
}
if (existingIndexBuildState.isAborted()) {
if (auto abortReason = existingIndexBuildState.getAbortReason()) {
- ss << ", abort reason: " << abortReason.get();
+ ss << ", abort reason: " << abortReason.value();
}
aborted = true;
}
diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp
index 61646702239..c8c0343eda0 100644
--- a/src/mongo/db/s/active_migrations_registry.cpp
+++ b/src/mongo/db/s/active_migrations_registry.cpp
@@ -231,8 +231,8 @@ BSONObj ActiveMigrationsRegistry::getActiveMigrationStatusReport(OperationContex
// desirable for reporting, and then diagnosing, migrations that are stuck.
if (nss) {
// Lock the collection so nothing changes while we're getting the migration report.
- AutoGetCollection autoColl(opCtx, nss.get(), MODE_IS);
- auto csr = CollectionShardingRuntime::get(opCtx, nss.get());
+ AutoGetCollection autoColl(opCtx, nss.value(), MODE_IS);
+ auto csr = CollectionShardingRuntime::get(opCtx, nss.value());
auto csrLock = CollectionShardingRuntime::CSRLock::lockShared(opCtx, csr);
if (auto msm = MigrationSourceManager::get(csr, csrLock)) {
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 6822de95f8a..0db96d41a27 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -119,7 +119,7 @@ public:
BSONObj toBSON() const {
BSONObjBuilder builder;
builder.append("executionTimeMillis", _executionTimer.millis());
- builder.append("errorOccurred", _errMsg.is_initialized());
+ builder.append("errorOccurred", _errMsg.has_value());
if (_errMsg) {
builder.append("errmsg", *_errMsg);
@@ -551,7 +551,7 @@ void Balancer::_consumeActionStreamLoop() {
_newInfoOnStreamingActions.store(false);
auto nextAction = selectedStream->getNextStreamingAction(opCtx.get());
- if ((streamDrained = !nextAction.is_initialized())) {
+ if ((streamDrained = !nextAction.has_value())) {
continue;
}
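A minimal sketch (not from this patch) of the is_initialized()-to-has_value() rename applied above and in the files that follow: the two predicates are equivalent on boost::optional, and has_value() matches the std::optional spelling.

#include <boost/optional.hpp>
#include <cassert>
#include <string>

int main() {
    boost::optional<std::string> errMsg;
    assert(!errMsg.has_value());  // same answer as errMsg.is_initialized()
    assert(!errMsg);              // contextual conversion to bool agrees as well
    errMsg = std::string{"timed out"};
    assert(errMsg.has_value() && *errMsg == "timed out");
    return 0;
}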
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
index 1503fa49b9e..e1899a66eac 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp
@@ -813,7 +813,7 @@ private:
_abort(DefragmentationPhaseEnum::kMergeAndMeasureChunks);
return;
}
- const uint64_t estimatedChunkSize = chunk.getEstimatedSizeBytes().get();
+ const uint64_t estimatedChunkSize = chunk.getEstimatedSizeBytes().value();
_collectionChunks.emplace_back(chunk.getRange(), chunk.getShard(), estimatedChunkSize);
}
@@ -1189,7 +1189,7 @@ public:
// with no estimated size.
for (const auto& chunk : collectionChunks) {
auto chunkSize = chunk.getEstimatedSizeBytes();
- if (!chunkSize || (uint64_t)chunkSize.get() > maxChunkSizeBytes) {
+ if (!chunkSize || (uint64_t)chunkSize.value() > maxChunkSizeBytes) {
pendingActionsByShards[chunk.getShard()].rangesToFindSplitPoints.emplace_back(
chunk.getMin(), chunk.getMax());
}
@@ -1668,10 +1668,10 @@ void BalancerDefragmentationPolicyImpl::_initializeCollectionState(WithLock,
return;
}
auto phaseToBuild = coll.getDefragmentationPhase()
- ? coll.getDefragmentationPhase().get()
+ ? coll.getDefragmentationPhase().value()
: DefragmentationPhaseEnum::kMergeAndMeasureChunks;
- auto collectionPhase = _transitionPhases(
- opCtx, coll, phaseToBuild, !coll.getDefragmentationPhase().is_initialized());
+ auto collectionPhase =
+ _transitionPhases(opCtx, coll, phaseToBuild, !coll.getDefragmentationPhase().has_value());
while (collectionPhase && collectionPhase->isComplete() &&
MONGO_likely(!skipDefragmentationPhaseTransition.shouldFail())) {
collectionPhase = _transitionPhases(opCtx, coll, collectionPhase->getNextPhase());
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
index 1512beb054c..ee56d9f32ea 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
@@ -254,7 +254,7 @@ TEST_F(BalancerDefragmentationPolicyTest,
setDefaultClusterStats();
_defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_TRUE(nextAction.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
auto resp = StatusWith(DataSizeResponse(2000, 4));
@@ -354,7 +354,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneUserCancellationBeginsPhas
// Defragmentation should transition to phase 3
auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kSplitChunks);
- ASSERT_TRUE(nextAction.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
auto splitVectorAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
}
@@ -373,7 +373,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestNonRetriableErrorRebuildsCurrentPh
ASSERT_TRUE(_defragmentationPolicy.isDefragmentingCollection(coll.getUuid()));
verifyExpectedDefragmentationPhaseOndisk(DefragmentationPhaseEnum::kMergeAndMeasureChunks);
// 2. The action returned by the stream should now be an actionable DataSizeCommand...
- ASSERT_TRUE(nextAction.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
// 3. with the expected content
ASSERT_EQ(coll.getNss(), dataSizeAction.nss);
@@ -409,8 +409,8 @@ TEST_F(BalancerDefragmentationPolicyTest,
// Phase 1 should restart.
nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
nextAction2 = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_TRUE(nextAction.is_initialized());
- ASSERT_TRUE(nextAction2.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
+ ASSERT_TRUE(nextAction2.has_value());
DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
DataSizeInfo dataSizeAction2 = stdx::get<DataSizeInfo>(*nextAction2);
}
@@ -470,7 +470,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAcknowledgeSuccessfulMerge
ASSERT_TRUE(nextAction == boost::none);
_defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_TRUE(nextAction.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
MergeInfo mergeInfoAction = stdx::get<MergeInfo>(*nextAction);
ASSERT_BSONOBJ_EQ(mergeInfoAction.chunkRange.getMin(), kKeyAtMin);
ASSERT_BSONOBJ_EQ(mergeInfoAction.chunkRange.getMax(), kKeyAtMax);
@@ -478,7 +478,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAcknowledgeSuccessfulMerge
ASSERT_TRUE(nextAction == boost::none);
_defragmentationPolicy.applyActionResult(operationContext(), mergeInfoAction, Status::OK());
nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_TRUE(nextAction.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
DataSizeInfo dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
ASSERT_EQ(mergeInfoAction.nss, dataSizeAction.nss);
ASSERT_BSONOBJ_EQ(mergeInfoAction.chunkRange.getMin(), dataSizeAction.chunkRange.getMin());
@@ -514,9 +514,9 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAllConsecutive) {
_defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
// Test
auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_TRUE(nextAction.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
auto nextAction2 = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_TRUE(nextAction2.is_initialized());
+ ASSERT_TRUE(nextAction2.has_value());
// Verify the content of the received merge actions
// (Note: there is no guarantee on the order provided by the stream)
MergeInfo mergeAction = stdx::get<MergeInfo>(*nextAction);
@@ -533,7 +533,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAllConsecutive) {
ASSERT_BSONOBJ_EQ(mergeAction.chunkRange.getMax(), kKeyAtMax);
}
auto nextAction3 = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_FALSE(nextAction3.is_initialized());
+ ASSERT_FALSE(nextAction3.has_value());
}
TEST_F(BalancerDefragmentationPolicyTest, PhaseOneNotConsecutive) {
@@ -554,11 +554,11 @@ TEST_F(BalancerDefragmentationPolicyTest, PhaseOneNotConsecutive) {
_defragmentationPolicy.startCollectionDefragmentation(operationContext(), coll);
// Three actions (in an unspecified order) should be immediately available.
auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_TRUE(nextAction.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
auto nextAction2 = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_TRUE(nextAction2.is_initialized());
+ ASSERT_TRUE(nextAction2.has_value());
auto nextAction3 = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_TRUE(nextAction3.is_initialized());
+ ASSERT_TRUE(nextAction3.has_value());
// Verify the content of the received merge actions
uint8_t timesLowerRangeMergeFound = 0;
uint8_t timesUpperRangeMergeFound = 0;
@@ -595,7 +595,7 @@ TEST_F(BalancerDefragmentationPolicyTest, PhaseOneNotConsecutive) {
ASSERT_EQ(1, timesMiddleRangeDataSizeFound);
auto nextAction4 = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_FALSE(nextAction4.is_initialized());
+ ASSERT_FALSE(nextAction4.has_value());
}
// Phase 2 tests.
@@ -615,7 +615,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseTwoMissingDataSizeRestartsPha
_defragmentationPolicy.selectChunksToMove(operationContext(), &usedShards);
ASSERT_EQ(0, pendingMigrations.size());
auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_TRUE(nextAction.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
auto dataSizeAction = stdx::get<DataSizeInfo>(*nextAction);
}
@@ -660,7 +660,7 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseTwoChunkCanBeMovedAndMergedWi
_defragmentationPolicy.applyActionResult(operationContext(), moveAction, Status::OK());
nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_TRUE(nextAction.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
usedShards.clear();
pendingMigrations = _defragmentationPolicy.selectChunksToMove(operationContext(), &usedShards);
ASSERT_TRUE(pendingMigrations.empty());
@@ -777,7 +777,7 @@ TEST_F(BalancerDefragmentationPolicyTest, SingleLargeChunkCausesAutoSplitAndSpli
// The new action returned by the stream should be an actionable AutoSplitVector command...
nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
- ASSERT_TRUE(nextAction.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
AutoSplitVectorInfo splitVectorAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
// with the expected content
ASSERT_EQ(coll.getNss(), splitVectorAction.nss);
@@ -797,7 +797,7 @@ TEST_F(BalancerDefragmentationPolicyTest, CollectionMaxChunkSizeIsUsedForPhase3)
auto nextAction = _defragmentationPolicy.getNextStreamingAction(operationContext());
// The action returned by the stream should now be an actionable AutoSplitVector command...
- ASSERT_TRUE(nextAction.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
AutoSplitVectorInfo splitVectorAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
// with the expected content
ASSERT_EQ(coll.getNss(), splitVectorAction.nss);
diff --git a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp
index 4ea6fa8b63f..e29e39a0f41 100644
--- a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_impl.cpp
@@ -213,7 +213,7 @@ SharedSemiFuture<void> ClusterChunksResizePolicyImpl::activate(OperationContext*
"maxChunkSizeBytes"_attr = defaultMaxChunksSizeBytes);
stdx::lock_guard<Latch> lk(_stateMutex);
- if (!_activeRequestPromise.is_initialized()) {
+ if (!_activeRequestPromise.has_value()) {
invariant(!_unprocessedCollections && _collectionsBeingProcessed.empty());
_defaultMaxChunksSizeBytes = defaultMaxChunksSizeBytes;
invariant(_defaultMaxChunksSizeBytes > 0);
@@ -236,13 +236,13 @@ SharedSemiFuture<void> ClusterChunksResizePolicyImpl::activate(OperationContext*
bool ClusterChunksResizePolicyImpl::isActive() {
stdx::lock_guard<Latch> lk(_stateMutex);
- return _activeRequestPromise.is_initialized();
+ return _activeRequestPromise.has_value();
}
void ClusterChunksResizePolicyImpl::stop() {
{
stdx::lock_guard<Latch> lk(_stateMutex);
- if (_activeRequestPromise.is_initialized()) {
+ if (_activeRequestPromise.has_value()) {
_collectionsBeingProcessed.clear();
_unprocessedCollections = nullptr;
_activeRequestPromise->setFrom(
@@ -261,7 +261,7 @@ StringData ClusterChunksResizePolicyImpl::getName() const {
boost::optional<DefragmentationAction> ClusterChunksResizePolicyImpl::getNextStreamingAction(
OperationContext* opCtx) {
stdx::lock_guard<Latch> lk(_stateMutex);
- if (!_activeRequestPromise.is_initialized()) {
+ if (!_activeRequestPromise.has_value()) {
return boost::none;
}
@@ -296,7 +296,7 @@ boost::optional<DefragmentationAction> ClusterChunksResizePolicyImpl::getNextStr
}
auto nextAction = collState.popNextAction(opCtx);
- if (nextAction.is_initialized()) {
+ if (nextAction.has_value()) {
return nextAction;
}
@@ -378,7 +378,7 @@ void ClusterChunksResizePolicyImpl::applyActionResult(OperationContext* opCtx,
});
stdx::lock_guard<Latch> lk(_stateMutex);
- if (!_activeRequestPromise.is_initialized()) {
+ if (!_activeRequestPromise.has_value()) {
return;
}
diff --git a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
index 94b6e874cbf..d08b453d4b5 100644
--- a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
+++ b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
@@ -78,13 +78,13 @@ protected:
bool markAsAlreadyProcessed = false,
boost::optional<int64_t> maxChunkSizeBytes = boost::none) {
setupCollection(nss, shardKeyPattern, chunkList);
- if (markAsAlreadyProcessed || maxChunkSizeBytes.is_initialized()) {
+ if (markAsAlreadyProcessed || maxChunkSizeBytes.has_value()) {
BSONObjBuilder updateQueryBuilder;
BSONObjBuilder setObj(updateQueryBuilder.subobjStart("$set"));
if (markAsAlreadyProcessed) {
setObj.append(CollectionType::kChunksAlreadySplitForDowngradeFieldName, true);
}
- if (maxChunkSizeBytes.is_initialized()) {
+ if (maxChunkSizeBytes.has_value()) {
setObj.append(CollectionType::kMaxChunkSizeBytesFieldName, *maxChunkSizeBytes);
}
setObj.done();
@@ -126,7 +126,7 @@ TEST_F(ClusterChunksResizePolicyTest, ResizeAClusterWithNoChunksEndsImmediately)
// evaluated/updated.
auto nextAction = _clusterChunksResizePolicy.getNextStreamingAction(_opCtx);
- ASSERT_FALSE(nextAction.is_initialized());
+ ASSERT_FALSE(nextAction.has_value());
ASSERT_TRUE(completionFuture.isReady());
ASSERT_FALSE(_clusterChunksResizePolicy.isActive());
}
@@ -234,7 +234,7 @@ TEST_F(ClusterChunksResizePolicyTest, ThePolicyGeneratesNoActionAfterReceivingAn
nextAction = _clusterChunksResizePolicy.getNextStreamingAction(_opCtx);
- ASSERT_FALSE(nextAction.is_initialized());
+ ASSERT_FALSE(nextAction.has_value());
// The processing of the chunk is complete; since it is the only entry in config.chunks,
// the processing of the whole cluster should also be complete
ASSERT_TRUE(completionFuture.isReady());
@@ -329,7 +329,7 @@ TEST_F(ClusterChunksResizePolicyTest,
_opCtx, *nextAction, Status(ErrorCodes::OperationFailed, "Testing nonRetriable error"));
nextAction = _clusterChunksResizePolicy.getNextStreamingAction(_opCtx);
- ASSERT_TRUE(nextAction.is_initialized());
+ ASSERT_TRUE(nextAction.has_value());
auto reissuedSplitVectorAction = stdx::get<AutoSplitVectorInfo>(*nextAction);
ASSERT_BSONOBJ_EQ(originalSplitVectorAction.keyPattern, reissuedSplitVectorAction.keyPattern);
@@ -364,7 +364,7 @@ TEST_F(ClusterChunksResizePolicyTest, ThePolicyCompletesWhenAllActionsAreAcknowl
auto noAction = _clusterChunksResizePolicy.getNextStreamingAction(_opCtx);
ASSERT_TRUE(_clusterChunksResizePolicy.isActive());
ASSERT_FALSE(completionFuture.isReady());
- ASSERT_FALSE(noAction.is_initialized());
+ ASSERT_FALSE(noAction.has_value());
// As splitVectors are acknowledged, splitChunk Actions are generated
StatusWith<AutoSplitVectorResponse> splitVectorResult1 =
@@ -397,7 +397,7 @@ TEST_F(ClusterChunksResizePolicyTest, ThePolicyCompletesWhenAllActionsAreAcknowl
ASSERT_EQ(1, numFullyProcessedCollections);
auto nextAction = _clusterChunksResizePolicy.getNextStreamingAction(_opCtx);
- ASSERT_FALSE(nextAction.is_initialized());
+ ASSERT_FALSE(nextAction.has_value());
ASSERT_FALSE(_clusterChunksResizePolicy.isActive());
ASSERT_TRUE(completionFuture.isReady());
@@ -419,7 +419,7 @@ TEST_F(ClusterChunksResizePolicyTest, CollectionsMarkedAsAlreadyProcessedGetIgno
ASSERT_FALSE(completionFuture.isReady());
auto nextAction = _clusterChunksResizePolicy.getNextStreamingAction(_opCtx);
- ASSERT_FALSE(nextAction.is_initialized());
+ ASSERT_FALSE(nextAction.has_value());
ASSERT_TRUE(completionFuture.isReady());
ASSERT_FALSE(_clusterChunksResizePolicy.isActive());
}
diff --git a/src/mongo/db/s/balancer/type_migration.cpp b/src/mongo/db/s/balancer/type_migration.cpp
index 4f1a4ac71b1..4da7deb522f 100644
--- a/src/mongo/db/s/balancer/type_migration.cpp
+++ b/src/mongo/db/s/balancer/type_migration.cpp
@@ -178,10 +178,10 @@ BSONObj MigrationType::toBSON() const {
builder.append(waitForDelete.name(), _waitForDelete);
builder.append(forceJumbo.name(), _forceJumbo);
- if (_maxChunkSizeBytes.is_initialized()) {
+ if (_maxChunkSizeBytes.has_value()) {
builder.appendNumber(maxChunkSizeBytes.name(), static_cast<long long>(*_maxChunkSizeBytes));
}
- if (_secondaryThrottle.is_initialized()) {
+ if (_secondaryThrottle.has_value()) {
_secondaryThrottle->append(&builder);
}
return builder.obj();
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index 87a773ed7fd..8107bb3485d 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -71,7 +71,7 @@ boost::optional<ShardKeyPattern> CollectionMetadata::getReshardingKeyIfShouldFor
// A switch statement is used so that the compiler warns anyone who modifies the
// coordinator states enum.
- switch (reshardingFields.get().getState()) {
+ switch (reshardingFields.value().getState()) {
case CoordinatorStateEnum::kUnused:
case CoordinatorStateEnum::kInitializing:
case CoordinatorStateEnum::kBlockingWrites:
diff --git a/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp b/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp
index 69c67d89dcb..6700e4c99b1 100644
--- a/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp
+++ b/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp
@@ -90,12 +90,12 @@ void doRenameOperation(const CompactStructuredEncryptionDataState& state,
*skipCompact = true;
return;
} else if (hasEcocRenameNow) {
- if (ecocRenameUuid.get() != state.getEcocRenameUuid().value()) {
+ if (ecocRenameUuid.value() != state.getEcocRenameUuid().value()) {
LOGV2_DEBUG(6517002,
1,
"Skipping compaction due to mismatched collection uuid",
"ecocRenameNss"_attr = ecocRenameNss,
- "uuid"_attr = ecocRenameUuid.get(),
+ "uuid"_attr = ecocRenameUuid.value(),
"expectedUUID"_attr = state.getEcocRenameUuid().value());
*skipCompact = true;
}
@@ -119,14 +119,14 @@ void doRenameOperation(const CompactStructuredEncryptionDataState& state,
"ecocNss"_attr = ecocNss);
*skipCompact = true;
return;
- } else if (ecocUuid.get() != state.getEcocUuid().value()) {
+ } else if (ecocUuid.value() != state.getEcocUuid().value()) {
// The generation of the collection to be compacted is different from the one that was
// requested.
LOGV2_DEBUG(6350491,
1,
"Skipping rename of mismatched collection uuid",
"ecocNss"_attr = ecocNss,
- "uuid"_attr = ecocUuid.get(),
+ "uuid"_attr = ecocUuid.value(),
"expectedUUID"_attr = state.getEcocUuid().value());
*skipCompact = true;
return;
@@ -135,7 +135,7 @@ void doRenameOperation(const CompactStructuredEncryptionDataState& state,
LOGV2(6517004,
"Renaming the encrypted compaction collection",
"ecocNss"_attr = ecocNss,
- "ecocUuid"_attr = ecocUuid.get(),
+ "ecocUuid"_attr = ecocUuid.value(),
"ecocRenameNss"_attr = ecocRenameNss);
// Otherwise, perform the rename so long as the target namespace does not exist.
diff --git a/src/mongo/db/s/config/configsvr_collmod_command.cpp b/src/mongo/db/s/config/configsvr_collmod_command.cpp
index 6d224756002..1b81a326878 100644
--- a/src/mongo/db/s/config/configsvr_collmod_command.cpp
+++ b/src/mongo/db/s/config/configsvr_collmod_command.cpp
@@ -89,8 +89,8 @@ public:
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
const auto& collMod = request().getCollModRequest();
- if (collMod.getTimeseries() && collMod.getTimeseries().get().getGranularity()) {
- auto granularity = collMod.getTimeseries().get().getGranularity().get();
+ if (collMod.getTimeseries() && collMod.getTimeseries().value().getGranularity()) {
+ auto granularity = collMod.getTimeseries().value().getGranularity().value();
ShardingCatalogManager::get(opCtx)->updateTimeSeriesGranularity(
opCtx, ns(), granularity);
}
diff --git a/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp b/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
index 1a094c7db5f..2d4c84b7e8e 100644
--- a/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
+++ b/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
@@ -111,7 +111,7 @@ public:
!request().getUnique().get_value_or(false));
if (request().getCollation()) {
- auto& collation = request().getCollation().get();
+ auto& collation = request().getCollation().value();
auto collator =
uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collation));
@@ -161,7 +161,7 @@ public:
// etc.
opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
reshardCollectionJoinedExistingOperation.pauseWhileSet(opCtx);
- existingInstance.get()->getCoordinatorDocWrittenFuture().get(opCtx);
+ existingInstance.value()->getCoordinatorDocWrittenFuture().get(opCtx);
return existingInstance;
}
@@ -227,7 +227,7 @@ public:
if (instance) {
// There is work to be done in order to have the collection's shard key match the
// requested shard key. Wait until the work is complete.
- instance.get()->getCompletionFuture().get(opCtx);
+ instance.value()->getCompletionFuture().get(opCtx);
}
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 3739fbaf277..b589ccad80d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -152,7 +152,7 @@ BSONObj commitOrAbortTransaction(OperationContext* opCtx,
newOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
AuthorizationSession::get(newOpCtx.get()->getClient())
->grantInternalAuthorization(newOpCtx.get()->getClient());
- newOpCtx.get()->setLogicalSessionId(opCtx->getLogicalSessionId().get());
+ newOpCtx.get()->setLogicalSessionId(opCtx->getLogicalSessionId().value());
newOpCtx.get()->setTxnNumber(txnNumber);
BSONObjBuilder bob;
@@ -639,7 +639,7 @@ void ShardingCatalogManager::insertConfigDocuments(OperationContext* opCtx,
}());
if (txnNumber) {
- writeToConfigDocumentInTxn(opCtx, nss, request, txnNumber.get());
+ writeToConfigDocumentInTxn(opCtx, nss, request, txnNumber.value());
} else {
uassertStatusOK(
getStatusFromWriteCommandReply(executeConfigRequest(opCtx, nss, request)));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 78b231c38ab..78af77db426 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -776,7 +776,7 @@ void ShardingCatalogManager::_mergeChunksInTransaction(
mergedChunk.setEstimatedSizeBytes(boost::none);
mergedChunk.setHistory(
- {ChunkHistory(validAfter.get(), mergedChunk.getShard())});
+ {ChunkHistory(validAfter.value(), mergedChunk.getShard())});
entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(
mergedChunk.toConfigBSON()));
@@ -1153,7 +1153,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
int entriesDeleted = 0;
while (newHistory.size() > 1 &&
newHistory.back().getValidAfter().getSecs() + windowInSeconds <
- validAfter.get().getSecs()) {
+ validAfter.value().getSecs()) {
newHistory.pop_back();
++entriesDeleted;
}
@@ -1167,16 +1167,16 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
LOGV2_DEBUG(4778500, 1, "Deleted old chunk history entries", attrs);
}
- if (!newHistory.empty() && newHistory.front().getValidAfter() >= validAfter.get()) {
+ if (!newHistory.empty() && newHistory.front().getValidAfter() >= validAfter.value()) {
return {ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "The chunk history for chunk with namespace " << nss.ns()
<< " and min key " << migratedChunk.getMin()
<< " is corrupted. The last validAfter "
<< newHistory.back().getValidAfter().toString()
<< " is greater or equal to the new validAfter "
- << validAfter.get().toString()};
+ << validAfter.value().toString()};
}
- newHistory.emplace(newHistory.begin(), ChunkHistory(validAfter.get(), toShard));
+ newHistory.emplace(newHistory.begin(), ChunkHistory(validAfter.value(), toShard));
newMigratedChunk->setHistory(std::move(newHistory));
std::shared_ptr<std::vector<ChunkType>> newSplitChunks =
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index fb045aae478..dfd843674ed 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -508,7 +508,7 @@ void ShardingCatalogManager::configureCollectionBalancing(
updatedFields++;
}
if (defragmentCollection) {
- bool doDefragmentation = defragmentCollection.get();
+ bool doDefragmentation = defragmentCollection.value();
if (doDefragmentation) {
setBuilder.append(CollectionType::kDefragmentCollectionFieldName,
doDefragmentation);
@@ -518,7 +518,7 @@ void ShardingCatalogManager::configureCollectionBalancing(
}
}
if (enableAutoSplitter) {
- bool doSplit = enableAutoSplitter.get();
+ bool doSplit = enableAutoSplitter.value();
setBuilder.append(CollectionType::kNoAutoSplitFieldName, !doSplit);
updatedFields++;
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
index b54338947b1..7e191f0525a 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
@@ -155,13 +155,13 @@ TEST_F(RemoveShardTest, RemoveShardAnotherShardDraining) {
auto result = ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName());
ASSERT_EQUALS(RemoveShardProgress::STARTED, result.status);
- ASSERT_EQUALS(false, result.remainingCounts.is_initialized());
+ ASSERT_EQUALS(false, result.remainingCounts.has_value());
ASSERT_TRUE(isDraining(shard1.getName()));
auto result2 = ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard2.getName());
ASSERT_EQUALS(RemoveShardProgress::STARTED, result2.status);
- ASSERT_EQUALS(false, result2.remainingCounts.is_initialized());
+ ASSERT_EQUALS(false, result2.remainingCounts.has_value());
ASSERT_TRUE(isDraining(shard2.getName()));
}
@@ -200,7 +200,7 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) {
auto result = ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName());
ASSERT_EQUALS(RemoveShardProgress::STARTED, result.status);
- ASSERT_EQUALS(false, result.remainingCounts.is_initialized());
+ ASSERT_EQUALS(false, result.remainingCounts.has_value());
ASSERT_TRUE(isDraining(shard1.getName()));
}
@@ -245,13 +245,13 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) {
auto startedResult = ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName());
ASSERT_EQUALS(RemoveShardProgress::STARTED, startedResult.status);
- ASSERT_EQUALS(false, startedResult.remainingCounts.is_initialized());
+ ASSERT_EQUALS(false, startedResult.remainingCounts.has_value());
ASSERT_TRUE(isDraining(shard1.getName()));
auto ongoingResult = ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName());
ASSERT_EQUALS(RemoveShardProgress::ONGOING, ongoingResult.status);
- ASSERT_EQUALS(true, ongoingResult.remainingCounts.is_initialized());
+ ASSERT_EQUALS(true, ongoingResult.remainingCounts.has_value());
ASSERT_EQUALS(3, ongoingResult.remainingCounts->totalChunks);
ASSERT_EQUALS(1, ongoingResult.remainingCounts->jumboChunks);
ASSERT_EQUALS(1, ongoingResult.remainingCounts->databases);
@@ -278,13 +278,13 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingDatabasesRemaining) {
auto startedResult = ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName());
ASSERT_EQUALS(RemoveShardProgress::STARTED, startedResult.status);
- ASSERT_EQUALS(false, startedResult.remainingCounts.is_initialized());
+ ASSERT_EQUALS(false, startedResult.remainingCounts.has_value());
ASSERT_TRUE(isDraining(shard1.getName()));
auto ongoingResult = ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName());
ASSERT_EQUALS(RemoveShardProgress::ONGOING, ongoingResult.status);
- ASSERT_EQUALS(true, ongoingResult.remainingCounts.is_initialized());
+ ASSERT_EQUALS(true, ongoingResult.remainingCounts.has_value());
ASSERT_EQUALS(0, ongoingResult.remainingCounts->totalChunks);
ASSERT_EQUALS(0, ongoingResult.remainingCounts->jumboChunks);
ASSERT_EQUALS(1, ongoingResult.remainingCounts->databases);
@@ -332,13 +332,13 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
auto startedResult = ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName());
ASSERT_EQUALS(RemoveShardProgress::STARTED, startedResult.status);
- ASSERT_EQUALS(false, startedResult.remainingCounts.is_initialized());
+ ASSERT_EQUALS(false, startedResult.remainingCounts.has_value());
ASSERT_TRUE(isDraining(shard1.getName()));
auto ongoingResult = ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName());
ASSERT_EQUALS(RemoveShardProgress::ONGOING, ongoingResult.status);
- ASSERT_EQUALS(true, ongoingResult.remainingCounts.is_initialized());
+ ASSERT_EQUALS(true, ongoingResult.remainingCounts.has_value());
ASSERT_EQUALS(3, ongoingResult.remainingCounts->totalChunks);
ASSERT_EQUALS(0, ongoingResult.remainingCounts->jumboChunks);
ASSERT_EQUALS(0, ongoingResult.remainingCounts->databases);
@@ -356,7 +356,7 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
auto completedResult = ShardingCatalogManager::get(operationContext())
->removeShard(operationContext(), shard1.getName());
ASSERT_EQUALS(RemoveShardProgress::COMPLETED, completedResult.status);
- ASSERT_EQUALS(false, startedResult.remainingCounts.is_initialized());
+ ASSERT_EQUALS(false, startedResult.remainingCounts.has_value());
// Now make sure that the shard no longer exists on config.
auto response = assertGet(shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
index 0d5be679227..95c9557c7f8 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp
@@ -110,8 +110,8 @@ TEST_F(CreateFirstChunksTest, Split_Disallowed_With_Both_SplitPoints_And_Zones)
ASSERT_THROWS_CODE(
InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
kShardKeyPattern,
- request.getNumInitialChunks().get(),
- request.getPresplitHashedZones().get(),
+ request.getNumInitialChunks().value(),
+ request.getPresplitHashedZones().value(),
request.getInitialSplitPoints(),
tags,
2 /* numShards */,
@@ -122,8 +122,8 @@ TEST_F(CreateFirstChunksTest, Split_Disallowed_With_Both_SplitPoints_And_Zones)
ASSERT_THROWS_CODE(
InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
kShardKeyPattern,
- request.getNumInitialChunks().get(),
- request.getPresplitHashedZones().get(),
+ request.getNumInitialChunks().value(),
+ request.getPresplitHashedZones().value(),
request.getInitialSplitPoints(),
tags,
2 /* numShards */,
@@ -164,8 +164,8 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_SplitPoints_FromSplitVector_Man
auto optimization = InitialSplitPolicy::calculateOptimizationStrategy(
operationContext(),
kShardKeyPattern,
- request.getNumInitialChunks().get(),
- request.getPresplitHashedZones().get(),
+ request.getNumInitialChunks().value(),
+ request.getPresplitHashedZones().value(),
request.getInitialSplitPoints(),
{}, /* tags */
3 /* numShards */,
@@ -214,8 +214,8 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_SplitPoints_FromClient_ManyChun
auto optimization = InitialSplitPolicy::calculateOptimizationStrategy(
operationContext(),
kShardKeyPattern,
- request.getNumInitialChunks().get(),
- request.getPresplitHashedZones().get(),
+ request.getNumInitialChunks().value(),
+ request.getPresplitHashedZones().value(),
request.getInitialSplitPoints(),
zones,
3 /* numShards */,
@@ -250,8 +250,8 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_WithZones_OneChunkToPrimary) {
auto optimization =
InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
kShardKeyPattern,
- request.getNumInitialChunks().get(),
- request.getPresplitHashedZones().get(),
+ request.getNumInitialChunks().value(),
+ request.getPresplitHashedZones().value(),
request.getInitialSplitPoints(),
zones,
3 /* numShards */,
@@ -296,8 +296,8 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_SplitPoints_FromClient_ManyChunksD
auto optimization = InitialSplitPolicy::calculateOptimizationStrategy(
operationContext(),
kShardKeyPattern,
- request.getNumInitialChunks().get(),
- request.getPresplitHashedZones().get(),
+ request.getNumInitialChunks().value(),
+ request.getPresplitHashedZones().value(),
request.getInitialSplitPoints(),
zones,
3 /* numShards */,
@@ -346,8 +346,8 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_NoSplitPoints_OneChunkToPrimary) {
auto optimization = InitialSplitPolicy::calculateOptimizationStrategy(
operationContext(),
kShardKeyPattern,
- request.getNumInitialChunks().get(),
- request.getPresplitHashedZones().get(),
+ request.getNumInitialChunks().value(),
+ request.getPresplitHashedZones().value(),
request.getInitialSplitPoints(),
zones,
3 /* numShards */,
@@ -382,8 +382,8 @@ TEST_F(CreateFirstChunksTest, EmptyCollection_WithZones_ManyChunksOnFirstZoneSha
auto optimization =
InitialSplitPolicy::calculateOptimizationStrategy(operationContext(),
kShardKeyPattern,
- request.getNumInitialChunks().get(),
- request.getPresplitHashedZones().get(),
+ request.getNumInitialChunks().value(),
+ request.getPresplitHashedZones().value(),
request.getInitialSplitPoints(),
zones,
3 /* numShards */,
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index c02b9e38ad0..333ea221c12 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -439,7 +439,7 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
<< " The CWWC on the shard is (" << cwwcOnShard << ")."};
}
- auto cwwcOnConfig = cachedCWWC.get().toBSON();
+ auto cwwcOnConfig = cachedCWWC.value().toBSON();
BSONObjComparator comparator(
BSONObj(), BSONObjComparator::FieldNamesMode::kConsider, nullptr);
if (comparator.compare(cwwcOnShard, cwwcOnConfig) != 0) {
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index aa622b0bd03..58f11c66e02 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -617,7 +617,7 @@ void CreateCollectionCoordinator::_createCollectionAndIndexes(OperationContext*
// We need to implicitly create a timeseries view and underlying bucket collection.
if (_collectionEmpty && _request.getTimeseries()) {
const auto viewName = nss().getTimeseriesViewNamespace();
- auto createCmd = makeCreateCommand(viewName, collation, _request.getTimeseries().get());
+ auto createCmd = makeCreateCommand(viewName, collation, _request.getTimeseries().value());
BSONObj createRes;
DBDirectClient localClient(opCtx);
diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp
index 0262090f6af..70cf4e2ea34 100644
--- a/src/mongo/db/s/drop_collection_coordinator.cpp
+++ b/src/mongo/db/s/drop_collection_coordinator.cpp
@@ -139,7 +139,7 @@ ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
if (collIsSharded) {
invariant(_doc.getCollInfo());
- const auto& coll = _doc.getCollInfo().get();
+ const auto& coll = _doc.getCollInfo().value();
sharding_ddl_util::removeCollAndChunksMetadataFromConfig(
opCtx, coll, ShardingCatalogClient::kMajorityWriteConcern);
}
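A minimal sketch (not from this patch; StateDoc is a hypothetical stand-in) of the lifetime rule that bindings such as `const auto& coll = _doc.getCollInfo().value()` above depend on: value() returns a reference into the optional, so the optional itself must outlive the reference. The sketch returns the stored optional by const reference, which keeps the binding valid; a getter returning the optional by value would leave it dangling.

#include <boost/optional.hpp>
#include <iostream>
#include <string>

struct StateDoc {
    boost::optional<std::string> collInfo{std::string{"collection metadata"}};
    // Returning by const reference keeps `const auto&` bindings to value() valid.
    const boost::optional<std::string>& getCollInfo() const {
        return collInfo;
    }
};

int main() {
    StateDoc doc;
    const auto& coll = doc.getCollInfo().value();  // refers into doc.collInfo
    std::cout << coll << '\n';
    return 0;
}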
diff --git a/src/mongo/db/s/drop_database_coordinator.cpp b/src/mongo/db/s/drop_database_coordinator.cpp
index a6676e133b4..72d8ab50647 100644
--- a/src/mongo/db/s/drop_database_coordinator.cpp
+++ b/src/mongo/db/s/drop_database_coordinator.cpp
@@ -215,7 +215,7 @@ ExecutorFuture<void> DropDatabaseCoordinator::_runImpl(
}
if (_doc.getCollInfo()) {
- const auto& coll = _doc.getCollInfo().get();
+ const auto& coll = _doc.getCollInfo().value();
LOGV2_DEBUG(5494504,
2,
"Completing collection drop from previous primary",
diff --git a/src/mongo/db/s/forwardable_operation_metadata.cpp b/src/mongo/db/s/forwardable_operation_metadata.cpp
index 51c06f80347..458484deb7f 100644
--- a/src/mongo/db/s/forwardable_operation_metadata.cpp
+++ b/src/mongo/db/s/forwardable_operation_metadata.cpp
@@ -57,11 +57,11 @@ void ForwardableOperationMetadata::setOn(OperationContext* opCtx) const {
Client* client = opCtx->getClient();
if (const auto& comment = getComment()) {
stdx::lock_guard<Client> lk(*client);
- opCtx->setComment(comment.get());
+ opCtx->setComment(comment.value());
}
if (const auto& optAuthMetadata = getImpersonatedUserMetadata()) {
- const auto& authMetadata = optAuthMetadata.get();
+ const auto& authMetadata = optAuthMetadata.value();
const auto& users = authMetadata.getUsers();
if (!users.empty() || !authMetadata.getRoles().empty()) {
fassert(ErrorCodes::InternalError, users.size() == 1);
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 1f8755fc12a..79de7a46fd2 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -69,7 +69,7 @@ bool metadataOverlapsRange(const boost::optional<CollectionMetadata>& metadata,
if (!metadata) {
return false;
}
- return metadataOverlapsRange(metadata.get(), range);
+ return metadataOverlapsRange(metadata.value(), range);
}
} // namespace
@@ -105,7 +105,7 @@ public:
// boost::none
const CollectionMetadata& get() {
invariant(_metadataTracker->metadata);
- return _metadataTracker->metadata.get();
+ return _metadataTracker->metadata.value();
}
private:
@@ -178,7 +178,7 @@ void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
invariant(!_metadata.empty());
// The active metadata should always be available (not boost::none)
invariant(_metadata.back()->metadata);
- const auto& activeMetadata = _metadata.back()->metadata.get();
+ const auto& activeMetadata = _metadata.back()->metadata.value();
const auto remoteCollVersion = remoteMetadata.getCollVersion();
const auto activeCollVersion = activeMetadata.getCollVersion();
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index c751ee64f89..8d6b5050f1e 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -227,8 +227,8 @@ void LogTransactionOperationsForShardingHandler::commit(boost::optional<Timestam
continue;
}
- auto const& minKey = cloner->_args.getMin().get();
- auto const& maxKey = cloner->_args.getMax().get();
+ auto const& minKey = cloner->_args.getMin().value();
+ auto const& maxKey = cloner->_args.getMax().value();
auto const& shardKeyPattern = cloner->_shardKeyPattern;
if (!isInRange(documentKey, minKey, maxKey, shardKeyPattern)) {
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index 8be0acd90df..376f3d35880 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -316,7 +316,7 @@ public:
WriteConcernOptions majorityWC{WriteConcernOptions::kMajority,
WriteConcernOptions::SyncMode::UNSET,
WriteConcernOptions::kNoTimeout};
- uassertStatusOK(waitForWriteConcern(opCtx, opTime.get(), majorityWC, &wcResult));
+ uassertStatusOK(waitForWriteConcern(opCtx, opTime.value(), majorityWC, &wcResult));
auto rollbackIdAtMigrationInit = [&]() {
AutoGetActiveCloner autoCloner(opCtx, migrationSessionId, false);
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 58fe7659d21..d9be4951278 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -369,7 +369,7 @@ bool MigrationDestinationManager::isActive() const {
}
bool MigrationDestinationManager::_isActive(WithLock) const {
- return _sessionId.is_initialized();
+ return _sessionId.has_value();
}
void MigrationDestinationManager::report(BSONObjBuilder& b,
@@ -389,7 +389,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b,
}
stdx::lock_guard<Latch> sl(_mutex);
- b.appendBool("active", _sessionId.is_initialized());
+ b.appendBool("active", _sessionId.has_value());
if (_sessionId) {
b.append("sessionId", _sessionId->toString());
@@ -1372,7 +1372,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx,
}
migrationutil::persistUpdatedNumOrphans(
- opCtx, _migrationId.get(), *_collectionUuid, batchNumCloned);
+ opCtx, _migrationId.value(), *_collectionUuid, batchNumCloned);
{
stdx::lock_guard<Latch> statsLock(_mutex);
@@ -1821,7 +1821,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const
if (changeInOrphans != 0) {
migrationutil::persistUpdatedNumOrphans(
- opCtx, _migrationId.get(), *_collectionUuid, changeInOrphans);
+ opCtx, _migrationId.value(), *_collectionUuid, changeInOrphans);
}
return didAnything;
}
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 08998b7ca32..dd5c1b73c83 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -253,7 +253,7 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
shardVersion.majorVersion() > 0);
// Compute the max bound in case only `min` is set (moveRange)
- if (!_args.getMax().is_initialized()) {
+ if (!_args.getMax().has_value()) {
// TODO SERVER-64926 do not assume min always present
const auto& min = *_args.getMin();
diff --git a/src/mongo/db/s/rename_collection_coordinator.cpp b/src/mongo/db/s/rename_collection_coordinator.cpp
index 11a00d34f04..00c8bb6a0b9 100644
--- a/src/mongo/db/s/rename_collection_coordinator.cpp
+++ b/src/mongo/db/s/rename_collection_coordinator.cpp
@@ -295,7 +295,7 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
// - Locally drop the target collection
// - Locally rename source to target
ShardsvrRenameCollectionParticipant renameCollParticipantRequest(
- fromNss, _doc.getSourceUUID().get());
+ fromNss, _doc.getSourceUUID().value());
renameCollParticipantRequest.setDbName(fromNss.db());
renameCollParticipantRequest.setTargetUUID(_doc.getTargetUUID());
renameCollParticipantRequest.setRenameCollectionRequest(_request);
@@ -367,7 +367,7 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
// On participant shards:
// - Unblock CRUD on participants for both source and destination collections
ShardsvrRenameCollectionUnblockParticipant unblockParticipantRequest(
- fromNss, _doc.getSourceUUID().get());
+ fromNss, _doc.getSourceUUID().value());
unblockParticipantRequest.setDbName(fromNss.db());
unblockParticipantRequest.setRenameCollectionRequest(_request);
auto const cmdObj = CommandHelpers::appendMajorityWriteConcern(
diff --git a/src/mongo/db/s/rename_collection_participant_service.cpp b/src/mongo/db/s/rename_collection_participant_service.cpp
index 6fc4a2bed2f..ee8aebded3e 100644
--- a/src/mongo/db/s/rename_collection_participant_service.cpp
+++ b/src/mongo/db/s/rename_collection_participant_service.cpp
@@ -169,7 +169,7 @@ boost::optional<BSONObj> RenameParticipantInstance::reportForCurrentOp(
BSONObjBuilder cmdBob;
if (const auto& optComment = _doc.getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
+ cmdBob.append(optComment.value().firstElement());
}
BSONObjBuilder bob;
bob.append("type", "op");
diff --git a/src/mongo/db/s/reshard_collection_coordinator.cpp b/src/mongo/db/s/reshard_collection_coordinator.cpp
index afb1a0f7ab4..28872ee8268 100644
--- a/src/mongo/db/s/reshard_collection_coordinator.cpp
+++ b/src/mongo/db/s/reshard_collection_coordinator.cpp
@@ -65,10 +65,10 @@ void notifyChangeStreamsOnReshardCollectionComplete(OperationContext* opCtx,
cmdBuilder.append("unique", doc.getUnique().get_value_or(false));
if (doc.getNumInitialChunks()) {
- cmdBuilder.append("numInitialChunks", doc.getNumInitialChunks().get());
+ cmdBuilder.append("numInitialChunks", doc.getNumInitialChunks().value());
}
if (doc.getCollation()) {
- cmdBuilder.append("collation", doc.getCollation().get());
+ cmdBuilder.append("collation", doc.getCollation().value());
}
if (doc.getZones()) {
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp
index 61eb1a620c4..0d517aed6b1 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp
@@ -169,11 +169,11 @@ CoordinatorCommitMonitor::queryRemainingOperationTimeForRecipients() const {
const auto remainingTime = extractOperationRemainingTime(shardResponse.data);
// A recipient shard does not report the remaining operation time when there is no data
// to copy and no oplog entry to apply.
- if (remainingTime && remainingTime.get() < minRemainingTime) {
- minRemainingTime = remainingTime.get();
+ if (remainingTime && remainingTime.value() < minRemainingTime) {
+ minRemainingTime = remainingTime.value();
}
- if (remainingTime && remainingTime.get() > maxRemainingTime) {
- maxRemainingTime = remainingTime.get();
+ if (remainingTime && remainingTime.value() > maxRemainingTime) {
+ maxRemainingTime = remainingTime.value();
}
}
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp b/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp
index da457d8eab3..6899a54e4f7 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp
@@ -162,7 +162,7 @@ void ReshardingCoordinatorObserver::onReshardingParticipantTransition(
const ReshardingCoordinatorDocument& updatedStateDoc) {
stdx::lock_guard<Latch> lk(_mutex);
if (auto abortReason = getAbortReasonIfExists(updatedStateDoc)) {
- _onAbortOrStepdown(lk, abortReason.get());
+ _onAbortOrStepdown(lk, abortReason.value());
// Don't exit early since the coordinator waits for all participants to report state 'done'.
}
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index 181024995d2..a787cea9775 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -355,13 +355,14 @@ BSONObj createReshardingFieldsUpdateForOriginalNss(
BSONObj setFields =
BSON("uuid" << coordinatorDoc.getReshardingUUID() << "key"
<< coordinatorDoc.getReshardingKey().toBSON() << "lastmodEpoch"
- << newCollectionEpoch.get() << "lastmod"
+ << newCollectionEpoch.value() << "lastmod"
<< opCtx->getServiceContext()->getPreciseClockSource()->now()
<< "reshardingFields.state"
<< CoordinatorState_serializer(coordinatorDoc.getState()).toString()
<< "reshardingFields.recipientFields" << recipientFields.toBSON());
if (newCollectionTimestamp.has_value()) {
- setFields = setFields.addFields(BSON("timestamp" << newCollectionTimestamp.get()));
+ setFields =
+ setFields.addFields(BSON("timestamp" << newCollectionTimestamp.value()));
}
return BSON("$set" << setFields);
@@ -441,7 +442,7 @@ void writeToConfigCollectionsForTempNss(OperationContext* opCtx,
case CoordinatorStateEnum::kPreparingToDonate: {
// Insert new entry for the temporary nss into config.collections
auto collType = resharding::createTempReshardingCollectionType(
- opCtx, coordinatorDoc, chunkVersion.get(), collation.get());
+ opCtx, coordinatorDoc, chunkVersion.value(), collation.value());
return BatchedCommandRequest::buildInsertOp(
CollectionType::ConfigNS, std::vector<BSONObj>{collType.toBSON()});
}
@@ -465,11 +466,11 @@ void writeToConfigCollectionsForTempNss(OperationContext* opCtx,
"reshardingFields.state"
<< CoordinatorState_serializer(nextState).toString()
<< "reshardingFields.recipientFields.approxDocumentsToCopy"
- << coordinatorDoc.getApproxDocumentsToCopy().get()
+ << coordinatorDoc.getApproxDocumentsToCopy().value()
<< "reshardingFields.recipientFields.approxBytesToCopy"
- << coordinatorDoc.getApproxBytesToCopy().get()
+ << coordinatorDoc.getApproxBytesToCopy().value()
<< "reshardingFields.recipientFields.cloneTimestamp"
- << coordinatorDoc.getCloneTimestamp().get()
+ << coordinatorDoc.getCloneTimestamp().value()
<< "reshardingFields.recipientFields.donorShards"
<< donorShardsBuilder.arr() << "lastmod"
<< opCtx->getServiceContext()->getPreciseClockSource()->now())),
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
index a849cc5ca87..f7e4aaf9f0e 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
@@ -351,7 +351,7 @@ public:
std::move(uuid),
shardKey);
if (reshardingFields)
- collType.setReshardingFields(std::move(reshardingFields.get()));
+ collType.setReshardingFields(std::move(reshardingFields.value()));
if (coordinatorDoc.getState() == CoordinatorStateEnum::kDone ||
coordinatorDoc.getState() == CoordinatorStateEnum::kAborting) {
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
index a5236d91c5b..40e6a2296e6 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
@@ -128,7 +128,7 @@ protected:
std::move(uuid),
shardKey);
if (reshardingFields)
- collType.setReshardingFields(std::move(reshardingFields.get()));
+ collType.setReshardingFields(std::move(reshardingFields.value()));
if (coordinatorDoc.getState() == CoordinatorStateEnum::kDone ||
coordinatorDoc.getState() == CoordinatorStateEnum::kAborting) {
@@ -262,8 +262,8 @@ protected:
ASSERT(coordinatorDoc.getActive());
if (expectedCoordinatorDoc.getCloneTimestamp()) {
ASSERT(coordinatorDoc.getCloneTimestamp());
- ASSERT_EQUALS(coordinatorDoc.getCloneTimestamp().get(),
- expectedCoordinatorDoc.getCloneTimestamp().get());
+ ASSERT_EQUALS(coordinatorDoc.getCloneTimestamp().value(),
+ expectedCoordinatorDoc.getCloneTimestamp().value());
} else {
ASSERT(!coordinatorDoc.getCloneTimestamp());
}
@@ -271,8 +271,8 @@ protected:
// Confirm the (non)existence of the CoordinatorDocument abortReason.
if (expectedCoordinatorDoc.getAbortReason()) {
ASSERT(coordinatorDoc.getAbortReason());
- ASSERT_BSONOBJ_EQ(coordinatorDoc.getAbortReason().get(),
- expectedCoordinatorDoc.getAbortReason().get());
+ ASSERT_BSONOBJ_EQ(coordinatorDoc.getAbortReason().value(),
+ expectedCoordinatorDoc.getAbortReason().value());
} else {
ASSERT(!coordinatorDoc.getAbortReason());
}
@@ -297,8 +297,8 @@ protected:
ASSERT(onDiskIt != onDiskDonorShards.end());
if (it->getMutableState().getMinFetchTimestamp()) {
ASSERT(onDiskIt->getMutableState().getMinFetchTimestamp());
- ASSERT_EQUALS(onDiskIt->getMutableState().getMinFetchTimestamp().get(),
- it->getMutableState().getMinFetchTimestamp().get());
+ ASSERT_EQUALS(onDiskIt->getMutableState().getMinFetchTimestamp().value(),
+ it->getMutableState().getMinFetchTimestamp().value());
} else {
ASSERT(!onDiskIt->getMutableState().getMinFetchTimestamp());
}
@@ -346,7 +346,7 @@ protected:
return;
ASSERT(onDiskEntry.getReshardingFields());
- auto onDiskReshardingFields = onDiskEntry.getReshardingFields().get();
+ auto onDiskReshardingFields = onDiskEntry.getReshardingFields().value();
ASSERT(onDiskReshardingFields.getReshardingUUID() ==
expectedReshardingFields->getReshardingUUID());
ASSERT(onDiskReshardingFields.getState() == expectedReshardingFields->getState());
@@ -396,10 +396,10 @@ protected:
ASSERT_EQUALS(onDiskEntry.getAllowMigrations(), expectedCollType->getAllowMigrations());
- auto expectedReshardingFields = expectedCollType->getReshardingFields().get();
+ auto expectedReshardingFields = expectedCollType->getReshardingFields().value();
ASSERT(onDiskEntry.getReshardingFields());
- auto onDiskReshardingFields = onDiskEntry.getReshardingFields().get();
+ auto onDiskReshardingFields = onDiskEntry.getReshardingFields().value();
ASSERT_EQUALS(onDiskReshardingFields.getReshardingUUID(),
expectedReshardingFields.getReshardingUUID());
ASSERT(onDiskReshardingFields.getState() == expectedReshardingFields.getState());
@@ -410,8 +410,9 @@ protected:
if (expectedReshardingFields.getRecipientFields()->getCloneTimestamp()) {
ASSERT(onDiskReshardingFields.getRecipientFields()->getCloneTimestamp());
- ASSERT_EQUALS(onDiskReshardingFields.getRecipientFields()->getCloneTimestamp().get(),
- expectedReshardingFields.getRecipientFields()->getCloneTimestamp().get());
+ ASSERT_EQUALS(
+ onDiskReshardingFields.getRecipientFields()->getCloneTimestamp().value(),
+ expectedReshardingFields.getRecipientFields()->getCloneTimestamp().value());
} else {
ASSERT(!onDiskReshardingFields.getRecipientFields()->getCloneTimestamp());
}
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
index e5bd8defdbd..7068918b875 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
@@ -405,7 +405,7 @@ TEST_F(ReshardingDonorRecipientCommonTest, CreateDonorServiceInstance) {
ASSERT(donorStateMachine != boost::none);
- donorStateMachine.get()->interrupt({ErrorCodes::InternalError, "Shut down for test"});
+ donorStateMachine.value()->interrupt({ErrorCodes::InternalError, "Shut down for test"});
}
TEST_F(ReshardingDonorRecipientCommonTest, CreateRecipientServiceInstance) {
@@ -432,7 +432,7 @@ TEST_F(ReshardingDonorRecipientCommonTest, CreateRecipientServiceInstance) {
ASSERT(recipientStateMachine != boost::none);
- recipientStateMachine.get()->interrupt({ErrorCodes::InternalError, "Shut down for test"});
+ recipientStateMachine.value()->interrupt({ErrorCodes::InternalError, "Shut down for test"});
}
TEST_F(ReshardingDonorRecipientCommonTest,
diff --git a/src/mongo/db/s/resharding/resharding_donor_service.cpp b/src/mongo/db/s/resharding/resharding_donor_service.cpp
index ac4f30d216c..367c594ab7b 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service.cpp
@@ -220,7 +220,7 @@ ReshardingDonorService::DonorStateMachine::DonorStateMachine(
_metadata{donorDoc.getCommonReshardingMetadata()},
_recipientShardIds{donorDoc.getRecipientShards()},
_donorCtx{donorDoc.getMutableState()},
- _donorMetricsToRestore{donorDoc.getMetrics() ? donorDoc.getMetrics().get()
+ _donorMetricsToRestore{donorDoc.getMetrics() ? donorDoc.getMetrics().value()
: ReshardingDonorMetrics()},
_externalState{std::move(externalState)},
_markKilledExecutor(std::make_shared<ThreadPool>([] {
@@ -512,7 +512,7 @@ boost::optional<BSONObj> ReshardingDonorService::DonorStateMachine::reportForCur
void ReshardingDonorService::DonorStateMachine::onReshardingFieldsChanges(
OperationContext* opCtx, const TypeCollectionReshardingFields& reshardingFields) {
if (reshardingFields.getState() == CoordinatorStateEnum::kAborting) {
- abort(reshardingFields.getUserCanceled().get());
+ abort(reshardingFields.getUserCanceled().value());
return;
}
diff --git a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
index d4f5046e340..cfe93c7e8cf 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
@@ -772,7 +772,7 @@ TEST_F(ReshardingDonorServiceTest, RestoreMetricsOnKBlockingWrites) {
donor
->reportForCurrentOp(MongoProcessInterface::CurrentOpConnectionsMode::kExcludeIdle,
MongoProcessInterface::CurrentOpSessionsMode::kExcludeIdle)
- .get();
+ .value();
ASSERT_EQ(currOp.getStringField("donorState"),
DonorState_serializer(DonorStateEnum::kBlockingWrites));
ASSERT_GTE(currOp.getField("totalOperationTimeElapsedSecs").Long(), opTimeDurationSecs);
diff --git a/src/mongo/db/s/resharding/resharding_metrics.cpp b/src/mongo/db/s/resharding/resharding_metrics.cpp
index 7595559831e..69d2e899d84 100644
--- a/src/mongo/db/s/resharding/resharding_metrics.cpp
+++ b/src/mongo/db/s/resharding/resharding_metrics.cpp
@@ -62,7 +62,7 @@ BSONObj createOriginalCommand(const NamespaceString& nss, BSONObj shardKey) {
Date_t readStartTime(const CommonReshardingMetadata& metadata, ClockSource* fallbackSource) {
if (const auto& startTime = metadata.getStartTime()) {
- return startTime.get();
+ return startTime.value();
} else {
return fallbackSource->now();
}
diff --git a/src/mongo/db/s/resharding/resharding_op_observer.cpp b/src/mongo/db/s/resharding/resharding_op_observer.cpp
index 3441f7c1eea..d6321abb360 100644
--- a/src/mongo/db/s/resharding/resharding_op_observer.cpp
+++ b/src/mongo/db/s/resharding/resharding_op_observer.cpp
@@ -53,8 +53,7 @@ std::shared_ptr<ReshardingCoordinatorObserver> getReshardingCoordinatorObserver(
auto instance =
ReshardingCoordinatorService::ReshardingCoordinator::lookup(opCtx, service, reshardingId);
- iassert(
- 5400001, "ReshardingCoordinatorService instance does not exist", instance.is_initialized());
+ iassert(5400001, "ReshardingCoordinatorService instance does not exist", instance.has_value());
return (*instance)->getObserver();
}
@@ -62,7 +61,7 @@ std::shared_ptr<ReshardingCoordinatorObserver> getReshardingCoordinatorObserver(
boost::optional<Timestamp> parseNewMinFetchTimestampValue(const BSONObj& obj) {
auto doc = ReshardingDonorDocument::parse(IDLParserContext("Resharding"), obj);
if (doc.getMutableState().getState() == DonorStateEnum::kDonatingInitialData) {
- return doc.getMutableState().getMinFetchTimestamp().get();
+ return doc.getMutableState().getMinFetchTimestamp().value();
} else {
return boost::none;
}
@@ -114,8 +113,8 @@ boost::optional<Timestamp> _calculatePin(OperationContext* opCtx) {
Timestamp ret = Timestamp::max();
auto cursor = collection->getCursor(opCtx);
for (auto doc = cursor->next(); doc; doc = cursor->next()) {
- if (auto fetchTs = parseNewMinFetchTimestampValue(doc.get().data.toBson()); fetchTs) {
- ret = std::min(ret, fetchTs.get());
+ if (auto fetchTs = parseNewMinFetchTimestampValue(doc.value().data.toBson()); fetchTs) {
+ ret = std::min(ret, fetchTs.value());
}
}
@@ -136,7 +135,7 @@ void _doPin(OperationContext* opCtx) {
}
StatusWith<Timestamp> res = storageEngine->pinOldestTimestamp(
- opCtx, ReshardingHistoryHook::kName.toString(), pin.get(), false);
+ opCtx, ReshardingHistoryHook::kName.toString(), pin.value(), false);
if (!res.isOK()) {
if (replCoord->getReplicationMode() != repl::ReplicationCoordinator::Mode::modeReplSet) {
// The pin has failed, but we're in standalone mode. Ignore the error.
@@ -155,7 +154,7 @@ void _doPin(OperationContext* opCtx) {
// is the most robust path forward. Ignore this case.
LOGV2_WARNING(5384104,
"This node is unable to pin history for resharding",
- "requestedTs"_attr = pin.get());
+ "requestedTs"_attr = pin.value());
} else {
// For recovery cases we also ignore the error. The expected scenario is the pin
// request is no longer needed, but the write to delete the pin was rolled
@@ -164,7 +163,7 @@ void _doPin(OperationContext* opCtx) {
// consequence to observing this error. Ignore this case.
LOGV2(5384103,
"The requested pin was unavailable, but should also be unnecessary",
- "requestedTs"_attr = pin.get());
+ "requestedTs"_attr = pin.value());
}
}
}
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
index 9a643ef819e..d0214736e61 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
@@ -333,7 +333,7 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt
BSONObj oField = op.getObject();
BSONObj o2Field;
if (op.getObject2())
- o2Field = op.getObject2().get();
+ o2Field = op.getObject2().value();
// If the 'o2' field does not have an _id, the oplog entry is corrupted.
auto idField = o2Field["_id"];
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
index 4e6c32ac1e2..268147678c3 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
@@ -275,7 +275,7 @@ public:
}
const ChunkManager& chunkManager() {
- return _cm.get();
+ return _cm.value();
}
const std::vector<NamespaceString>& stashCollections() {
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.cpp b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
index 9a63fe0daea..5879e241f54 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
@@ -520,7 +520,7 @@ boost::optional<BSONObj> ReshardingRecipientService::RecipientStateMachine::repo
void ReshardingRecipientService::RecipientStateMachine::onReshardingFieldsChanges(
OperationContext* opCtx, const TypeCollectionReshardingFields& reshardingFields) {
if (reshardingFields.getState() == CoordinatorStateEnum::kAborting) {
- abort(reshardingFields.getUserCanceled().get());
+ abort(reshardingFields.getUserCanceled().value());
return;
}
@@ -662,7 +662,7 @@ void ReshardingRecipientService::RecipientStateMachine::_ensureDataReplicationSt
_recipientService->getInstanceCleanupExecutor(),
abortToken,
factory,
- txnCloneTime.get())
+ txnCloneTime.value())
.share();
stdx::lock_guard lk(_mutex);
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index d054ae355ab..b9809f93d89 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -869,7 +869,7 @@ TEST_F(ReshardingRecipientServiceTest, RestoreMetricsAfterStepUp) {
->reportForCurrentOp(
MongoProcessInterface::CurrentOpConnectionsMode::kExcludeIdle,
MongoProcessInterface::CurrentOpSessionsMode::kExcludeIdle)
- .get();
+ .value();
ASSERT_EQ(currOp.getField("documentsCopied").numberLong(), 1L);
ASSERT_EQ(currOp.getField("bytesCopied").numberLong(), (long)reshardedDoc.objsize());
@@ -880,7 +880,7 @@ TEST_F(ReshardingRecipientServiceTest, RestoreMetricsAfterStepUp) {
->reportForCurrentOp(
MongoProcessInterface::CurrentOpConnectionsMode::kExcludeIdle,
MongoProcessInterface::CurrentOpSessionsMode::kExcludeIdle)
- .get();
+ .value();
ASSERT_EQ(currOp.getField("documentsCopied").numberLong(), 1L);
ASSERT_EQ(currOp.getField("bytesCopied").numberLong(), (long)reshardedDoc.objsize());
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
index 223da448f54..0582a78037c 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
@@ -262,9 +262,9 @@ protected:
auto bsonOplog = client.findOne(std::move(findCmd));
ASSERT(!bsonOplog.isEmpty());
auto oplogEntry = repl::MutableOplogEntry::parse(bsonOplog).getValue();
- ASSERT_EQ(oplogEntry.getTxnNumber().get(), txnNum);
+ ASSERT_EQ(oplogEntry.getTxnNumber().value(), txnNum);
ASSERT_BSONOBJ_EQ(oplogEntry.getObject(), BSON("$sessionMigrateInfo" << 1));
- ASSERT_BSONOBJ_EQ(oplogEntry.getObject2().get(), BSON("$incompleteOplogHistory" << 1));
+ ASSERT_BSONOBJ_EQ(oplogEntry.getObject2().value(), BSON("$incompleteOplogHistory" << 1));
ASSERT(oplogEntry.getOpType() == repl::OpTypeEnum::kNoop);
auto bsonTxn =
@@ -355,8 +355,8 @@ protected:
std::shared_ptr<executor::ThreadPoolTaskExecutor> cleanupExecutor,
boost::optional<CancellationToken> customCancelToken = boost::none) {
// Allows callers to control the cancellation of the cloner's run() function when specified.
- auto cancelToken = customCancelToken.is_initialized()
- ? customCancelToken.get()
+ auto cancelToken = customCancelToken.has_value()
+ ? customCancelToken.value()
: operationContext()->getCancellationToken();
auto cancelableOpCtxExecutor = std::make_shared<ThreadPool>([] {
diff --git a/src/mongo/db/s/resharding/resharding_util.cpp b/src/mongo/db/s/resharding/resharding_util.cpp
index 4ff2c9e3643..f7d20c6d813 100644
--- a/src/mongo/db/s/resharding/resharding_util.cpp
+++ b/src/mongo/db/s/resharding/resharding_util.cpp
@@ -175,7 +175,7 @@ void checkForHolesAndOverlapsInChunks(std::vector<ReshardedChunk>& chunks,
if (prevMax) {
uassert(ErrorCodes::BadValue,
"Chunk ranges must be contiguous",
- SimpleBSONObjComparator::kInstance.evaluate(prevMax.get() == chunk.getMin()));
+ SimpleBSONObjComparator::kInstance.evaluate(prevMax.value() == chunk.getMin()));
}
prevMax = boost::optional<BSONObj>(chunk.getMax());
}
@@ -202,7 +202,7 @@ Timestamp getHighestMinFetchTimestamp(const std::vector<DonorShardEntry>& donorS
uassert(4957300,
"All donors must have a minFetchTimestamp, but donor {} does not."_format(
StringData{donor.getId()}),
- donorFetchTimestamp.is_initialized());
+ donorFetchTimestamp.has_value());
if (maxMinFetchTimestamp < donorFetchTimestamp.value()) {
maxMinFetchTimestamp = donorFetchTimestamp.value();
}
@@ -221,7 +221,7 @@ void checkForOverlappingZones(std::vector<ReshardingZoneType>& zones) {
if (prevMax) {
uassert(ErrorCodes::BadValue,
"Zone ranges must not overlap",
- SimpleBSONObjComparator::kInstance.evaluate(prevMax.get() <= zone.getMin()));
+ SimpleBSONObjComparator::kInstance.evaluate(prevMax.value() <= zone.getMin()));
}
prevMax = boost::optional<BSONObj>(zone.getMax());
}
diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp
index d7511dee872..8c9a1b8cb32 100644
--- a/src/mongo/db/s/session_catalog_migration_destination.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination.cpp
@@ -118,7 +118,7 @@ void setPrePostImageTs(const ProcessOplogResult& lastResult, repl::MutableOplogE
// the appropriate no-op. This code on the destination patches up the CRUD operation oplog entry
// to look like the classic format.
if (entry->getNeedsRetryImage()) {
- switch (entry->getNeedsRetryImage().get()) {
+ switch (entry->getNeedsRetryImage().value()) {
case repl::RetryImageEnum::kPreImage:
entry->setPreImageOpTime({repl::OpTime()});
break;
diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp
index afa613481c6..ec7513d7a84 100644
--- a/src/mongo/db/s/session_catalog_migration_source.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source.cpp
@@ -420,7 +420,7 @@ bool SessionCatalogMigrationSource::shouldSkipOplogEntry(const mongo::repl::Oplo
// prevent a multi-statement transaction from being retried as a retryable write.
return false;
}
- auto shardKey = shardKeyPattern.extractShardKeyFromOplogEntry(object2.get());
+ auto shardKey = shardKeyPattern.extractShardKeyFromOplogEntry(object2.value());
return !chunkRange.containsKey(shardKey);
}
@@ -506,7 +506,7 @@ bool SessionCatalogMigrationSource::_handleWriteHistory(WithLock lk, OperationCo
// Skipping an entry here will also result in the pre/post images to also not be
// sent in the migration as they're handled by 'fetchPrePostImageOplog' below.
- if (shouldSkipOplogEntry(nextOplog.get(), _keyPattern, _chunkRange)) {
+ if (shouldSkipOplogEntry(nextOplog.value(), _keyPattern, _chunkRange)) {
continue;
}
@@ -770,8 +770,8 @@ boost::optional<repl::OplogEntry> SessionCatalogMigrationSource::SessionOplogIte
// Otherwise, skip the record by returning boost::none.
auto result = [&]() -> boost::optional<repl::OplogEntry> {
if (!_record.getState() ||
- _record.getState().get() == DurableTxnStateEnum::kCommitted ||
- _record.getState().get() == DurableTxnStateEnum::kPrepared) {
+ _record.getState().value() == DurableTxnStateEnum::kCommitted ||
+ _record.getState().value() == DurableTxnStateEnum::kPrepared) {
return makeSentinelOplogEntry(
_record.getSessionId(),
_record.getTxnNum(),
diff --git a/src/mongo/db/s/session_catalog_migration_source_test.cpp b/src/mongo/db/s/session_catalog_migration_source_test.cpp
index d723c069c2a..62bba20660b 100644
--- a/src/mongo/db/s/session_catalog_migration_source_test.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source_test.cpp
@@ -181,7 +181,7 @@ repl::OplogEntry makeRewrittenOplogInSession(repl::OpTime opTime,
*original.getTxnNumber(),
original.getStatementIds(), // statement ids
original.getPrevWriteOpTimeInTransaction()
- .get()); // optime of previous write within same transaction
+ .value()); // optime of previous write within same transaction
};
repl::DurableReplOperation makeDurableReplOp(
@@ -722,8 +722,8 @@ TEST_F(SessionCatalogMigrationSourceTest, ForgeImageEntriesWhenFetchingEntriesWi
// Check that the key fields are what we expect. The destination will overwrite any unneeded
// fields when it processes the incoming entries.
ASSERT_BSONOBJ_EQ(preImage, nextOplogResult.oplog->getObject());
- ASSERT_EQUALS(txnNumber, nextOplogResult.oplog->getTxnNumber().get());
- ASSERT_EQUALS(sessionId, nextOplogResult.oplog->getSessionId().get());
+ ASSERT_EQUALS(txnNumber, nextOplogResult.oplog->getTxnNumber().value());
+ ASSERT_EQUALS(sessionId, nextOplogResult.oplog->getSessionId().value());
ASSERT_EQUALS("n", repl::OpType_serializer(nextOplogResult.oplog->getOpType()));
ASSERT_EQ(entry.getStatementIds().size(), nextOplogResult.oplog->getStatementIds().size());
for (size_t i = 0; i < entry.getStatementIds().size(); i++) {
diff --git a/src/mongo/db/s/shard_key_index_util.cpp b/src/mongo/db/s/shard_key_index_util.cpp
index 1cdd4f99008..7050abedb5e 100644
--- a/src/mongo/db/s/shard_key_index_util.cpp
+++ b/src/mongo/db/s/shard_key_index_util.cpp
@@ -186,7 +186,7 @@ bool isLastShardKeyIndex(OperationContext* opCtx,
const BSONObj& shardKey) {
return !_findShardKeyPrefixedIndex(
opCtx, collection, indexCatalog, indexName, shardKey, false /* requireSingleKey */)
- .is_initialized();
+ .has_value();
}
boost::optional<ShardKeyIndex> findShardKeyPrefixedIndex(OperationContext* opCtx,
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
index a111b9bf592..f3c2a80f4a0 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
@@ -212,7 +212,7 @@ ShardServerCatalogCacheLoaderTest::setUpChunkLoaderWithFiveChunks() {
ASSERT_EQUALS(collAndChunksRes.epoch, collectionType.getEpoch());
ASSERT_EQUALS(collAndChunksRes.changedChunks.size(), 5UL);
- ASSERT(!collAndChunksRes.timeseriesFields.is_initialized());
+ ASSERT(!collAndChunksRes.timeseriesFields.has_value());
for (unsigned int i = 0; i < collAndChunksRes.changedChunks.size(); ++i) {
ASSERT_BSONOBJ_EQ(collAndChunksRes.changedChunks[i].toShardBSON(), chunks[i].toShardBSON());
}
@@ -454,7 +454,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, TimeseriesFieldsAreProperlyPropagatedO
_remoteLoaderMock->setChunkRefreshReturnValue(chunks);
auto collAndChunksRes = _shardLoader->getChunksSince(kNss, ChunkVersion::UNSHARDED()).get();
- ASSERT(collAndChunksRes.timeseriesFields.is_initialized());
+ ASSERT(collAndChunksRes.timeseriesFields.has_value());
ASSERT(collAndChunksRes.timeseriesFields->getGranularity() ==
BucketGranularityEnum::Seconds);
}
@@ -475,7 +475,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, TimeseriesFieldsAreProperlyPropagatedO
_remoteLoaderMock->setChunkRefreshReturnValue(std::vector{lastChunk});
auto collAndChunksRes = _shardLoader->getChunksSince(kNss, maxLoaderVersion).get();
- ASSERT(collAndChunksRes.timeseriesFields.is_initialized());
+ ASSERT(collAndChunksRes.timeseriesFields.has_value());
ASSERT(collAndChunksRes.timeseriesFields->getGranularity() == BucketGranularityEnum::Hours);
}
}
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.cpp b/src/mongo/db/s/sharding_ddl_coordinator.cpp
index 7d880424df3..75475adfd78 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.cpp
+++ b/src/mongo/db/s/sharding_ddl_coordinator.cpp
@@ -312,7 +312,7 @@ SemiFuture<void> ShardingDDLCoordinator::run(std::shared_ptr<executor::ScopedTas
})
.then([this, executor, token, anchor = shared_from_this()] {
if (const auto bucketNss = metadata().getBucketNss()) {
- return _acquireLockAsync(executor, token, bucketNss.get().ns());
+ return _acquireLockAsync(executor, token, bucketNss.value().ns());
}
return ExecutorFuture<void>(**executor);
})
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index 97c4118f101..9f1b2e26dd2 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -166,7 +166,7 @@ private:
return;
}
updateState->updateInProgress = true;
- update = updateState->nextUpdateToSend.get();
+ update = updateState->nextUpdateToSend.value();
updateState->nextUpdateToSend = boost::none;
}
diff --git a/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp b/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp
index 16d75a2bfb9..28e893755ef 100644
--- a/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp
+++ b/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp
@@ -193,8 +193,9 @@ public:
if (optRenameCollectionParticipant) {
uassert(ErrorCodes::CommandFailed,
"Provided UUID does not match",
- optRenameCollectionParticipant.get()->sourceUUID() == req.getSourceUUID());
- optRenameCollectionParticipant.get()->getUnblockCrudFuture().get(opCtx);
+ optRenameCollectionParticipant.value()->sourceUUID() ==
+ req.getSourceUUID());
+ optRenameCollectionParticipant.value()->getUnblockCrudFuture().get(opCtx);
}
// Since no write that generated a retryable write oplog entry with this sessionId
diff --git a/src/mongo/db/s/split_vector.cpp b/src/mongo/db/s/split_vector.cpp
index 2bc0b8507de..de43200ad8b 100644
--- a/src/mongo/db/s/split_vector.cpp
+++ b/src/mongo/db/s/split_vector.cpp
@@ -124,12 +124,12 @@ std::vector<BSONObj> splitVector(OperationContext* opCtx,
}
// We need a maximum size for the chunk.
- if (!maxChunkSizeBytes || maxChunkSizeBytes.get() <= 0) {
+ if (!maxChunkSizeBytes || maxChunkSizeBytes.value() <= 0) {
uasserted(ErrorCodes::InvalidOptions, "need to specify the desired max chunk size");
}
// If there's not enough data for more than one chunk, no point continuing.
- if (dataSize < maxChunkSizeBytes.get() || recCount == 0) {
+ if (dataSize < maxChunkSizeBytes.value() || recCount == 0) {
std::vector<BSONObj> emptyVector;
return emptyVector;
}
@@ -146,18 +146,18 @@ std::vector<BSONObj> splitVector(OperationContext* opCtx,
// maxChunkObjects, if provided.
const long long avgRecSize = dataSize / recCount;
- long long keyCount = maxChunkSizeBytes.get() / (2 * avgRecSize);
+ long long keyCount = maxChunkSizeBytes.value() / (2 * avgRecSize);
- if (maxChunkObjects.get() && (maxChunkObjects.get() < keyCount)) {
+ if (maxChunkObjects.value() && (maxChunkObjects.value() < keyCount)) {
LOGV2(22108,
"Limiting the number of documents per chunk to {maxChunkObjects} based "
"on the maxChunkObjects parameter for split vector command (compared to maximum "
"possible: {maxPossibleDocumentsPerChunk})",
"Limiting the number of documents per chunk for split vector command based on "
"the maxChunksObject parameter",
- "maxChunkObjects"_attr = maxChunkObjects.get(),
+ "maxChunkObjects"_attr = maxChunkObjects.value(),
"maxPossibleDocumentsPerChunk"_attr = keyCount);
- keyCount = maxChunkObjects.get();
+ keyCount = maxChunkObjects.value();
}
//
@@ -280,7 +280,8 @@ std::vector<BSONObj> splitVector(OperationContext* opCtx,
}
// Stop if we have enough split points.
- if (maxSplitPoints && maxSplitPoints.get() && (numChunks >= maxSplitPoints.get())) {
+ if (maxSplitPoints && maxSplitPoints.value() &&
+ (numChunks >= maxSplitPoints.value())) {
LOGV2(22111,
"Max number of requested split points reached ({numSplitPoints}) before "
"the end of chunk {namespace} {minKey} -->> {maxKey}",
diff --git a/src/mongo/db/s/transaction_coordinator_factory_mongod.cpp b/src/mongo/db/s/transaction_coordinator_factory_mongod.cpp
index a22b2bc8019..90a8caccc2b 100644
--- a/src/mongo/db/s/transaction_coordinator_factory_mongod.cpp
+++ b/src/mongo/db/s/transaction_coordinator_factory_mongod.cpp
@@ -41,7 +41,7 @@ namespace {
void createTransactionCoordinatorImpl(OperationContext* opCtx,
TxnNumber clientTxnNumber,
boost::optional<TxnRetryCounter> clientTxnRetryCounter) {
- auto clientLsid = opCtx->getLogicalSessionId().get();
+ auto clientLsid = opCtx->getLogicalSessionId().value();
auto clockSource = opCtx->getServiceContext()->getFastClockSource();
// If this shard has been selected as the coordinator, set up the coordinator state
diff --git a/src/mongo/db/s/type_lockpings.cpp b/src/mongo/db/s/type_lockpings.cpp
index 345b9aa9806..60b4721aad2 100644
--- a/src/mongo/db/s/type_lockpings.cpp
+++ b/src/mongo/db/s/type_lockpings.cpp
@@ -66,11 +66,11 @@ StatusWith<LockpingsType> LockpingsType::fromBSON(const BSONObj& source) {
}
Status LockpingsType::validate() const {
- if (!_process.is_initialized() || _process->empty()) {
+ if (!_process.has_value() || _process->empty()) {
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << process.name() << " field"};
}
- if (!_ping.is_initialized()) {
+ if (!_ping.has_value()) {
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << ping.name() << " field"};
}
diff --git a/src/mongo/db/s/type_locks.cpp b/src/mongo/db/s/type_locks.cpp
index 59a9b6e44c2..4571e4a6635 100644
--- a/src/mongo/db/s/type_locks.cpp
+++ b/src/mongo/db/s/type_locks.cpp
@@ -106,11 +106,11 @@ StatusWith<LocksType> LocksType::fromBSON(const BSONObj& source) {
}
Status LocksType::validate() const {
- if (!_name.is_initialized() || _name->empty()) {
+ if (!_name.has_value() || _name->empty()) {
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << name.name() << " field"};
}
- if (!_state.is_initialized()) {
+ if (!_state.has_value()) {
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << state.name() << " field"};
}
@@ -124,21 +124,21 @@ Status LocksType::validate() const {
// if the lock is locked, check the remaining fields
if (lockState != State::UNLOCKED) {
- if (!_process.is_initialized() || _process->empty()) {
+ if (!_process.has_value() || _process->empty()) {
return {ErrorCodes::NoSuchKey,
str::stream() << "missing " << process.name() << " field"};
}
- if (!_lockID.is_initialized()) {
+ if (!_lockID.has_value()) {
return {ErrorCodes::NoSuchKey,
str::stream() << "missing " << lockID.name() << " field"};
}
- if (!_who.is_initialized() || _who->empty()) {
+ if (!_who.has_value() || _who->empty()) {
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << who.name() << " field"};
}
- if (!_why.is_initialized() || _why->empty()) {
+ if (!_why.has_value() || _why->empty()) {
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << why.name() << " field"};
}
}
diff --git a/src/mongo/db/serverless/shard_split_commands.cpp b/src/mongo/db/serverless/shard_split_commands.cpp
index 9b63171144e..e2026a9b1dd 100644
--- a/src/mongo/db/serverless/shard_split_commands.cpp
+++ b/src/mongo/db/serverless/shard_split_commands.cpp
@@ -166,7 +166,7 @@ public:
uassert(ErrorCodes::CommandFailed,
"Failed to abort shard split",
state.abortReason &&
- state.abortReason.get() == ErrorCodes::TenantMigrationAborted);
+ state.abortReason.value() == ErrorCodes::TenantMigrationAborted);
uassert(ErrorCodes::TenantMigrationCommitted,
"Failed to abort : shard split already committed",
@@ -235,7 +235,7 @@ public:
str::stream() << "Could not find shard split with id " << cmd.getMigrationId(),
optionalDonor);
- auto donorPtr = optionalDonor.get();
+ auto donorPtr = optionalDonor.value();
auto decision = donorPtr->decisionFuture().get(opCtx);
diff --git a/src/mongo/db/serverless/shard_split_donor_op_observer.cpp b/src/mongo/db/serverless/shard_split_donor_op_observer.cpp
index 11056e2deba..e016c9c4a86 100644
--- a/src/mongo/db/serverless/shard_split_donor_op_observer.cpp
+++ b/src/mongo/db/serverless/shard_split_donor_op_observer.cpp
@@ -196,7 +196,7 @@ void onTransitionToBlocking(OperationContext* opCtx, const ShardSplitDonorDocume
// Both primaries and secondaries call startBlockingReadsAfter in the op observer, since
// startBlockingReadsAfter just needs to be called before the "start blocking" write's oplog
// hole is filled.
- mtab->startBlockingReadsAfter(donorStateDoc.getBlockTimestamp().get());
+ mtab->startBlockingReadsAfter(donorStateDoc.getBlockTimestamp().value());
}
}
@@ -211,12 +211,12 @@ void onTransitionToCommitted(OperationContext* opCtx,
auto tenants = donorStateDoc.getTenantIds();
invariant(tenants);
- for (const auto& tenantId : tenants.get()) {
+ for (const auto& tenantId : tenants.value()) {
auto mtab = tenant_migration_access_blocker::getTenantMigrationDonorAccessBlocker(
opCtx->getServiceContext(), tenantId);
invariant(mtab);
- mtab->setCommitOpTime(opCtx, donorStateDoc.getCommitOrAbortOpTime().get());
+ mtab->setCommitOpTime(opCtx, donorStateDoc.getCommitOrAbortOpTime().value());
}
}
@@ -236,12 +236,12 @@ void onTransitionToAborted(OperationContext* opCtx, const ShardSplitDonorDocumen
return;
}
- for (const auto& tenantId : tenants.get()) {
+ for (const auto& tenantId : tenants.value()) {
auto mtab = tenant_migration_access_blocker::getTenantMigrationDonorAccessBlocker(
opCtx->getServiceContext(), tenantId);
invariant(mtab);
- mtab->setAbortOpTime(opCtx, donorStateDoc.getCommitOrAbortOpTime().get());
+ mtab->setAbortOpTime(opCtx, donorStateDoc.getCommitOrAbortOpTime().value());
}
}
@@ -258,7 +258,7 @@ public:
void commit(boost::optional<Timestamp>) override {
if (_donorStateDoc.getExpireAt()) {
if (_donorStateDoc.getTenantIds()) {
- auto tenantIds = _donorStateDoc.getTenantIds().get();
+ auto tenantIds = _donorStateDoc.getTenantIds().value();
for (auto tenantId : tenantIds) {
auto mtab =
tenant_migration_access_blocker::getTenantMigrationDonorAccessBlocker(
@@ -286,7 +286,7 @@ public:
// design the donor never marks its state doc as garbage collectable
// before the migration decision is majority committed).
mtab->onMajorityCommitPointUpdate(
- _donorStateDoc.getCommitOrAbortOpTime().get());
+ _donorStateDoc.getCommitOrAbortOpTime().value());
}
if (_donorStateDoc.getState() == ShardSplitDonorStateEnum::kAborted) {
diff --git a/src/mongo/db/serverless/shard_split_donor_service.cpp b/src/mongo/db/serverless/shard_split_donor_service.cpp
index 116e95e7525..c658a1ce434 100644
--- a/src/mongo/db/serverless/shard_split_donor_service.cpp
+++ b/src/mongo/db/serverless/shard_split_donor_service.cpp
@@ -91,7 +91,7 @@ void insertTenantAccessBlocker(WithLock lk,
auto recipientConnectionString = donorStateDoc.getRecipientConnectionString();
invariant(recipientConnectionString);
- for (const auto& tenantId : optionalTenants.get()) {
+ for (const auto& tenantId : optionalTenants.value()) {
auto mtab = std::make_shared<TenantMigrationDonorAccessBlocker>(
opCtx->getServiceContext(),
donorStateDoc.getId(),
@@ -984,7 +984,7 @@ ExecutorFuture<repl::OpTime> ShardSplitDonorService::DonorStateMachine::_updateS
invariant(_abortReason);
BSONObjBuilder bob;
- _abortReason.get().serializeErrorToBSON(&bob);
+ _abortReason.value().serializeErrorToBSON(&bob);
_stateDoc.setAbortReason(bob.obj());
break;
}
@@ -1138,7 +1138,7 @@ ShardSplitDonorService::DonorStateMachine::_handleErrorOrEnterAbortedState(
LOGV2(6086508,
"Entering 'aborted' state.",
"id"_attr = _migrationId,
- "abortReason"_attr = _abortReason.get());
+ "abortReason"_attr = _abortReason.value());
}
return ExecutorFuture<void>(**executor)
diff --git a/src/mongo/db/serverless/shard_split_donor_service_test.cpp b/src/mongo/db/serverless/shard_split_donor_service_test.cpp
index 3395a9a563e..ed4b6c8a87d 100644
--- a/src/mongo/db/serverless/shard_split_donor_service_test.cpp
+++ b/src/mongo/db/serverless/shard_split_donor_service_test.cpp
@@ -1050,7 +1050,7 @@ TEST_F(ShardSplitRecipientCleanupTest, ShardSplitRecipientCleanup) {
ASSERT_TRUE(hasActiveSplitForTenants(opCtx.get(), _tenantIds));
ASSERT_TRUE(optionalDonor);
- auto serviceInstance = optionalDonor.get();
+ auto serviceInstance = optionalDonor.value();
ASSERT(serviceInstance.get());
_pauseBeforeRecipientCleanupFp.reset();
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 1b76291285e..d2b327fad3e 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -323,7 +323,7 @@ StatusWith<repl::ReadConcernArgs> _extractReadConcern(OperationContext* opCtx,
const auto readConcernSource = rwcDefaults.getDefaultReadConcernSource();
customDefaultWasApplied =
(readConcernSource &&
- readConcernSource.get() == DefaultReadConcernSourceEnum::kGlobal);
+ readConcernSource.value() == DefaultReadConcernSourceEnum::kGlobal);
applyDefaultReadConcern(*rcDefault);
}
@@ -1408,7 +1408,7 @@ void ExecCommandDatabase::_initiateCommand() {
str::stream() << "Command " << command->getName()
<< " is not supported in multitenancy mode",
command->allowedWithSecurityToken());
- _tokenAuthorizationSessionGuard.emplace(opCtx, request.validatedTenancyScope.get());
+ _tokenAuthorizationSessionGuard.emplace(opCtx, request.validatedTenancyScope.value());
}
if (isHello()) {
diff --git a/src/mongo/db/session_killer.cpp b/src/mongo/db/session_killer.cpp
index c63dddc594c..7ad7aac1e74 100644
--- a/src/mongo/db/session_killer.cpp
+++ b/src/mongo/db/session_killer.cpp
@@ -89,9 +89,9 @@ SessionKiller::Matcher::Matcher(KillAllSessionsByPatternSet&& patterns)
for (const auto& item : _patterns) {
auto& pattern = item.pattern;
if (pattern.getUid()) {
- _uids.emplace(pattern.getUid().get(), &pattern);
+ _uids.emplace(pattern.getUid().value(), &pattern);
} else if (pattern.getLsid()) {
- _lsids.emplace(pattern.getLsid().get(), &pattern);
+ _lsids.emplace(pattern.getLsid().value(), &pattern);
} else {
// If we're killing everything, it's the only pattern we care about.
decltype(_patterns) onlyKillAll{{item}};
diff --git a/src/mongo/db/sessions_collection.cpp b/src/mongo/db/sessions_collection.cpp
index b72c85cbadc..528f3ab5c16 100644
--- a/src/mongo/db/sessions_collection.cpp
+++ b/src/mongo/db/sessions_collection.cpp
@@ -130,10 +130,10 @@ void runBulkCmd(StringData label,
auto makeBatch = [&] {
buf.reset();
batchBuilder.emplace(buf);
- initBatch(&(batchBuilder.get()));
+ initBatch(&(batchBuilder.value()));
entries.emplace(batchBuilder->subarrayStart(label));
- return &(entries.get());
+ return &(entries.value());
};
auto sendLocalBatch = [&](BSONArrayBuilder*) {
diff --git a/src/mongo/db/stats/single_transaction_stats.cpp b/src/mongo/db/stats/single_transaction_stats.cpp
index 3234998fb28..18f2d9965ac 100644
--- a/src/mongo/db/stats/single_transaction_stats.cpp
+++ b/src/mongo/db/stats/single_transaction_stats.cpp
@@ -57,11 +57,11 @@ Microseconds SingleTransactionStats::getPreparedDuration(TickSource* tickSource,
if (_preparedStartTime != boost::none) {
// If the transaction hasn't ended yet, we return how long it has currently been running
// for.
- invariant(_preparedStartTime.get() > 0);
+ invariant(_preparedStartTime.value() > 0);
if (_endTime == 0) {
- return tickSource->ticksTo<Microseconds>(curTick - _preparedStartTime.get());
+ return tickSource->ticksTo<Microseconds>(curTick - _preparedStartTime.value());
}
- return tickSource->ticksTo<Microseconds>(_endTime - _preparedStartTime.get());
+ return tickSource->ticksTo<Microseconds>(_endTime - _preparedStartTime.value());
}
return Microseconds(0);
}
diff --git a/src/mongo/db/stats/storage_stats.cpp b/src/mongo/db/stats/storage_stats.cpp
index b128bb7958f..2be6bd5af8f 100644
--- a/src/mongo/db/stats/storage_stats.cpp
+++ b/src/mongo/db/stats/storage_stats.cpp
@@ -163,7 +163,7 @@ Status appendCollectionStorageStats(OperationContext* opCtx,
collation = collator->getSpec().toBSON();
}
auto clusteredSpec = clustered_util::formatClusterKeyForListIndexes(
- collection->getClusteredInfo().get(), collation);
+ collection->getClusteredInfo().value(), collation);
auto indexSpec = collection->getClusteredInfo()->getIndexSpec();
auto nameOptional = indexSpec.getName();
// An index name is always expected.
diff --git a/src/mongo/db/storage/backup_block.cpp b/src/mongo/db/storage/backup_block.cpp
index 3cc2b0b0cf3..357290caeb3 100644
--- a/src/mongo/db/storage/backup_block.cpp
+++ b/src/mongo/db/storage/backup_block.cpp
@@ -134,7 +134,7 @@ void BackupBlock::_initialize(OperationContext* opCtx,
// Check if the ident had a different value at the checkpoint timestamp. If so, we want to use
// that instead as that will be the ident's value when restoring from the backup.
boost::optional<std::pair<NamespaceString, UUID>> historicalEntry =
- HistoricalIdentTracker::get(opCtx).lookup(_filenameStem, checkpointTimestamp.get());
+ HistoricalIdentTracker::get(opCtx).lookup(_filenameStem, checkpointTimestamp.value());
if (historicalEntry) {
_uuid = historicalEntry->second;
_setNamespaceString(historicalEntry->first);
diff --git a/src/mongo/db/storage/durable_catalog_impl.cpp b/src/mongo/db/storage/durable_catalog_impl.cpp
index 9d2c45c69f6..03f7079808e 100644
--- a/src/mongo/db/storage/durable_catalog_impl.cpp
+++ b/src/mongo/db/storage/durable_catalog_impl.cpp
@@ -751,7 +751,7 @@ StatusWith<DurableCatalog::ImportResult> DurableCatalogImpl::importCollection(
auto rs = _engine->getEngine()->getRecordStore(opCtx, nss, entry.ident, md.options);
invariant(rs);
- return DurableCatalog::ImportResult(entry.catalogId, std::move(rs), md.options.uuid.get());
+ return DurableCatalog::ImportResult(entry.catalogId, std::move(rs), md.options.uuid.value());
}
Status DurableCatalogImpl::renameCollection(OperationContext* opCtx,
diff --git a/src/mongo/db/storage/durable_history_pin.cpp b/src/mongo/db/storage/durable_history_pin.cpp
index 505e5816421..2bb962ad353 100644
--- a/src/mongo/db/storage/durable_history_pin.cpp
+++ b/src/mongo/db/storage/durable_history_pin.cpp
@@ -89,12 +89,12 @@ void DurableHistoryRegistry::reconcilePins(OperationContext* opCtx) {
"ts"_attr = pinTs);
if (pinTs) {
auto swTimestamp =
- engine->pinOldestTimestamp(opCtx, pin->getName(), pinTs.get(), false);
+ engine->pinOldestTimestamp(opCtx, pin->getName(), pinTs.value(), false);
if (!swTimestamp.isOK()) {
LOGV2_WARNING(5384105,
"Unable to repin oldest timestamp",
"service"_attr = pin->getName(),
- "request"_attr = pinTs.get(),
+ "request"_attr = pinTs.value(),
"error"_attr = swTimestamp.getStatus());
}
} else {
diff --git a/src/mongo/db/storage/kv/durable_catalog_test.cpp b/src/mongo/db/storage/kv/durable_catalog_test.cpp
index c74edd4d025..fd4bcb94885 100644
--- a/src/mongo/db/storage/kv/durable_catalog_test.cpp
+++ b/src/mongo/db/storage/kv/durable_catalog_test.cpp
@@ -113,7 +113,7 @@ public:
std::move(coll.second));
CollectionCatalog::write(operationContext(), [&](CollectionCatalog& catalog) {
catalog.registerCollection(
- operationContext(), options.uuid.get(), std::move(collection));
+ operationContext(), options.uuid.value(), std::move(collection));
});
wuow.commit();
@@ -645,7 +645,7 @@ TEST_F(ImportCollectionTest, ImportCollection) {
idxIdent);
// Test that a collection UUID is generated for import.
- ASSERT_NE(md->options.uuid.get(), importResult.uuid);
+ ASSERT_NE(md->options.uuid.value(), importResult.uuid);
// Substitute in the generated UUID and check that the rest of fields in the catalog entry
// match.
md->options.uuid = importResult.uuid;
diff --git a/src/mongo/db/storage/record_store_test_oplog.cpp b/src/mongo/db/storage/record_store_test_oplog.cpp
index cc014de1681..5b8179cb739 100644
--- a/src/mongo/db/storage/record_store_test_oplog.cpp
+++ b/src/mongo/db/storage/record_store_test_oplog.cpp
@@ -284,7 +284,7 @@ std::string stringifyForDebug(OperationContext* opCtx,
auto optOplogReadTimestampInt = opCtx->recoveryUnit()->getOplogVisibilityTs();
if (optOplogReadTimestampInt) {
output << "Latest oplog visibility timestamp: "
- << Timestamp(optOplogReadTimestampInt.get());
+ << Timestamp(optOplogReadTimestampInt.value());
}
if (record) {
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index b8be8cca3f2..98d7f0e3f1a 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -333,7 +333,7 @@ void StorageEngineImpl::loadCatalog(OperationContext* opCtx, LastShutdownState l
// `recoveryTimestamp`. Choose the `oldestTimestamp` for collections that existed at the
// `oldestTimestamp` and conservatively choose the `recoveryTimestamp` for everything
// else.
- minVisibleTs = recoveryTs.get();
+ minVisibleTs = recoveryTs.value();
if (existedAtOldestTs.find(entry.catalogId) != existedAtOldestTs.end()) {
// Collections found at the `oldestTimestamp` on startup can have their minimum
// visible timestamp pulled back to that value.
@@ -387,7 +387,7 @@ void StorageEngineImpl::_initCollection(OperationContext* opCtx,
collection->setMinimumVisibleSnapshot(minVisibleTs);
CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) {
- catalog.registerCollection(opCtx, md->options.uuid.get(), std::move(collection));
+ catalog.registerCollection(opCtx, md->options.uuid.value(), std::move(collection));
});
}
@@ -488,7 +488,7 @@ bool StorageEngineImpl::_handleInternalIdent(OperationContext* opCtx,
auto cursor = rs->getCursor(opCtx);
auto record = cursor->next();
if (record) {
- auto doc = record.get().data.toBson();
+ auto doc = record.value().data.toBson();
// Parse the documents here so that we can restart the build if the document doesn't
// contain all the necessary information to be able to resume building the index.
@@ -972,7 +972,7 @@ Status StorageEngineImpl::repairRecordStore(OperationContext* opCtx,
// After repairing, re-initialize the collection with a valid RecordStore.
CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) {
- auto uuid = catalog.lookupUUIDByNSS(opCtx, nss).get();
+ auto uuid = catalog.lookupUUIDByNSS(opCtx, nss).value();
catalog.deregisterCollection(opCtx, uuid);
});
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index cd1bef9b1e5..9276dc0d54e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -839,7 +839,7 @@ Status WiredTigerKVEngine::_rebuildIdent(WT_SESSION* session, const char* uri) {
"file"_attr = filePath->generic_string(),
"backup"_attr = corruptFile.generic_string());
- auto status = fsyncRename(filePath.get(), corruptFile);
+ auto status = fsyncRename(filePath.value(), corruptFile);
if (!status.isOK()) {
return status;
}
@@ -1250,7 +1250,7 @@ WiredTigerKVEngine::beginNonBlockingBackup(OperationContext* opCtx,
std::vector<DurableCatalog::Entry> catalogEntries = catalog->getAllCatalogEntries(opCtx);
for (const DurableCatalog::Entry& e : catalogEntries) {
// Populate the collection ident with its namespace and UUID.
- UUID uuid = catalog->getMetaData(opCtx, e.catalogId)->options.uuid.get();
+ UUID uuid = catalog->getMetaData(opCtx, e.catalogId)->options.uuid.value();
_wtBackup.identToNamespaceAndUUIDMap.emplace(e.ident, std::make_pair(e.nss, uuid));
// Populate the collection's index idents with the collection's namespace and UUID.
@@ -1469,7 +1469,7 @@ Status WiredTigerKVEngine::recoverOrphanedIdent(OperationContext* opCtx,
"Renaming data file to temporary",
"file"_attr = identFilePath->generic_string(),
"temporary"_attr = tmpFile.generic_string());
- auto status = fsyncRename(identFilePath.get(), tmpFile);
+ auto status = fsyncRename(identFilePath.value(), tmpFile);
if (!status.isOK()) {
return status;
}
@@ -1496,7 +1496,7 @@ Status WiredTigerKVEngine::recoverOrphanedIdent(OperationContext* opCtx,
return status;
}
- status = fsyncRename(tmpFile, identFilePath.get());
+ status = fsyncRename(tmpFile, identFilePath.value());
if (!status.isOK()) {
return status;
}
@@ -2528,7 +2528,7 @@ Timestamp WiredTigerKVEngine::getPinnedOplog() const {
}
if (_oplogPinnedByBackup) {
// All the oplog since `_oplogPinnedByBackup` should remain intact during the backup.
- return std::min(_oplogPinnedByBackup.get(), pinned);
+ return std::min(_oplogPinnedByBackup.value(), pinned);
}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
index ff3350eea0d..d157629e1a8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
@@ -313,7 +313,7 @@ TEST_F(WiredTigerKVEngineTest, TestOplogTruncation) {
// to 10 seconds to observe an asynchronous update that iterates once per second.
for (auto iterations = 0; iterations < 100; ++iterations) {
if (_engine->getPinnedOplog() >= newPinned) {
- ASSERT_TRUE(_engine->getOplogNeededForCrashRecovery().get() >= newPinned);
+ ASSERT_TRUE(_engine->getOplogNeededForCrashRecovery().value() >= newPinned);
return;
}
@@ -350,7 +350,7 @@ TEST_F(WiredTigerKVEngineTest, TestOplogTruncation) {
_engine->setStableTimestamp(Timestamp(40, 1), false);
// Await a new checkpoint. Oplog needed for rollback does not advance.
sleepmillis(1100);
- ASSERT_EQ(_engine->getOplogNeededForCrashRecovery().get(), Timestamp(30, 1));
+ ASSERT_EQ(_engine->getOplogNeededForCrashRecovery().value(), Timestamp(30, 1));
_engine->setStableTimestamp(Timestamp(30, 1), false);
callbackShouldFail.store(false);
assertPinnedMovesSoon(Timestamp(40, 1));
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
index e1db19aa708..a1ad92795a8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
@@ -313,7 +313,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx,
invariantWTOK(s->checkpoint(s, config), s);
if (token) {
- journalListener->onDurable(token.get());
+ journalListener->onDurable(token.value());
}
}
LOGV2_DEBUG(22418, 4, "created checkpoint (forced)");
@@ -368,7 +368,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx,
}
if (token) {
- journalListener->onDurable(token.get());
+ journalListener->onDurable(token.value());
}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
index 819e19992ca..066fd03412a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
@@ -91,7 +91,7 @@ Timestamp WiredTigerSnapshotManager::beginTransactionOnCommittedSnapshot(
uassert(ErrorCodes::ReadConcernMajorityNotAvailableYet,
"Committed view disappeared while running operation",
_committedSnapshot);
- return _committedSnapshot.get();
+ return _committedSnapshot.value();
}();
if (MONGO_unlikely(hangBeforeMajorityReadTransactionStarted.shouldFail())) {
diff --git a/src/mongo/db/timeseries/bucket_catalog_test.cpp b/src/mongo/db/timeseries/bucket_catalog_test.cpp
index 045729e31b0..52b1543e3d5 100644
--- a/src/mongo/db/timeseries/bucket_catalog_test.cpp
+++ b/src/mongo/db/timeseries/bucket_catalog_test.cpp
@@ -1200,7 +1200,7 @@ TEST_F(BucketCatalogTest, ReopenCompressedBucketAndInsertCompatibleMeasurement)
_ns1,
/*eligibleForReopening=*/false,
/*validateDecompression=*/true);
- const BSONObj& compressedBucketDoc = compressionResult.compressedBucket.get();
+ const BSONObj& compressedBucketDoc = compressionResult.compressedBucket.value();
RAIIServerParameterControllerForTest controller{"featureFlagTimeseriesScalabilityImprovements",
true};
@@ -1262,7 +1262,7 @@ TEST_F(BucketCatalogTest, ReopenCompressedBucketAndInsertIncompatibleMeasurement
_ns1,
/*eligibleForReopening=*/false,
/*validateDecompression=*/true);
- const BSONObj& compressedBucketDoc = compressionResult.compressedBucket.get();
+ const BSONObj& compressedBucketDoc = compressionResult.compressedBucket.value();
RAIIServerParameterControllerForTest controller{"featureFlagTimeseriesScalabilityImprovements",
true};
diff --git a/src/mongo/db/timeseries/timeseries_dotted_path_support_test.cpp b/src/mongo/db/timeseries/timeseries_dotted_path_support_test.cpp
index 072deec9b77..6a4c9ee2f4b 100644
--- a/src/mongo/db/timeseries/timeseries_dotted_path_support_test.cpp
+++ b/src/mongo/db/timeseries/timeseries_dotted_path_support_test.cpp
@@ -52,7 +52,7 @@ protected:
ASSERT_TRUE(compressionResult.compressedBucket.has_value());
ASSERT_FALSE(compressionResult.decompressionFailed);
- test(compressionResult.compressedBucket.get());
+ test(compressionResult.compressedBucket.value());
}
};
diff --git a/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.cpp b/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.cpp
index 4dcf9a73eda..f8aaa5c5fe9 100644
--- a/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.cpp
+++ b/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.cpp
@@ -387,7 +387,7 @@ boost::optional<BSONObj> createTimeseriesIndexFromBucketsIndex(
// exists, and modifies the kKeyFieldName field to timeseriesKeyValue.
BSONObj intermediateObj =
bucketsIndex.removeFields(StringDataSet{kOriginalSpecFieldName});
- return intermediateObj.addFields(BSON(kKeyFieldName << timeseriesKeyValue.get()),
+ return intermediateObj.addFields(BSON(kKeyFieldName << timeseriesKeyValue.value()),
StringDataSet{kKeyFieldName});
}
}
diff --git a/src/mongo/db/transaction_api_test.cpp b/src/mongo/db/transaction_api_test.cpp
index 90a8daf4366..f2486a72bfa 100644
--- a/src/mongo/db/transaction_api_test.cpp
+++ b/src/mongo/db/transaction_api_test.cpp
@@ -258,11 +258,11 @@ void assertSessionIdMetadata(BSONObj obj,
void assertAPIParameters(BSONObj obj, boost::optional<APIParameters> expectedParams) {
if (expectedParams) {
ASSERT_EQ(obj[APIParametersFromClient::kApiVersionFieldName].String(),
- expectedParams->getAPIVersion().get());
+ expectedParams->getAPIVersion().value());
ASSERT_EQ(obj[APIParametersFromClient::kApiStrictFieldName].Bool(),
- expectedParams->getAPIStrict().get());
+ expectedParams->getAPIStrict().value());
ASSERT_EQ(obj[APIParametersFromClient::kApiDeprecationErrorsFieldName].Bool(),
- expectedParams->getAPIDeprecationErrors().get());
+ expectedParams->getAPIDeprecationErrors().value());
} else {
ASSERT(obj[APIParametersFromClient::kApiVersionFieldName].eoo());
ASSERT(obj[APIParametersFromClient::kApiStrictFieldName].eoo());
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index 5d1910b7ffc..45de7032391 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -596,7 +596,7 @@ TransactionParticipant::getOldestActiveTimestamp(Timestamp stableTimestamp) {
boost::optional<Timestamp> oldestTxnTimestamp;
auto cursor = collection->getCursor(opCtx.get());
while (auto record = cursor->next()) {
- auto doc = record.get().data.toBson();
+ auto doc = record.value().data.toBson();
auto txnRecord =
SessionTxnRecord::parse(IDLParserContext("parse oldest active txn record"), doc);
if (txnRecord.getState() != DurableTxnStateEnum::kPrepared &&
@@ -1615,7 +1615,7 @@ Timestamp TransactionParticipant::Participant::prepareTransaction(
continue;
}
- transactionOperationUuids.insert(transactionOp.getUuid().get());
+ transactionOperationUuids.insert(transactionOp.getUuid().value());
}
auto catalog = CollectionCatalog::get(opCtx);
for (const auto& uuid : transactionOperationUuids) {
diff --git a/src/mongo/db/transaction_participant_test.cpp b/src/mongo/db/transaction_participant_test.cpp
index 60d0df6e6f2..d0babd17170 100644
--- a/src/mongo/db/transaction_participant_test.cpp
+++ b/src/mongo/db/transaction_participant_test.cpp
@@ -1527,9 +1527,9 @@ TEST_F(TxnParticipantTest, CorrectlyStashAPIParameters) {
auto txnParticipant = TransactionParticipant::get(opCtx());
auto defaultAPIParams = txnParticipant.getAPIParameters(opCtx());
- ASSERT_FALSE(defaultAPIParams.getAPIVersion().is_initialized());
- ASSERT_FALSE(defaultAPIParams.getAPIStrict().is_initialized());
- ASSERT_FALSE(defaultAPIParams.getAPIDeprecationErrors().is_initialized());
+ ASSERT_FALSE(defaultAPIParams.getAPIVersion().has_value());
+ ASSERT_FALSE(defaultAPIParams.getAPIStrict().has_value());
+ ASSERT_FALSE(defaultAPIParams.getAPIDeprecationErrors().has_value());
txnParticipant.unstashTransactionResources(opCtx(), "insert");
@@ -3146,8 +3146,8 @@ TEST_F(TransactionsMetricsTest, UseAPIParametersOnOpCtxForARetryableWrite) {
// retryable write.
APIParameters storedAPIParameters = txnParticipant.getAPIParameters(opCtx());
ASSERT_EQ("3", *storedAPIParameters.getAPIVersion());
- ASSERT_FALSE(storedAPIParameters.getAPIStrict().is_initialized());
- ASSERT_FALSE(storedAPIParameters.getAPIDeprecationErrors().is_initialized());
+ ASSERT_FALSE(storedAPIParameters.getAPIStrict().has_value());
+ ASSERT_FALSE(storedAPIParameters.getAPIDeprecationErrors().has_value());
// Stash secondAPIParameters.
txnParticipant.stashTransactionResources(opCtx());
@@ -3160,8 +3160,8 @@ TEST_F(TransactionsMetricsTest, UseAPIParametersOnOpCtxForARetryableWrite) {
// parameters in TxnResources.
storedAPIParameters = txnParticipant.getAPIParameters(opCtx());
ASSERT_EQ("4", *storedAPIParameters.getAPIVersion());
- ASSERT_FALSE(storedAPIParameters.getAPIStrict().is_initialized());
- ASSERT_FALSE(storedAPIParameters.getAPIDeprecationErrors().is_initialized());
+ ASSERT_FALSE(storedAPIParameters.getAPIStrict().has_value());
+ ASSERT_FALSE(storedAPIParameters.getAPIDeprecationErrors().has_value());
}
namespace {
@@ -4481,7 +4481,7 @@ TEST_F(TxnParticipantTest, OldestActiveTransactionTimestamp) {
ASSERT(coll);
auto cursor = coll->getCursor(opCtx());
while (auto record = cursor->next()) {
- auto bson = record.get().data.toBson();
+ auto bson = record.value().data.toBson();
if (bson["state"].String() != "prepared"_sd) {
continue;
}
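The test hunks above also retire the second legacy spelling: `is_initialized()`, the old Boost query for engagement, becomes `has_value()`, which recent Boost releases provide for parity with `std::optional`. The two are equivalent, as is the contextual bool conversion; a small sketch (illustration only):

```cpp
#include <boost/optional.hpp>
#include <cassert>

// Three equivalent ways to ask whether a boost::optional is engaged.
// The patch standardizes on has_value(), mirroring std::optional.
int main() {
    boost::optional<int> opt;
    assert(!opt.is_initialized());    // legacy spelling
    assert(!opt.has_value());         // std::optional-style spelling
    assert(!static_cast<bool>(opt));  // contextual bool conversion

    opt = 7;
    assert(opt.is_initialized() && opt.has_value() && opt);
    return 0;
}
```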
diff --git a/src/mongo/db/update/push_node.cpp b/src/mongo/db/update/push_node.cpp
index 3a4e7c1705c..59146dd8c31 100644
--- a/src/mongo/db/update/push_node.cpp
+++ b/src/mongo/db/update/push_node.cpp
@@ -179,9 +179,9 @@ BSONObj PushNode::operatorValue() const {
eachBuilder << value;
}
if (_slice)
- subBuilder << "$slice" << _slice.get();
+ subBuilder << "$slice" << _slice.value();
if (_position)
- subBuilder << "$position" << _position.get();
+ subBuilder << "$position" << _position.value();
if (_sort) {
// The sort pattern is stored in a dummy enclosing object that we must unwrap.
if (_sort->useWholeValue)
@@ -214,15 +214,16 @@ ModifierNode::ModifyResult PushNode::insertElementsWithPosition(
if (arraySize == 0) {
invariant(array->pushBack(firstElementToInsert));
result = ModifyResult::kNormalUpdate;
- } else if (!position || position.get() > arraySize) {
+ } else if (!position || position.value() > arraySize) {
invariant(array->pushBack(firstElementToInsert));
result = ModifyResult::kArrayAppendUpdate;
- } else if (position.get() > 0) {
- auto insertAfter = getNthChild(*array, position.get() - 1);
+ } else if (position.value() > 0) {
+ auto insertAfter = getNthChild(*array, position.value() - 1);
invariant(insertAfter.addSiblingRight(firstElementToInsert));
result = ModifyResult::kNormalUpdate;
- } else if (position.get() < 0 && safeApproximateAbs(position.get()) < arraySize) {
- auto insertAfter = getNthChild(*array, arraySize - safeApproximateAbs(position.get()) - 1);
+ } else if (position.value() < 0 && safeApproximateAbs(position.value()) < arraySize) {
+ auto insertAfter =
+ getNthChild(*array, arraySize - safeApproximateAbs(position.value()) - 1);
invariant(insertAfter.addSiblingRight(firstElementToInsert));
result = ModifyResult::kNormalUpdate;
} else {
@@ -270,11 +271,11 @@ ModifierNode::ModifyResult PushNode::performPush(mutablebson::Element* element,
}
if (_slice) {
- const auto sliceAbs = safeApproximateAbs(_slice.get());
+ const auto sliceAbs = safeApproximateAbs(_slice.value());
while (static_cast<long long>(countChildren(*element)) > sliceAbs) {
result = ModifyResult::kNormalUpdate;
- if (_slice.get() >= 0) {
+ if (_slice.value() >= 0) {
invariant(element->popBack());
} else {
// A negative value in '_slice' trims the array down to abs(_slice) but removes
diff --git a/src/mongo/db/update/update_driver_test.cpp b/src/mongo/db/update/update_driver_test.cpp
index 86f6ae492bf..ecb93343b4c 100644
--- a/src/mongo/db/update/update_driver_test.cpp
+++ b/src/mongo/db/update/update_driver_test.cpp
@@ -575,7 +575,7 @@ public:
auto parsedFilter = assertGet(MatchExpressionParser::parse(filter, expCtx));
auto expr = assertGet(ExpressionWithPlaceholder::make(std::move(parsedFilter)));
ASSERT(expr->getPlaceholder());
- arrayFilters[expr->getPlaceholder().get()] = std::move(expr);
+ arrayFilters[expr->getPlaceholder().value()] = std::move(expr);
}
_driver->setFromOplogApplication(fromOplog);
diff --git a/src/mongo/db/views/view_catalog_helpers.cpp b/src/mongo/db/views/view_catalog_helpers.cpp
index 018fad8437d..b5567da87ca 100644
--- a/src/mongo/db/views/view_catalog_helpers.cpp
+++ b/src/mongo/db/views/view_catalog_helpers.cpp
@@ -171,7 +171,7 @@ StatusWith<ResolvedView> resolveView(OperationContext* opCtx,
return StatusWith<ResolvedView>(
{*resolvedNss,
std::move(resolvedPipeline),
- collation ? std::move(collation.get()) : CollationSpec::kSimpleSpec,
+ collation ? std::move(collation.value()) : CollationSpec::kSimpleSpec,
tsOptions,
mixedData});
}
@@ -216,7 +216,7 @@ StatusWith<ResolvedView> resolveView(OperationContext* opCtx,
curOp->debug().addResolvedViews(dependencyChain, resolvedPipeline);
return StatusWith<ResolvedView>(
- {*resolvedNss, std::move(resolvedPipeline), std::move(collation.get())});
+ {*resolvedNss, std::move(resolvedPipeline), std::move(collation.value())});
}
}
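One subtlety in the `resolveView` hunks: `std::move(collation.value())` moves the *contained* object out of the optional, not the optional itself. On an lvalue optional, `value()` returns an lvalue reference, so wrapping it in `std::move` moves the held `BSONObj`; the optional stays engaged, now holding a moved-from value, which is fine for a local about to go out of scope. A standalone sketch of the behavior, assuming `std::string` as a stand-in payload:

```cpp
#include <boost/optional.hpp>
#include <cassert>
#include <string>
#include <utility>

// std::move(opt.value()) moves the contained object out; the optional
// remains engaged afterwards, holding a valid but unspecified value.
int main() {
    boost::optional<std::string> collation = std::string("{locale: 'fr'}");
    std::string taken =
        collation ? std::move(collation.value()) : std::string("{}");
    assert(taken == "{locale: 'fr'}");
    assert(collation.has_value());  // still engaged, contents unspecified
    return 0;
}
```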
diff --git a/src/mongo/db/views/view_catalog_test.cpp b/src/mongo/db/views/view_catalog_test.cpp
index 841bdd61bb8..19d5880748a 100644
--- a/src/mongo/db/views/view_catalog_test.cpp
+++ b/src/mongo/db/views/view_catalog_test.cpp
@@ -520,7 +520,7 @@ TEST_F(ViewCatalogFixture, LookupRIDExistingView) {
ASSERT_OK(createView(operationContext(), viewName, viewOn, emptyPipeline, emptyCollation));
auto resourceID = ResourceId(RESOURCE_COLLECTION, "db.view"_sd);
- ASSERT(getCatalog()->lookupResourceName(resourceID).get() == "db.view");
+ ASSERT(getCatalog()->lookupResourceName(resourceID).value() == "db.view");
}
TEST_F(ViewCatalogFixture, LookupRIDExistingViewRollback) {
@@ -566,7 +566,7 @@ TEST_F(ViewCatalogFixture, LookupRIDAfterDropRollback) {
WriteUnitOfWork wunit(operationContext());
ASSERT_OK(createView(operationContext(), viewName, viewOn, emptyPipeline, emptyCollation));
wunit.commit();
- ASSERT(getCatalog()->lookupResourceName(resourceID).get() == viewName.ns());
+ ASSERT(getCatalog()->lookupResourceName(resourceID).value() == viewName.ns());
}
{
@@ -582,7 +582,7 @@ TEST_F(ViewCatalogFixture, LookupRIDAfterDropRollback) {
// Do not commit, rollback.
}
// Make sure drop was rolled back and view is still in catalog.
- ASSERT(getCatalog()->lookupResourceName(resourceID).get() == viewName.ns());
+ ASSERT(getCatalog()->lookupResourceName(resourceID).value() == viewName.ns());
}
TEST_F(ViewCatalogFixture, LookupRIDAfterModify) {
@@ -592,7 +592,7 @@ TEST_F(ViewCatalogFixture, LookupRIDAfterModify) {
auto resourceID = ResourceId(RESOURCE_COLLECTION, "db.view"_sd);
ASSERT_OK(createView(operationContext(), viewName, viewOn, emptyPipeline, emptyCollation));
ASSERT_OK(modifyView(operationContext(), viewName, viewOn, emptyPipeline));
- ASSERT(getCatalog()->lookupResourceName(resourceID).get() == viewName.ns());
+ ASSERT(getCatalog()->lookupResourceName(resourceID).value() == viewName.ns());
}
TEST_F(ViewCatalogFixture, LookupRIDAfterModifyRollback) {
@@ -604,7 +604,7 @@ TEST_F(ViewCatalogFixture, LookupRIDAfterModifyRollback) {
WriteUnitOfWork wunit(operationContext());
ASSERT_OK(createView(operationContext(), viewName, viewOn, emptyPipeline, emptyCollation));
wunit.commit();
- ASSERT(getCatalog()->lookupResourceName(resourceID).get() == viewName.ns());
+ ASSERT(getCatalog()->lookupResourceName(resourceID).value() == viewName.ns());
}
{
@@ -621,11 +621,11 @@ TEST_F(ViewCatalogFixture, LookupRIDAfterModifyRollback) {
viewOn,
emptyPipeline,
view_catalog_helpers::validatePipeline));
- ASSERT(getCatalog()->lookupResourceName(resourceID).get() == viewName.ns());
+ ASSERT(getCatalog()->lookupResourceName(resourceID).value() == viewName.ns());
// Do not commit, rollback.
}
// Make sure view resource is still available after rollback.
- ASSERT(getCatalog()->lookupResourceName(resourceID).get() == viewName.ns());
+ ASSERT(getCatalog()->lookupResourceName(resourceID).value() == viewName.ns());
}
TEST_F(ViewCatalogFixture, CreateViewThenDropAndLookup) {
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 5db53b05b8d..26ab912dcff 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -318,7 +318,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanAddsActiveCacheEntries) {
auto entry = assertGet(cache->getEntry(planCacheKey));
size_t works = 1U;
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), works);
+ ASSERT_EQ(entry->works.value(), works);
const size_t kExpectedNumWorks = 10;
for (int i = 0; i < std::ceil(std::log(kExpectedNumWorks) / std::log(2)); ++i) {
@@ -333,7 +333,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanAddsActiveCacheEntries) {
// The works on the cache entry should have doubled.
entry = assertGet(cache->getEntry(planCacheKey));
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), works);
+ ASSERT_EQ(entry->works.value(), works);
}
// Run another query which takes less time, and be sure an active entry is created.
@@ -346,7 +346,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanAddsActiveCacheEntries) {
entry = assertGet(cache->getEntry(planCacheKey));
// This query will match {a: 6} through {a: 9} (4 works), plus one for EOF = 5 works.
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 5U);
+ ASSERT_EQ(entry->works.value(), 5U);
}
@@ -389,7 +389,7 @@ TEST_F(QueryStageCachedPlan, DeactivatesEntriesOnReplan) {
auto entry = assertGet(cache->getEntry(planCacheKey));
size_t works = 1U;
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), works);
+ ASSERT_EQ(entry->works.value(), works);
// Run another query which takes long enough to evict the active cache entry. The current
// cache entry's works value is a very low number. When replanning is triggered, the cache
@@ -402,7 +402,7 @@ TEST_F(QueryStageCachedPlan, DeactivatesEntriesOnReplan) {
ASSERT_EQ(cache->get(planCacheKey).state, PlanCache::CacheEntryState::kPresentInactive);
entry = assertGet(cache->getEntry(planCacheKey));
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 2U);
+ ASSERT_EQ(entry->works.value(), 2U);
// Again, force replanning. This time run the initial query which finds no results. The multi
// planner will choose a plan with works value lower than the existing inactive
@@ -412,7 +412,7 @@ TEST_F(QueryStageCachedPlan, DeactivatesEntriesOnReplan) {
ASSERT_EQ(cache->get(planCacheKey).state, PlanCache::CacheEntryState::kPresentActive);
entry = assertGet(cache->getEntry(planCacheKey));
ASSERT_TRUE(entry->works);
- ASSERT_EQ(entry->works.get(), 1U);
+ ASSERT_EQ(entry->works.value(), 1U);
}
TEST_F(QueryStageCachedPlan, EntriesAreNotDeactivatedWhenInactiveEntriesDisabled) {
diff --git a/src/mongo/embedded/index_builds_coordinator_embedded.cpp b/src/mongo/embedded/index_builds_coordinator_embedded.cpp
index a92249c1a35..be0ff00baed 100644
--- a/src/mongo/embedded/index_builds_coordinator_embedded.cpp
+++ b/src/mongo/embedded/index_builds_coordinator_embedded.cpp
@@ -64,7 +64,7 @@ IndexBuildsCoordinatorEmbedded::startIndexBuild(OperationContext* opCtx,
invariant(statusWithOptionalResult.getValue()->isReady());
// The requested index specs are already built or are being built. Return success early
// (this is compatible with v4.0 behavior).
- return statusWithOptionalResult.getValue().get();
+ return statusWithOptionalResult.getValue().value();
}
auto status = _setUpIndexBuild(opCtx, buildUUID, Timestamp(), indexBuildOptions);
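Note that the rename touches only `boost::optional`: in chains like `statusWithOptionalResult.getValue().get()` above, `StatusWith<T>::getValue()` is MongoDB's own accessor and keeps its name, so only the trailing optional accessor changes. A sketch with a hypothetical `StatusWithLike` stand-in (the real class is `mongo::StatusWith` in `mongo/base/status_with.h`):

```cpp
#include <boost/optional.hpp>
#include <cassert>

// Hypothetical stand-in for mongo::StatusWith, just to show the chain.
template <typename T>
struct StatusWithLike {
    T v;
    const T& getValue() const { return v; }  // name unchanged by the patch
};

int main() {
    StatusWithLike<boost::optional<int>> sw{boost::optional<int>(5)};
    // Before: sw.getValue().get();  after: only the optional accessor renames.
    assert(sw.getValue().value() == 5);
    return 0;
}
```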
diff --git a/src/mongo/executor/network_interface_tl.cpp b/src/mongo/executor/network_interface_tl.cpp
index e43939c5a13..5f70bf3f8ac 100644
--- a/src/mongo/executor/network_interface_tl.cpp
+++ b/src/mongo/executor/network_interface_tl.cpp
@@ -183,7 +183,7 @@ NetworkInterfaceTL::NetworkInterfaceTL(std::string instanceName,
#ifdef MONGO_CONFIG_SSL
if (_connPoolOpts.transientSSLParams) {
auto statusOrContext =
- _tl->createTransientSSLContext(_connPoolOpts.transientSSLParams.get());
+ _tl->createTransientSSLContext(_connPoolOpts.transientSSLParams.value());
uassertStatusOK(statusOrContext.getStatus());
transientSSLContext = std::move(statusOrContext.getValue());
}
@@ -832,7 +832,7 @@ void NetworkInterfaceTL::RequestManager::trySend(
"requestId"_attr = cmdState->requestOnAny.id,
"target"_attr = cmdState->requestOnAny.target[idx]);
- auto request = &requestState->request.get();
+ auto request = &requestState->request.value();
if (requestState->isHedge) {
invariant(request->options.isHedgeEnabled);
@@ -1164,11 +1164,11 @@ void NetworkInterfaceTL::cancelCommand(const TaskExecutor::CallbackHandle& cbHan
Status NetworkInterfaceTL::_killOperation(std::shared_ptr<RequestState> requestStateToKill) try {
auto [target, sslMode] = [&] {
invariant(requestStateToKill->request);
- auto request = requestStateToKill->request.get();
+ auto request = requestStateToKill->request.value();
return std::make_pair(request.target, request.sslMode);
}();
auto cmdStateToKill = requestStateToKill->cmdState;
- auto operationKey = cmdStateToKill->operationKey.get();
+ auto operationKey = cmdStateToKill->operationKey.value();
// Make a request state for _killOperations.
executor::RemoteCommandRequest killOpRequest(
diff --git a/src/mongo/executor/remote_command_request.cpp b/src/mongo/executor/remote_command_request.cpp
index 0d1a4750889..524541d9c59 100644
--- a/src/mongo/executor/remote_command_request.cpp
+++ b/src/mongo/executor/remote_command_request.cpp
@@ -82,7 +82,7 @@ RemoteCommandRequestBase::RemoteCommandRequestBase(RequestId requestId,
if (options.isHedgeEnabled) {
operationKey.emplace(UUID::gen());
- cmdObj = cmdObj.addField(BSON("clientOperationKey" << operationKey.get()).firstElement());
+ cmdObj = cmdObj.addField(BSON("clientOperationKey" << operationKey.value()).firstElement());
}
if (opCtx && APIParameters::get(opCtx).getParamsPassed()) {
@@ -172,7 +172,7 @@ std::string RemoteCommandRequestImpl<T>::toString() const {
if (options.isHedgeEnabled) {
invariant(operationKey);
out << " options.hedgeCount: " << options.hedgeCount;
- out << " operationKey: " << operationKey.get();
+ out << " operationKey: " << operationKey.value();
}
out << " cmd:" << cmdObj.toString();
diff --git a/src/mongo/executor/remote_command_response.cpp b/src/mongo/executor/remote_command_response.cpp
index 64e26295c87..8a751665a55 100644
--- a/src/mongo/executor/remote_command_response.cpp
+++ b/src/mongo/executor/remote_command_response.cpp
@@ -165,7 +165,7 @@ std::string RemoteCommandOnAnyResponse::toString() const {
data.toString(),
target ? StringData(target->toString()) : "[none]"_sd,
status.toString(),
- elapsed ? StringData(elapsed.get().toString()) : "n/a"_sd,
+ elapsed ? StringData(elapsed.value().toString()) : "n/a"_sd,
moreToCome);
}
diff --git a/src/mongo/executor/task_executor_cursor_test.cpp b/src/mongo/executor/task_executor_cursor_test.cpp
index f28bec59d97..1528f36a633 100644
--- a/src/mongo/executor/task_executor_cursor_test.cpp
+++ b/src/mongo/executor/task_executor_cursor_test.cpp
@@ -171,11 +171,11 @@ TEST_F(TaskExecutorCursorFixture, SingleBatchWorks) {
ASSERT_BSONOBJ_EQ(findCmd, scheduleSuccessfulCursorResponse("firstBatch", 1, 2, cursorId));
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 1);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 1);
ASSERT_FALSE(hasReadyRequests());
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 2);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 2);
ASSERT_FALSE(tec.getNext(opCtx.get()));
}
@@ -194,9 +194,9 @@ TEST_F(TaskExecutorCursorFixture, MultipleCursorsSingleBatchSucceeds) {
ASSERT_BSONOBJ_EQ(aggCmd, scheduleSuccessfulMultiCursorResponse("firstBatch", 1, 2, {0, 0}));
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 1);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 1);
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 2);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 2);
ASSERT_FALSE(tec.getNext(opCtx.get()));
@@ -204,8 +204,8 @@ TEST_F(TaskExecutorCursorFixture, MultipleCursorsSingleBatchSucceeds) {
ASSERT_EQUALS(cursorVec.size(), 1);
auto secondCursor = std::move(cursorVec[0]);
- ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).get()["x"].Int(), 2);
- ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).get()["x"].Int(), 4);
+ ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).value()["x"].Int(), 2);
+ ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).value()["x"].Int(), 4);
ASSERT_FALSE(hasReadyRequests());
ASSERT_FALSE(secondCursor.getNext(opCtx.get()));
@@ -223,9 +223,9 @@ TEST_F(TaskExecutorCursorFixture, MultipleCursorsGetMoreWorks) {
ASSERT_BSONOBJ_EQ(aggCmd, scheduleSuccessfulMultiCursorResponse("firstBatch", 1, 2, cursorIds));
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 1);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 1);
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 2);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 2);
auto cursorVec = tec.releaseAdditionalCursors();
ASSERT_EQUALS(cursorVec.size(), 1);
@@ -245,8 +245,8 @@ TEST_F(TaskExecutorCursorFixture, MultipleCursorsGetMoreWorks) {
// Repeat for second cursor.
auto secondCursor = std::move(cursorVec[0]);
- ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).get()["x"].Int(), 2);
- ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).get()["x"].Int(), 4);
+ ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).value()["x"].Int(), 2);
+ ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).value()["x"].Int(), 4);
ASSERT_THROWS_CODE(opCtx->runWithDeadline(Date_t::now() + Milliseconds(100),
ErrorCodes::ExceededTimeLimit,
@@ -258,20 +258,20 @@ TEST_F(TaskExecutorCursorFixture, MultipleCursorsGetMoreWorks) {
<< "test"),
scheduleSuccessfulCursorResponse("nextBatch", 6, 8, cursorIds[1]));
// Read second batch on both cursors.
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 3);
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 4);
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 5);
- ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).get()["x"].Int(), 6);
- ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).get()["x"].Int(), 7);
- ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).get()["x"].Int(), 8);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 3);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 4);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 5);
+ ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).value()["x"].Int(), 6);
+ ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).value()["x"].Int(), 7);
+ ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).value()["x"].Int(), 8);
// Schedule EOF on both cursors.
scheduleSuccessfulCursorResponse("nextBatch", 6, 6, 0);
scheduleSuccessfulCursorResponse("nextBatch", 12, 12, 0);
// Read final document.
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 6);
- ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).get()["x"].Int(), 12);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 6);
+ ASSERT_EQUALS(secondCursor.getNext(opCtx.get()).value()["x"].Int(), 12);
// Shouldn't have any more requests, both cursors are closed.
ASSERT_FALSE(hasReadyRequests());
@@ -353,11 +353,11 @@ TEST_F(TaskExecutorCursorFixture, MultipleBatchesWorks) {
scheduleSuccessfulCursorResponse("firstBatch", 1, 2, cursorId);
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 1);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 1);
ASSERT(hasReadyRequests());
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 2);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 2);
// If we try to getNext() at this point, we are interruptible and can time out
ASSERT_THROWS_CODE(opCtx->runWithDeadline(Date_t::now() + Milliseconds(100),
@@ -372,9 +372,9 @@ TEST_F(TaskExecutorCursorFixture, MultipleBatchesWorks) {
<< "batchSize" << 3),
scheduleSuccessfulCursorResponse("nextBatch", 3, 5, cursorId));
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 3);
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 4);
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 5);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 3);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 4);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 5);
cursorId = 0;
scheduleSuccessfulCursorResponse("nextBatch", 6, 6, cursorId);
@@ -382,7 +382,7 @@ TEST_F(TaskExecutorCursorFixture, MultipleBatchesWorks) {
// We don't issue extra getmores after returning a 0 cursor id
ASSERT_FALSE(hasReadyRequests());
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 6);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 6);
ASSERT_FALSE(tec.getNext(opCtx.get()));
}
@@ -423,7 +423,7 @@ TEST_F(TaskExecutorCursorFixture, EmptyFirstBatch) {
});
// Verify that the first doc is the doc from the second batch.
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 1);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 1);
th.join();
}
@@ -451,7 +451,7 @@ TEST_F(TaskExecutorCursorFixture, EmptyNonInitialBatch) {
// Schedule a cursor response with a non-empty "firstBatch".
ASSERT_BSONOBJ_EQ(findCmd, scheduleSuccessfulCursorResponse("firstBatch", 1, 1, cursorId));
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 1);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 1);
// Schedule two consecutive cursor responses with empty "nextBatch". Use end < start so
// we don't append any doc to "nextBatch".
@@ -478,7 +478,7 @@ TEST_F(TaskExecutorCursorFixture, EmptyNonInitialBatch) {
});
// Verify that the next doc is the doc from the fourth batch.
- ASSERT_EQUALS(tec.getNext(opCtx.get()).get()["x"].Int(), 2);
+ ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 2);
th.join();
}
@@ -510,7 +510,7 @@ TEST_F(TaskExecutorCursorFixture, LsidIsPassed) {
<< "batchSize" << 1 << "lsid" << lsid.toBSON()),
scheduleSuccessfulCursorResponse("firstBatch", 1, 1, cursorId));
- ASSERT_EQUALS(tec->getNext(opCtx.get()).get()["x"].Int(), 1);
+ ASSERT_EQUALS(tec->getNext(opCtx.get()).value()["x"].Int(), 1);
// lsid in the getmore
ASSERT_BSONOBJ_EQ(BSON("getMore" << 1LL << "collection"
diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp
index e7e2825fd32..5be6b56a2cb 100644
--- a/src/mongo/executor/thread_pool_task_executor.cpp
+++ b/src/mongo/executor/thread_pool_task_executor.cpp
@@ -725,7 +725,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleExhaust
stdx::unique_lock<Latch> lk(_mutex);
if (_inShutdown_inlock() || cbState->exhaustErased.load()) {
if (cbState->exhaustIter) {
- _poolInProgressQueue.erase(cbState->exhaustIter.get());
+ _poolInProgressQueue.erase(cbState->exhaustIter.value());
cbState->exhaustIter = boost::none;
}
return;
@@ -740,7 +740,7 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleExhaust
cbState->exhaustErased.store(1);
if (cbState->exhaustIter) {
- _poolInProgressQueue.erase(cbState->exhaustIter.get());
+ _poolInProgressQueue.erase(cbState->exhaustIter.value());
cbState->exhaustIter = boost::none;
}
@@ -781,7 +781,7 @@ void ThreadPoolTaskExecutor::scheduleExhaustIntoPool_inlock(std::shared_ptr<Call
stdx::unique_lock<Latch> lk) {
_poolInProgressQueue.push_back(cbState);
cbState->exhaustIter = --_poolInProgressQueue.end();
- auto expectedExhaustIter = cbState->exhaustIter.get();
+ auto expectedExhaustIter = cbState->exhaustIter.value();
lk.unlock();
if (cbState->baton) {
@@ -853,7 +853,7 @@ void ThreadPoolTaskExecutor::runCallbackExhaust(std::shared_ptr<CallbackState> c
// 'expectedExhaustIter' so that we can still remove this task from the 'poolInProgressQueue' if
// this happens, but we do not want to reset the 'exhaustIter' value in this case.
if (cbState->exhaustIter) {
- if (cbState->exhaustIter.get() == expectedExhaustIter) {
+ if (cbState->exhaustIter.value() == expectedExhaustIter) {
cbState->exhaustIter = boost::none;
}
_poolInProgressQueue.erase(expectedExhaustIter);
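The executor hunks above use an optional to hold a list iterator: erase through `value()` under a guard, then assign `boost::none` to disengage so the stale iterator can never be used twice. A sketch of that idiom under the same shape (illustration only):

```cpp
#include <boost/optional.hpp>
#include <cassert>
#include <list>

// An optional<iterator> records a task's position in a queue; erase via
// value(), then assign boost::none to disengage.
int main() {
    std::list<int> queue{1, 2, 3};
    boost::optional<std::list<int>::iterator> pos;

    queue.push_back(4);
    pos = --queue.end();  // remember where we inserted

    if (pos) {
        queue.erase(pos.value());  // safe: guarded by the check above
        pos = boost::none;         // disengage so we never erase twice
    }
    assert(queue.size() == 3 && !pos.has_value());
    return 0;
}
```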
diff --git a/src/mongo/idl/idl_test.cpp b/src/mongo/idl/idl_test.cpp
index a6422a8e4ea..823ea92e70a 100644
--- a/src/mongo/idl/idl_test.cpp
+++ b/src/mongo/idl/idl_test.cpp
@@ -680,7 +680,7 @@ TEST(IDLVariantTests, TestVariantOptional) {
// The optional key is absent.
auto parsed = One_variant_optional::parse({"root"}, BSONObj());
- ASSERT_FALSE(parsed.getValue().is_initialized());
+ ASSERT_FALSE(parsed.getValue().has_value());
ASSERT_BSONOBJ_EQ(BSONObj(), parsed.toBSON());
}
@@ -1302,8 +1302,8 @@ TEST(IDLFieldTests, TestOptionalFields) {
assert_same_types<decltype(testStruct.getField5()),
boost::optional<std::array<std::uint8_t, 16>>>();
- ASSERT_EQUALS("Foo", testStruct.getField1().get());
- ASSERT_FALSE(testStruct.getField2().is_initialized());
+ ASSERT_EQUALS("Foo", testStruct.getField1().value());
+ ASSERT_FALSE(testStruct.getField2().has_value());
}
// Positive: Serialize struct with only string field
@@ -1324,8 +1324,8 @@ TEST(IDLFieldTests, TestOptionalFields) {
{
auto testDoc = BSON("field2" << 123);
auto testStruct = Optional_field::parse(ctxt, testDoc);
- ASSERT_FALSE(testStruct.getField1().is_initialized());
- ASSERT_EQUALS(123, testStruct.getField2().get());
+ ASSERT_FALSE(testStruct.getField1().has_value());
+ ASSERT_EQUALS(123, testStruct.getField2().value());
}
// Positive: Serialize struct with only int field
@@ -1355,11 +1355,11 @@ TEST(IDLFieldTests, TestAlwaysSerializeFields) {
assert_same_types<decltype(testStruct.getField4()), const boost::optional<mongo::BSONObj>&>();
assert_same_types<decltype(testStruct.getField5()), const boost::optional<mongo::BSONObj>&>();
- ASSERT_EQUALS("Foo", testStruct.getField1().get());
- ASSERT_FALSE(testStruct.getField2().is_initialized());
- ASSERT_BSONOBJ_EQ(BSON("a" << 1234), testStruct.getField3().get());
- ASSERT_FALSE(testStruct.getField4().is_initialized());
- ASSERT_FALSE(testStruct.getField5().is_initialized());
+ ASSERT_EQUALS("Foo", testStruct.getField1().value());
+ ASSERT_FALSE(testStruct.getField2().has_value());
+ ASSERT_BSONOBJ_EQ(BSON("a" << 1234), testStruct.getField3().value());
+ ASSERT_FALSE(testStruct.getField4().has_value());
+ ASSERT_FALSE(testStruct.getField5().has_value());
BSONObjBuilder builder;
testStruct.serialize(&builder);
@@ -1378,11 +1378,11 @@ void TestWeakType(TestT test_value) {
<< "field4" << test_value << "field5" << test_value);
auto testStruct = Optional_field::parse(ctxt, testDoc);
- ASSERT_FALSE(testStruct.getField1().is_initialized());
- ASSERT_FALSE(testStruct.getField2().is_initialized());
- ASSERT_FALSE(testStruct.getField3().is_initialized());
- ASSERT_FALSE(testStruct.getField4().is_initialized());
- ASSERT_FALSE(testStruct.getField5().is_initialized());
+ ASSERT_FALSE(testStruct.getField1().has_value());
+ ASSERT_FALSE(testStruct.getField2().has_value());
+ ASSERT_FALSE(testStruct.getField3().has_value());
+ ASSERT_FALSE(testStruct.getField4().has_value());
+ ASSERT_FALSE(testStruct.getField5().has_value());
}
// Positive: struct strict, and optional field works
@@ -1546,11 +1546,11 @@ TEST(IDLArrayTests, TestSimpleOptionalArrays) {
const boost::optional<std::vector<std::array<std::uint8_t, 16>>>&>();
std::vector<StringData> field1{"Foo", "Bar", "???"};
- ASSERT_TRUE(field1 == testStruct.getField1().get());
+ ASSERT_TRUE(field1 == testStruct.getField1().value());
std::vector<std::int32_t> field2{1, 2, 3};
- ASSERT_TRUE(field2 == testStruct.getField2().get());
+ ASSERT_TRUE(field2 == testStruct.getField2().value());
std::vector<double> field3{1.2, 3.4, 5.6};
- ASSERT_TRUE(field3 == testStruct.getField3().get());
+ ASSERT_TRUE(field3 == testStruct.getField3().value());
// Positive: Test we can roundtrip from the just parsed document
{
@@ -1731,9 +1731,9 @@ TEST(IDLArrayTests, TestArraysOfComplexTypes) {
ASSERT_EQUALS(testStruct.getField6().size(), 2u);
ASSERT_EQUALS(testStruct.getField6()[0].getValue(), "hello");
ASSERT_EQUALS(testStruct.getField6()[1].getValue(), "world");
- ASSERT_EQUALS(testStruct.getField6o().get().size(), 2u);
- ASSERT_EQUALS(testStruct.getField6o().get()[0].getValue(), "goodbye");
- ASSERT_EQUALS(testStruct.getField6o().get()[1].getValue(), "world");
+ ASSERT_EQUALS(testStruct.getField6o().value().size(), 2u);
+ ASSERT_EQUALS(testStruct.getField6o().value()[0].getValue(), "goodbye");
+ ASSERT_EQUALS(testStruct.getField6o().value()[1].getValue(), "world");
}
template <typename ParserT, BinDataType bindata_type>
@@ -2475,7 +2475,7 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestNSS) {
auto testStruct = BasicConcatenateWithDbOrUUIDCommand::parse(ctxt, makeOMR(testDoc));
ASSERT_EQUALS(testStruct.getField1(), 3);
ASSERT_EQUALS(testStruct.getField2(), "five");
- ASSERT_EQUALS(testStruct.getNamespaceOrUUID().nss().get(), NamespaceString("db.coll1"));
+ ASSERT_EQUALS(testStruct.getNamespaceOrUUID().nss().value(), NamespaceString("db.coll1"));
assert_same_types<decltype(testStruct.getNamespaceOrUUID()), const NamespaceStringOrUUID&>();
@@ -2521,7 +2521,7 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestNSS_WithTenant) {
const auto kTenantId = TenantId(OID::gen());
auto testStruct =
BasicConcatenateWithDbOrUUIDCommand::parse(ctxt, makeOMRWithTenant(testDoc, kTenantId));
- ASSERT_EQUALS(testStruct.getNamespaceOrUUID().nss().get(),
+ ASSERT_EQUALS(testStruct.getNamespaceOrUUID().nss().value(),
NamespaceString(kTenantId, "db.coll1"));
assert_same_types<decltype(testStruct.getNamespaceOrUUID()), const NamespaceStringOrUUID&>();
@@ -2545,7 +2545,7 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestUUID) {
auto testStruct = BasicConcatenateWithDbOrUUIDCommand::parse(ctxt, makeOMR(testDoc));
ASSERT_EQUALS(testStruct.getField1(), 3);
ASSERT_EQUALS(testStruct.getField2(), "five");
- ASSERT_EQUALS(testStruct.getNamespaceOrUUID().uuid().get(), uuid);
+ ASSERT_EQUALS(testStruct.getNamespaceOrUUID().uuid().value(), uuid);
assert_same_types<decltype(testStruct.getNamespaceOrUUID()), const NamespaceStringOrUUID&>();
@@ -2593,7 +2593,7 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestUUID_WithTenant) {
const auto kTenantId = TenantId(OID::gen());
auto testStruct =
BasicConcatenateWithDbOrUUIDCommand::parse(ctxt, makeOMRWithTenant(testDoc, kTenantId));
- ASSERT_EQUALS(testStruct.getNamespaceOrUUID().dbName().get(), DatabaseName(kTenantId, "db"));
+ ASSERT_EQUALS(testStruct.getNamespaceOrUUID().dbName().value(), DatabaseName(kTenantId, "db"));
assert_same_types<decltype(testStruct.getNamespaceOrUUID()), const NamespaceStringOrUUID&>();
@@ -3808,7 +3808,7 @@ TEST(IDLCommand, BasicNamespaceConstGetterCommand_TestNonConstGetterGeneration)
auto testStruct = BasicNamespaceConstGetterCommand::parse(ctxt, makeOMR(testDoc));
ASSERT_EQUALS(testStruct.getField1(), 3);
- ASSERT_EQUALS(testStruct.getNamespaceOrUUID().uuid().get(), uuid);
+ ASSERT_EQUALS(testStruct.getNamespaceOrUUID().uuid().value(), uuid);
// Verify that both const and non-const getters are generated.
assert_same_types<decltype(
diff --git a/src/mongo/logv2/log_detail.cpp b/src/mongo/logv2/log_detail.cpp
index d3004ce85e4..de6fecf0258 100644
--- a/src/mongo/logv2/log_detail.cpp
+++ b/src/mongo/logv2/log_detail.cpp
@@ -222,7 +222,8 @@ void _doLogImpl(int32_t id,
record.attribute_values().insert(
attributes::tenant(),
boost::log::attribute_value(
- new boost::log::attributes::attribute_value_impl<TenantId>(tenant.get())));
+ new boost::log::attributes::attribute_value_impl<TenantId>(
+ tenant.value())));
}
}
diff --git a/src/mongo/logv2/log_util.cpp b/src/mongo/logv2/log_util.cpp
index 32769c0cea0..ba87e3f8a2b 100644
--- a/src/mongo/logv2/log_util.cpp
+++ b/src/mongo/logv2/log_util.cpp
@@ -70,7 +70,7 @@ Status rotateLogs(bool renameFiles,
LOGV2(23166, "Log rotation initiated", "suffix"_attr = suffix, "logType"_attr = logType);
if (logType) {
- auto it = logRotateCallbacks.find(logType.get());
+ auto it = logRotateCallbacks.find(logType.value());
if (it == logRotateCallbacks.end()) {
LOGV2_WARNING(6221500, "Unknown log type for rotate", "logType"_attr = logType);
return Status(ErrorCodes::NoSuchKey, "Unknown log type for rotate");
diff --git a/src/mongo/platform/stack_locator_test.cpp b/src/mongo/platform/stack_locator_test.cpp
index fea80fbb806..2ed4aa95bc2 100644
--- a/src/mongo/platform/stack_locator_test.cpp
+++ b/src/mongo/platform/stack_locator_test.cpp
@@ -49,12 +49,12 @@ TEST(StackLocator, StacLocatorFindsStackOfTestExecutorThread) {
const auto available = locator.available();
ASSERT_TRUE(available);
- ASSERT_TRUE(available.get() > 0);
+ ASSERT_TRUE(available.value() > 0);
const auto size = locator.size();
ASSERT_TRUE(size);
- ASSERT_TRUE(size.get() > 0);
- ASSERT_TRUE(size.get() > available.get());
+ ASSERT_TRUE(size.value() > 0);
+ ASSERT_TRUE(size.value() > available.value());
}
TEST(StackLocator, StacksGrowsDown) {
@@ -101,7 +101,7 @@ struct LocatorThreadHelper {
const StackLocator locator;
located = static_cast<bool>(locator.available());
if (located)
- size = locator.size().get();
+ size = locator.size().value();
}
bool located = false;
diff --git a/src/mongo/rpc/metadata/client_metadata.cpp b/src/mongo/rpc/metadata/client_metadata.cpp
index 626a340284e..354d928fcdd 100644
--- a/src/mongo/rpc/metadata/client_metadata.cpp
+++ b/src/mongo/rpc/metadata/client_metadata.cpp
@@ -440,7 +440,7 @@ const ClientMetadata* ClientMetadata::getForClient(Client* client) noexcept {
// If we haven't finalized, it's still okay to return our existing value.
return nullptr;
}
- return &state.meta.get();
+ return &state.meta.value();
}
const ClientMetadata* ClientMetadata::getForOperation(OperationContext* opCtx) noexcept {
@@ -449,7 +449,7 @@ const ClientMetadata* ClientMetadata::getForOperation(OperationContext* opCtx) n
return nullptr;
}
invariant(state.meta);
- return &state.meta.get();
+ return &state.meta.value();
}
const ClientMetadata* ClientMetadata::get(Client* client) noexcept {
diff --git a/src/mongo/rpc/metadata/client_metadata_test.cpp b/src/mongo/rpc/metadata/client_metadata_test.cpp
index 994cdf489e8..d01507ba66a 100644
--- a/src/mongo/rpc/metadata/client_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/client_metadata_test.cpp
@@ -93,7 +93,7 @@ TEST(ClientMetadataTest, TestLoopbackTest) {
auto obj = builder.obj();
auto swParseStatus = ClientMetadata::parse(obj[kMetadataDoc]);
ASSERT_OK(swParseStatus.getStatus());
- ASSERT_EQUALS("g", swParseStatus.getValue().get().getApplicationName());
+ ASSERT_EQUALS("g", swParseStatus.getValue().value().getApplicationName());
auto pid = ProcessId::getCurrent().toString();
@@ -147,7 +147,7 @@ TEST(ClientMetadataTest, TestLoopbackTest) {
auto swParse = ClientMetadata::parse(obj[kMetadataDoc]);
ASSERT_OK(swParse.getStatus());
- ASSERT_EQUALS("f", swParse.getValue().get().getApplicationName());
+ ASSERT_EQUALS("f", swParse.getValue().value().getApplicationName());
}
}
@@ -312,12 +312,12 @@ TEST(ClientMetadataTest, TestMongoSAppend) {
auto obj = builder.obj();
auto swParseStatus = ClientMetadata::parse(obj[kMetadataDoc]);
ASSERT_OK(swParseStatus.getStatus());
- ASSERT_EQUALS("g", swParseStatus.getValue().get().getApplicationName());
+ ASSERT_EQUALS("g", swParseStatus.getValue().value().getApplicationName());
- swParseStatus.getValue().get().setMongoSMetadata("h", "i", "j");
- ASSERT_EQUALS("g", swParseStatus.getValue().get().getApplicationName());
+ swParseStatus.getValue().value().setMongoSMetadata("h", "i", "j");
+ ASSERT_EQUALS("g", swParseStatus.getValue().value().getApplicationName());
- auto doc = swParseStatus.getValue().get().getDocument();
+ auto doc = swParseStatus.getValue().value().getDocument();
constexpr auto kMongos = "mongos"_sd;
constexpr auto kClient = "client"_sd;
diff --git a/src/mongo/rpc/metadata/impersonated_user_metadata.cpp b/src/mongo/rpc/metadata/impersonated_user_metadata.cpp
index 0e8e59bde6e..7fecbe88eb3 100644
--- a/src/mongo/rpc/metadata/impersonated_user_metadata.cpp
+++ b/src/mongo/rpc/metadata/impersonated_user_metadata.cpp
@@ -93,7 +93,7 @@ void writeAuthDataToImpersonatedUserMetadata(OperationContext* opCtx, BSONObjBui
ImpersonatedUserMetadata metadata;
if (userName) {
- metadata.setUsers({userName.get()});
+ metadata.setUsers({userName.value()});
} else {
metadata.setUsers({});
}
diff --git a/src/mongo/rpc/metadata/security_token_metadata_test.cpp b/src/mongo/rpc/metadata/security_token_metadata_test.cpp
index 98698572af1..9ab835904d4 100644
--- a/src/mongo/rpc/metadata/security_token_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/security_token_metadata_test.cpp
@@ -103,7 +103,7 @@ TEST_F(SecurityTokenMetadataTest, BasicSuccess) {
ASSERT_EQ(authedUser.getUser(), "user");
ASSERT_EQ(authedUser.getDB(), "admin");
ASSERT_TRUE(authedUser.getTenant() != boost::none);
- ASSERT_EQ(authedUser.getTenant().get(), kTenantId);
+ ASSERT_EQ(authedUser.getTenant().value(), kTenantId);
}
} // namespace
diff --git a/src/mongo/rpc/op_msg_integration_test.cpp b/src/mongo/rpc/op_msg_integration_test.cpp
index 25d797084fe..fa9f9eedf1a 100644
--- a/src/mongo/rpc/op_msg_integration_test.cpp
+++ b/src/mongo/rpc/op_msg_integration_test.cpp
@@ -1333,7 +1333,7 @@ public:
auto uri = swURI.getValue();
if (helloOk.has_value()) {
- uri.setHelloOk(helloOk.get());
+ uri.setHelloOk(helloOk.value());
}
auto swConn = connStr.connect(_appName, 0, &uri);
diff --git a/src/mongo/rpc/write_concern_error_detail.cpp b/src/mongo/rpc/write_concern_error_detail.cpp
index aa274010b67..78767dfe43e 100644
--- a/src/mongo/rpc/write_concern_error_detail.cpp
+++ b/src/mongo/rpc/write_concern_error_detail.cpp
@@ -84,7 +84,7 @@ bool WriteConcernErrorDetail::parseBSON(const BSONObj& source, string* errMsg) {
try {
auto wce = WriteConcernError::parse({"writeConcernError"}, source);
_status = Status(ErrorCodes::Error(wce.getCode()), wce.getErrmsg(), source);
- if ((_isErrInfoSet = wce.getErrInfo().is_initialized())) {
+ if ((_isErrInfoSet = wce.getErrInfo().has_value())) {
_errInfo = wce.getErrInfo().value().getOwned();
}
} catch (DBException& ex) {
diff --git a/src/mongo/s/catalog/sharding_catalog_client_test.cpp b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
index be537a1341a..ba77fd693e7 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
@@ -111,7 +111,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
ASSERT_BSONOBJ_EQ(query->getFilter(),
BSON(CollectionType::kNssFieldName << expectedColl.getNss().ns()));
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
- ASSERT_EQ(query->getLimit().get(), 1);
+ ASSERT_EQ(query->getLimit().value(), 1);
checkReadConcern(request.cmdObj,
VectorClock::kInitialComponentTime.asTimestamp(),
@@ -317,7 +317,7 @@ TEST_F(ShardingCatalogClientTest, GetAllShardsValid) {
NamespaceString::kConfigsvrShardsNamespace);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
- ASSERT_FALSE(query->getLimit().is_initialized());
+ ASSERT_FALSE(query->getLimit().has_value());
checkReadConcern(request.cmdObj,
VectorClock::kInitialComponentTime.asTimestamp(),
@@ -421,7 +421,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
ChunkType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), chunksQuery);
ASSERT_BSONOBJ_EQ(query->getSort(), BSON(ChunkType::lastmod() << -1));
- ASSERT_EQ(query->getLimit().get(), 1);
+ ASSERT_EQ(query->getLimit().value(), 1);
checkReadConcern(request.cmdObj,
VectorClock::kInitialComponentTime.asTimestamp(),
@@ -487,7 +487,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForUUIDNoSortNoLimit) {
ChunkType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), chunksQuery);
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
- ASSERT_FALSE(query->getLimit().is_initialized());
+ ASSERT_FALSE(query->getLimit().has_value());
checkReadConcern(request.cmdObj,
VectorClock::kInitialComponentTime.asTimestamp(),
@@ -1244,7 +1244,7 @@ TEST_F(ShardingCatalogClientTest, GetNewKeys) {
query->getNamespaceOrUUID().nss().value_or(NamespaceString()));
ASSERT_BSONOBJ_EQ(expectedQuery, query->getFilter());
ASSERT_BSONOBJ_EQ(BSON("expiresAt" << 1), query->getSort());
- ASSERT_FALSE(query->getLimit().is_initialized());
+ ASSERT_FALSE(query->getLimit().has_value());
checkReadConcern(request.cmdObj,
VectorClock::kInitialComponentTime.asTimestamp(),
@@ -1298,7 +1298,7 @@ TEST_F(ShardingCatalogClientTest, GetNewKeysWithEmptyCollection) {
query->getNamespaceOrUUID().nss().value_or(NamespaceString()));
ASSERT_BSONOBJ_EQ(expectedQuery, query->getFilter());
ASSERT_BSONOBJ_EQ(BSON("expiresAt" << 1), query->getSort());
- ASSERT_FALSE(query->getLimit().is_initialized());
+ ASSERT_FALSE(query->getLimit().has_value());
checkReadConcern(request.cmdObj,
VectorClock::kInitialComponentTime.asTimestamp(),
diff --git a/src/mongo/s/catalog/type_changelog.cpp b/src/mongo/s/catalog/type_changelog.cpp
index 01faa0396fd..2732feda427 100644
--- a/src/mongo/s/catalog/type_changelog.cpp
+++ b/src/mongo/s/catalog/type_changelog.cpp
@@ -122,23 +122,23 @@ StatusWith<ChangeLogType> ChangeLogType::fromBSON(const BSONObj& source) {
}
Status ChangeLogType::validate() const {
- if (!_changeId.is_initialized() || _changeId->empty())
+ if (!_changeId.has_value() || _changeId->empty())
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << changeId.name() << " field"};
- if (!_server.is_initialized() || _server->empty())
+ if (!_server.has_value() || _server->empty())
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << server.name() << " field"};
- if (!_clientAddr.is_initialized() || _clientAddr->empty())
+ if (!_clientAddr.has_value() || _clientAddr->empty())
return {ErrorCodes::NoSuchKey,
str::stream() << "missing " << clientAddr.name() << " field"};
- if (!_time.is_initialized())
+ if (!_time.has_value())
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << time.name() << " field"};
- if (!_what.is_initialized() || _what->empty())
+ if (!_what.has_value() || _what->empty())
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << what.name() << " field"};
- if (!_details.is_initialized() || _details->isEmpty())
+ if (!_details.has_value() || _details->isEmpty())
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << details.name() << " field"};
return Status::OK();
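One detail worth calling out in these `validate()` rewrites: `operator->` remains an unchecked accessor, exactly like `get()`. The expressions stay safe because `||` short-circuits, so `_changeId->empty()` only evaluates when `has_value()` is true. A minimal sketch of the idiom:

```cpp
#include <boost/optional.hpp>
#include <iostream>
#include <string>

// operator-> is still unchecked, but the short-circuiting || guarantees it
// only runs when has_value() is true.
bool missingOrEmpty(const boost::optional<std::string>& field) {
    return !field.has_value() || field->empty();
}

int main() {
    std::cout << missingOrEmpty(boost::none) << '\n';            // 1: missing
    std::cout << missingOrEmpty(std::string("")) << '\n';        // 1: empty
    std::cout << missingOrEmpty(std::string("shard0")) << '\n';  // 0: ok
    return 0;
}
```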
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 3ad4047d3c1..9def6f376b2 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -514,7 +514,7 @@ void ChunkType::setShard(const ShardId& shard) {
void ChunkType::setEstimatedSizeBytes(const boost::optional<int64_t>& estimatedSize) {
uassert(ErrorCodes::BadValue,
"estimatedSizeBytes cannot be negative",
- !estimatedSize.is_initialized() || estimatedSize.get() >= 0);
+ !estimatedSize.has_value() || estimatedSize.value() >= 0);
_estimatedSizeBytes = estimatedSize;
}
@@ -533,19 +533,19 @@ void ChunkType::addHistoryToBSON(BSONObjBuilder& builder) const {
}
Status ChunkType::validate() const {
- if (!_min.is_initialized() || _min->isEmpty()) {
+ if (!_min.has_value() || _min->isEmpty()) {
return Status(ErrorCodes::NoSuchKey, str::stream() << "missing " << min.name() << " field");
}
- if (!_max.is_initialized() || _max->isEmpty()) {
+ if (!_max.has_value() || _max->isEmpty()) {
return Status(ErrorCodes::NoSuchKey, str::stream() << "missing " << max.name() << " field");
}
- if (!_version.is_initialized() || !_version->isSet()) {
+ if (!_version.has_value() || !_version->isSet()) {
return Status(ErrorCodes::NoSuchKey, str::stream() << "missing version field");
}
- if (!_shard.is_initialized() || !_shard->isValid()) {
+ if (!_shard.has_value() || !_shard->isValid()) {
return Status(ErrorCodes::NoSuchKey,
str::stream() << "missing " << shard.name() << " field");
}
diff --git a/src/mongo/s/catalog/type_config_version.cpp b/src/mongo/s/catalog/type_config_version.cpp
index ec777b1f9fc..6baf07243ca 100644
--- a/src/mongo/s/catalog/type_config_version.cpp
+++ b/src/mongo/s/catalog/type_config_version.cpp
@@ -69,19 +69,19 @@ void VersionType::cloneTo(VersionType* other) const {
}
Status VersionType::validate() const {
- if (!_minCompatibleVersion.is_initialized()) {
+ if (!_minCompatibleVersion.has_value()) {
return {ErrorCodes::NoSuchKey,
str::stream() << "missing " << minCompatibleVersion.name() << " field"};
}
- if (!_currentVersion.is_initialized()) {
+ if (!_currentVersion.has_value()) {
return {ErrorCodes::NoSuchKey,
str::stream() << "missing " << currentVersion.name() << " field"};
}
// UpgradeHistory::UpgradeHistory_NoEpochVersion is the last version without a cluster id
if (getCurrentVersion() > UpgradeHistory::UpgradeHistory_NoEpochVersion &&
- !_clusterId.is_initialized()) {
+ !_clusterId.has_value()) {
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << clusterId.name() << " field"};
}
diff --git a/src/mongo/s/catalog/type_mongos.cpp b/src/mongo/s/catalog/type_mongos.cpp
index d8a23295f63..e6430fae76b 100644
--- a/src/mongo/s/catalog/type_mongos.cpp
+++ b/src/mongo/s/catalog/type_mongos.cpp
@@ -131,19 +131,19 @@ StatusWith<MongosType> MongosType::fromBSON(const BSONObj& source) {
}
Status MongosType::validate() const {
- if (!_name.is_initialized() || _name->empty()) {
+ if (!_name.has_value() || _name->empty()) {
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << name.name() << " field"};
}
- if (!_ping.is_initialized()) {
+ if (!_ping.has_value()) {
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << ping.name() << " field"};
}
- if (!_uptime.is_initialized()) {
+ if (!_uptime.has_value()) {
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << uptime.name() << " field"};
}
- if (!_waiting.is_initialized()) {
+ if (!_waiting.has_value()) {
return {ErrorCodes::NoSuchKey, str::stream() << "missing " << waiting.name() << " field"};
}
diff --git a/src/mongo/s/catalog/type_shard.cpp b/src/mongo/s/catalog/type_shard.cpp
index 4456729939c..a619ffd9f1b 100644
--- a/src/mongo/s/catalog/type_shard.cpp
+++ b/src/mongo/s/catalog/type_shard.cpp
@@ -153,17 +153,17 @@ StatusWith<ShardType> ShardType::fromBSON(const BSONObj& source) {
}
Status ShardType::validate() const {
- if (!_name.is_initialized() || _name->empty()) {
+ if (!_name.has_value() || _name->empty()) {
return Status(ErrorCodes::NoSuchKey,
str::stream() << "missing " << name.name() << " field");
}
- if (!_host.is_initialized() || _host->empty()) {
+ if (!_host.has_value() || _host->empty()) {
return Status(ErrorCodes::NoSuchKey,
str::stream() << "missing " << host.name() << " field");
}
- if (_maxSizeMB.is_initialized() && getMaxSizeMB() < 0) {
+ if (_maxSizeMB.has_value() && getMaxSizeMB() < 0) {
return Status(ErrorCodes::BadValue, str::stream() << "maxSize can't be negative");
}
@@ -219,7 +219,7 @@ void ShardType::setTags(const std::vector<std::string>& tags) {
}
void ShardType::setState(const ShardState state) {
- invariant(!_state.is_initialized());
+ invariant(!_state.has_value());
_state = state;
}
diff --git a/src/mongo/s/catalog/type_tags.cpp b/src/mongo/s/catalog/type_tags.cpp
index 4f949c5b754..0edfc7eb5d8 100644
--- a/src/mongo/s/catalog/type_tags.cpp
+++ b/src/mongo/s/catalog/type_tags.cpp
@@ -93,19 +93,19 @@ StatusWith<TagsType> TagsType::fromBSON(const BSONObj& source) {
}
Status TagsType::validate() const {
- if (!_ns.is_initialized() || !_ns->isValid()) {
+ if (!_ns.has_value() || !_ns->isValid()) {
return Status(ErrorCodes::NoSuchKey, str::stream() << "missing " << ns.name() << " field");
}
- if (!_tag.is_initialized() || _tag->empty()) {
+ if (!_tag.has_value() || _tag->empty()) {
return Status(ErrorCodes::NoSuchKey, str::stream() << "missing " << tag.name() << " field");
}
- if (!_minKey.is_initialized() || _minKey->isEmpty()) {
+ if (!_minKey.has_value() || _minKey->isEmpty()) {
return Status(ErrorCodes::NoSuchKey, str::stream() << "missing " << min.name() << " field");
}
- if (!_maxKey.is_initialized() || _maxKey->isEmpty()) {
+ if (!_maxKey.has_value() || _maxKey->isEmpty()) {
return Status(ErrorCodes::NoSuchKey, str::stream() << "missing " << max.name() << " field");
}
@@ -114,8 +114,8 @@ Status TagsType::validate() const {
return Status(ErrorCodes::BadValue, "min and max have a different number of keys");
}
- BSONObjIterator minIt(_minKey.get());
- BSONObjIterator maxIt(_maxKey.get());
+ BSONObjIterator minIt(_minKey.value());
+ BSONObjIterator maxIt(_maxKey.value());
while (minIt.more() && maxIt.more()) {
BSONElement minElem = minIt.next();
BSONElement maxElem = maxIt.next();
@@ -125,7 +125,7 @@ Status TagsType::validate() const {
}
// 'max' should be greater than 'min'.
- if (_minKey->woCompare(_maxKey.get()) >= 0) {
+ if (_minKey->woCompare(_maxKey.value()) >= 0) {
return Status(ErrorCodes::BadValue, "max key must be greater than min key");
}
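One nuance behind the get() -> value() rewrites in this hunk and elsewhere: on a disengaged optional, boost::optional<T>::get() asserts (undefined behavior in release builds), while value() throws boost::bad_optional_access. Every call rewritten here sits behind an emptiness check, so only the unreachable failure mode changes. A hedged illustration:

#include <boost/optional.hpp>
#include <cassert>

int demo() {
    boost::optional<int> full = 42;
    boost::optional<int> empty;

    assert(full.get() == full.value());  // identical when engaged

    // empty.get();    // assertion failure / undefined behavior
    // empty.value();  // throws boost::bad_optional_access
    return *full;
}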
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 1600cdaede5..0c5823bc7c5 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -110,7 +110,7 @@ std::shared_ptr<RoutingTableHistory> createUpdatedRoutingTableHistory(
return 0;
}
if (collectionAndChunks.maxChunkSizeBytes) {
- invariant(collectionAndChunks.maxChunkSizeBytes.get() > 0);
+ invariant(collectionAndChunks.maxChunkSizeBytes.value() > 0);
return uint64_t(*collectionAndChunks.maxChunkSizeBytes);
}
return boost::none;
@@ -431,7 +431,7 @@ void CatalogCache::onStaleDatabaseVersion(const StringData dbName,
const boost::optional<DatabaseVersion>& databaseVersion) {
if (databaseVersion) {
const auto version =
- ComparableDatabaseVersion::makeComparableDatabaseVersion(databaseVersion.get());
+ ComparableDatabaseVersion::makeComparableDatabaseVersion(databaseVersion.value());
LOGV2_FOR_CATALOG_REFRESH(4899101,
2,
"Registering new database version",
diff --git a/src/mongo/s/catalog_cache_test.cpp b/src/mongo/s/catalog_cache_test.cpp
index b41aafde12c..c5885709e37 100644
--- a/src/mongo/s/catalog_cache_test.cpp
+++ b/src/mongo/s/catalog_cache_test.cpp
@@ -323,7 +323,7 @@ TEST_F(CatalogCacheTest, TimeseriesFieldsAreProperlyPropagatedOnCC) {
ASSERT_OK(swChunkManager.getStatus());
const auto& chunkManager = swChunkManager.getValue();
- ASSERT(chunkManager.getTimeseriesFields().is_initialized());
+ ASSERT(chunkManager.getTimeseriesFields().has_value());
ASSERT(chunkManager.getTimeseriesFields()->getGranularity() ==
BucketGranularityEnum::Seconds);
}
@@ -348,7 +348,7 @@ TEST_F(CatalogCacheTest, TimeseriesFieldsAreProperlyPropagatedOnCC) {
ASSERT_OK(swChunkManager.getStatus());
const auto& chunkManager = swChunkManager.getValue();
- ASSERT(chunkManager.getTimeseriesFields().is_initialized());
+ ASSERT(chunkManager.getTimeseriesFields().has_value());
ASSERT(chunkManager.getTimeseriesFields()->getGranularity() ==
BucketGranularityEnum::Hours);
}
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 4a944411695..2acbd4ab2be 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -87,7 +87,7 @@ const ShardId& ChunkInfo::getShardIdAt(const boost::optional<Timestamp>& ts) con
uasserted(ErrorCodes::StaleChunkHistory,
str::stream() << "Cannot find shardId the chunk belonged to at cluster time "
- << ts.get().toString());
+ << ts.value().toString());
}
void ChunkInfo::throwIfMovedSince(const Timestamp& ts) const {
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 593b475ed46..2c477bb3e56 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -203,11 +203,11 @@ ShardVersionMap ChunkMap::constructShardVersionMap() const {
if (!_chunkMap.empty()) {
invariant(!shardVersions.empty());
- invariant(firstMin.is_initialized());
- invariant(lastMax.is_initialized());
+ invariant(firstMin.has_value());
+ invariant(lastMax.has_value());
- checkAllElementsAreOfType(MinKey, firstMin.get());
- checkAllElementsAreOfType(MaxKey, lastMax.get());
+ checkAllElementsAreOfType(MinKey, firstMin.value());
+ checkAllElementsAreOfType(MaxKey, lastMax.value());
}
return shardVersions;
@@ -598,7 +598,7 @@ IndexBounds ChunkManager::getIndexBoundsForQuery(const BSONObj& key,
}
canonicalQuery.root()->getChildVector()->erase(
- canonicalQuery.root()->getChildVector()->begin() + geoIdx.get());
+ canonicalQuery.root()->getChildVector()->begin() + geoIdx.value());
}
// Consider shard key as an index
@@ -899,7 +899,7 @@ bool ComparableChunkVersion::operator<(const ComparableChunkVersion& other) cons
if (_forcedRefreshSequenceNum == 0)
return false; // Only default constructed values have _forcedRefreshSequenceNum == 0 and
// they are always equal
- if (_chunkVersion.is_initialized() != other._chunkVersion.is_initialized())
+ if (_chunkVersion.has_value() != other._chunkVersion.has_value())
return _epochDisambiguatingSequenceNum <
other._epochDisambiguatingSequenceNum; // One side is not initialised, but the other
// is, which can only happen if one side is
@@ -907,7 +907,7 @@ bool ComparableChunkVersion::operator<(const ComparableChunkVersion& other) cons
// makeComparableChunkVersion. In this case, use
// the _epochDisambiguatingSequenceNum to see
// which one is more recent.
- if (!_chunkVersion.is_initialized())
+ if (!_chunkVersion.has_value())
return _epochDisambiguatingSequenceNum <
other._epochDisambiguatingSequenceNum; // Both sides are not initialised, which can
// only happen if both were created from
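The presence-only comparison rewritten above reduces to a small, self-contained shape; the names below are hypothetical, not the actual ChunkVersion types:

#include <boost/optional.hpp>
#include <cstdint>

// Hypothetical reduction: when the two optionals differ in engagement, or
// both are disengaged, fall back to the disambiguating sequence numbers;
// only compare payloads when both sides actually carry one.
bool lessByVersion(const boost::optional<uint64_t>& a, uint64_t aSeq,
                   const boost::optional<uint64_t>& b, uint64_t bSeq) {
    if (a.has_value() != b.has_value())
        return aSeq < bSeq;
    if (!a.has_value())
        return aSeq < bSeq;
    return *a < *b;
}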
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index 0ffc158f29e..5d964f74e51 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -421,7 +421,7 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetUpdate(OperationContext*
!isUpsert);
// Since this is a timeseries query, we may need to rename the metaField.
- if (auto metaField = _cm.getTimeseriesFields().get().getMetaField()) {
+ if (auto metaField = _cm.getTimeseriesFields().value().getMetaField()) {
query = timeseries::translateQuery(query, *metaField);
} else {
// We want to avoid targeting the query incorrectly if no metaField is defined on the
@@ -627,14 +627,14 @@ std::vector<ShardEndpoint> ChunkManagerTargeter::targetAllShards(OperationContex
}
void ChunkManagerTargeter::noteCouldNotTarget() {
- dassert(!_lastError || _lastError.get() == LastErrorType::kCouldNotTarget);
+ dassert(!_lastError || _lastError.value() == LastErrorType::kCouldNotTarget);
_lastError = LastErrorType::kCouldNotTarget;
}
void ChunkManagerTargeter::noteStaleShardResponse(OperationContext* opCtx,
const ShardEndpoint& endpoint,
const StaleConfigInfo& staleInfo) {
- dassert(!_lastError || _lastError.get() == LastErrorType::kStaleShardVersion);
+ dassert(!_lastError || _lastError.value() == LastErrorType::kStaleShardVersion);
Grid::get(opCtx)->catalogCache()->invalidateShardOrEntireCollectionEntryForShardedCollection(
staleInfo.getNss(), staleInfo.getVersionWanted(), endpoint.shardName);
@@ -652,7 +652,7 @@ void ChunkManagerTargeter::noteStaleShardResponse(OperationContext* opCtx,
void ChunkManagerTargeter::noteStaleDbResponse(OperationContext* opCtx,
const ShardEndpoint& endpoint,
const StaleDbRoutingVersion& staleInfo) {
- dassert(!_lastError || _lastError.get() == LastErrorType::kStaleDbVersion);
+ dassert(!_lastError || _lastError.value() == LastErrorType::kStaleDbVersion);
Grid::get(opCtx)->catalogCache()->onStaleDatabaseVersion(_nss.db(),
staleInfo.getVersionWanted());
_lastError = LastErrorType::kStaleDbVersion;
@@ -670,16 +670,16 @@ bool ChunkManagerTargeter::refreshIfNeeded(OperationContext* opCtx) {
LOGV2_DEBUG(22912,
4,
"ChunkManagerTargeter checking if refresh is needed",
- "couldNotTarget"_attr = _lastError.get() == LastErrorType::kCouldNotTarget,
- "staleShardVersion"_attr = _lastError.get() == LastErrorType::kStaleShardVersion,
- "staleDbVersion"_attr = _lastError.get() == LastErrorType::kStaleDbVersion);
+ "couldNotTarget"_attr = _lastError.value() == LastErrorType::kCouldNotTarget,
+ "staleShardVersion"_attr = _lastError.value() == LastErrorType::kStaleShardVersion,
+ "staleDbVersion"_attr = _lastError.value() == LastErrorType::kStaleDbVersion);
// Get the latest metadata information from the cache if there were issues
auto lastManager = _cm;
_cm = _init(opCtx, false);
auto metadataChanged = isMetadataDifferent(lastManager, _cm);
- if (_lastError.get() == LastErrorType::kCouldNotTarget && !metadataChanged) {
+ if (_lastError.value() == LastErrorType::kCouldNotTarget && !metadataChanged) {
// If we couldn't target and we didn't already update the metadata, we must force a refresh
_cm = _init(opCtx, true);
metadataChanged = isMetadataDifferent(lastManager, _cm);
diff --git a/src/mongo/s/cluster_identity_loader_test.cpp b/src/mongo/s/cluster_identity_loader_test.cpp
index 7b93e4353f6..046e67bd5fd 100644
--- a/src/mongo/s/cluster_identity_loader_test.cpp
+++ b/src/mongo/s/cluster_identity_loader_test.cpp
@@ -83,7 +83,7 @@ public:
ASSERT_EQ(query->getNamespaceOrUUID().nss()->ns(), "config.version");
ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
- ASSERT_FALSE(query->getLimit().is_initialized());
+ ASSERT_FALSE(query->getLimit().has_value());
if (result.isOK()) {
VersionType version;
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index bfb24f62ce4..b70329dc312 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -109,9 +109,9 @@ public:
// have a value for limit; otherwise, we apply it only once we have collected all
// counts.
if (countRequest.getLimit() && countRequest.getSkip()) {
- const auto limit = countRequest.getLimit().get();
+ const auto limit = countRequest.getLimit().value();
if (limit != 0) {
- countRequest.setLimit(limit + countRequest.getSkip().get());
+ countRequest.setLimit(limit + countRequest.getSkip().value());
}
}
countRequest.setSkip(boost::none);
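The adjustment above is the usual scatter-gather arithmetic: the router applies skip after merging, so each shard must be allowed to return up to limit + skip documents (limit == 0 meaning unlimited). A reduced sketch with hypothetical names:

#include <boost/optional.hpp>
#include <cstdint>

// Hypothetical helper mirroring the hunk above: fold skip into the
// per-shard limit, leaving 0 ("no limit") and unset values untouched.
boost::optional<int64_t> perShardLimit(boost::optional<int64_t> limit,
                                       boost::optional<int64_t> skip) {
    if (limit && skip && limit.value() != 0)
        return limit.value() + skip.value();
    return limit;
}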
diff --git a/src/mongo/s/commands/cluster_list_collections_cmd.cpp b/src/mongo/s/commands/cluster_list_collections_cmd.cpp
index 0f1a1eab2f2..f1be781cf9d 100644
--- a/src/mongo/s/commands/cluster_list_collections_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_collections_cmd.cpp
@@ -129,7 +129,7 @@ BSONObj rewriteCommandForListingOwnCollections(OperationContext* opCtx,
// Compute the set of collection names which would be permissible to return.
std::set<std::string> collectionNames;
if (auto authUser = authzSession->getAuthenticatedUser()) {
- for (const auto& [resource, privilege] : authUser.get()->getPrivileges()) {
+ for (const auto& [resource, privilege] : authUser.value()->getPrivileges()) {
if (resource.isCollectionPattern() ||
(resource.isExactNamespacePattern() &&
resource.databaseToMatch() == dbName.toStringWithTenantId())) {
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index 7aca53965d5..107f904f694 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -90,8 +90,8 @@ public:
if (authDB) {
uassert(ErrorCodes::Unauthorized,
"Insufficient permissions to list all databases",
- authDB.get() || mayListAllDatabases);
- return authDB.get();
+ authDB.value() || mayListAllDatabases);
+ return authDB.value();
}
// By default, list all databases if we can, otherwise
diff --git a/src/mongo/s/commands/cluster_list_indexes_cmd.cpp b/src/mongo/s/commands/cluster_list_indexes_cmd.cpp
index aec78c0788b..047511473bf 100644
--- a/src/mongo/s/commands/cluster_list_indexes_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_indexes_cmd.cpp
@@ -101,7 +101,7 @@ public:
const auto& nss = request().getNamespaceOrUUID().nss();
uassert(
ErrorCodes::BadValue, "Mongos requires a namespace for listIndexes command", nss);
- return nss.get();
+ return nss.value();
}
void doCheckAuthorization(OperationContext* opCtx) const final {
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index a6b2707a5fd..a43e919b217 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -410,7 +410,7 @@ bool handleWouldChangeOwningShardError(OperationContext* opCtx,
if (upsertedId) {
auto upsertDetail = std::make_unique<BatchedUpsertDetail>();
upsertDetail->setIndex(0);
- upsertDetail->setUpsertedID(upsertedId.get());
+ upsertDetail->setUpsertedID(upsertedId.value());
response->addToUpsertDetails(upsertDetail.release());
} else {
response->setNModified(response->getNModified() + 1);
@@ -615,10 +615,10 @@ bool ClusterWriteCmd::InvocationBase::runImpl(OperationContext* opCtx,
CurOp::get(opCtx)->debug().nShards =
stats.getTargetedShards().size() + (updatedShardKey ? 1 : 0);
- if (stats.getNumShardsOwningChunks().is_initialized())
+ if (stats.getNumShardsOwningChunks().has_value())
updateHostsTargetedMetrics(opCtx,
_batchedRequest.getBatchType(),
- stats.getNumShardsOwningChunks().get(),
+ stats.getNumShardsOwningChunks().value(),
stats.getTargetedShards().size() + (updatedShardKey ? 1 : 0));
if (auto txnRouter = TransactionRouter::get(opCtx)) {
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index bafb6b21fcf..58d70ed8dc1 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -538,7 +538,7 @@ void ParseAndRunCommand::_parseCommand() {
Client* client = opCtx->getClient();
const auto session = client->session();
if (session) {
- if (!opCtx->isExhaust() || !_isHello.get()) {
+ if (!opCtx->isExhaust() || !_isHello.value()) {
InExhaustHello::get(session.get())->setInExhaust(false, _commandName);
}
}
@@ -632,7 +632,7 @@ Status ParseAndRunCommand::RunInvocation::_setup() {
if (MONGO_unlikely(
hangBeforeCheckingMongosShutdownInterrupt.shouldFail([&](const BSONObj& data) {
if (data.hasField("cmdName") && data.hasField("ns")) {
- std::string cmdNS = _parc->_ns.get();
+ std::string cmdNS = _parc->_ns.value();
return ((data.getStringField("cmdName") == _parc->_commandName) &&
(data.getStringField("ns") == cmdNS));
}
@@ -650,7 +650,7 @@ Status ParseAndRunCommand::RunInvocation::_setup() {
return Status(ErrorCodes::SkipCommandExecution, status.reason());
};
- if (_parc->_isHello.get()) {
+ if (_parc->_isHello.value()) {
// Preload generic ClientMetadata ahead of our first hello request. After the first
// request, metaElement should always be empty.
auto metaElem = request.body[kMetadataDocumentName];
@@ -812,7 +812,7 @@ Status ParseAndRunCommand::RunInvocation::_setup() {
const auto readConcernSource = rwcDefaults.getDefaultReadConcernSource();
customDefaultReadConcernWasApplied =
(readConcernSource &&
- readConcernSource.get() == DefaultReadConcernSourceEnum::kGlobal);
+ readConcernSource.value() == DefaultReadConcernSourceEnum::kGlobal);
applyDefaultReadConcern(*rcDefault);
}
diff --git a/src/mongo/s/mongos_main.cpp b/src/mongo/s/mongos_main.cpp
index f480bf1f259..c3419ae3262 100644
--- a/src/mongo/s/mongos_main.cpp
+++ b/src/mongo/s/mongos_main.cpp
@@ -556,7 +556,7 @@ private:
return;
}
updateState->updateInProgress = true;
- update = updateState->nextUpdateToSend.get();
+ update = updateState->nextUpdateToSend.value();
updateState->nextUpdateToSend = boost::none;
}
diff --git a/src/mongo/s/mongos_topology_coordinator.cpp b/src/mongo/s/mongos_topology_coordinator.cpp
index a397d40f12f..c1f048e6221 100644
--- a/src/mongo/s/mongos_topology_coordinator.cpp
+++ b/src/mongo/s/mongos_topology_coordinator.cpp
@@ -171,11 +171,11 @@ std::shared_ptr<const MongosHelloResponse> MongosTopologyCoordinator::awaitHello
LOGV2_DEBUG(4695502,
1,
"Waiting for a hello response from a topology change or until deadline",
- "deadline"_attr = deadline.get(),
+ "deadline"_attr = deadline.value(),
"currentMongosTopologyVersionCounter"_attr = _topologyVersion.getCounter());
auto statusWithHello =
- futureGetNoThrowWithDeadline(opCtx, future, deadline.get(), opCtx->getTimeoutError());
+ futureGetNoThrowWithDeadline(opCtx, future, deadline.value(), opCtx->getTimeoutError());
auto status = statusWithHello.getStatus();
setCustomErrorInHelloResponseMongoS.execute([&](const BSONObj& data) {
diff --git a/src/mongo/s/query/cluster_aggregate.cpp b/src/mongo/s/query/cluster_aggregate.cpp
index edd41446142..bc8bca89730 100644
--- a/src/mongo/s/query/cluster_aggregate.cpp
+++ b/src/mongo/s/query/cluster_aggregate.cpp
@@ -387,7 +387,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
// After this rewriting, the encryption info does not need to be kept around.
pipeline = processFLEPipelineS(opCtx,
namespaces.executionNss,
- request.getEncryptionInformation().get(),
+ request.getEncryptionInformation().value(),
std::move(pipeline));
request.setEncryptionInformation(boost::none);
}
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index 3361c1dace5..4f6006141fc 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -240,7 +240,7 @@ StatusWith<ClusterCursorManager::PinnedCursor> ClusterCursorManager::checkOutCur
// we pass down to the logical session cache and vivify the record (updating last use).
if (cursorGuard->getLsid()) {
auto vivifyCursorStatus =
- LogicalSessionCache::get(opCtx)->vivify(opCtx, cursorGuard->getLsid().get());
+ LogicalSessionCache::get(opCtx)->vivify(opCtx, cursorGuard->getLsid().value());
if (!vivifyCursorStatus.isOK()) {
return vivifyCursorStatus;
}
diff --git a/src/mongo/s/query/cluster_exchange_test.cpp b/src/mongo/s/query/cluster_exchange_test.cpp
index 96815019657..706149f9dc2 100644
--- a/src/mongo/s/query/cluster_exchange_test.cpp
+++ b/src/mongo/s/query/cluster_exchange_test.cpp
@@ -183,7 +183,7 @@ TEST_F(ClusterExchangeTest, GroupFollowedByMergeIsEligbleForExchange) {
ASSERT(exchangeSpec->exchangeSpec.getPolicy() == ExchangePolicyEnum::kKeyRange);
ASSERT_BSONOBJ_EQ(exchangeSpec->exchangeSpec.getKey(), BSON("_id" << 1));
ASSERT_EQ(exchangeSpec->consumerShards.size(), 2UL); // One for each shard.
- const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().get();
+ const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().value();
ASSERT_EQ(boundaries.size(), 3UL);
ASSERT_BSONOBJ_EQ(boundaries[0], BSON("_id" << MINKEY));
@@ -219,8 +219,8 @@ TEST_F(ClusterExchangeTest, RenamesAreEligibleForExchange) {
ASSERT(exchangeSpec->exchangeSpec.getPolicy() == ExchangePolicyEnum::kKeyRange);
ASSERT_BSONOBJ_EQ(exchangeSpec->exchangeSpec.getKey(), BSON("_id" << 1));
ASSERT_EQ(exchangeSpec->consumerShards.size(), 2UL); // One for each shard.
- const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().get();
- const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().get();
+ const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().value();
+ const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().value();
ASSERT_EQ(boundaries.size(), 3UL);
ASSERT_BSONOBJ_EQ(boundaries[0], BSON("_id" << MINKEY));
@@ -258,8 +258,8 @@ TEST_F(ClusterExchangeTest, MatchesAreEligibleForExchange) {
ASSERT(exchangeSpec->exchangeSpec.getPolicy() == ExchangePolicyEnum::kKeyRange);
ASSERT_BSONOBJ_EQ(exchangeSpec->exchangeSpec.getKey(), BSON("_id" << 1));
ASSERT_EQ(exchangeSpec->consumerShards.size(), 2UL); // One for each shard.
- const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().get();
- const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().get();
+ const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().value();
+ const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().value();
ASSERT_EQ(boundaries.size(), 3UL);
ASSERT_BSONOBJ_EQ(boundaries[0], BSON("_id" << MINKEY));
@@ -302,8 +302,8 @@ TEST_F(ClusterExchangeTest, SortThenGroupIsEligibleForExchange) {
ASSERT(exchangeSpec->exchangeSpec.getPolicy() == ExchangePolicyEnum::kKeyRange);
ASSERT_BSONOBJ_EQ(exchangeSpec->exchangeSpec.getKey(), BSON("x" << 1));
ASSERT_EQ(exchangeSpec->consumerShards.size(), 2UL); // One for each shard.
- const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().get();
- const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().get();
+ const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().value();
+ const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().value();
ASSERT_EQ(boundaries.size(), 3UL);
ASSERT_BSONOBJ_EQ(boundaries[0], BSON("x" << MINKEY));
@@ -348,8 +348,8 @@ TEST_F(ClusterExchangeTest, SortThenGroupIsEligibleForExchangeHash) {
BSON("x"
<< "hashed"));
ASSERT_EQ(exchangeSpec->consumerShards.size(), 2UL); // One for each shard.
- const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().get();
- const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().get();
+ const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().value();
+ const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().value();
ASSERT_EQ(boundaries.size(), 3UL);
ASSERT_BSONOBJ_EQ(boundaries[0], BSON("x" << MINKEY));
@@ -428,8 +428,8 @@ TEST_F(ClusterExchangeTest, WordCountUseCaseExample) {
ASSERT(exchangeSpec->exchangeSpec.getPolicy() == ExchangePolicyEnum::kKeyRange);
ASSERT_BSONOBJ_EQ(exchangeSpec->exchangeSpec.getKey(), BSON("_id" << 1));
ASSERT_EQ(exchangeSpec->consumerShards.size(), 2UL); // One for each shard.
- const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().get();
- const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().get();
+ const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().value();
+ const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().value();
ASSERT_EQ(boundaries.size(), 3UL);
ASSERT_BSONOBJ_EQ(boundaries[0], BSON("_id" << MINKEY));
@@ -495,8 +495,8 @@ TEST_F(ClusterExchangeTest, WordCountUseCaseExampleShardedByWord) {
ASSERT(exchangeSpec->exchangeSpec.getPolicy() == ExchangePolicyEnum::kKeyRange);
ASSERT_BSONOBJ_EQ(exchangeSpec->exchangeSpec.getKey(), BSON("_id" << 1));
ASSERT_EQ(exchangeSpec->consumerShards.size(), 2UL); // One for each shard.
- const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().get();
- const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().get();
+ const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().value();
+ const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().value();
ASSERT_EQ(boundaries.size(), 4UL);
ASSERT_EQ(consumerIds.size(), 3UL);
@@ -579,8 +579,8 @@ TEST_F(ClusterExchangeTest, CompoundShardKeyThreeShards) {
ASSERT(exchangeSpec->exchangeSpec.getPolicy() == ExchangePolicyEnum::kKeyRange);
ASSERT_BSONOBJ_EQ(exchangeSpec->exchangeSpec.getKey(), BSON("_id" << 1 << "_id" << 1));
ASSERT_EQ(exchangeSpec->consumerShards.size(), 3UL); // One for each shard.
- const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().get();
- const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().get();
+ const auto& boundaries = exchangeSpec->exchangeSpec.getBoundaries().value();
+ const auto& consumerIds = exchangeSpec->exchangeSpec.getConsumerIds().value();
ASSERT_EQ(boundaries.size(), chunks.size() + 1);
ASSERT_EQ(consumerIds.size(), chunks.size());
diff --git a/src/mongo/s/request_types/migration_secondary_throttle_options.cpp b/src/mongo/s/request_types/migration_secondary_throttle_options.cpp
index 6142cff16d2..931cb63ad43 100644
--- a/src/mongo/s/request_types/migration_secondary_throttle_options.cpp
+++ b/src/mongo/s/request_types/migration_secondary_throttle_options.cpp
@@ -110,7 +110,7 @@ StatusWith<MigrationSecondaryThrottleOptions> MigrationSecondaryThrottleOptions:
writeConcernBSON = writeConcernElem.Obj().getOwned();
}
- invariant(writeConcernBSON.is_initialized());
+ invariant(writeConcernBSON.has_value());
// Make sure the write concern parses correctly
auto sw = WriteConcernOptions::parse(*writeConcernBSON);
diff --git a/src/mongo/s/sharding_router_test_fixture.cpp b/src/mongo/s/sharding_router_test_fixture.cpp
index ad884f498d9..5e1c3530309 100644
--- a/src/mongo/s/sharding_router_test_fixture.cpp
+++ b/src/mongo/s/sharding_router_test_fixture.cpp
@@ -259,7 +259,7 @@ void ShardingTestFixture::expectGetShards(const std::vector<ShardType>& shards)
ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
- ASSERT_FALSE(query->getLimit().is_initialized());
+ ASSERT_FALSE(query->getLimit().has_value());
checkReadConcern(request.cmdObj,
VectorClock::kInitialComponentTime.asTimestamp(),
diff --git a/src/mongo/s/transaction_router.cpp b/src/mongo/s/transaction_router.cpp
index 4c4ae5aa1be..290dc7bd6ba 100644
--- a/src/mongo/s/transaction_router.cpp
+++ b/src/mongo/s/transaction_router.cpp
@@ -410,7 +410,7 @@ void TransactionRouter::Observer::_reportTransactionState(OperationContext* opCt
}
bool TransactionRouter::Observer::_atClusterTimeHasBeenSet() const {
- return o().atClusterTime.is_initialized() && o().atClusterTime->timeHasBeenSet();
+ return o().atClusterTime.has_value() && o().atClusterTime->timeHasBeenSet();
}
const LogicalSessionId& TransactionRouter::Observer::_sessionId() const {
@@ -588,7 +588,7 @@ bool TransactionRouter::AtClusterTime::canChange(StmtId currentStmtId) const {
}
bool TransactionRouter::Router::mustUseAtClusterTime() const {
- return o().atClusterTime.is_initialized();
+ return o().atClusterTime.has_value();
}
LogicalTime TransactionRouter::Router::getSelectedAtClusterTime() const {
diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp
index a61ee3dd4bf..4c2061dd07f 100644
--- a/src/mongo/s/write_ops/batch_write_op.cpp
+++ b/src/mongo/s/write_ops/batch_write_op.cpp
@@ -293,7 +293,7 @@ int getEncryptionInformationSize(const BatchedCommandRequest& req) {
if (!req.getWriteCommandRequestBase().getEncryptionInformation()) {
return 0;
}
- return req.getWriteCommandRequestBase().getEncryptionInformation().get().toBSON().objsize();
+ return req.getWriteCommandRequestBase().getEncryptionInformation().value().toBSON().objsize();
}
} // namespace
diff --git a/src/mongo/s/write_ops/batched_command_request.cpp b/src/mongo/s/write_ops/batched_command_request.cpp
index 5b1bca59b5d..c03abf4db62 100644
--- a/src/mongo/s/write_ops/batched_command_request.cpp
+++ b/src/mongo/s/write_ops/batched_command_request.cpp
@@ -263,7 +263,7 @@ BatchedCommandRequest BatchedCommandRequest::buildDeleteOp(const NamespaceString
entry.setMulti(multiDelete);
if (hint) {
- entry.setHint(hint.get());
+ entry.setHint(hint.value());
}
return entry;
}()});
@@ -295,7 +295,7 @@ BatchedCommandRequest BatchedCommandRequest::buildUpdateOp(const NamespaceString
entry.setUpsert(upsert);
entry.setMulti(multi);
if (hint) {
- entry.setHint(hint.get());
+ entry.setHint(hint.value());
}
return entry;
}()});
diff --git a/src/mongo/s/write_ops/batched_command_response.cpp b/src/mongo/s/write_ops/batched_command_response.cpp
index b9527bf9d45..39249363a66 100644
--- a/src/mongo/s/write_ops/batched_command_response.cpp
+++ b/src/mongo/s/write_ops/batched_command_response.cpp
@@ -384,7 +384,7 @@ void BatchedCommandResponse::unsetErrDetails() {
}
bool BatchedCommandResponse::isErrDetailsSet() const {
- return _writeErrors.is_initialized();
+ return _writeErrors.has_value();
}
size_t BatchedCommandResponse::sizeErrDetails() const {
diff --git a/src/mongo/scripting/mozjs/implscope.cpp b/src/mongo/scripting/mozjs/implscope.cpp
index 4cac3bbc9ff..a02d7aa0688 100644
--- a/src/mongo/scripting/mozjs/implscope.cpp
+++ b/src/mongo/scripting/mozjs/implscope.cpp
@@ -391,7 +391,7 @@ MozJSImplScope::MozRuntime::MozRuntime(const MozJSScriptEngine* engine,
//
// TODO: What if we are running on a platform with very
// large pages, like 4MB?
- const auto available_stack_space = available.get();
+ const auto available_stack_space = available.value();
#if defined(__powerpc64__) && defined(MONGO_CONFIG_DEBUG_BUILD)
// From experimentation, we need a larger reservation of 96k since debug ppc64le
diff --git a/src/mongo/scripting/mozjs/mongo.cpp b/src/mongo/scripting/mozjs/mongo.cpp
index a2ad8272d9e..93f1a8fc150 100644
--- a/src/mongo/scripting/mozjs/mongo.cpp
+++ b/src/mongo/scripting/mozjs/mongo.cpp
@@ -628,7 +628,7 @@ void MongoExternalInfo::construct(JSContext* cx, JS::CallArgs args) {
// the global retryWrites value. This is checked in sessions.js by using the injected
// _shouldRetryWrites() function, which returns true if the --retryWrites flag was passed.
if (retryWrites) {
- o.setBoolean(InternedString::_retryWrites, retryWrites.get());
+ o.setBoolean(InternedString::_retryWrites, retryWrites.value());
}
args.rval().setObjectOrNull(thisv);
diff --git a/src/mongo/shell/encrypted_dbclient_base.cpp b/src/mongo/shell/encrypted_dbclient_base.cpp
index 3c90565d355..baf5fdbf002 100644
--- a/src/mongo/shell/encrypted_dbclient_base.cpp
+++ b/src/mongo/shell/encrypted_dbclient_base.cpp
@@ -818,7 +818,7 @@ std::unique_ptr<DBClientBase> createEncryptedDBClientBase(std::unique_ptr<DBClie
// IDL does not perform a deep copy of BSONObjs when parsing, so we must get an
// owned copy of the schemaMap.
if (encryptionOptions.getSchemaMap()) {
- encryptionOptions.setSchemaMap(encryptionOptions.getSchemaMap().get().getOwned());
+ encryptionOptions.setSchemaMap(encryptionOptions.getSchemaMap().value().getOwned());
}
// This logic tries to extract the client from the args. If the connection object is defined
diff --git a/src/mongo/shell/kms_aws.cpp b/src/mongo/shell/kms_aws.cpp
index f0a7850cbfa..5d81566348a 100644
--- a/src/mongo/shell/kms_aws.cpp
+++ b/src/mongo/shell/kms_aws.cpp
@@ -131,7 +131,7 @@ void AWSKMSService::initRequest(kms_request_t* request, StringData host, StringD
if (!_config.sessionToken.value_or("").empty()) {
// TODO: move this into kms-message
uassertKmsRequest(kms_request_add_header_field(
- request, "X-Amz-Security-Token", _config.sessionToken.get().c_str()));
+ request, "X-Amz-Security-Token", _config.sessionToken.value().c_str()));
}
}
@@ -264,7 +264,7 @@ SecureVector<uint8_t> AWSKMSService::decrypt(ConstDataRange cdr, BSONObj masterK
boost::optional<std::string> toString(boost::optional<StringData> str) {
if (str) {
- return {str.get().toString()};
+ return {str.value().toString()};
}
return boost::none;
}
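toString() above is the common map-an-optional shape: transform the payload when engaged, propagate boost::none otherwise. A generic sketch (hypothetical helper; newer Boost also offers optional::map for this):

#include <boost/optional.hpp>

// Hypothetical generic form of the helper above: apply f to the contained
// value when present, otherwise pass boost::none through unchanged.
template <typename T, typename F>
auto mapOptional(const boost::optional<T>& opt, F&& f)
    -> boost::optional<decltype(f(*opt))> {
    if (opt)
        return f(*opt);
    return boost::none;
}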
@@ -279,7 +279,7 @@ std::unique_ptr<KMSService> AWSKMSService::create(const AwsKMS& config) {
// the CA file.
if (!config.getUrl().value_or("").empty()) {
params.sslCAFile = sslGlobalParams.sslCAFile;
- awsKMS->_server = parseUrl(config.getUrl().get());
+ awsKMS->_server = parseUrl(config.getUrl().value());
}
awsKMS->_sslManager = SSLManagerInterface::create(params, false);
diff --git a/src/mongo/shell/kms_azure.cpp b/src/mongo/shell/kms_azure.cpp
index 1673f966396..459e1f812ae 100644
--- a/src/mongo/shell/kms_azure.cpp
+++ b/src/mongo/shell/kms_azure.cpp
@@ -147,7 +147,7 @@ std::unique_ptr<KMSService> AzureKMSService::create(const AzureKMS& config) {
// Leave the CA file empty so we default to the system CA, but for local testing allow it to
// inherit the CA file.
params.sslCAFile = sslGlobalParams.sslCAFile;
- identityPlatformHostAndPort = parseUrl(config.getIdentityPlatformEndpoint().get());
+ identityPlatformHostAndPort = parseUrl(config.getIdentityPlatformEndpoint().value());
}
azureKMS->_sslManager = SSLManagerInterface::create(params, false);
diff --git a/src/mongo/shell/kms_gcp.cpp b/src/mongo/shell/kms_gcp.cpp
index dfe439bd85e..81ec2737f7e 100644
--- a/src/mongo/shell/kms_gcp.cpp
+++ b/src/mongo/shell/kms_gcp.cpp
@@ -284,7 +284,7 @@ std::unique_ptr<KMSService> GCPKMSService::create(const GcpKMS& config) {
// Leave the CA file empty so we default to the system CA, but for local testing allow it to
// inherit the CA file.
params.sslCAFile = sslGlobalParams.sslCAFile;
- oauthHostAndPort = parseUrl(config.getEndpoint().get());
+ oauthHostAndPort = parseUrl(config.getEndpoint().value());
}
gcpKMS->_sslManager = SSLManagerInterface::create(params, false);
diff --git a/src/mongo/shell/mongo_main.cpp b/src/mongo/shell/mongo_main.cpp
index 7b1dd9dce47..0f5f45f1941 100644
--- a/src/mongo/shell/mongo_main.cpp
+++ b/src/mongo/shell/mongo_main.cpp
@@ -681,7 +681,7 @@ bool mechanismRequiresPassword(const MongoURI& uri) {
if (const auto authMechanisms = uri.getOption("authMechanism")) {
constexpr std::array<StringData, 2> passwordlessMechanisms{auth::kMechanismGSSAPI,
auth::kMechanismMongoX509};
- const std::string& authMechanism = authMechanisms.get();
+ const std::string& authMechanism = authMechanisms.value();
for (const auto& mechanism : passwordlessMechanisms) {
if (mechanism.toString() == authMechanism) {
return false;
@@ -787,14 +787,14 @@ int mongo_main(int argc, char* argv[]) {
if (const auto authMechanisms = parsedURI.getOption("authMechanism")) {
std::stringstream ss;
ss << "DB.prototype._defaultAuthenticationMechanism = \""
- << str::escape(authMechanisms.get()) << "\";" << std::endl;
+ << str::escape(authMechanisms.value()) << "\";" << std::endl;
mongo::shell_utils::dbConnect += ss.str();
}
if (const auto gssapiServiveName = parsedURI.getOption("gssapiServiceName")) {
std::stringstream ss;
ss << "DB.prototype._defaultGssapiServiceName = \""
- << str::escape(gssapiServiveName.get()) << "\";" << std::endl;
+ << str::escape(gssapiServiveName.value()) << "\";" << std::endl;
mongo::shell_utils::dbConnect += ss.str();
}
diff --git a/src/mongo/transport/session_asio.cpp b/src/mongo/transport/session_asio.cpp
index 6f1dd7745f3..7d5daa4d59f 100644
--- a/src/mongo/transport/session_asio.cpp
+++ b/src/mongo/transport/session_asio.cpp
@@ -719,7 +719,7 @@ Future<bool> TransportLayerASIO::ASIOSession::maybeHandshakeSSLForIngress(
};
return doHandshake().then([this](size_t size) {
if (_sslSocket->get_sni()) {
- auto sniName = _sslSocket->get_sni().get();
+ auto sniName = _sslSocket->get_sni().value();
LOGV2_DEBUG(
4908000, 2, "Client connected with SNI extension", "sniName"_attr = sniName);
} else {
diff --git a/src/mongo/transport/transport_layer_asio.cpp b/src/mongo/transport/transport_layer_asio.cpp
index 7b9ab8e80dd..493ef7850a1 100644
--- a/src/mongo/transport/transport_layer_asio.cpp
+++ b/src/mongo/transport/transport_layer_asio.cpp
@@ -719,7 +719,7 @@ StatusWith<TransportLayerASIO::ASIOSessionHandle> TransportLayerASIO::_doSyncCon
std::shared_ptr<const transport::SSLConnectionContext> transientSSLContext;
#ifdef MONGO_CONFIG_SSL
if (transientSSLParams) {
- auto statusOrContext = createTransientSSLContext(transientSSLParams.get());
+ auto statusOrContext = createTransientSSLContext(transientSSLParams.value());
uassertStatusOK(statusOrContext.getStatus());
transientSSLContext = std::move(statusOrContext.getValue());
}
diff --git a/src/mongo/util/exit.cpp b/src/mongo/util/exit.cpp
index 14359a797f9..2a34fb9707f 100644
--- a/src/mongo/util/exit.cpp
+++ b/src/mongo/util/exit.cpp
@@ -71,7 +71,7 @@ void runTasks(decltype(shutdownTasks) tasks, const ShutdownTaskArgs& shutdownArg
// prevent multiple threads from attempting to log that they are exiting. The quickExit() function
// has its own 'quickExitMutex' to prohibit multiple threads from attempting to call _exit().
MONGO_COMPILER_NORETURN void logAndQuickExit_inlock() {
- ExitCode code = shutdownExitCode.get();
+ ExitCode code = shutdownExitCode.value();
LOGV2(23138, "Shutting down with code: {exitCode}", "Shutting down", "exitCode"_attr = code);
quickExit(code);
}
@@ -93,7 +93,7 @@ ExitCode waitForShutdown() {
return shutdownStarted && !shutdownTasksInProgress;
});
- return shutdownExitCode.get();
+ return shutdownExitCode.value();
}
void registerShutdownTask(unique_function<void(const ShutdownTaskArgs&)> task) {
@@ -115,7 +115,7 @@ void shutdown(ExitCode code, const ShutdownTaskArgs& shutdownArgs) {
// Re-entrant calls to shutdown are not allowed.
invariant(shutdownTasksThreadId != stdx::this_thread::get_id());
- ExitCode originallyRequestedCode = shutdownExitCode.get();
+ ExitCode originallyRequestedCode = shutdownExitCode.value();
if (code != originallyRequestedCode) {
LOGV2(23139,
"While running shutdown tasks with the intent to exit with code "
diff --git a/src/mongo/util/net/ssl_manager.cpp b/src/mongo/util/net/ssl_manager.cpp
index 1a9de937a6e..08c97811d08 100644
--- a/src/mongo/util/net/ssl_manager.cpp
+++ b/src/mongo/util/net/ssl_manager.cpp
@@ -363,10 +363,10 @@ void logSSLInfo(const SSLInformationToLog& info,
logCert(info.server, "Server", logNumPEM);
}
if (info.cluster.has_value()) {
- logCert(info.cluster.get(), "Cluster", logNumCluster);
+ logCert(info.cluster.value(), "Cluster", logNumCluster);
}
if (info.crl.has_value()) {
- logCRL(info.crl.get(), logNumCrl);
+ logCRL(info.crl.value(), logNumCrl);
}
}
diff --git a/src/mongo/util/net/ssl_parameters.cpp b/src/mongo/util/net/ssl_parameters.cpp
index 2d6784a5539..60ed9f0921e 100644
--- a/src/mongo/util/net/ssl_parameters.cpp
+++ b/src/mongo/util/net/ssl_parameters.cpp
@@ -128,7 +128,7 @@ void TLSCATrustsSetParameter::append(OperationContext*,
BSONArrayBuilder trusts;
- for (const auto& cait : sslGlobalParams.tlsCATrusts.get()) {
+ for (const auto& cait : sslGlobalParams.tlsCATrusts.value()) {
BSONArrayBuilder roles;
for (const auto& rolename : cait.second) {
diff --git a/src/mongo/util/testing_proctor.cpp b/src/mongo/util/testing_proctor.cpp
index 82bc8fffe2e..d64741f4ea3 100644
--- a/src/mongo/util/testing_proctor.cpp
+++ b/src/mongo/util/testing_proctor.cpp
@@ -49,7 +49,7 @@ bool TestingProctor::isEnabled() const {
uassert(ErrorCodes::NotYetInitialized,
"Cannot check whether testing diagnostics is enabled before it is initialized",
isInitialized());
- return _diagnosticsEnabled.get();
+ return _diagnosticsEnabled.value();
}
void TestingProctor::setEnabled(bool enable) {
@@ -60,7 +60,7 @@ void TestingProctor::setEnabled(bool enable) {
uassert(ErrorCodes::AlreadyInitialized,
"Cannot alter testing diagnostics once initialized",
- _diagnosticsEnabled.get() == enable);
+ _diagnosticsEnabled.value() == enable);
LOGV2(4672601, "Overriding testing diagnostics", "enabled"_attr = enable);
}
diff --git a/src/mongo/util/tracing_support.cpp b/src/mongo/util/tracing_support.cpp
index 5ce6b4f6133..afc60501ca0 100644
--- a/src/mongo/util/tracing_support.cpp
+++ b/src/mongo/util/tracing_support.cpp
@@ -156,7 +156,7 @@ void TracerProvider::initialize(std::unique_ptr<ClockSource> clkSource) { // NO
TracerProvider& TracerProvider::get() { // NOLINT
auto& provider = getTraceProvider();
invariant(provider.has_value(), "not initialized");
- return provider.get();
+ return provider.value();
}
std::shared_ptr<Tracer> TracerProvider::getTracer(std::string name) {
diff --git a/src/mongo/util/tracing_support_test.cpp b/src/mongo/util/tracing_support_test.cpp
index 92e6d3b42df..5e889ba2c1a 100644
--- a/src/mongo/util/tracing_support_test.cpp
+++ b/src/mongo/util/tracing_support_test.cpp
@@ -130,7 +130,7 @@ TEST(TracingSupportTest, BasicUsage) {
<< startTime + 4 * kSpanDuration))
<< "stopped" << startTime + 4 * kSpanDuration))
<< "stopped" << startTime + 4 * kSpanDuration));
- ASSERT_BSONOBJ_EQ(expected, trace.get());
+ ASSERT_BSONOBJ_EQ(expected, trace.value());
}
} // namespace mongo