author     Davis Haupt <davis.haupt@mongodb.com>  2022-06-23 14:55:08 +0000
committer  Davis Haupt <davis.haupt@mongodb.com>  2022-06-23 14:55:08 +0000
commit     0d2029091f28a5e7e6c5fc225937dabf197cdd8b (patch)
tree       b7a8866d3dfc859d0d047331e36ed61e9dcc7346
parent     88d665074f679e024d729f8f0f62eebf2a00bab6 (diff)
parent     57e39f6e07a7aee960f98c4f57c575ff6a203934 (diff)
Merge remote-tracking branch 'origin/master' into davish/SERVER-63099
-rw-r--r--SConstruct31
-rw-r--r--buildscripts/moduleconfig.py16
-rw-r--r--buildscripts/resmokelib/mongod_fuzzer_configs.py5
-rw-r--r--dump_python3.15756.corebin102465536 -> 0 bytes
-rw-r--r--jstests/aggregation/collection_uuid_coll_stats_index_stats.js3
-rw-r--r--jstests/auth/lib/commands_lib.js25
-rw-r--r--jstests/concurrency/fsm_workload_helpers/chunks.js9
-rw-r--r--jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js4
-rw-r--r--jstests/core/api_version_new_50_language_features.js1
-rw-r--r--jstests/core/api_version_new_51_language_features.js4
-rw-r--r--jstests/core/api_version_new_52_language_features.js2
-rw-r--r--jstests/core/collection_uuid_coll_mod.js3
-rw-r--r--jstests/core/collection_uuid_drop.js3
-rw-r--r--jstests/core/collection_uuid_find.js4
-rw-r--r--jstests/core/collection_uuid_index_commands.js3
-rw-r--r--jstests/core/collection_uuid_rename_collection.js3
-rw-r--r--jstests/core/collection_uuid_write_commands.js6
-rw-r--r--jstests/core/geo_parse_err.js124
-rw-r--r--jstests/core/index3.js18
-rw-r--r--jstests/core/views/views_all_commands.js2
-rw-r--r--jstests/libs/cluster_server_parameter_utils.js53
-rw-r--r--jstests/libs/test_background_ops.js51
-rw-r--r--jstests/noPassthrough/change_streams_cluster_parameter.js146
-rw-r--r--jstests/noPassthrough/create_indexes_in_txn_errors_if_already_in_progress.js5
-rw-r--r--jstests/noPassthrough/plan_cache_group_lookup.js236
-rw-r--r--jstests/noPassthrough/plan_cache_replan_group_lookup.js186
-rw-r--r--jstests/replsets/cluster_server_parameter_commands_replset.js3
-rw-r--r--jstests/sharding/cluster_server_parameter_commands_sharded.js3
-rw-r--r--jstests/sharding/compound_hashed_shard_key_sharding_cmds.js6
-rw-r--r--jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js4
-rw-r--r--jstests/sharding/recover_multiple_migrations_on_stepup.js8
-rw-r--r--src/mongo/client/client_deprecated.cpp68
-rw-r--r--src/mongo/client/client_deprecated.h92
-rw-r--r--src/mongo/client/dbclient_base.cpp61
-rw-r--r--src/mongo/client/dbclient_base.h24
-rw-r--r--src/mongo/client/dbclient_connection.cpp61
-rw-r--r--src/mongo/client/dbclient_connection.h33
-rw-r--r--src/mongo/client/dbclient_cursor.cpp216
-rw-r--r--src/mongo/client/dbclient_cursor.h121
-rw-r--r--src/mongo/client/dbclient_mockcursor.cpp2
-rw-r--r--src/mongo/client/dbclient_mockcursor.h3
-rw-r--r--src/mongo/client/dbclient_rs.cpp255
-rw-r--r--src/mongo/client/dbclient_rs.h13
-rw-r--r--src/mongo/client/dbclient_rs_test.cpp223
-rw-r--r--src/mongo/db/SConscript11
-rw-r--r--src/mongo/db/catalog/SConscript1
-rw-r--r--src/mongo/db/catalog/throttle_cursor_test.cpp27
-rw-r--r--src/mongo/db/change_stream_change_collection_manager.cpp28
-rw-r--r--src/mongo/db/change_streams_cluster_parameter.cpp62
-rw-r--r--src/mongo/db/change_streams_cluster_parameter.h42
-rw-r--r--src/mongo/db/change_streams_cluster_parameter.idl (renamed from src/mongo/db/ops/new_write_error_exception_format_feature_flag.idl)41
-rw-r--r--src/mongo/db/change_streams_cluster_parameter_test.cpp78
-rw-r--r--src/mongo/db/cloner.cpp45
-rw-r--r--src/mongo/db/cloner.h2
-rw-r--r--src/mongo/db/commands/cqf/cqf_aggregate.cpp10
-rw-r--r--src/mongo/db/commands/index_filter_commands.cpp2
-rw-r--r--src/mongo/db/commands/plan_cache_commands_test.cpp10
-rw-r--r--src/mongo/db/commands/run_aggregate.cpp2
-rw-r--r--src/mongo/db/commands/user_management_commands.cpp7
-rw-r--r--src/mongo/db/db_raii.cpp8
-rw-r--r--src/mongo/db/db_raii.h1
-rw-r--r--src/mongo/db/exec/multi_plan.cpp9
-rw-r--r--src/mongo/db/exec/plan_cache_util.cpp12
-rw-r--r--src/mongo/db/exec/plan_cache_util.h20
-rw-r--r--src/mongo/db/exec/sbe/SConscript1
-rw-r--r--src/mongo/db/exec/sbe/sbe_test.cpp80
-rw-r--r--src/mongo/db/exec/sbe/stages/branch.cpp8
-rw-r--r--src/mongo/db/exec/sbe/stages/branch.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/bson_scan.cpp14
-rw-r--r--src/mongo/db/exec/sbe/stages/bson_scan.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/check_bounds.cpp14
-rw-r--r--src/mongo/db/exec/sbe/stages/check_bounds.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/co_scan.cpp9
-rw-r--r--src/mongo/db/exec/sbe/stages/co_scan.h4
-rw-r--r--src/mongo/db/exec/sbe/stages/column_scan.cpp8
-rw-r--r--src/mongo/db/exec/sbe/stages/column_scan.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/exchange.cpp19
-rw-r--r--src/mongo/db/exec/sbe/stages/exchange.h10
-rw-r--r--src/mongo/db/exec/sbe/stages/filter.h13
-rw-r--r--src/mongo/db/exec/sbe/stages/hash_agg.cpp8
-rw-r--r--src/mongo/db/exec/sbe/stages/hash_agg.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/hash_join.cpp8
-rw-r--r--src/mongo/db/exec/sbe/stages/hash_join.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/hash_lookup.cpp8
-rw-r--r--src/mongo/db/exec/sbe/stages/hash_lookup.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/ix_scan.cpp11
-rw-r--r--src/mongo/db/exec/sbe/stages/ix_scan.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/limit_skip.cpp7
-rw-r--r--src/mongo/db/exec/sbe/stages/limit_skip.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/loop_join.cpp14
-rw-r--r--src/mongo/db/exec/sbe/stages/loop_join.h6
-rw-r--r--src/mongo/db/exec/sbe/stages/makeobj.cpp10
-rw-r--r--src/mongo/db/exec/sbe/stages/makeobj.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/merge_join.cpp8
-rw-r--r--src/mongo/db/exec/sbe/stages/merge_join.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/project.cpp12
-rw-r--r--src/mongo/db/exec/sbe/stages/project.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/scan.cpp23
-rw-r--r--src/mongo/db/exec/sbe/stages/scan.h9
-rw-r--r--src/mongo/db/exec/sbe/stages/sort.cpp8
-rw-r--r--src/mongo/db/exec/sbe/stages/sort.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/sorted_merge.cpp14
-rw-r--r--src/mongo/db/exec/sbe/stages/sorted_merge.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/spool.cpp27
-rw-r--r--src/mongo/db/exec/sbe/stages/spool.h16
-rw-r--r--src/mongo/db/exec/sbe/stages/stages.h29
-rw-r--r--src/mongo/db/exec/sbe/stages/traverse.cpp8
-rw-r--r--src/mongo/db/exec/sbe/stages/traverse.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/union.cpp12
-rw-r--r--src/mongo/db/exec/sbe/stages/union.h3
-rw-r--r--src/mongo/db/exec/sbe/stages/unique.cpp8
-rw-r--r--src/mongo/db/exec/sbe/stages/unique.h5
-rw-r--r--src/mongo/db/exec/sbe/stages/unwind.cpp8
-rw-r--r--src/mongo/db/exec/sbe/stages/unwind.h3
-rw-r--r--src/mongo/db/exec/sbe/vm/vm.cpp30
-rw-r--r--src/mongo/db/exec/sbe/vm/vm.h19
-rw-r--r--src/mongo/db/exec/write_stage_common.cpp22
-rw-r--r--src/mongo/db/exec/write_stage_common.h2
-rw-r--r--src/mongo/db/fle_crud.cpp1
-rw-r--r--src/mongo/db/geo/geoparser.cpp121
-rw-r--r--src/mongo/db/namespace_string.cpp1
-rw-r--r--src/mongo/db/ops/SConscript1
-rw-r--r--src/mongo/db/ops/write_ops.cpp39
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp19
-rw-r--r--src/mongo/db/pipeline/abt/pipeline_test.cpp36
-rw-r--r--src/mongo/db/pipeline/aggregation_context_fixture.h9
-rw-r--r--src/mongo/db/pipeline/change_stream_pre_image_helpers.cpp38
-rw-r--r--src/mongo/db/pipeline/document_source_cursor.cpp20
-rw-r--r--src/mongo/db/pipeline/document_source_lookup_test.cpp39
-rw-r--r--src/mongo/db/pipeline/document_source_union_with_test.cpp136
-rw-r--r--src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp3
-rw-r--r--src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp6
-rw-r--r--src/mongo/db/pipeline/sharded_union_test.cpp50
-rw-r--r--src/mongo/db/query/canonical_query.cpp7
-rw-r--r--src/mongo/db/query/canonical_query_encoder.cpp34
-rw-r--r--src/mongo/db/query/canonical_query_encoder.h6
-rw-r--r--src/mongo/db/query/canonical_query_encoder_test.cpp174
-rw-r--r--src/mongo/db/query/explain.cpp44
-rw-r--r--src/mongo/db/query/explain.h22
-rw-r--r--src/mongo/db/query/get_executor.cpp40
-rw-r--r--src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp39
-rw-r--r--src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp54
-rw-r--r--src/mongo/db/query/optimizer/rewrites/const_eval.cpp2
-rw-r--r--src/mongo/db/query/optimizer/utils/utils.cpp183
-rw-r--r--src/mongo/db/query/optimizer/utils/utils.h10
-rw-r--r--src/mongo/db/query/plan_cache_key_factory.cpp72
-rw-r--r--src/mongo/db/query/plan_cache_key_factory.h11
-rw-r--r--src/mongo/db/query/query_request_helper.h5
-rw-r--r--src/mongo/db/query/sbe_cached_solution_planner.cpp17
-rw-r--r--src/mongo/db/query/sbe_multi_planner.cpp30
-rw-r--r--src/mongo/db/query/sbe_plan_cache.cpp13
-rw-r--r--src/mongo/db/query/sbe_plan_cache.h134
-rw-r--r--src/mongo/db/query/sbe_sub_planner.cpp5
-rw-r--r--src/mongo/db/repl/apply_ops.cpp1
-rw-r--r--src/mongo/db/repl/collection_cloner.cpp45
-rw-r--r--src/mongo/db/repl/collection_cloner.h6
-rw-r--r--src/mongo/db/repl/oplog_applier_impl.cpp15
-rw-r--r--src/mongo/db/repl/oplog_applier_impl_test.cpp36
-rw-r--r--src/mongo/db/repl/oplog_fetcher.cpp76
-rw-r--r--src/mongo/db/repl/oplog_fetcher.h9
-rw-r--r--src/mongo/db/repl/oplog_fetcher_test.cpp43
-rw-r--r--src/mongo/db/repl/tenant_collection_cloner.cpp55
-rw-r--r--src/mongo/db/repl/tenant_collection_cloner.h6
-rw-r--r--src/mongo/db/repl/tenant_file_cloner.cpp13
-rw-r--r--src/mongo/db/repl/tenant_file_cloner.h2
-rw-r--r--src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp9
-rw-r--r--src/mongo/db/s/SConscript1
-rw-r--r--src/mongo/db/s/balancer/balance_stats_test.cpp4
-rw-r--r--src/mongo/db/s/balancer/balancer.cpp27
-rw-r--r--src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp10
-rw-r--r--src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp10
-rw-r--r--src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp3
-rw-r--r--src/mongo/db/s/balancer/type_migration_test.cpp17
-rw-r--r--src/mongo/db/s/collection_metadata_filtering_test.cpp4
-rw-r--r--src/mongo/db/s/collection_metadata_test.cpp4
-rw-r--r--src/mongo/db/s/collmod_coordinator.cpp43
-rw-r--r--src/mongo/db/s/collmod_coordinator.h4
-rw-r--r--src/mongo/db/s/commit_chunk_migration.idl85
-rw-r--r--src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp47
-rw-r--r--src/mongo/db/s/compact_structured_encryption_data_coordinator.h3
-rw-r--r--src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp109
-rw-r--r--src/mongo/db/s/config/initial_split_policy.cpp47
-rw-r--r--src/mongo/db/s/config/initial_split_policy_test.cpp6
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp10
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp120
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp27
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp29
-rw-r--r--src/mongo/db/s/create_collection_coordinator.cpp25
-rw-r--r--src/mongo/db/s/create_collection_coordinator.h6
-rw-r--r--src/mongo/db/s/drop_collection_coordinator.cpp25
-rw-r--r--src/mongo/db/s/drop_collection_coordinator.h6
-rw-r--r--src/mongo/db/s/drop_database_coordinator.cpp24
-rw-r--r--src/mongo/db/s/drop_database_coordinator.h7
-rw-r--r--src/mongo/db/s/metadata_manager_test.cpp4
-rw-r--r--src/mongo/db/s/migration_source_manager.cpp22
-rw-r--r--src/mongo/db/s/move_primary_coordinator.cpp22
-rw-r--r--src/mongo/db/s/move_primary_coordinator.h6
-rw-r--r--src/mongo/db/s/operation_sharding_state_test.cpp10
-rw-r--r--src/mongo/db/s/refine_collection_shard_key_coordinator.cpp22
-rw-r--r--src/mongo/db/s/refine_collection_shard_key_coordinator.h4
-rw-r--r--src/mongo/db/s/rename_collection_coordinator.cpp28
-rw-r--r--src/mongo/db/s/rename_collection_coordinator.h4
-rw-r--r--src/mongo/db/s/reshard_collection_coordinator.cpp21
-rw-r--r--src/mongo/db/s/reshard_collection_coordinator.h4
-rw-r--r--src/mongo/db/s/resharding/resharding_coordinator_service.cpp26
-rw-r--r--src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp6
-rw-r--r--src/mongo/db/s/resharding/resharding_data_replication_test.cpp9
-rw-r--r--src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp6
-rw-r--r--src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp14
-rw-r--r--src/mongo/db/s/resharding/resharding_recipient_service_test.cpp2
-rw-r--r--src/mongo/db/s/set_allow_migrations_coordinator.cpp20
-rw-r--r--src/mongo/db/s/set_allow_migrations_coordinator.h6
-rw-r--r--src/mongo/db/s/shard_metadata_util.cpp3
-rw-r--r--src/mongo/db/s/shard_metadata_util_test.cpp12
-rw-r--r--src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp12
-rw-r--r--src/mongo/db/s/sharding_ddl_coordinator.h52
-rw-r--r--src/mongo/db/s/sharding_ddl_util_test.cpp12
-rw-r--r--src/mongo/db/s/sharding_server_status.cpp10
-rw-r--r--src/mongo/db/s/sharding_write_router_bm.cpp2
-rw-r--r--src/mongo/db/s/transaction_coordinator_service.cpp4
-rw-r--r--src/mongo/dbtests/mock/mock_dbclient_connection.cpp49
-rw-r--r--src/mongo/dbtests/mock/mock_dbclient_connection.h12
-rw-r--r--src/mongo/dbtests/mock/mock_remote_db_server.cpp14
-rw-r--r--src/mongo/dbtests/mock/mock_remote_db_server.h15
-rw-r--r--src/mongo/dbtests/mock_dbclient_conn_test.cpp39
-rw-r--r--src/mongo/s/SConscript3
-rw-r--r--src/mongo/s/append_raw_responses_test.cpp7
-rw-r--r--src/mongo/s/catalog/type_chunk.cpp10
-rw-r--r--src/mongo/s/catalog/type_chunk_test.cpp30
-rw-r--r--src/mongo/s/catalog_cache_refresh_test.cpp36
-rw-r--r--src/mongo/s/catalog_cache_test.cpp15
-rw-r--r--src/mongo/s/catalog_cache_test_fixture.cpp10
-rw-r--r--src/mongo/s/chunk_manager_query_test.cpp6
-rw-r--r--src/mongo/s/chunk_manager_refresh_bm.cpp6
-rw-r--r--src/mongo/s/chunk_map_test.cpp11
-rw-r--r--src/mongo/s/chunk_test.cpp13
-rw-r--r--src/mongo/s/chunk_version.h4
-rw-r--r--src/mongo/s/chunk_version_test.cpp46
-rw-r--r--src/mongo/s/commands/cluster_commands.idl49
-rw-r--r--src/mongo/s/commands/cluster_move_chunk_cmd.cpp251
-rw-r--r--src/mongo/s/comparable_chunk_version_test.cpp50
-rw-r--r--src/mongo/s/query/sharded_agg_test_fixture.h2
-rw-r--r--src/mongo/s/request_types/commit_chunk_migration_request_test.cpp93
-rw-r--r--src/mongo/s/request_types/commit_chunk_migration_request_type.cpp172
-rw-r--r--src/mongo/s/request_types/commit_chunk_migration_request_type.h109
-rw-r--r--src/mongo/s/request_types/move_chunk_request_test.cpp6
-rw-r--r--src/mongo/s/routing_table_history_test.cpp94
-rw-r--r--src/mongo/s/s_sharding_server_status.cpp10
-rw-r--r--src/mongo/s/stale_shard_version_helpers_test.cpp8
-rw-r--r--src/mongo/s/write_ops/batch_write_exec_test.cpp220
-rw-r--r--src/mongo/s/write_ops/batched_command_request_test.cpp6
-rw-r--r--src/mongo/s/write_ops/batched_command_response_test.cpp41
-rw-r--r--src/mongo/s/write_ops/write_op_test.cpp34
-rw-r--r--src/mongo/scripting/mozjs/mongo.cpp1
-rw-r--r--src/mongo/shell/encrypted_dbclient_base.cpp21
-rw-r--r--src/mongo/shell/encrypted_dbclient_base.h12
-rw-r--r--src/mongo/shell/shardingtest.js9
-rw-r--r--src/mongo/util/assert_util.h31
-rw-r--r--src/mongo/util/assert_util_test.cpp14
-rw-r--r--src/third_party/wiredtiger/import.data2
-rw-r--r--src/third_party/wiredtiger/src/btree/bt_split.c11
-rw-r--r--src/third_party/wiredtiger/src/docs/explain-isolation.dox4
-rw-r--r--src/third_party/wiredtiger/src/docs/timestamp-txn.dox111
-rw-r--r--src/third_party/wiredtiger/src/docs/transactions_api.dox6
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/configs/cache_resize_default.txt4
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/src/component/operation_tracker.cpp8
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/src/component/operation_tracker.h4
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/src/main/thread_worker.cpp9
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/tests/cache_resize.cpp26
-rw-r--r--src/third_party/wiredtiger/test/cppsuite/tests/test_template.cpp4
-rwxr-xr-xsrc/third_party/wiredtiger/test/evergreen.yml11
271 files changed, 3779 insertions, 3883 deletions
diff --git a/SConstruct b/SConstruct
index f4675381567..387397f26d2 100644
--- a/SConstruct
+++ b/SConstruct
@@ -2922,15 +2922,25 @@ if get_option("system-boost-lib-search-suffixes") is not None:
# discover modules, and load the (python) module for each module's build.py
mongo_modules = moduleconfig.discover_modules('src/mongo/db/modules', get_option('modules'))
-if get_option('ninja') != 'disabled':
- for module in mongo_modules:
- if hasattr(module, 'NinjaFile'):
- env.FatalError(
- textwrap.dedent("""\
- ERROR: Ninja tool option '--ninja' should not be used with the ninja module.
- Remove the ninja module directory or use '--modules= ' to select no modules.
- If using enterprise module, explicitly set '--modules=<name-of-enterprise-module>' to exclude the ninja module."""
- ))
+has_ninja_module = False
+for module in mongo_modules:
+ if hasattr(module, 'NinjaFile'):
+ has_ninja_module = True
+ break
+
+if get_option('ninja') != 'disabled' and has_ninja_module:
+ env.FatalError(
+ textwrap.dedent("""\
+ ERROR: Ninja tool option '--ninja' should not be used with the ninja module.
+ Using both options simultaneously may clobber build.ninja files.
+ Remove the ninja module directory or use '--modules= ' to select no modules.
+ If using enterprise module, explicitly set '--modules=<name-of-enterprise-module>' to exclude the ninja module."""
+ ))
+
+if has_ninja_module:
+ print(
+ "WARNING: You are attempting to use the unsupported/legacy ninja module, instead of the integrated ninja generator. You are strongly encouraged to remove the ninja module from your module list and invoke scons with --ninja generate-ninja"
+ )
# --- check system ---
ssl_provider = None
@@ -3549,7 +3559,8 @@ def doConfigure(myenv):
if myenv.ToolchainIs('msvc'):
if get_option('cxx-std') == "17":
- myenv.AppendUnique(CCFLAGS=['/std:c++17'])
+ myenv.AppendUnique(CCFLAGS=['/std:c++17',
+ '/Zc:lambda']) # /Zc:lambda is implied by /std:c++20
elif get_option('cxx-std') == "20":
myenv.AppendUnique(CCFLAGS=['/std:c++20'])
else:
diff --git a/buildscripts/moduleconfig.py b/buildscripts/moduleconfig.py
index b31a9dbf8db..b4d0bba0490 100644
--- a/buildscripts/moduleconfig.py
+++ b/buildscripts/moduleconfig.py
@@ -33,16 +33,26 @@ import os
def discover_modules(module_root, allowed_modules):
+ # pylint: disable=too-many-branches
"""Scan module_root for subdirectories that look like MongoDB modules.
Return a list of imported build.py module objects.
"""
found_modules = []
+ found_module_names = []
if allowed_modules is not None:
allowed_modules = allowed_modules.split(',')
+ # When `--modules=` is passed, the split on empty string is represented
+ # in memory as ['']
+ if allowed_modules == ['']:
+ allowed_modules = []
if not os.path.isdir(module_root):
+ if allowed_modules:
+ raise RuntimeError(
+ f"Requested the following modules: {allowed_modules}, but the module root '{module_root}' could not be found. Check the module root, or remove the module from the scons invocation."
+ )
return found_modules
for name in os.listdir(module_root):
@@ -66,11 +76,17 @@ def discover_modules(module_root, allowed_modules):
if getattr(module, "name", None) is None:
module.name = name
found_modules.append(module)
+ found_module_names.append(name)
finally:
fp.close()
except (FileNotFoundError, IOError):
pass
+ if allowed_modules is not None:
+ missing_modules = set(allowed_modules) - set(found_module_names)
+ if missing_modules:
+ raise RuntimeError(f"Failed to locate all modules. Could not find: {missing_modules}")
+
return found_modules
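
The guard added above hinges on a Python quirk: splitting the empty string on ',' yields [''] rather than an empty list, so a bare '--modules=' would otherwise look like a request for a module literally named ''. A minimal standalone sketch of that normalization and the new missing-module check (hypothetical helper names, not the real SCons-aware discover_modules):

def normalize_allowed_modules(allowed_modules):
    # None means "no restriction"; '--modules=' arrives as the empty string,
    # and ''.split(',') == [''], which must be treated as "no modules selected".
    if allowed_modules is None:
        return None
    parts = allowed_modules.split(',')
    return [] if parts == [''] else parts

def check_all_modules_found(allowed_modules, found_module_names):
    # Mirrors the new validation: every explicitly requested module must be discovered.
    if allowed_modules is None:
        return
    missing = set(allowed_modules) - set(found_module_names)
    if missing:
        raise RuntimeError(f"Failed to locate all modules. Could not find: {missing}")

assert normalize_allowed_modules('') == []                       # '--modules=' selects nothing
assert normalize_allowed_modules('enterprise') == ['enterprise']
check_all_modules_found(['enterprise'], ['enterprise'])          # passes silently
# check_all_modules_found(['enterprise'], []) would raise RuntimeError
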
diff --git a/buildscripts/resmokelib/mongod_fuzzer_configs.py b/buildscripts/resmokelib/mongod_fuzzer_configs.py
index ec84d6c5a4e..a9750997fef 100644
--- a/buildscripts/resmokelib/mongod_fuzzer_configs.py
+++ b/buildscripts/resmokelib/mongod_fuzzer_configs.py
@@ -11,13 +11,16 @@ def generate_eviction_configs(rng):
eviction_trigger = rng.randint(eviction_target + 1, 99)
# Fuzz eviction_dirty_target and trigger both as relative and absolute values
- target_bytes_min = 10 * 1024 * 1024 # 10MB
+ target_bytes_min = 50 * 1024 * 1024 # 50MB # 5% of 1GB default cache size on Evergreen
target_bytes_max = 256 * 1024 * 1024 # 256MB # 1GB default cache size on Evergreen
eviction_dirty_target = rng.choice(
[rng.randint(5, 50), rng.randint(target_bytes_min, target_bytes_max)])
trigger_max = 75 if eviction_dirty_target <= 50 else target_bytes_max
eviction_dirty_trigger = rng.randint(eviction_dirty_target + 1, trigger_max)
+ assert eviction_dirty_trigger > eviction_dirty_target
+ assert eviction_dirty_trigger <= trigger_max
+
close_idle_time_secs = rng.randint(1, 100)
close_handle_minimum = rng.randint(0, 1000)
close_scan_interval = rng.randint(1, 100)
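
The assertions added above pin down the invariant the fuzzer relies on: the dirty-eviction trigger is strictly greater than the dirty-eviction target and never exceeds the chosen maximum, whether both are fuzzed as percentages or as absolute byte counts. A small self-contained sketch of that sampling logic (an assumed simplification of generate_eviction_configs, with the byte-valued target kept strictly below the maximum so a larger trigger always exists):

import random

def sample_dirty_eviction(rng):
    # Target is fuzzed either as a percentage (5-50) or as bytes (50MB-256MB).
    target_bytes_min = 50 * 1024 * 1024    # 50MB, ~5% of the 1GB default cache on Evergreen
    target_bytes_max = 256 * 1024 * 1024   # 256MB
    dirty_target = rng.choice(
        [rng.randint(5, 50), rng.randint(target_bytes_min, target_bytes_max - 1)])

    # A percentage target must be paired with a percentage trigger (<= 75);
    # an absolute target is paired with an absolute trigger (<= 256MB).
    trigger_max = 75 if dirty_target <= 50 else target_bytes_max
    dirty_trigger = rng.randint(dirty_target + 1, trigger_max)

    assert dirty_target < dirty_trigger <= trigger_max
    return dirty_target, dirty_trigger

rng = random.Random(42)
for _ in range(1000):
    sample_dirty_eviction(rng)  # the assertions never fire
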
diff --git a/dump_python3.15756.core b/dump_python3.15756.core
deleted file mode 100644
index c2e4a6c0961..00000000000
--- a/dump_python3.15756.core
+++ /dev/null
Binary files differ
diff --git a/jstests/aggregation/collection_uuid_coll_stats_index_stats.js b/jstests/aggregation/collection_uuid_coll_stats_index_stats.js
index 5aa92524652..b0779a310f4 100644
--- a/jstests/aggregation/collection_uuid_coll_stats_index_stats.js
+++ b/jstests/aggregation/collection_uuid_coll_stats_index_stats.js
@@ -49,10 +49,11 @@ const testCommand = function(cmd, cmdObj) {
jsTestLog("The command '" + cmd +
"' fails when the provided UUID corresponds to a different collection, even if the " +
"provided namespace does not exist.");
- coll2.drop();
+ assert.commandWorked(testDB.runCommand({drop: coll2.getName()}));
res =
assert.commandFailedWithCode(testDB.runCommand(cmdObj), ErrorCodes.CollectionUUIDMismatch);
validateErrorResponse(res, testDB.getName(), uuid, coll2.getName(), coll.getName());
+ assert(!testDB.getCollectionNames().includes(coll2.getName()));
jsTestLog("The command '" + cmd + "' succeeds on view when no UUID is provided.");
const viewName = "view";
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index d4941a18b1f..f67108737f0 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -3028,8 +3028,27 @@ var authCommandsLib = {
]
},
{
- testname: "_configsvrCommitChunkMigration",
- command: {_configsvrCommitChunkMigration: "x.y"},
+ testname: "_configsvrCommitChunkMigration",
+ command: {
+ _configsvrCommitChunkMigration: "db.fooHashed",
+ fromShard: "move_chunk_basic-rs0",
+ toShard: "move_chunk_basic-rs1",
+ migratedChunk: {
+ lastmod: {
+ e: new ObjectId('62b052ac7f5653479a67a54f'),
+ t: new Timestamp(1655722668, 22),
+ v: new Timestamp(1, 0)
+ },
+ min: {_id: MinKey},
+ max: {_id: -4611686018427387902}
+ },
+ fromShardCollectionVersion: {
+ e: new ObjectId('62b052ac7f5653479a67a54f'),
+ t: new Timestamp(1655722668, 22),
+ v: new Timestamp(1, 3)
+ },
+ validAfter: new Timestamp(1655722670, 6)
+ },
skipSharded: true,
expectFail: true,
testcases: [
@@ -5010,7 +5029,7 @@ var authCommandsLib = {
},
{
testname: "s_moveChunk",
- command: {moveChunk: "test.x"},
+ command: {moveChunk: "test.x", find:{}, to:"a"},
skipUnlessSharded: true,
testcases: [
{
diff --git a/jstests/concurrency/fsm_workload_helpers/chunks.js b/jstests/concurrency/fsm_workload_helpers/chunks.js
index caa84a6c38c..2c71eda6a87 100644
--- a/jstests/concurrency/fsm_workload_helpers/chunks.js
+++ b/jstests/concurrency/fsm_workload_helpers/chunks.js
@@ -70,13 +70,16 @@ var ChunkHelper = (function() {
moveChunk: db[collName].getFullName(),
bounds: bounds,
to: toShard,
- _waitForDelete: waitForDelete
};
+ if (waitForDelete != null) {
+ cmd._waitForDelete = waitForDelete;
+ }
+
// Using _secondaryThrottle adds coverage for additional waits for write concern on the
// recipient during cloning.
- if (secondaryThrottle) {
- cmd._secondaryThrottle = true;
+ if (secondaryThrottle != null) {
+ cmd._secondaryThrottle = secondaryThrottle;
cmd.writeConcern = {w: "majority"}; // _secondaryThrottle requires a write concern.
}
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js
index b1d4929b764..9f4ff6a31e1 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js
@@ -2,8 +2,10 @@
/**
* Verifies the transactions server status metrics on mongos while running transactions.
+ * Temporarily disabled for BF-24311.
*
- * @tags: [requires_sharding, assumes_snapshot_transactions, uses_transactions]
+ * @tags: [__TEMPORARILY_DISABLED__, requires_sharding, assumes_snapshot_transactions,
+ * uses_transactions]
*/
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
diff --git a/jstests/core/api_version_new_50_language_features.js b/jstests/core/api_version_new_50_language_features.js
index cda462fdff3..8d73124b2eb 100644
--- a/jstests/core/api_version_new_50_language_features.js
+++ b/jstests/core/api_version_new_50_language_features.js
@@ -2,6 +2,7 @@
* Tests that language features introduced in version 4.9 or 5.0 are included in API Version 1.
*
* @tags: [
+ * requires_fcv_60,
* uses_api_parameters,
* ]
*/
diff --git a/jstests/core/api_version_new_51_language_features.js b/jstests/core/api_version_new_51_language_features.js
index a86d6007b41..48e73d7e052 100644
--- a/jstests/core/api_version_new_51_language_features.js
+++ b/jstests/core/api_version_new_51_language_features.js
@@ -2,8 +2,8 @@
* Tests that language features introduced in version 5.1 are included in API Version 1.
*
* @tags: [
- * requires_fcv_51,
- * uses_api_parameters,
+ * requires_fcv_60,
+ * uses_api_parameters
* ]
*/
diff --git a/jstests/core/api_version_new_52_language_features.js b/jstests/core/api_version_new_52_language_features.js
index 91fe800f6d8..f37666157dd 100644
--- a/jstests/core/api_version_new_52_language_features.js
+++ b/jstests/core/api_version_new_52_language_features.js
@@ -2,7 +2,7 @@
* Tests that language features introduced in version 5.2 are included in API Version 1.
*
* @tags: [
- * requires_fcv_52,
+ * requires_fcv_60,
* uses_api_parameters,
* ]
*/
diff --git a/jstests/core/collection_uuid_coll_mod.js b/jstests/core/collection_uuid_coll_mod.js
index 52da75bb345..2e984d81713 100644
--- a/jstests/core/collection_uuid_coll_mod.js
+++ b/jstests/core/collection_uuid_coll_mod.js
@@ -57,7 +57,7 @@ assert.eq(res.actualCollection, null);
// 5. The command fails when the provided UUID corresponds to a different collection, even if the
// provided namespace does not exist.
-coll2.drop();
+assert.commandWorked(testDB.runCommand({drop: coll2.getName()}));
res = assert.commandFailedWithCode(
testDB.runCommand({collMod: coll2.getName(), collectionUUID: uuid}),
ErrorCodes.CollectionUUIDMismatch);
@@ -65,4 +65,5 @@ assert.eq(res.db, testDB.getName());
assert.eq(res.collectionUUID, uuid);
assert.eq(res.expectedCollection, coll2.getName());
assert.eq(res.actualCollection, coll.getName());
+assert(!testDB.getCollectionNames().includes(coll2.getName()));
})();
diff --git a/jstests/core/collection_uuid_drop.js b/jstests/core/collection_uuid_drop.js
index 276f591afdf..747a4b114ed 100644
--- a/jstests/core/collection_uuid_drop.js
+++ b/jstests/core/collection_uuid_drop.js
@@ -58,13 +58,14 @@ assert.eq(res.actualCollection, null);
// The command fails when the provided UUID corresponds to a different collection, even if the
// provided namespace does not exist.
-coll2.drop();
+assert.commandWorked(testDB.runCommand({drop: coll2.getName()}));
res = assert.commandFailedWithCode(testDB.runCommand({drop: coll2.getName(), collectionUUID: uuid}),
ErrorCodes.CollectionUUIDMismatch);
assert.eq(res.db, testDB.getName());
assert.eq(res.collectionUUID, uuid);
assert.eq(res.expectedCollection, coll2.getName());
assert.eq(res.actualCollection, coll.getName());
+assert(!testDB.getCollectionNames().includes(coll2.getName()));
// The command fails when the provided UUID corresponds to a different collection, even if the
// provided namespace is a view.
diff --git a/jstests/core/collection_uuid_find.js b/jstests/core/collection_uuid_find.js
index f3ce69f0e34..93d6d011266 100644
--- a/jstests/core/collection_uuid_find.js
+++ b/jstests/core/collection_uuid_find.js
@@ -56,13 +56,15 @@ assert.eq(res.actualCollection, null);
// The command fails when the provided UUID corresponds to a different collection, even if the
// provided namespace does not exist.
-coll2.drop();
+assert.commandWorkedOrFailedWithCode(testDB.runCommand({drop: coll2.getName()}),
+ ErrorCodes.NamespaceNotFound);
res = assert.commandFailedWithCode(testDB.runCommand({find: coll2.getName(), collectionUUID: uuid}),
ErrorCodes.CollectionUUIDMismatch);
assert.eq(res.db, testDB.getName());
assert.eq(res.collectionUUID, uuid);
assert.eq(res.expectedCollection, coll2.getName());
assert.eq(res.actualCollection, coll.getName());
+assert(!testDB.getCollectionNames().includes(coll2.getName()));
// The command fails when the provided UUID corresponds to a different collection, even if the
// provided namespace is a view.
diff --git a/jstests/core/collection_uuid_index_commands.js b/jstests/core/collection_uuid_index_commands.js
index a323859144c..76ee83d8336 100644
--- a/jstests/core/collection_uuid_index_commands.js
+++ b/jstests/core/collection_uuid_index_commands.js
@@ -82,10 +82,11 @@ const testCommand = function(cmd, cmdObj) {
jsTestLog("The command '" + cmd +
"' fails when the provided UUID corresponds to a different collection, even if the " +
"provided namespace does not exist.");
- coll2.drop();
+ assert.commandWorked(testDB.runCommand({drop: coll2.getName()}));
res =
assert.commandFailedWithCode(testDB.runCommand(cmdObj), ErrorCodes.CollectionUUIDMismatch);
validateErrorResponse(res, testDB.getName(), uuid, coll2.getName(), coll.getName());
+ assert(!testDB.getCollectionNames().includes(coll2.getName()));
jsTestLog("Only collections in the same database are specified by actualCollection.");
const otherDB = testDB.getSiblingDB(testDB.getName() + '_2');
diff --git a/jstests/core/collection_uuid_rename_collection.js b/jstests/core/collection_uuid_rename_collection.js
index 3dc99fe98ab..85e8507c9d2 100644
--- a/jstests/core/collection_uuid_rename_collection.js
+++ b/jstests/core/collection_uuid_rename_collection.js
@@ -162,7 +162,7 @@ assert.eq(res.actualCollection, null);
// The command fails when the provided UUID corresponds to a different collection, even if the
// provided source namespace does not exist.
-coll2.drop();
+assert.commandWorked(testDB.runCommand({drop: coll2.getName()}));
res = assert.commandFailedWithCode(testDB.adminCommand({
renameCollection: coll2.getFullName(),
to: coll3.getFullName(),
@@ -174,6 +174,7 @@ assert.eq(res.db, testDB.getName());
assert.eq(res.collectionUUID, uuid(coll));
assert.eq(res.expectedCollection, coll2.getName());
assert.eq(res.actualCollection, coll.getName());
+assert(!testDB.getCollectionNames().includes(coll2.getName()));
// The collectionUUID parameter cannot be provided when renaming a collection between databases.
const otherDBColl = db.getSiblingDB(jsTestName() + '_2').coll;
diff --git a/jstests/core/collection_uuid_write_commands.js b/jstests/core/collection_uuid_write_commands.js
index 03bd0b09ae7..0ab9794df6f 100644
--- a/jstests/core/collection_uuid_write_commands.js
+++ b/jstests/core/collection_uuid_write_commands.js
@@ -57,10 +57,12 @@ var testCommand = function(cmd, cmdObj) {
jsTestLog("The command '" + cmd +
"' fails when the provided UUID corresponds to a different collection, even if the " +
"provided namespace does not exist.");
- coll2.drop();
+ assert.commandWorkedOrFailedWithCode(testDB.runCommand({drop: coll2.getName()}),
+ ErrorCodes.NamespaceNotFound);
res =
assert.commandFailedWithCode(testDB.runCommand(cmdObj), ErrorCodes.CollectionUUIDMismatch);
validateErrorResponse(res, testDB.getName(), uuid, coll2.getName(), coll.getName());
+ assert(!testDB.getCollectionNames().includes(coll2.getName()));
jsTestLog("Only collections in the same database are specified by actualCollection.");
const otherDB = testDB.getSiblingDB(testDB.getName() + '_2');
@@ -75,5 +77,7 @@ var testCommand = function(cmd, cmdObj) {
testCommand("insert", {insert: "", documents: [{inserted: true}]});
testCommand("update", {update: "", updates: [{q: {_id: 0}, u: {$set: {updated: true}}}]});
+testCommand("update",
+ {update: "", updates: [{q: {_id: 0}, u: {$set: {updated: true}}, upsert: true}]});
testCommand("delete", {delete: "", deletes: [{q: {_id: 0}, limit: 1}]});
})();
diff --git a/jstests/core/geo_parse_err.js b/jstests/core/geo_parse_err.js
new file mode 100644
index 00000000000..73bc451bd7c
--- /dev/null
+++ b/jstests/core/geo_parse_err.js
@@ -0,0 +1,124 @@
+/**
+ * Test the error messages users get when creating geo objects. For example:
+ * - Do we get the error message we expect when:
+ * - We insert something of a different type than an array of doubles for coordinates?
+ * - When the number of loops in a simple polygon exceeds 1?
+ * @tags: [
+ * multiversion_incompatible
+ * ]
+ */
+
+(function() {
+"use strict";
+let t = db.geo_parse_err;
+t.drop();
+
+const indexname = "2dsphere";
+const bigCRS = {
+ type: "name",
+ properties: {name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}
+};
+
+t.createIndex({loc: indexname});
+
+// parseFlatPoint
+let err = t.insert({loc: {type: "Point", coordinates: "hello"}});
+assert.includes(err.getWriteError().errmsg,
+ 'Point must be an array or object, instead got type string');
+
+err = t.insert({loc: {type: "Point", coordinates: ["hello", 5]}});
+assert.includes(err.getWriteError().errmsg,
+ "Point must only contain numeric elements, instead got type string");
+
+err = t.insert({loc: {type: "Point", coordinates: [5 / 0, 5]}});
+assert.includes(err.getWriteError().errmsg, "Point coordinates must be finite numbers");
+
+// parseGeoJSONCoordinate
+err = t.insert({loc: {type: "LineString", coordinates: [5, 5]}});
+assert.includes(err.getWriteError().errmsg,
+ "GeoJSON coordinates must be an array, instead got type double");
+
+// parseArrayOfCoordinates
+err = t.insert({loc: {type: "LineString", coordinates: 5}});
+assert.includes(err.getWriteError().errmsg,
+ "GeoJSON coordinates must be an array of coordinates, instead got type double");
+// isLoopClosed
+err = t.insert({loc: {type: "Polygon", coordinates: [[[0, 0], [1, 2], [2, 3]]]}});
+assert.includes(err.getWriteError().errmsg,
+ "Loop is not closed, first vertex does not equal last vertex:");
+
+// parseGeoJSONPolygonCoordinates
+err = t.insert({loc: {type: "Polygon", coordinates: "hi"}});
+assert.includes(err.getWriteError().errmsg,
+ "Polygon coordinates must be an array, instead got type string");
+
+err = t.insert({loc: {type: "Polygon", coordinates: [[[0, 0], [1, 2], [0, 0]]]}});
+assert.includes(err.getWriteError().errmsg,
+ "Loop must have at least 3 different vertices, 2 unique vertices were provided:");
+
+// parseBigSimplePolygonCoordinates
+err = t.insert({loc: {type: "Polygon", coordinates: "", crs: bigCRS}});
+assert.includes(err.getWriteError().errmsg,
+ "Coordinates of polygon must be an array, instead got type string");
+
+err = t.insert({
+ loc: {
+ type: "Polygon",
+ coordinates:
+ [[[10.0, 10.0], [-10.0, 10.0], [-10.0, -10.0], [10.0, -10.0], [10.0, 10.0]], []],
+ crs: bigCRS
+ }
+});
+assert.includes(err.getWriteError().errmsg,
+ "Only one simple loop is allowed in a big polygon, instead provided 2");
+err = t.insert({
+ loc: {type: "Polygon", coordinates: [[[10.0, 10.0], [-10.0, 10.0], [10.0, 10.0]]], crs: bigCRS}
+});
+assert.includes(err.getWriteError().errmsg,
+ "Loop must have at least 3 different vertices, 2 unique vertices were provided:");
+
+// parseGeoJSONCRS
+const bigPoly20 = [[[10.0, 10.0], [-10.0, 10.0], [10.0, 10.0]]];
+
+err = t.insert({loc: {type: "Polygon", coordinates: bigPoly20, crs: {type: "name"}}});
+
+assert.includes(err.getWriteError().errmsg,
+ "CRS must have field \"properties\" which is an object, instead got type missing");
+
+err = t.insert({
+ loc: {
+ type: "Polygon",
+ coordinates: bigPoly20,
+ crs: {type: "name", properties: {nam: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}}
+ }
+});
+assert.includes(err.getWriteError().errmsg,
+ "In CRS, \"properties.name\" must be a string, instead got type missing");
+
+// parseMultiPolygon
+err = t.insert({loc: {type: "MultiPolygon", coordinates: ""}});
+
+assert.includes(err.getWriteError().errmsg,
+ "MultiPolygon coordinates must be an array, instead got type string");
+
+// Geometry collection
+err = t.insert({
+ loc: {
+ type: "GeometryCollection",
+ geometries: [
+ {
+ type: "MultiPoint",
+ coordinates: [
+ [-73.9580, 40.8003],
+ [-73.9498, 40.7968],
+ [-73.9737, 40.7648],
+ [-73.9814, 40.7681]
+ ]
+ },
+ 5
+ ]
+ }
+});
+assert.includes(err.getWriteError().errmsg,
+ "Element 1 of \"geometries\" must be an object, instead got type double:");
+})();
\ No newline at end of file
diff --git a/jstests/core/index3.js b/jstests/core/index3.js
deleted file mode 100644
index cc5ad3b0796..00000000000
--- a/jstests/core/index3.js
+++ /dev/null
@@ -1,18 +0,0 @@
-// Cannot implicitly shard accessed collections because of extra shard key index in sharded
-// collection.
-// @tags: [assumes_no_implicit_index_creation]
-
-t = db.index3;
-t.drop();
-
-assert(t.getIndexes().length == 0);
-
-t.createIndex({name: 1});
-
-t.save({name: "a"});
-
-t.createIndex({name: 1});
-
-assert(t.getIndexes().length == 2);
-
-assert(t.validate().valid);
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 3aadf9a5171..d0a15afe8f6 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -495,7 +495,7 @@ let viewsCommandTests = {
expectedErrorCode: ErrorCodes.NamespaceNotSharded,
},
moveChunk: {
- command: {moveChunk: "test.view"},
+ command: {moveChunk: "test.view", find: {}, to: "a"},
skipStandalone: true,
isAdminCommand: true,
expectFailure: true,
diff --git a/jstests/libs/cluster_server_parameter_utils.js b/jstests/libs/cluster_server_parameter_utils.js
index 2191108ab0c..072fac11d9a 100644
--- a/jstests/libs/cluster_server_parameter_utils.js
+++ b/jstests/libs/cluster_server_parameter_utils.js
@@ -6,13 +6,13 @@
* it to the end of nonTestClusterParameterNames.
* 2. Add the clusterParameter document that's expected as default to the end of
* testOnlyClusterParametersDefault if it's test-only. Otherwise, add it to the end of
- * nonTestOnlyClusterParametersDefault.
+ * nonTestClusterParametersDefault.
* 3. Add the clusterParameter document that setClusterParameter is expected to insert after its
* first invocation to the end of testOnlyClusterParametersInsert if it's test-only. Otherwise,
- * add it to the end of nonTestOnlyClusterParametersInsert.
+ * add it to the end of nonTestClusterParametersInsert.
* 4. Add the clusterParameter document that setClusterParameter is expected to update to after its
* second invocation to the end of testOnlyClusterParametersUpdate if it's test-only. Otherwise,
- * add it to the end of nonTestOnlyClusterParametersUpdate.
+ * add it to the end of nonTestClusterParametersUpdate.
*
*/
@@ -21,9 +21,7 @@ const testOnlyClusterParameterNames = [
"testIntClusterParameter",
"testBoolClusterParameter",
];
-const nonTestClusterParameterNames = [
- "changeStreamOptions",
-];
+const nonTestClusterParameterNames = ["changeStreamOptions", "changeStreams"];
const clusterParameterNames = testOnlyClusterParameterNames.concat(nonTestClusterParameterNames);
const testOnlyClusterParametersDefault = [
@@ -40,12 +38,15 @@ const testOnlyClusterParametersDefault = [
boolData: false,
},
];
-const nonTestClusterParametersDefault = [{
- _id: "changeStreamOptions",
- preAndPostImages: {
- expireAfterSeconds: "off",
+const nonTestClusterParametersDefault = [
+ {
+ _id: "changeStreamOptions",
+ preAndPostImages: {
+ expireAfterSeconds: "off",
+ },
},
-}];
+ {_id: "changeStreams", enabled: false, expireAfterSeconds: NumberLong(0)}
+];
const clusterParametersDefault =
testOnlyClusterParametersDefault.concat(nonTestClusterParametersDefault);
@@ -63,12 +64,19 @@ const testOnlyClusterParametersInsert = [
boolData: true,
},
];
-const nonTestClusterParametersInsert = [{
- _id: "changeStreamOptions",
- preAndPostImages: {
- expireAfterSeconds: 30,
+const nonTestClusterParametersInsert = [
+ {
+ _id: "changeStreamOptions",
+ preAndPostImages: {
+ expireAfterSeconds: 30,
+ },
},
-}];
+ {
+ _id: "changeStreams",
+ enabled: true,
+ expireAfterSeconds: 30,
+ }
+];
const clusterParametersInsert =
testOnlyClusterParametersInsert.concat(nonTestClusterParametersInsert);
@@ -86,12 +94,15 @@ const testOnlyClusterParametersUpdate = [
boolData: false,
},
];
-const nonTestClusterParametersUpdate = [{
- _id: "changeStreamOptions",
- preAndPostImages: {
- expireAfterSeconds: "off",
+const nonTestClusterParametersUpdate = [
+ {
+ _id: "changeStreamOptions",
+ preAndPostImages: {
+ expireAfterSeconds: "off",
+ },
},
-}];
+ {_id: "changeStreams", enabled: false, expireAfterSeconds: NumberLong(0)}
+];
const clusterParametersUpdate =
testOnlyClusterParametersUpdate.concat(nonTestClusterParametersUpdate);
diff --git a/jstests/libs/test_background_ops.js b/jstests/libs/test_background_ops.js
index f0bf0ced476..f08ab644b98 100644
--- a/jstests/libs/test_background_ops.js
+++ b/jstests/libs/test_background_ops.js
@@ -76,29 +76,7 @@ var getResult = function(mongo, name) {
return mongo.getCollection("config.testResult").findOne({_id: name});
};
-/**
- * Overrides the parallel shell code in mongo
- */
-function startParallelShell(jsCode, port) {
- if (TestData) {
- jsCode = "TestData = " + tojson(TestData) + ";" + jsCode;
- }
-
- var x;
- if (port) {
- x = startMongoProgramNoConnect("mongo", "--port", port, "--eval", jsCode);
- } else {
- x = startMongoProgramNoConnect("mongo", "--eval", jsCode, db ? db.getMongo().host : null);
- }
-
- return function() {
- jsTestLog("Waiting for shell " + x + "...");
- waitProgram(x);
- jsTestLog("Shell " + x + " finished.");
- };
-}
-
-startParallelOps = function(mongo, proc, args, context) {
+var startParallelOps = function(mongo, proc, args, context) {
var procName = proc.name + "-" + new ObjectId();
var seed = new ObjectId(new ObjectId().valueOf().split("").reverse().join(""))
.getTimestamp()
@@ -201,31 +179,24 @@ startParallelOps = function(mongo, proc, args, context) {
db = oldDB;
- var join = function() {
+ var join = function(options = {}) {
+ const {checkExitSuccess = true} = options;
+ delete options.checkExitSuccess;
setFinished(mongo, procName, true);
- rawJoin();
+ rawJoin(options);
+
result = getResult(mongo, procName);
assert.neq(result, null);
- if (result.err)
+ if (!checkExitSuccess) {
+ return result;
+ } else if (checkExitSuccess && result.err) {
throw Error("Error in parallel ops " + procName + " : " + tojson(result.err));
-
- else
+ } else {
return result.result;
- };
-
- join.isFinished = function() {
- return isFinished(mongo, procName);
- };
-
- join.setFinished = function(finished) {
- return setFinished(mongo, procName, finished);
- };
-
- join.waitForLock = function(name) {
- return waitForLock(mongo, name);
+ }
};
return join;
diff --git a/jstests/noPassthrough/change_streams_cluster_parameter.js b/jstests/noPassthrough/change_streams_cluster_parameter.js
new file mode 100644
index 00000000000..e76ed12cb04
--- /dev/null
+++ b/jstests/noPassthrough/change_streams_cluster_parameter.js
@@ -0,0 +1,146 @@
+// Tests the 'changeStreams' cluster-wide configuration parameter on the replica sets and the
+// sharded cluster.
+// @tags: [
+// featureFlagClusterWideConfig,
+// requires_replication,
+// requires_sharding,
+// multiversion_incompatible,
+// featureFlagServerlessChangeStreams,
+// featureFlagMongoStore
+// ]
+(function() {
+"use strict";
+
+// Verifies that the 'getClusterParameter' on the 'changeStreams' cluster-wide parameter returns the
+// expected response.
+function assertGetResponse(db, expectedChangeStreamParam) {
+ const response = assert.commandWorked(db.runCommand({getClusterParameter: "changeStreams"}));
+ const enabled = response.clusterParameters[0].enabled;
+ assert.eq(enabled, expectedChangeStreamParam.enabled, response);
+ if (enabled) {
+ // TODO SERVER-67145: For some reason the default 'expireAfterSeconds' is not serialized in
+ // mongoS.
+ assert.eq(response.clusterParameters[0].expireAfterSeconds,
+ expectedChangeStreamParam.expireAfterSeconds,
+ response);
+ }
+}
+
+// Tests the 'changeStreams' cluster-wide configuration parameter with the 'admin' database.
+function testWithAdminDB(conn) {
+ const adminDB = conn.getDB("admin");
+
+    // Change streams are initially disabled.
+ assertGetResponse(adminDB, {enabled: false, expireAfterSeconds: NumberLong(0)});
+
+    // TODO SERVER-67293: Make 'enabled' field required; setting 'changeStreams' parameter without
+    // 'enabled' field should fail.
+    // TODO SERVER-67146: The expected error on missing 'enabled' field should be 'BadValue' or
+    // 'InvalidClusterParameter'.
+
+ // Invalid string value for the 'enabled' parameter should fail.
+ assert.commandFailedWithCode(
+ adminDB.runCommand({setClusterParameter: {changeStreams: {enabled: "yes"}}}),
+ ErrorCodes.TypeMismatch);
+
+ // Enabling change streams without 'expireAfterSeconds' should fail.
+ assert.commandFailedWithCode(
+ adminDB.runCommand({setClusterParameter: {changeStreams: {enabled: true}}}),
+ ErrorCodes.BadValue);
+
+ // Invalid string value for the 'expireAfterSeconds' parameter should fail.
+ assert.commandFailedWithCode(
+ adminDB.runCommand(
+ {setClusterParameter: {changeStreams: {enabled: true, expireAfterSeconds: "off"}}}),
+ ErrorCodes.TypeMismatch);
+
+ // A negative value of 'expireAfterSeconds' should fail.
+ assert.commandFailedWithCode(adminDB.runCommand({
+ setClusterParameter: {changeStreams: {enabled: true, expireAfterSeconds: NumberLong(-1)}}
+ }),
+ ErrorCodes.BadValue);
+
+ // A zero value of 'expireAfterSeconds' should fail.
+ assert.commandFailedWithCode(adminDB.runCommand({
+ setClusterParameter: {changeStreams: {enabled: true, expireAfterSeconds: NumberLong(0)}}
+ }),
+ ErrorCodes.BadValue);
+
+ // Enabling change streams with success.
+ assert.commandWorked(adminDB.runCommand({
+ setClusterParameter: {changeStreams: {enabled: true, expireAfterSeconds: NumberLong(3600)}}
+ }));
+ assertGetResponse(adminDB, {enabled: true, expireAfterSeconds: NumberLong(3600)});
+
+ // Modifying expireAfterSeconds while enabled should succeed.
+ assert.commandWorked(adminDB.runCommand({
+ setClusterParameter: {changeStreams: {enabled: true, expireAfterSeconds: NumberLong(100)}}
+ }));
+ assertGetResponse(adminDB, {enabled: true, expireAfterSeconds: NumberLong(100)});
+
+ // Disabling with (non-zero) 'expireAfterSeconds' should fail.
+ assert.commandFailedWithCode(adminDB.runCommand({
+ setClusterParameter: {changeStreams: {enabled: false, expireAfterSeconds: NumberLong(1)}}
+ }),
+ ErrorCodes.BadValue);
+
+ // Disabling without 'expireAfterSeconds' should succeed.
+ assert.commandWorked(
+ adminDB.runCommand({setClusterParameter: {changeStreams: {enabled: false}}}));
+ assertGetResponse(adminDB, {enabled: false, expireAfterSeconds: NumberLong(0)});
+
+ // Disabling again should succeed.
+ assert.commandWorked(
+ adminDB.runCommand({setClusterParameter: {changeStreams: {enabled: false}}}));
+ assertGetResponse(adminDB, {enabled: false, expireAfterSeconds: NumberLong(0)});
+}
+
+function testWithoutAdminDB(conn) {
+ const db = conn.getDB(jsTestName());
+ assert.commandFailedWithCode(db.runCommand({getClusterParameter: "changeStreams"}),
+ ErrorCodes.Unauthorized);
+ assert.commandFailedWithCode(db.runCommand({
+ setClusterParameter: {changeStreams: {enabled: true, expireAfterSeconds: NumberLong(3600)}}
+ }),
+ ErrorCodes.Unauthorized);
+}
+
+// Tests the set and get change streams parameter on the replica-set.
+{
+ const rst = new ReplSetTest({name: "replSet", nodes: 2});
+ rst.startSet();
+ rst.initiate();
+
+ const primary = rst.getPrimary();
+ const secondary = rst.getSecondaries()[0];
+
+ // Verify that the set and get commands cannot be issued on database other than the 'admin'.
+ [primary, secondary].forEach(conn => {
+ testWithoutAdminDB(conn);
+ });
+
+ // Tests the set and get commands on the primary node.
+ testWithAdminDB(primary);
+
+ rst.stopSet();
+}
+
+// Tests the set and get change streams parameter on the sharded cluster.
+{
+ const st = new ShardingTest({shards: 1, mongos: 1});
+ const adminDB = st.rs0.getPrimary().getDB("admin");
+
+ // Test that setClusterParameter cannot be issued directly on shards in the sharded cluster,
+ // while getClusterParameter can.
+ assert.commandFailedWithCode(adminDB.runCommand({
+ setClusterParameter: {changeStreams: {enabled: true, expireAfterSeconds: NumberLong(3600)}}
+ }),
+ ErrorCodes.NotImplemented);
+ assertGetResponse(adminDB, {enabled: false, expireAfterSeconds: NumberLong(0)});
+
+ // Run the set and get commands on the mongoS.
+ testWithAdminDB(st.s);
+
+ st.stop();
+}
+}());
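
Read together, the cases in the new test above spell out the validation rules for the 'changeStreams' cluster parameter: 'enabled' must be a boolean, enabling requires a strictly positive 'expireAfterSeconds', and disabling with a non-zero 'expireAfterSeconds' is rejected. A plain-Python sketch of those rules as the test exercises them (a hypothetical validator written for illustration, not the server's IDL-generated code):

def validate_change_streams_param(param):
    """Return None if 'param' is acceptable, else a string naming the expected error."""
    enabled = param.get("enabled")
    expire = param.get("expireAfterSeconds")

    if not isinstance(enabled, bool):
        return "TypeMismatch: 'enabled' must be a boolean"
    if enabled:
        if expire is None:
            return "BadValue: 'expireAfterSeconds' is required when enabling"
        if isinstance(expire, bool) or not isinstance(expire, int):
            return "TypeMismatch: 'expireAfterSeconds' must be an integer"
        if expire <= 0:
            return "BadValue: 'expireAfterSeconds' must be positive"
    elif expire not in (None, 0):
        # Zero is assumed acceptable here because it is the stored default when disabled.
        return "BadValue: 'expireAfterSeconds' must be omitted (or 0) when disabling"
    return None

assert validate_change_streams_param({"enabled": "yes"}).startswith("TypeMismatch")
assert validate_change_streams_param({"enabled": True}).startswith("BadValue")
assert validate_change_streams_param({"enabled": True, "expireAfterSeconds": -1}).startswith("BadValue")
assert validate_change_streams_param({"enabled": True, "expireAfterSeconds": 3600}) is None
assert validate_change_streams_param({"enabled": False}) is None
assert validate_change_streams_param({"enabled": False, "expireAfterSeconds": 1}).startswith("BadValue")
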
diff --git a/jstests/noPassthrough/create_indexes_in_txn_errors_if_already_in_progress.js b/jstests/noPassthrough/create_indexes_in_txn_errors_if_already_in_progress.js
index ff4f0480d2a..6a465fc1d2e 100644
--- a/jstests/noPassthrough/create_indexes_in_txn_errors_if_already_in_progress.js
+++ b/jstests/noPassthrough/create_indexes_in_txn_errors_if_already_in_progress.js
@@ -76,14 +76,15 @@ try {
"Starting a parallel shell to run a transaction with a second index build request...");
joinSecondIndexBuild = startParallelShell(
funWithArgs(runFailedIndexBuildInTxn, dbName, collName, indexSpecB, 2), primary.port);
-
+    // We wait to observe that the second attempt to build the index fails while the
+ // hangAfterSettingUpIndexBuild is preventing the first attempt from completing successfully.
+ joinSecondIndexBuild();
} finally {
assert.commandWorked(
testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
}
joinFirstIndexBuild();
-joinSecondIndexBuild();
// We should have the _id index and the 'the_b_1_index' index just built.
assert.eq(testColl.getIndexes().length, 2);
diff --git a/jstests/noPassthrough/plan_cache_group_lookup.js b/jstests/noPassthrough/plan_cache_group_lookup.js
new file mode 100644
index 00000000000..c23b7cfd566
--- /dev/null
+++ b/jstests/noPassthrough/plan_cache_group_lookup.js
@@ -0,0 +1,236 @@
+/**
+ * Test that plans with $group and $lookup lowered to SBE are cached and invalidated correctly.
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
+load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+
+const conn = MongoRunner.runMongod();
+const db = conn.getDB("test");
+const coll = db.plan_cache_pipeline;
+const foreignColl = db.plan_cache_pipeline_foreign;
+
+if (!checkSBEEnabled(db)) {
+ jsTest.log("Skipping test because SBE is not enabled");
+ MongoRunner.stopMongod(conn);
+ return;
+}
+
+const sbeFullEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
+const sbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
+
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1, a1: 1}));
+assert.commandWorked(coll.createIndex({a: 1, a2: 1}));
+function setupForeignColl(index) {
+ foreignColl.drop();
+ assert.commandWorked(foreignColl.insert({b: 1}));
+ if (index) {
+ assert.commandWorked(foreignColl.createIndex(index));
+ }
+}
+assert.commandWorked(db.setProfilingLevel(2));
+
+/**
+ * Assert that the last aggregation command has a corresponding plan cache entry with the desired
+ * properties. 'version' is 1 if it's classic cache, 2 if it's SBE cache. 'isActive' is true if the
+ * cache entry is active. 'fromMultiPlanner' is true if the query part of the aggregation has been
+ * multi-planned. 'forcesClassicEngine' is true if the query is forced to use classic engine.
+ */
+function assertCacheUsage({version, fromMultiPlanner, isActive, forcesClassicEngine = false}) {
+ const profileObj = getLatestProfilerEntry(
+ db, {op: "command", "command.pipeline": {$exists: true}, ns: coll.getFullName()});
+ assert.eq(fromMultiPlanner, !!profileObj.fromMultiPlanner, profileObj);
+
+ const entries = coll.getPlanCache().list();
+ assert.eq(entries.length, 1, entries);
+ const entry = entries[0];
+ assert.eq(entry.version, version, entry);
+ assert.eq(entry.isActive, isActive, entry);
+ assert.eq(entry.planCacheKey, profileObj.planCacheKey, entry);
+
+ const explain = coll.explain().aggregate(profileObj.command.pipeline);
+ const queryPlanner = explain.hasOwnProperty("queryPlanner")
+ ? explain.queryPlanner
+ : explain.stages[0].$cursor.queryPlanner;
+ if (!forcesClassicEngine) {
+ assert(queryPlanner.winningPlan.hasOwnProperty("slotBasedPlan"), explain);
+ }
+ assert.eq(queryPlanner.planCacheKey, entry.planCacheKey, explain);
+
+ return entry;
+}
+
+/**
+ * Run the pipeline three times and assert the following plan cache behavior for the given "version":
+ * 1. The pipeline runs from the multi-planner, saving an inactive cache entry.
+ * 2. The pipeline runs from the multi-planner, activating the cache entry.
+ * 3. The pipeline runs from cached solution planner, using the active cache entry.
+ */
+function testLoweredPipeline({pipeline, version, forcesClassicEngine = false}) {
+ let results = coll.aggregate(pipeline).toArray();
+ assert.eq(results.length, 1, results);
+ const entry = assertCacheUsage(
+ {version: version, fromMultiPlanner: true, isActive: false, forcesClassicEngine});
+
+ results = coll.aggregate(pipeline).toArray();
+ assert.eq(results.length, 1, results);
+ let nextEntry = assertCacheUsage(
+ {version: version, fromMultiPlanner: true, isActive: true, forcesClassicEngine});
+ assert.eq(entry.planCacheKey, nextEntry.planCacheKey, {entry, nextEntry});
+
+ results = coll.aggregate(pipeline).toArray();
+ assert.eq(results.length, 1, results);
+ nextEntry = assertCacheUsage(
+ {version: version, fromMultiPlanner: false, isActive: true, forcesClassicEngine});
+ assert.eq(entry.planCacheKey, nextEntry.planCacheKey, {entry, nextEntry});
+
+ return nextEntry;
+}
+
+const multiPlanningQueryStage = {
+ $match: {a: 1}
+};
+const lookupStage = {
+ $lookup: {from: foreignColl.getName(), localField: "a", foreignField: "b", as: "matched"}
+};
+const groupStage = {
+ $group: {_id: "$a", out: {"$sum": 1}}
+};
+
+(function testLoweredPipelineCombination() {
+ setupForeignColl();
+
+ coll.getPlanCache().clear();
+ testLoweredPipeline(
+ {pipeline: [multiPlanningQueryStage, lookupStage], version: sbePlanCacheEnabled ? 2 : 1});
+
+ // TODO SERVER-61507: Update tests on $group when it's integrated to the SBE cache.
+ coll.getPlanCache().clear();
+ testLoweredPipeline({pipeline: [multiPlanningQueryStage, groupStage], version: 1});
+
+ coll.getPlanCache().clear();
+ testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage, groupStage], version: 1});
+
+ coll.getPlanCache().clear();
+ testLoweredPipeline({pipeline: [multiPlanningQueryStage, groupStage, lookupStage], version: 1});
+})();
+
+(function testPartiallyLoweredPipeline() {
+ coll.getPlanCache().clear();
+ setupForeignColl();
+ testLoweredPipeline({
+ pipeline: [multiPlanningQueryStage, lookupStage, {$_internalInhibitOptimization: {}}],
+ version: sbePlanCacheEnabled ? 2 : 1
+ });
+})();
+
+(function testNonExistentForeignCollectionCache() {
+ if (!sbePlanCacheEnabled) {
+ jsTestLog(
+ "Skipping testNonExistentForeignCollectionCache when SBE plan cache is not enabled");
+ return;
+ }
+
+ coll.getPlanCache().clear();
+ foreignColl.drop();
+ const entryWithoutForeignColl =
+ testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage], version: 2});
+
+ coll.getPlanCache().clear();
+ setupForeignColl();
+ const entryWithForeignColl =
+ testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage], version: 2});
+
+ assert.neq(entryWithoutForeignColl.planCacheKey,
+ entryWithForeignColl.planCacheKey,
+ {entryWithoutForeignColl, entryWithForeignColl});
+ assert.eq(entryWithoutForeignColl.queryHash,
+ entryWithForeignColl.queryHash,
+ {entryWithoutForeignColl, entryWithForeignColl});
+})();
+
+(function testForeignCollectionDropCacheInvalidation() {
+ if (!sbePlanCacheEnabled) {
+ jsTestLog(
+ "Skipping testForeignCollectionDropCacheInvalidation when SBE plan cache is not enabled");
+ return;
+ }
+
+ coll.getPlanCache().clear();
+ setupForeignColl();
+ testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage], version: 2});
+
+ foreignColl.drop();
+ testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage], version: 2});
+})();
+
+(function testForeignIndexDropCacheInvalidation() {
+ if (!sbePlanCacheEnabled) {
+ jsTestLog(
+ "Skipping testForeignIndexDropCacheInvalidation when SBE plan cache is not enabled");
+ return;
+ }
+
+ coll.getPlanCache().clear();
+ setupForeignColl({b: 1} /* index */);
+ testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage], version: 2});
+
+ assert.commandWorked(foreignColl.dropIndex({b: 1}));
+ testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage], version: 2});
+})();
+
+(function testForeignIndexBuildCacheInvalidation() {
+ if (!sbePlanCacheEnabled) {
+ jsTestLog(
+ "Skipping testForeignIndexBuildCacheInvalidation when SBE plan cache is not enabled");
+ return;
+ }
+
+ coll.getPlanCache().clear();
+ setupForeignColl({b: 1} /* index */);
+ testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage], version: 2});
+
+ assert.commandWorked(foreignColl.createIndex({c: 1}));
+ testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage], version: 2});
+})();
+
+(function testLookupSbeAndClassicPlanCacheKey() {
+ if (!sbeFullEnabled || !sbePlanCacheEnabled) {
+ jsTestLog(
+ "Skipping testLookupWithClassicPlanCache when SBE full or SBE plan cache is not enabled");
+ return;
+ }
+
+ setupForeignColl({b: 1} /* index */);
+
+    // When using the SBE engine, the plan cache key of $match vs. $match + $lookup should be
+    // different.
+ coll.getPlanCache().clear();
+ let matchEntry = testLoweredPipeline({pipeline: [multiPlanningQueryStage], version: 2});
+
+ coll.getPlanCache().clear();
+ let lookupEntry =
+ testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage], version: 2});
+ assert.neq(matchEntry.planCacheKey, lookupEntry.planCacheKey, {matchEntry, lookupEntry});
+
+    // When using the classic engine, the plan cache key of $match vs. $match + $lookup should be
+    // the same.
+ assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryForceClassicEngine: true}));
+
+ coll.getPlanCache().clear();
+ matchEntry = testLoweredPipeline(
+ {pipeline: [multiPlanningQueryStage], version: 1, forcesClassicEngine: true});
+
+ coll.getPlanCache().clear();
+ lookupEntry = testLoweredPipeline(
+ {pipeline: [multiPlanningQueryStage, lookupStage], version: 1, forcesClassicEngine: true});
+ assert.eq(matchEntry.planCacheKey, lookupEntry.planCacheKey, {matchEntry, lookupEntry});
+
+ assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryForceClassicEngine: false}));
+})();
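
Another small aside, not part of the patch: since the test above flips internalQueryForceClassicEngine and then restores it, the knob's current value can be read back with getParameter to verify the restore.

// Illustrative sketch only: confirm the engine knob is back to its default.
const knob = assert.commandWorked(
    db.adminCommand({getParameter: 1, internalQueryForceClassicEngine: 1}));
jsTestLog("internalQueryForceClassicEngine=" + knob.internalQueryForceClassicEngine);
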
+
+MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthrough/plan_cache_replan_group_lookup.js b/jstests/noPassthrough/plan_cache_replan_group_lookup.js
index e6b2dd13abd..8b0dee3cb2c 100644
--- a/jstests/noPassthrough/plan_cache_replan_group_lookup.js
+++ b/jstests/noPassthrough/plan_cache_replan_group_lookup.js
@@ -19,32 +19,37 @@ const coll = db.plan_cache_replan_group_lookup;
const foreignCollName = "foreign";
coll.drop();
+const sbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
+const sbeFullEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
+
function getPlansForCacheEntry(match) {
const matchingCacheEntries = coll.getPlanCache().list([{$match: match}]);
assert.eq(matchingCacheEntries.length, 1, coll.getPlanCache().list());
return matchingCacheEntries[0];
}
-function planHasIxScanStageForKey(planStats, keyPattern) {
+function planHasIxScanStageForIndex(planStats, indexName) {
const stage = getPlanStage(planStats, "IXSCAN");
if (stage === null) {
return false;
}
- return bsonWoCompare(keyPattern, stage.keyPattern) === 0;
+ return indexName === stage.indexName;
}
-function assertCacheUsage(
- multiPlanning, cacheEntryIsActive, cachedIndex, pipeline, aggOptions = {}) {
+function assertCacheUsage(multiPlanning,
+ cacheEntryVersion,
+ cacheEntryIsActive,
+ cachedIndexName,
+ pipeline,
+ aggOptions = {}) {
const profileObj = getLatestProfilerEntry(db, {op: "command", ns: coll.getFullName()});
const queryHash = profileObj.queryHash;
const planCacheKey = profileObj.planCacheKey;
assert.eq(multiPlanning, !!profileObj.fromMultiPlanner);
const entry = getPlansForCacheEntry({queryHash: queryHash});
- // TODO(SERVER-61507): Convert the assertion to SBE cache once lowered $lookup integrates
- // with SBE plan cache.
- assert.eq(entry.version, 1);
+ assert.eq(cacheEntryVersion, entry.version);
assert.eq(cacheEntryIsActive, entry.isActive);
// If the entry is active, we should have a plan cache key.
@@ -59,7 +64,11 @@ function assertCacheUsage(
: explain.stages[0].$cursor.queryPlanner.planCacheKey;
assert.eq(explainKey, entry.planCacheKey);
}
- assert.eq(planHasIxScanStageForKey(getCachedPlan(entry.cachedPlan), cachedIndex), true, entry);
+ if (cacheEntryVersion === 2) {
+ assert(entry.cachedPlan.stages.includes(cachedIndexName), entry);
+ } else {
+ assert(planHasIxScanStageForIndex(getCachedPlan(entry.cachedPlan), cachedIndexName), entry);
+ }
}
assert.commandWorked(db.setProfilingLevel(2));
@@ -79,22 +88,35 @@ for (let i = 1000; i < 1100; i++) {
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1}));
-function setUpActiveCacheEntry(pipeline, cachedIndex) {
+function setUpActiveCacheEntry(pipeline, cacheEntryVersion, cachedIndexName) {
// For the first run, the query should go through multiplanning and create inactive cache entry.
assert.eq(2, coll.aggregate(pipeline).toArray()[0].n);
- assertCacheUsage(true /*multiPlanning*/, false /*cacheEntryIsActive*/, cachedIndex, pipeline);
+ assertCacheUsage(true /*multiPlanning*/,
+ cacheEntryVersion,
+ false /*cacheEntryIsActive*/,
+ cachedIndexName,
+ pipeline);
// After the second run, the inactive cache entry should be promoted to an active entry.
assert.eq(2, coll.aggregate(pipeline).toArray()[0].n);
- assertCacheUsage(true /*multiPlanning*/, true /*cacheEntryIsActive*/, cachedIndex, pipeline);
+ assertCacheUsage(true /*multiPlanning*/,
+ cacheEntryVersion,
+ true /*cacheEntryIsActive*/,
+ cachedIndexName,
+ pipeline);
// For the third run, the active cached query should be used.
assert.eq(2, coll.aggregate(pipeline).toArray()[0].n);
- assertCacheUsage(false /*multiPlanning*/, true /*cacheEntryIsActive*/, cachedIndex, pipeline);
+ assertCacheUsage(false /*multiPlanning*/,
+ cacheEntryVersion,
+ true /*cacheEntryIsActive*/,
+ cachedIndexName,
+ pipeline);
}
function testFn(aIndexPipeline,
bIndexPipeline,
+ cacheEntryVersion,
setUpFn = undefined,
tearDownFn = undefined,
explainFn = undefined) {
@@ -107,7 +129,7 @@ function testFn(aIndexPipeline,
explainFn(bIndexPipeline);
}
- setUpActiveCacheEntry(aIndexPipeline, {a: 1} /* cachedIndex */);
+ setUpActiveCacheEntry(aIndexPipeline, cacheEntryVersion, "a_1" /* cachedIndexName */);
// Now run the other pipeline, which has the same query shape but is faster with a different
// index. It should trigger re-planning of the query.
@@ -115,15 +137,17 @@ function testFn(aIndexPipeline,
// Run the other pipeline again; the cache should be used now.
assertCacheUsage(true /*multiPlanning*/,
+ cacheEntryVersion,
true /*cacheEntryIsActive*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
bIndexPipeline);
// Run it once again so that the cache entry is reused.
assert.eq(3, coll.aggregate(bIndexPipeline).toArray()[0].n);
assertCacheUsage(false /*multiPlanning*/,
+ cacheEntryVersion,
true /*cacheEntryIsActive*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
bIndexPipeline);
if (tearDownFn) {
@@ -144,7 +168,9 @@ const bIndexPredicate = [{$match: {a: 1, b: 1042}}];
// $group tests.
const groupSuffix = [{$group: {_id: "$c"}}, {$count: "n"}];
-testFn(aIndexPredicate.concat(groupSuffix), bIndexPredicate.concat(groupSuffix));
+testFn(aIndexPredicate.concat(groupSuffix),
+ bIndexPredicate.concat(groupSuffix),
+ 1 /* cacheEntryVersion */);
// $lookup tests.
const lookupStage =
@@ -188,9 +214,13 @@ function verifyCorrectLookupAlgorithmUsed(targetJoinAlgorithm, pipeline, aggOpti
}
}
+// TODO SERVER-61507: The following test cases are $lookup followed by $group. Update them when
+// $group is integrated with SBE plan cache.
+//
// NLJ.
testFn(aLookup,
bLookup,
+ 1 /* cacheEntryVersion */,
createLookupForeignColl,
dropLookupForeignColl,
(pipeline) =>
@@ -199,6 +229,7 @@ testFn(aLookup,
// INLJ.
testFn(aLookup,
bLookup,
+ 1 /* cacheEntryVersion */,
() => {
createLookupForeignColl();
assert.commandWorked(db[foreignCollName].createIndex({foreignKey: 1}));
@@ -208,7 +239,7 @@ testFn(aLookup,
verifyCorrectLookupAlgorithmUsed("IndexedLoopJoin", pipeline, {allowDiskUse: false}));
// HJ.
-testFn(aLookup, bLookup, () => {
+testFn(aLookup, bLookup, 1 /* cacheEntryVersion */, () => {
createLookupForeignColl();
}, dropLookupForeignColl, (pipeline) => verifyCorrectLookupAlgorithmUsed("HashJoin", pipeline, {
allowDiskUse: true
@@ -221,29 +252,38 @@ testFn(aLookup, bLookup, () => {
createLookupForeignColl();
assert.commandWorked(db[foreignCollName].createIndex({foreignKey: 1}));
verifyCorrectLookupAlgorithmUsed("IndexedLoopJoin", aLookup, {allowDiskUse: true});
-setUpActiveCacheEntry(aLookup, {a: 1} /* cachedIndex */);
+setUpActiveCacheEntry(aLookup, 1 /* cacheEntryVersion */, "a_1" /* cachedIndexName */);
// Drop the index. This should result in using the active plan, but switching to HJ.
assert.commandWorked(db[foreignCollName].dropIndex({foreignKey: 1}));
verifyCorrectLookupAlgorithmUsed("HashJoin", aLookup, {allowDiskUse: true});
assert.eq(2, coll.aggregate(aLookup).toArray()[0].n);
-assertCacheUsage(
- false /*multiPlanning*/, true /*cacheEntryIsActive*/, {a: 1} /*cachedIndex*/, aLookup);
+assertCacheUsage(false /*multiPlanning*/,
+ 1 /* cacheEntryVersion */,
+ true /*cacheEntryIsActive*/,
+ "a_1" /*cachedIndexName*/,
+ aLookup);
// Set 'allowDiskUse' to 'false'. This should still result in using the active plan, but switching
// to NLJ.
verifyCorrectLookupAlgorithmUsed("NestedLoopJoin", aLookup, {allowDiskUse: false});
assert.eq(2, coll.aggregate(aLookup).toArray()[0].n);
-assertCacheUsage(
- false /*multiPlanning*/, true /*cacheEntryIsActive*/, {a: 1} /*cachedIndex*/, aLookup);
+assertCacheUsage(false /*multiPlanning*/,
+ 1 /* cacheEntryVersion */,
+ true /*cacheEntryIsActive*/,
+ "a_1" /*cachedIndexName*/,
+ aLookup);
// Drop the foreign collection. This should still result in using the active plan with a special
// empty collection plan.
dropLookupForeignColl();
verifyCorrectLookupAlgorithmUsed("NonExistentForeignCollection", aLookup, {allowDiskUse: true});
assert.eq(2, coll.aggregate(aLookup).toArray()[0].n);
-assertCacheUsage(
- false /*multiPlanning*/, true /*cacheEntryIsActive*/, {a: 1} /*cachedIndex*/, aLookup);
+assertCacheUsage(false /*multiPlanning*/,
+ 1 /* cacheEntryVersion */,
+ true /*cacheEntryIsActive*/,
+ "a_1" /*cachedIndexName*/,
+ aLookup);
// Verify that changing the plan for the right side does not trigger a replan.
const foreignColl = db[foreignCollName];
@@ -280,15 +320,17 @@ verifyCorrectLookupAlgorithmUsed(
runLookupQuery({allowDiskUse: false});
assertCacheUsage(true /*multiPlanning*/,
+ sbePlanCacheEnabled ? 2 : 1 /* cacheEntryVersion */,
false /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline,
{allowDiskUse: false});
runLookupQuery({allowDiskUse: false});
assertCacheUsage(true /*multiPlanning*/,
+ sbePlanCacheEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline,
{allowDiskUse: false});
@@ -300,17 +342,39 @@ assert.commandWorked(foreignColl.dropIndex({c: 1}));
verifyCorrectLookupAlgorithmUsed(
"NestedLoopJoin", avoidReplanLookupPipeline, {allowDiskUse: false});
+// If the SBE plan cache is enabled, dropping the index invalidates the $lookup plan cache entry,
+// so we will need to rerun the multi-planner.
+if (sbePlanCacheEnabled) {
+ runLookupQuery({allowDiskUse: false});
+ assertCacheUsage(true /*multiPlanning*/,
+ sbeFullEnabled ? 2 : 1 /* cacheEntryVersion */,
+ false /*activeCacheEntry*/,
+ "b_1" /*cachedIndexName*/,
+ avoidReplanLookupPipeline,
+ {allowDiskUse: false});
+
+ runLookupQuery({allowDiskUse: false});
+ assertCacheUsage(true /*multiPlanning*/,
+ sbeFullEnabled ? 2 : 1 /* cacheEntryVersion */,
+ true /*activeCacheEntry*/,
+ "b_1" /*cachedIndexName*/,
+ avoidReplanLookupPipeline,
+ {allowDiskUse: false});
+}
+
runLookupQuery({allowDiskUse: false});
assertCacheUsage(false /*multiPlanning*/,
+ sbePlanCacheEnabled && sbeFullEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline,
{allowDiskUse: false});
runLookupQuery({allowDiskUse: false});
assertCacheUsage(false /*multiPlanning*/,
+ sbePlanCacheEnabled && sbeFullEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline,
{allowDiskUse: false});
@@ -318,16 +382,38 @@ assertCacheUsage(false /*multiPlanning*/,
// replanning the cached query.
verifyCorrectLookupAlgorithmUsed("HashJoin", avoidReplanLookupPipeline, {allowDiskUse: true});
+// If the SBE plan cache is enabled, using a different 'allowDiskUse' option will result in a
+// different plan cache key.
+if (sbePlanCacheEnabled) {
+ runLookupQuery({allowDiskUse: true});
+ assertCacheUsage(true /*multiPlanning*/,
+ 2 /* cacheEntryVersion */,
+ false /*activeCacheEntry*/,
+ "b_1" /*cachedIndexName*/,
+ avoidReplanLookupPipeline,
+ {allowDiskUse: true});
+
+ runLookupQuery({allowDiskUse: true});
+ assertCacheUsage(true /*multiPlanning*/,
+ 2 /* cacheEntryVersion */,
+ true /*activeCacheEntry*/,
+ "b_1" /*cachedIndexName*/,
+ avoidReplanLookupPipeline,
+ {allowDiskUse: true});
+}
+
runLookupQuery({allowDiskUse: true});
assertCacheUsage(false /*multiPlanning*/,
+ sbePlanCacheEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline,
{allowDiskUse: true});
runLookupQuery({allowDiskUse: true});
assertCacheUsage(false /*multiPlanning*/,
+ sbePlanCacheEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline,
{allowDiskUse: true});
@@ -351,23 +437,27 @@ verifyCorrectLookupAlgorithmUsed("IndexedLoopJoin", avoidReplanLookupPipeline);
// Set up an active cache entry.
runLookupQuery();
assertCacheUsage(true /*multiPlanning*/,
+ sbePlanCacheEnabled ? 2 : 1 /* cacheEntryVersion */,
false /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline);
runLookupQuery();
assertCacheUsage(true /*multiPlanning*/,
+ sbePlanCacheEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline);
runLookupQuery();
assertCacheUsage(false /*multiPlanning*/,
+ sbePlanCacheEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline);
runLookupQuery();
assertCacheUsage(false /*multiPlanning*/,
+ sbePlanCacheEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline);
// Disable $lookup pushdown. This should not invalidate the cache entry, but it should prevent
@@ -380,7 +470,7 @@ let explain = coll.explain().aggregate(avoidReplanLookupPipeline);
const eqLookupNodes = getAggPlanStages(explain, "EQ_LOOKUP");
assert.eq(eqLookupNodes.length, 0, "expected no EQ_LOOKUP nodes; got " + tojson(explain));
-if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+if (sbePlanCacheEnabled) {
runLookupQuery();
const profileObj = getLatestProfilerEntry(db, {op: "command", ns: coll.getFullName()});
const matchingCacheEntries =
@@ -390,13 +480,15 @@ if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
// When the SBE plan cache is disabled, we will be able to reuse the same cache entry.
runLookupQuery();
assertCacheUsage(false /*multiPlanning*/,
+ 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline);
runLookupQuery();
assertCacheUsage(false /*multiPlanning*/,
+ 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline);
}
@@ -415,23 +507,27 @@ if (checkSBEEnabled(db)) {
// Set up an active cache entry.
runGroupQuery();
assertCacheUsage(true /*multiPlanning*/,
+ 1 /* cacheEntryVersion */,
false /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanGroupPipeline);
runGroupQuery();
assertCacheUsage(true /*multiPlanning*/,
+ 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanGroupPipeline);
runGroupQuery();
assertCacheUsage(false /*multiPlanning*/,
+ 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanGroupPipeline);
runGroupQuery();
assertCacheUsage(false /*multiPlanning*/,
+ 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanGroupPipeline);
// Disable $group pushdown. This should not invalidate the cache entry, but it should prevent $group
@@ -443,7 +539,7 @@ explain = coll.explain().aggregate(avoidReplanLookupPipeline);
groupNodes = getAggPlanStages(explain, "GROUP");
assert.eq(groupNodes.length, 0);
-if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+if (sbePlanCacheEnabled) {
runGroupQuery();
const profileObj = getLatestProfilerEntry(db, {op: "command", ns: coll.getFullName()});
const matchingCacheEntries =
@@ -453,13 +549,15 @@ if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
// When the SBE plan cache is disabled, we will be able to reuse the same cache entry.
runGroupQuery();
assertCacheUsage(false /*multiPlanning*/,
+ 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanGroupPipeline);
runGroupQuery();
assertCacheUsage(false /*multiPlanning*/,
+ 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
- {b: 1} /*cachedIndex*/,
+ "b_1" /*cachedIndexName*/,
avoidReplanGroupPipeline);
}
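
As a closing illustration, not part of the patch: the profiler fields these assertions key on can be read back directly with the same helper assertCacheUsage uses, assuming profiling level 2 is still enabled as it is earlier in the test.

// Illustrative sketch only: report how the latest aggregate was planned.
const latestProfile = getLatestProfilerEntry(db, {op: "command", ns: coll.getFullName()});
jsTestLog("fromMultiPlanner=" + !!latestProfile.fromMultiPlanner +
          ", planCacheKey=" + latestProfile.planCacheKey);
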
diff --git a/jstests/replsets/cluster_server_parameter_commands_replset.js b/jstests/replsets/cluster_server_parameter_commands_replset.js
index ccf809765fa..79e1356f47c 100644
--- a/jstests/replsets/cluster_server_parameter_commands_replset.js
+++ b/jstests/replsets/cluster_server_parameter_commands_replset.js
@@ -3,7 +3,8 @@
*
* @tags: [
* does_not_support_stepdowns,
- * requires_replication
+ * requires_replication,
+ * multiversion_incompatible
* ]
*/
(function() {
diff --git a/jstests/sharding/cluster_server_parameter_commands_sharded.js b/jstests/sharding/cluster_server_parameter_commands_sharded.js
index 0b1c0a47c3b..c1547d0c3ac 100644
--- a/jstests/sharding/cluster_server_parameter_commands_sharded.js
+++ b/jstests/sharding/cluster_server_parameter_commands_sharded.js
@@ -4,7 +4,8 @@
* @tags: [
* does_not_support_stepdowns,
* requires_replication,
- * requires_sharding
+ * requires_sharding,
+ * multiversion_incompatible
* ]
*/
(function() {
diff --git a/jstests/sharding/compound_hashed_shard_key_sharding_cmds.js b/jstests/sharding/compound_hashed_shard_key_sharding_cmds.js
index 4be26f1c18d..09885c4b3d7 100644
--- a/jstests/sharding/compound_hashed_shard_key_sharding_cmds.js
+++ b/jstests/sharding/compound_hashed_shard_key_sharding_cmds.js
@@ -133,9 +133,11 @@ function testMoveChunk(shardKey) {
// Error if either of the bounds is not a valid shard key.
assert.commandFailedWithCode(
- st.s0.adminCommand({moveChunk: ns, bounds: [NaN, aChunk.max], to: shard1}), 10065);
+ st.s0.adminCommand({moveChunk: ns, bounds: [NaN, aChunk.max], to: shard1}),
+ ErrorCodes.TypeMismatch);
assert.commandFailedWithCode(
- st.s0.adminCommand({moveChunk: ns, bounds: [aChunk.min, NaN], to: shard1}), 10065);
+ st.s0.adminCommand({moveChunk: ns, bounds: [aChunk.min, NaN], to: shard1}),
+ ErrorCodes.TypeMismatch);
assert.commandWorked(
st.s0.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
diff --git a/jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js b/jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js
index 9753683ebb3..a805b82883a 100644
--- a/jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js
+++ b/jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js
@@ -58,7 +58,7 @@ let logPrefix = "While the shard is not part of a sharded cluster: ";
let shardServer = new ReplSetTest(
{name: "shardServer", nodes: 1, nodeOptions: {shardsvr: ""}, useHostName: true});
shardServer.startSet();
-shardServer.initiate();
+shardServer.initiateWithHighElectionTimeout();
jsTestLog(logPrefix + "Adding an arbiter node that will change IDWC to (w:1) should succeed.");
let arbiter = shardServer.add();
@@ -78,7 +78,7 @@ logPrefix = "While the shard is part of a sharded cluster: ";
shardServer = new ReplSetTest(
{name: "shardServer", nodes: 1, nodeOptions: {shardsvr: ""}, useHostName: true});
shardServer.startSet();
-shardServer.initiate();
+shardServer.initiateWithHighElectionTimeout();
const st = new ShardingTest({shards: 0, mongos: 1});
var admin = st.getDB('admin');
diff --git a/jstests/sharding/recover_multiple_migrations_on_stepup.js b/jstests/sharding/recover_multiple_migrations_on_stepup.js
index 03095e58864..828dac143cf 100644
--- a/jstests/sharding/recover_multiple_migrations_on_stepup.js
+++ b/jstests/sharding/recover_multiple_migrations_on_stepup.js
@@ -57,9 +57,8 @@ joinMoveChunk1();
// Start a second migration on a different collection, wait until it persists its recovery document
// and then step down the donor.
var moveChunkHangAtStep3Failpoint = configureFailPoint(st.rs0.getPrimary(), "moveChunkHangAtStep3");
-// NOTE: The test doesn't join this parallel migration to avoid the check on its outcome,
-// which is not deterministic when executed in a configsvr stepdown suite (SERVER-62419)
-moveChunkParallel(staticMongod, st.s0.host, {_id: 0}, null, nsB, st.shard1.shardName);
+var joinMoveChunk2 =
+ moveChunkParallel(staticMongod, st.s0.host, {_id: 0}, null, nsB, st.shard1.shardName);
moveChunkHangAtStep3Failpoint.wait();
@@ -74,6 +73,9 @@ assert.eq(2, st.rs0.getPrimary().getDB('config')['migrationCoordinators'].countD
// Stepdown the donor shard
assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 5, force: true}));
moveChunkHangAtStep3Failpoint.off();
+// NOTE: checkExitSuccess is false because the outcome is not deterministic when executed in a
+// configsvr stepdown suite (SERVER-62419)
+joinMoveChunk2({checkExitSuccess: false});
// Check that the donor shard has been able to recover the shard version for both collections.
assert.eq(0, collA.find().itcount());
diff --git a/src/mongo/client/client_deprecated.cpp b/src/mongo/client/client_deprecated.cpp
index e8bbf0542f3..2df1d1be3ac 100644
--- a/src/mongo/client/client_deprecated.cpp
+++ b/src/mongo/client/client_deprecated.cpp
@@ -201,74 +201,6 @@ Status initFindFromOpQueryObj(const BSONObj& querySettings, FindCommandRequest*
} // namespace
-const BSONField<BSONObj> Query::ReadPrefField("$readPreference");
-
-void Query::makeComplex() {
- if (isComplex())
- return;
- BSONObjBuilder b;
- b.append("query", obj);
- obj = b.obj();
-}
-
-Query& Query::sort(const BSONObj& s) {
- appendComplex("orderby", s);
- return *this;
-}
-
-Query& Query::hint(BSONObj keyPattern) {
- appendComplex("$hint", keyPattern);
- return *this;
-}
-
-Query& Query::readPref(ReadPreference pref, const BSONArray& tags) {
- appendComplex(ReadPrefField.name().c_str(),
- ReadPreferenceSetting(pref, TagSet(tags)).toInnerBSON());
- return *this;
-}
-
-bool Query::isComplex(bool* hasDollar) const {
- return isComplexQueryObj(obj, hasDollar);
-}
-
-Query& Query::appendElements(BSONObj elements) {
- makeComplex();
- BSONObjBuilder b(std::move(obj));
- b.appendElements(elements);
- obj = b.obj();
- return *this;
-}
-
-Query& Query::requestResumeToken(bool enable) {
- appendComplex("$_requestResumeToken", enable);
- return *this;
-}
-
-Query& Query::resumeAfter(BSONObj point) {
- appendComplex("$_resumeAfter", point);
- return *this;
-}
-
-Query& Query::maxTimeMS(long long timeout) {
- appendComplex("$maxTimeMS", timeout);
- return *this;
-}
-
-Query& Query::term(long long value) {
- appendComplex("term", value);
- return *this;
-}
-
-Query& Query::readConcern(BSONObj rc) {
- appendComplex("readConcern", rc);
- return *this;
-}
-
-Query& Query::readOnce(bool enable) {
- appendComplex("$readOnce", enable);
- return *this;
-}
-
void initFindFromLegacyOptions(BSONObj bsonOptions, int options, FindCommandRequest* findCommand) {
invariant(findCommand);
BSONObj filter = filterFromOpQueryObj(bsonOptions);
diff --git a/src/mongo/client/client_deprecated.h b/src/mongo/client/client_deprecated.h
index d8eb80e5afa..fa4509c62f8 100644
--- a/src/mongo/client/client_deprecated.h
+++ b/src/mongo/client/client_deprecated.h
@@ -41,98 +41,6 @@ namespace mongo {
* added because OP_QUERY is no longer supported by the shell or server.
*/
namespace client_deprecated {
-
-/**
- * Represents a subset of query settings, such as sort, hint, etc. It is only used in the context of
- * the deprecated query API in 'DBClientBase', which has been superseded by `DBClientBase::find()`
- * and friends. Additional uses of this class should not be added to the code base!
- */
-class Query {
-public:
- static const BSONField<BSONObj> ReadPrefField;
-
- /**
- * Creating a Query object from raw BSON is on its way out. Please don't add new callers under
- * any circumstances.
- */
- static Query fromBSONDeprecated(const BSONObj& b) {
- Query q;
- q.obj = b;
- return q;
- }
-
- Query() : obj(BSONObj()) {}
-
- /** Add a sort (ORDER BY) criteria to the query expression.
- @param sortPattern the sort order template. For example to order by name ascending, time
- descending:
- { name : 1, ts : -1 }
- i.e.
- BSON( "name" << 1 << "ts" << -1 )
- or
- fromjson(" name : 1, ts : -1 ")
- */
- Query& sort(const BSONObj& sortPattern);
-
- /** Provide a hint to the query.
- @param keyPattern Key pattern for the index to use.
- Example:
- hint("{ts:1}")
- */
- Query& hint(BSONObj keyPattern);
-
- /**
- * Sets the read preference for this query.
- *
- * @param pref the read preference mode for this query.
- * @param tags the set of tags to use for this query.
- */
- Query& readPref(ReadPreference pref, const BSONArray& tags);
-
- /**
- * A temporary accessor that returns a reference to the internal BSON object. No new callers
- * should be introduced!
- * NB: must be implemented in the header because db/query/query_request cannot link against
- * client/client_query.
- */
- const BSONObj& getFullSettingsDeprecated() const {
- return obj;
- }
-
- /**
- * The setters below were added to make the contents of the Query's settings internal BSON
- * explicit. They will be reviewed and deprecated/removed as appropriate.
- */
- Query& appendElements(BSONObj elements);
- Query& requestResumeToken(bool enable);
- Query& resumeAfter(BSONObj point);
- Query& maxTimeMS(long long timeout);
- Query& term(long long value);
- Query& readConcern(BSONObj rc);
- Query& readOnce(bool enable);
-
-private:
- BSONObj obj;
-
- /**
- * @return true if this query has an orderby, hint, or some other field
- */
- bool isComplex(bool* hasDollar = nullptr) const;
-
- void makeComplex();
- template <class T>
- void appendComplex(const char* fieldName, const T& val) {
- makeComplex();
- BSONObjBuilder b(std::move(obj));
- b.append(fieldName, val);
- obj = b.obj();
- }
-};
-
-inline std::ostream& operator<<(std::ostream& s, const Query& q) {
- return s << q.getFullSettingsDeprecated().toString();
-}
-
/**
* WARNING: This function exists only to support special code paths that use an OP_QUERY-style query
* representation (even though the OP_QUERY wire protocol message itself is no longer supported). Do
diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp
index 5c2238ebec9..2c2bcf36412 100644
--- a/src/mongo/client/dbclient_base.cpp
+++ b/src/mongo/client/dbclient_base.cpp
@@ -584,31 +584,6 @@ bool DBClientBase::exists(const string& ns) {
const uint64_t DBClientBase::INVALID_SOCK_CREATION_TIME = std::numeric_limits<uint64_t>::max();
-unique_ptr<DBClientCursor> DBClientBase::query_DEPRECATED(
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- int limit,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize,
- boost::optional<BSONObj> readConcernObj) {
- unique_ptr<DBClientCursor> c(new DBClientCursor(this,
- nsOrUuid,
- filter,
- querySettings,
- limit,
- nToSkip,
- fieldsToReturn,
- queryOptions,
- batchSize,
- readConcernObj));
- if (c->init())
- return c;
- return nullptr;
-}
-
std::unique_ptr<DBClientCursor> DBClientBase::find(FindCommandRequest findRequest,
const ReadPreferenceSetting& readPref,
ExhaustMode exhaustMode) {
@@ -651,46 +626,12 @@ BSONObj DBClientBase::findOne(const NamespaceStringOrUUID& nssOrUuid, BSONObj fi
unique_ptr<DBClientCursor> DBClientBase::getMore(const string& ns, long long cursorId) {
unique_ptr<DBClientCursor> c(
- new DBClientCursor(this, NamespaceString(ns), cursorId, 0 /* limit */, 0 /* options */));
+ new DBClientCursor(this, NamespaceString(ns), cursorId, false /*isExhaust*/));
if (c->init())
return c;
return nullptr;
}
-unsigned long long DBClientBase::query_DEPRECATED(
- std::function<void(DBClientCursorBatchIterator&)> f,
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize,
- boost::optional<BSONObj> readConcernObj) {
- // mask options
- queryOptions &= (int)(QueryOption_NoCursorTimeout | QueryOption_SecondaryOk);
-
- unique_ptr<DBClientCursor> c(this->query_DEPRECATED(nsOrUuid,
- filter,
- querySettings,
- 0,
- 0,
- fieldsToReturn,
- queryOptions,
- batchSize,
- readConcernObj));
- // query_DEPRECATED() throws on network error so OK to uassert with numeric code here.
- uassert(16090, "socket error for mapping query", c.get());
-
- unsigned long long n = 0;
-
- while (c->more()) {
- DBClientCursorBatchIterator i(*c);
- f(i);
- n += i.n();
- }
- return n;
-}
-
namespace {
OpMsgRequest createInsertRequest(const string& ns,
const vector<BSONObj>& v,
diff --git a/src/mongo/client/dbclient_base.h b/src/mongo/client/dbclient_base.h
index 0a1c7bdd77d..3c933f6d3d3 100644
--- a/src/mongo/client/dbclient_base.h
+++ b/src/mongo/client/dbclient_base.h
@@ -35,7 +35,6 @@
#include "mongo/base/string_data.h"
#include "mongo/client/authenticate.h"
#include "mongo/client/client_api_version_parameters_gen.h"
-#include "mongo/client/client_deprecated.h"
#include "mongo/client/connection_string.h"
#include "mongo/client/dbclient_cursor.h"
#include "mongo/client/index_spec.h"
@@ -578,29 +577,6 @@ public:
BSONObj findOne(const NamespaceStringOrUUID& nssOrUuid, BSONObj filter);
/**
- * Legacy find API. Do not add new callers! Use the 'find*()' methods above instead.
- */
- virtual std::unique_ptr<DBClientCursor> query_DEPRECATED(
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings = client_deprecated::Query(),
- int limit = 0,
- int nToSkip = 0,
- const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- int batchSize = 0,
- boost::optional<BSONObj> readConcernObj = boost::none);
- virtual unsigned long long query_DEPRECATED(
- std::function<void(DBClientCursorBatchIterator&)> f,
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings = client_deprecated::Query(),
- const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = QueryOption_Exhaust,
- int batchSize = 0,
- boost::optional<BSONObj> readConcernObj = boost::none);
-
- /**
* Don't use this - called automatically by DBClientCursor for you.
* 'cursorId': Id of cursor to retrieve.
* Returns an handle to a previously allocated cursor.
diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp
index c3651edb01c..1b87829c1cd 100644
--- a/src/mongo/client/dbclient_connection.cpp
+++ b/src/mongo/client/dbclient_connection.cpp
@@ -625,67 +625,6 @@ uint64_t DBClientConnection::getSockCreationMicroSec() const {
}
}
-unsigned long long DBClientConnection::query_DEPRECATED(
- std::function<void(DBClientCursorBatchIterator&)> f,
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize,
- boost::optional<BSONObj> readConcernObj) {
- if (!(queryOptions & QueryOption_Exhaust)) {
- return DBClientBase::query_DEPRECATED(f,
- nsOrUuid,
- filter,
- querySettings,
- fieldsToReturn,
- queryOptions,
- batchSize,
- readConcernObj);
- }
-
- // mask options
- queryOptions &=
- (int)(QueryOption_NoCursorTimeout | QueryOption_SecondaryOk | QueryOption_Exhaust);
-
- unique_ptr<DBClientCursor> c(this->query_DEPRECATED(nsOrUuid,
- filter,
- querySettings,
- 0,
- 0,
- fieldsToReturn,
- queryOptions,
- batchSize,
- readConcernObj));
- // Note that this->query will throw for network errors, so it is OK to return a numeric
- // error code here.
- uassert(13386, "socket error for mapping query", c.get());
-
- unsigned long long n = 0;
-
- try {
- while (1) {
- while (c->moreInCurrentBatch()) {
- DBClientCursorBatchIterator i(*c);
- f(i);
- n += i.n();
- }
-
- if (!c->more())
- break;
- }
- } catch (std::exception&) {
- /* connection CANNOT be used anymore as more data may be on the way from the server.
- we have to reconnect.
- */
- _markFailed(kEndSession);
- throw;
- }
-
- return n;
-}
-
DBClientConnection::DBClientConnection(bool _autoReconnect,
double so_timeout,
MongoURI uri,
diff --git a/src/mongo/client/dbclient_connection.h b/src/mongo/client/dbclient_connection.h
index 45ffcf97b78..61096ba59b3 100644
--- a/src/mongo/client/dbclient_connection.h
+++ b/src/mongo/client/dbclient_connection.h
@@ -62,7 +62,6 @@ struct RemoteCommandResponse;
}
class DBClientCursor;
-class DBClientCursorBatchIterator;
/**
* A basic connection to the database.
@@ -142,38 +141,6 @@ public:
*/
void logout(const std::string& dbname, BSONObj& info) override;
- std::unique_ptr<DBClientCursor> query_DEPRECATED(
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings = client_deprecated::Query(),
- int limit = 0,
- int nToSkip = 0,
- const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- int batchSize = 0,
- boost::optional<BSONObj> readConcernObj = boost::none) override {
- checkConnection();
- return DBClientBase::query_DEPRECATED(nsOrUuid,
- filter,
- querySettings,
- limit,
- nToSkip,
- fieldsToReturn,
- queryOptions,
- batchSize,
- readConcernObj);
- }
-
- unsigned long long query_DEPRECATED(
- std::function<void(DBClientCursorBatchIterator&)>,
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize = 0,
- boost::optional<BSONObj> readConcernObj = boost::none) override;
-
using DBClientBase::runCommandWithTarget;
std::pair<rpc::UniqueReply, DBClientBase*> runCommandWithTarget(OpMsgRequest request) override;
std::pair<rpc::UniqueReply, std::shared_ptr<DBClientBase>> runCommandWithTarget(
diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp
index 33fb9e86508..dc6a32acd07 100644
--- a/src/mongo/client/dbclient_cursor.cpp
+++ b/src/mongo/client/dbclient_cursor.cpp
@@ -72,137 +72,32 @@ BSONObj addMetadata(DBClientBase* client, BSONObj command) {
}
}
-Message assembleCommandRequest(DBClientBase* cli,
+Message assembleCommandRequest(DBClientBase* client,
StringData database,
- int legacyQueryOptions,
- BSONObj legacyQuery) {
- auto request = rpc::upconvertRequest(database, std::move(legacyQuery), legacyQueryOptions);
- request.body = addMetadata(cli, std::move(request.body));
- return request.serialize();
-}
-
-Message assembleFromFindCommandRequest(DBClientBase* client,
- StringData database,
- const FindCommandRequest& request,
- const ReadPreferenceSetting& readPref) {
- BSONObj findCmd = request.toBSON(BSONObj());
-
+ BSONObj commandObj,
+ const ReadPreferenceSetting& readPref) {
// Add the $readPreference field to the request.
{
- BSONObjBuilder builder{findCmd};
+ BSONObjBuilder builder{commandObj};
readPref.toContainingBSON(&builder);
- findCmd = builder.obj();
+ commandObj = builder.obj();
}
- findCmd = addMetadata(client, std::move(findCmd));
- auto opMsgRequest = OpMsgRequest::fromDBAndBody(database, findCmd);
+ commandObj = addMetadata(client, std::move(commandObj));
+ auto opMsgRequest = OpMsgRequest::fromDBAndBody(database, commandObj);
return opMsgRequest.serialize();
}
-
-std::unique_ptr<FindCommandRequest> fromLegacyQuery(NamespaceStringOrUUID nssOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- const BSONObj& proj,
- int ntoskip,
- int queryOptions) {
- auto findCommand = std::make_unique<FindCommandRequest>(std::move(nssOrUuid));
-
- client_deprecated::initFindFromLegacyOptions(
- querySettings.getFullSettingsDeprecated(), queryOptions, findCommand.get());
-
- findCommand->setFilter(filter.getOwned());
-
- if (!proj.isEmpty()) {
- findCommand->setProjection(proj.getOwned());
- }
- if (ntoskip) {
- findCommand->setSkip(ntoskip);
- }
-
- uassertStatusOK(query_request_helper::validateFindCommandRequest(*findCommand));
-
- return findCommand;
-}
-
-int queryOptionsFromFindCommand(const FindCommandRequest& findCmd,
- const ReadPreferenceSetting& readPref,
- bool isExhaust) {
- int queryOptions = 0;
- if (readPref.canRunOnSecondary()) {
- queryOptions = queryOptions | QueryOption_SecondaryOk;
- }
- if (findCmd.getTailable()) {
- queryOptions = queryOptions | QueryOption_CursorTailable;
- }
- if (findCmd.getNoCursorTimeout()) {
- queryOptions = queryOptions | QueryOption_NoCursorTimeout;
- }
- if (findCmd.getAwaitData()) {
- queryOptions = queryOptions | QueryOption_AwaitData;
- }
- if (findCmd.getAllowPartialResults()) {
- queryOptions = queryOptions | QueryOption_PartialResults;
- }
- if (isExhaust) {
- queryOptions = queryOptions | QueryOption_Exhaust;
- }
- return queryOptions;
-}
-
} // namespace
-Message DBClientCursor::initFromLegacyRequest() {
- auto findCommand = fromLegacyQuery(_nsOrUuid,
- _filter,
- _querySettings,
- _fieldsToReturn ? *_fieldsToReturn : BSONObj(),
- _nToSkip,
- _opts);
-
- if (_limit) {
- findCommand->setLimit(_limit);
- }
- if (_batchSize) {
- findCommand->setBatchSize(_batchSize);
- }
-
- const BSONObj querySettings = _querySettings.getFullSettingsDeprecated();
- // We prioritize the readConcern parsed from the query object over '_readConcernObj'.
- if (!findCommand->getReadConcern()) {
- if (_readConcernObj) {
- findCommand->setReadConcern(_readConcernObj);
- } else {
- // If no readConcern was specified, initialize it to an empty readConcern object, ie.
- // equivalent to `readConcern: {}`. This ensures that mongos passes this empty
- // readConcern to shards.
- findCommand->setReadConcern(BSONObj());
- }
- }
-
- BSONObj cmd = findCommand->toBSON(BSONObj());
- if (auto readPref = querySettings["$readPreference"]) {
- // FindCommandRequest doesn't handle $readPreference.
- cmd = BSONObjBuilder(std::move(cmd)).append(readPref).obj();
- }
-
- return assembleCommandRequest(_client, _ns.db(), _opts, std::move(cmd));
-}
-
Message DBClientCursor::assembleInit() {
if (_cursorId) {
return assembleGetMore();
}
// We haven't gotten a cursorId yet so we need to issue the initial find command.
- if (_findRequest) {
- // The caller described their find command using the modern 'FindCommandRequest' API.
- return assembleFromFindCommandRequest(_client, _ns.db(), *_findRequest, _readPref);
- } else {
- // The caller used a legacy API to describe the find operation, which may include $-prefixed
- // directives in the format previously expected for an OP_QUERY. We need to upconvert this
- // OP_QUERY-inspired format to a find command.
- return initFromLegacyRequest();
- }
+ invariant(_findRequest);
+ BSONObj findCmd = _findRequest->toBSON(BSONObj());
+ return assembleCommandRequest(_client, _ns.db(), std::move(findCmd), _readPref);
}
Message DBClientCursor::assembleGetMore() {
@@ -217,10 +112,10 @@ Message DBClientCursor::assembleGetMore() {
getMoreRequest.setTerm(static_cast<std::int64_t>(*_term));
}
getMoreRequest.setLastKnownCommittedOpTime(_lastKnownCommittedOpTime);
- auto msg = assembleCommandRequest(_client, _ns.db(), _opts, getMoreRequest.toBSON({}));
+ auto msg = assembleCommandRequest(_client, _ns.db(), getMoreRequest.toBSON({}), _readPref);
// Set the exhaust flag if needed.
- if (_opts & QueryOption_Exhaust && msg.operation() == dbMsg) {
+ if (_isExhaust) {
OpMsg::setFlag(&msg, OpMsg::kExhaustSupported);
}
return msg;
@@ -251,8 +146,7 @@ bool DBClientCursor::init() {
void DBClientCursor::requestMore() {
// For exhaust queries, once the stream has been initiated we get data blasted to us
// from the remote server, without a need to send any more 'getMore' requests.
- const auto isExhaust = _opts & QueryOption_Exhaust;
- if (isExhaust && _connectionHasPendingReplies) {
+ if (_isExhaust && _connectionHasPendingReplies) {
return exhaustReceiveMore();
}
@@ -277,7 +171,7 @@ void DBClientCursor::requestMore() {
}
/**
- * With QueryOption_Exhaust, the server just blasts data at us. The end of a stream is marked with a
+ * For exhaust cursors, the server just blasts data at us. The end of a stream is marked with a
* cursor id of 0.
*/
void DBClientCursor::exhaustReceiveMore() {
@@ -295,9 +189,9 @@ BSONObj DBClientCursor::commandDataReceived(const Message& reply) {
invariant(op == opReply || op == dbMsg);
// Check if the reply indicates that it is part of an exhaust stream.
- const auto isExhaust = OpMsg::isFlagSet(reply, OpMsg::kMoreToCome);
- _connectionHasPendingReplies = isExhaust;
- if (isExhaust) {
+ const auto isExhaustReply = OpMsg::isFlagSet(reply, OpMsg::kMoreToCome);
+ _connectionHasPendingReplies = isExhaustReply;
+ if (isExhaustReply) {
_lastRequestId = reply.header().getId();
}
@@ -431,83 +325,20 @@ void DBClientCursor::attach(AScopedConnection* conn) {
DBClientCursor::DBClientCursor(DBClientBase* client,
const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- int limit,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize,
- boost::optional<BSONObj> readConcernObj)
- : DBClientCursor(client,
- nsOrUuid,
- filter,
- querySettings,
- 0, // cursorId
- limit,
- nToSkip,
- fieldsToReturn,
- queryOptions,
- batchSize,
- {},
- readConcernObj,
- boost::none) {}
-
-DBClientCursor::DBClientCursor(DBClientBase* client,
- const NamespaceStringOrUUID& nsOrUuid,
long long cursorId,
- int limit,
- int queryOptions,
+ bool isExhaust,
std::vector<BSONObj> initialBatch,
boost::optional<Timestamp> operationTime,
boost::optional<BSONObj> postBatchResumeToken)
- : DBClientCursor(client,
- nsOrUuid,
- BSONObj(), // filter
- client_deprecated::Query(),
- cursorId,
- limit,
- 0, // nToSkip
- nullptr, // fieldsToReturn
- queryOptions,
- 0,
- std::move(initialBatch), // batchSize
- boost::none,
- operationTime,
- postBatchResumeToken) {}
-
-DBClientCursor::DBClientCursor(DBClientBase* client,
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- long long cursorId,
- int limit,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize,
- std::vector<BSONObj> initialBatch,
- boost::optional<BSONObj> readConcernObj,
- boost::optional<Timestamp> operationTime,
- boost::optional<BSONObj> postBatchResumeToken)
: _batch{std::move(initialBatch)},
_client(client),
_originalHost(_client->getServerAddress()),
_nsOrUuid(nsOrUuid),
_ns(nsOrUuid.nss() ? *nsOrUuid.nss() : NamespaceString(nsOrUuid.dbname())),
_cursorId(cursorId),
- _batchSize(batchSize == 1 ? 2 : batchSize),
- _limit(limit),
- _filter(filter),
- _querySettings(querySettings),
- _nToSkip(nToSkip),
- _fieldsToReturn(fieldsToReturn),
- _readConcernObj(readConcernObj),
- _opts(queryOptions),
+ _isExhaust(isExhaust),
_operationTime(operationTime),
- _postBatchResumeToken(postBatchResumeToken) {
- tassert(5746103, "DBClientCursor limit must be non-negative", _limit >= 0);
-}
+ _postBatchResumeToken(postBatchResumeToken) {}
DBClientCursor::DBClientCursor(DBClientBase* client,
FindCommandRequest findRequest,
@@ -518,10 +349,9 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
_nsOrUuid(findRequest.getNamespaceOrUUID()),
_ns(_nsOrUuid.nss() ? *_nsOrUuid.nss() : NamespaceString(_nsOrUuid.dbname())),
_batchSize(findRequest.getBatchSize().value_or(0)),
- _limit(findRequest.getLimit().value_or(0)),
_findRequest(std::move(findRequest)),
_readPref(readPref),
- _opts(queryOptionsFromFindCommand(*_findRequest, _readPref, isExhaust)) {
+ _isExhaust(isExhaust) {
// Internal clients should always pass an explicit readConcern. If the caller did not already
    // pass a readConcern then we must explicitly initialize an empty readConcern so that it ends up
// in the serialized version of the find command which will be sent across the wire.
@@ -565,8 +395,7 @@ StatusWith<std::unique_ptr<DBClientCursor>> DBClientCursor::fromAggregationReque
return {std::make_unique<DBClientCursor>(client,
aggRequest.getNamespace(),
cursorId,
- 0,
- useExhaust ? QueryOption_Exhaust : 0,
+ useExhaust,
firstBatch,
operationTime,
postBatchResumeToken)};
@@ -594,5 +423,4 @@ void DBClientCursor::kill() {
_cursorId = 0;
}
-
} // namespace mongo
diff --git a/src/mongo/client/dbclient_cursor.h b/src/mongo/client/dbclient_cursor.h
index 941eda47318..f13f861d96c 100644
--- a/src/mongo/client/dbclient_cursor.h
+++ b/src/mongo/client/dbclient_cursor.h
@@ -31,10 +31,8 @@
#include <stack>
-#include "mongo/client/client_deprecated.h"
+#include "mongo/client/read_preference.h"
#include "mongo/db/dbmessage.h"
-#include "mongo/db/jsobj.h"
-#include "mongo/db/json.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/find_command_gen.h"
#include "mongo/rpc/message.h"
@@ -61,31 +59,26 @@ public:
bool secondaryOk,
bool useExhaust);
+ /**
+ * Constructs a 'DBClientCursor' that will be opened by issuing the find command described by
+ * 'findRequest'.
+ */
DBClientCursor(DBClientBase* client,
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- int limit,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int bs,
- boost::optional<BSONObj> readConcernObj = boost::none);
+ FindCommandRequest findRequest,
+ const ReadPreferenceSetting& readPref,
+ bool isExhaust);
+ /**
+ * Constructs a 'DBClientCursor' from a pre-existing cursor id.
+ */
DBClientCursor(DBClientBase* client,
const NamespaceStringOrUUID& nsOrUuid,
long long cursorId,
- int limit,
- int options,
+ bool isExhaust,
std::vector<BSONObj> initialBatch = {},
boost::optional<Timestamp> operationTime = boost::none,
boost::optional<BSONObj> postBatchResumeToken = boost::none);
- DBClientCursor(DBClientBase* client,
- FindCommandRequest findRequest,
- const ReadPreferenceSetting& readPref,
- bool isExhaust);
-
virtual ~DBClientCursor();
/**
@@ -170,11 +163,11 @@ public:
}
bool tailable() const {
- return (_opts & QueryOption_CursorTailable) != 0;
+ return _findRequest && _findRequest->getTailable();
}
bool tailableAwaitData() const {
- return tailable() && (_opts & QueryOption_AwaitData);
+ return tailable() && _findRequest->getAwaitData();
}
/**
@@ -277,21 +270,6 @@ protected:
Batch _batch;
private:
- DBClientCursor(DBClientBase* client,
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- long long cursorId,
- int limit,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int bs,
- std::vector<BSONObj> initialBatch,
- boost::optional<BSONObj> readConcernObj,
- boost::optional<Timestamp> operationTime,
- boost::optional<BSONObj> postBatchResumeToken = boost::none);
-
void dataReceived(const Message& reply) {
bool retry;
std::string lazyHost;
@@ -312,13 +290,6 @@ private:
Message assembleInit();
Message assembleGetMore();
- /**
- * Constructs the initial find commmand request based on a legacy OP_QUERY-style description of
- * the find operation. Only used if the caller constructed the 'DBClientCursor' with the legacy
- * API.
- */
- Message initFromLegacyRequest();
-
DBClientBase* _client;
std::string _originalHost;
NamespaceStringOrUUID _nsOrUuid;
@@ -336,32 +307,16 @@ private:
bool _connectionHasPendingReplies = false;
int _lastRequestId = 0;
- int _batchSize;
- int _limit = 0;
+ int _batchSize = 0;
- // If the caller describes the find command being executed by this cursor as a
- // 'FindCommandRequest', then that request object and the associated read preference are set
- // here. Otherwise, if the caller uses the legacy OP_QUERY-inspired API, these members are
- // default-initialized but never used.
+ // A description of the find command provided by the caller which is used to open the cursor.
+ //
+ // Has a value of boost::none if the caller constructed this cursor using a pre-existing cursor
+ // id.
boost::optional<FindCommandRequest> _findRequest;
- ReadPreferenceSetting _readPref;
- // These data members are only used if the cursor was constructed using the legacy
- // OP_QUERY-inspired API. If the cursor was constructed using the 'FindCommandRequest'-based
- // API, these are initialized to their default values but never used.
- BSONObj _filter;
- client_deprecated::Query _querySettings;
- int _nToSkip = 0;
- const BSONObj* _fieldsToReturn = nullptr;
- boost::optional<BSONObj> _readConcernObj;
-
- // This has the same meaning as the flags bit vector from the no-longer-supported OP_QUERY wire
- // protocol message. However, it is initialized even if the caller constructed the cursor using
- // the 'FindCommandRequest`-based API.
- //
- // We should eventually stop using the OP_QUERY flags bit vector in server code, since OP_QUERY
- // is no longer supported.
- int _opts;
+ ReadPreferenceSetting _readPref;
+ bool _isExhaust;
Milliseconds _awaitDataTimeout = Milliseconds{0};
boost::optional<long long> _term;
@@ -370,38 +325,4 @@ private:
boost::optional<BSONObj> _postBatchResumeToken;
};
-/** iterate over objects in current batch only - will not cause a network call
- */
-class DBClientCursorBatchIterator {
-public:
- DBClientCursorBatchIterator(DBClientCursor& c) : _c(c), _n() {}
- bool moreInCurrentBatch() {
- return _c.moreInCurrentBatch();
- }
- BSONObj nextSafe() {
- massert(13383, "BatchIterator empty", moreInCurrentBatch());
- ++_n;
- return _c.nextSafe();
- }
- int n() const {
- return _n;
- }
- // getNamespaceString() will return the NamespaceString returned by the 'find' command.
- const NamespaceString& getNamespaceString() {
- return _c.getNamespaceString();
- }
-
- long long getCursorId() const {
- return _c.getCursorId();
- }
-
- boost::optional<BSONObj> getPostBatchResumeToken() const {
- return _c.getPostBatchResumeToken();
- }
-
-private:
- DBClientCursor& _c;
- int _n;
-};
-
} // namespace mongo
diff --git a/src/mongo/client/dbclient_mockcursor.cpp b/src/mongo/client/dbclient_mockcursor.cpp
index 7082f55517e..0e33d4360d1 100644
--- a/src/mongo/client/dbclient_mockcursor.cpp
+++ b/src/mongo/client/dbclient_mockcursor.cpp
@@ -42,7 +42,7 @@ DBClientMockCursor::DBClientMockCursor(mongo::DBClientBase* client,
const BSONArray& mockCollection,
const bool provideResumeToken,
unsigned long batchSize)
- : mongo::DBClientCursor(client, NamespaceString(), 0, 0, 0),
+ : mongo::DBClientCursor(client, NamespaceString(), 0 /*cursorId*/, false /*isExhaust*/),
_collectionArray(mockCollection),
_iter(_collectionArray),
_provideResumeToken(provideResumeToken),
diff --git a/src/mongo/client/dbclient_mockcursor.h b/src/mongo/client/dbclient_mockcursor.h
index 1138ee41286..7430a1aa3cb 100644
--- a/src/mongo/client/dbclient_mockcursor.h
+++ b/src/mongo/client/dbclient_mockcursor.h
@@ -35,9 +35,6 @@
namespace mongo {
-// DBClientMockCursor supports only a small subset of DBClientCursor operations.
-// It supports only iteration, including use of DBClientCursorBatchIterator. If a batchsize
-// is given, iteration is broken up into multiple batches at batchSize boundaries.
class DBClientMockCursor : public DBClientCursor {
public:
DBClientMockCursor(mongo::DBClientBase* client,
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index bf4259dd6ed..a48fe50a2fa 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -27,15 +27,13 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/client/dbclient_rs.h"
#include <memory>
#include <utility>
#include "mongo/bson/util/builder.h"
+#include "mongo/client/client_deprecated.h"
#include "mongo/client/connpool.h"
#include "mongo/client/dbclient_cursor.h"
#include "mongo/client/global_conn_pool.h"
@@ -87,42 +85,6 @@ public:
*/
const size_t MAX_RETRY = 3;
-/**
- * Extracts the read preference settings from the query document. Note that this method
- * assumes that the query is ok for secondaries so it defaults to
- * ReadPreference::SecondaryPreferred when nothing is specified. Supports the following
- * format:
- *
- * Format A (official format):
- * { query: <actual query>, $readPreference: <read pref obj> }
- *
- * Format B (unofficial internal format from mongos):
- * { <actual query>, $queryOptions: { $readPreference: <read pref obj> }}
- *
- * @param query the raw query document
- *
- * @return the read preference setting if a read preference exists, otherwise the default read
- * preference of Primary_Only. If the tags field was not present, it will contain one
- * empty tag document {} which matches any tag.
- *
- * @throws AssertionException if the read preference object is malformed
- */
-std::unique_ptr<ReadPreferenceSetting> _extractReadPref(
- const client_deprecated::Query& querySettings, int queryOptions) {
- // Default read pref is primary only or secondary preferred with secondaryOK
- const auto defaultReadPref = queryOptions & QueryOption_SecondaryOk
- ? ReadPreference::SecondaryPreferred
- : ReadPreference::PrimaryOnly;
-
- BSONObj readPrefContainingObj = querySettings.getFullSettingsDeprecated();
- if (auto elem = readPrefContainingObj["$queryOptions"]) {
- // The readPreference is embedded in the $queryOptions field.
- readPrefContainingObj = elem.Obj();
- }
- return std::make_unique<ReadPreferenceSetting>(uassertStatusOK(
- ReadPreferenceSetting::fromContainingBSON(readPrefContainingObj, defaultReadPref)));
-}
-
} // namespace
// --------------------------------
@@ -592,89 +554,6 @@ std::unique_ptr<DBClientCursor> DBClientReplicaSet::find(FindCommandRequest find
return checkPrimary()->find(std::move(findRequest), readPref, exhaustMode);
}
-unique_ptr<DBClientCursor> DBClientReplicaSet::query_DEPRECATED(
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- int limit,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize,
- boost::optional<BSONObj> readConcernObj) {
- shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(querySettings, queryOptions));
- invariant(nsOrUuid.nss());
- const string ns = nsOrUuid.nss()->ns();
- if (_isSecondaryQuery(ns, filter, *readPref)) {
- LOGV2_DEBUG(20133,
- 3,
- "dbclient_rs query using secondary or tagged node selection in {replicaSet}, "
- "read pref is {readPref} "
- "(primary : {primary}, lastTagged : {lastTagged})",
- "dbclient_rs query using secondary or tagged node selection",
- "replicaSet"_attr = _getMonitor()->getName(),
- "readPref"_attr = readPref->toString(),
- "primary"_attr =
- (_primary.get() != nullptr ? _primary->getServerAddress() : "[not cached]"),
- "lastTagged"_attr = (_lastSecondaryOkConn.get() != nullptr
- ? _lastSecondaryOkConn->getServerAddress()
- : "[not cached]"));
-
- string lastNodeErrMsg;
-
- for (size_t retry = 0; retry < MAX_RETRY; retry++) {
- try {
- DBClientConnection* conn = selectNodeUsingTags(readPref);
-
- if (conn == nullptr) {
- break;
- }
-
- unique_ptr<DBClientCursor> cursor = conn->query_DEPRECATED(nsOrUuid,
- filter,
- querySettings,
- limit,
- nToSkip,
- fieldsToReturn,
- queryOptions,
- batchSize,
- readConcernObj);
-
- return checkSecondaryQueryResult(std::move(cursor));
- } catch (const DBException& ex) {
- const Status status = ex.toStatus(str::stream() << "can't query replica set node "
- << _lastSecondaryOkHost);
- lastNodeErrMsg = status.reason();
- _invalidateLastSecondaryOkCache(status);
- }
- }
-
- StringBuilder assertMsg;
- assertMsg << "Failed to do query, no good nodes in " << _getMonitor()->getName();
- if (!lastNodeErrMsg.empty()) {
- assertMsg << ", last error: " << lastNodeErrMsg;
- }
-
- uasserted(16370, assertMsg.str());
- }
-
- LOGV2_DEBUG(20134,
- 3,
- "dbclient_rs query to primary node in {replicaSet}",
- "dbclient_rs query to primary node",
- "replicaSet"_attr = _getMonitor()->getName());
-
- return checkPrimary()->query_DEPRECATED(nsOrUuid,
- filter,
- querySettings,
- limit,
- nToSkip,
- fieldsToReturn,
- queryOptions,
- batchSize,
- readConcernObj);
-}
-
void DBClientReplicaSet::killCursor(const NamespaceString& ns, long long cursorID) {
// we should never call killCursor on a replica set connection
// since we don't know which server it belongs to
@@ -819,70 +698,6 @@ void DBClientReplicaSet::say(Message& toSend, bool isRetry, string* actualServer
if (!isRetry)
_lastClient = nullptr;
- const int lastOp = toSend.operation();
-
- if (lastOp == dbQuery) {
- // TODO: might be possible to do this faster by changing api
- DbMessage dm(toSend);
- QueryMessage qm(dm);
-
- shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(
- client_deprecated::Query::fromBSONDeprecated(qm.query), qm.queryOptions));
- if (_isSecondaryQuery(qm.ns, qm.query, *readPref)) {
- LOGV2_DEBUG(20141,
- 3,
- "dbclient_rs say using secondary or tagged node selection in {replicaSet}, "
- "read pref is {readPref} "
- "(primary : {primary}, lastTagged : {lastTagged})",
- "dbclient_rs say using secondary or tagged node selection",
- "replicaSet"_attr = _getMonitor()->getName(),
- "readPref"_attr = readPref->toString(),
- "primary"_attr = (_primary.get() != nullptr ? _primary->getServerAddress()
- : "[not cached]"),
- "lastTagged"_attr = (_lastSecondaryOkConn.get() != nullptr
- ? _lastSecondaryOkConn->getServerAddress()
- : "[not cached]"));
-
- string lastNodeErrMsg;
-
- for (size_t retry = 0; retry < MAX_RETRY; retry++) {
- try {
- DBClientConnection* conn = selectNodeUsingTags(readPref);
-
- if (conn == nullptr) {
- break;
- }
-
- if (actualServer != nullptr) {
- *actualServer = conn->getServerAddress();
- }
-
- conn->say(toSend);
-
- _lastClient = conn;
- } catch (const DBException& ex) {
- const Status status =
- ex.toStatus(str::stream() << "can't callLazy replica set node "
- << _lastSecondaryOkHost.toString());
- lastNodeErrMsg = status.reason();
- _invalidateLastSecondaryOkCache(status);
-
- continue;
- }
-
- return;
- }
-
- StringBuilder assertMsg;
- assertMsg << "Failed to call say, no good nodes in " << _getMonitor()->getName();
- if (!lastNodeErrMsg.empty()) {
- assertMsg << ", last error: " << lastNodeErrMsg;
- }
-
- uasserted(16380, assertMsg.str());
- }
- }
-
LOGV2_DEBUG(20142,
3,
"dbclient_rs say to primary node in {replicaSet}",
@@ -984,60 +799,6 @@ bool DBClientReplicaSet::call(Message& toSend,
Message& response,
bool assertOk,
string* actualServer) {
- const char* ns = nullptr;
-
- if (toSend.operation() == dbQuery) {
- // TODO: might be possible to do this faster by changing api
- DbMessage dm(toSend);
- QueryMessage qm(dm);
- ns = qm.ns;
-
- shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(
- client_deprecated::Query::fromBSONDeprecated(qm.query), qm.queryOptions));
- if (_isSecondaryQuery(ns, qm.query, *readPref)) {
- LOGV2_DEBUG(
- 20145,
- 3,
- "dbclient_rs call using secondary or tagged node selection in {replicaSet}, "
- "read pref is {readPref} "
- "(primary : {primary}, lastTagged : {lastTagged})",
- "dbclient_rs call using secondary or tagged node selection",
- "replicaSet"_attr = _getMonitor()->getName(),
- "readPref"_attr = readPref->toString(),
- "primary"_attr =
- (_primary.get() != nullptr ? _primary->getServerAddress() : "[not cached]"),
- "lastTagged"_attr = (_lastSecondaryOkConn.get() != nullptr
- ? _lastSecondaryOkConn->getServerAddress()
- : "[not cached]"));
-
- for (size_t retry = 0; retry < MAX_RETRY; retry++) {
- try {
- DBClientConnection* conn = selectNodeUsingTags(readPref);
-
- if (conn == nullptr) {
- return false;
- }
-
- if (actualServer != nullptr) {
- *actualServer = conn->getServerAddress();
- }
-
- return conn->call(toSend, response, assertOk, nullptr);
- } catch (const DBException& ex) {
- if (actualServer)
- *actualServer = "";
-
- const Status status = ex.toStatus();
- _invalidateLastSecondaryOkCache(status.withContext(
- str::stream() << "can't call replica set node " << _lastSecondaryOkHost));
- }
- }
-
- // Was not able to successfully send after max retries
- return false;
- }
- }
-
LOGV2_DEBUG(20146,
3,
"dbclient_rs call to primary node in {replicaSet}",
@@ -1051,20 +812,6 @@ bool DBClientReplicaSet::call(Message& toSend,
if (!m->call(toSend, response, assertOk, nullptr))
return false;
- if (ns) {
- QueryResult::View res = response.singleData().view2ptr();
- if (res.getNReturned() == 1) {
- BSONObj x(res.data());
- if (str::contains(ns, "$cmd")) {
- if (isNotPrimaryErrorString(x["errmsg"]))
- isNotPrimary();
- } else {
- if (isNotPrimaryErrorString(getErrField(x)))
- isNotPrimary();
- }
- }
- }
-
return true;
}
diff --git a/src/mongo/client/dbclient_rs.h b/src/mongo/client/dbclient_rs.h
index ebab6854ffc..fa796039f2c 100644
--- a/src/mongo/client/dbclient_rs.h
+++ b/src/mongo/client/dbclient_rs.h
@@ -58,7 +58,6 @@ typedef std::shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorPtr;
class DBClientReplicaSet : public DBClientBase {
public:
using DBClientBase::find;
- using DBClientBase::query_DEPRECATED;
/** Call connect() after constructing. autoReconnect is always on for DBClientReplicaSet
* connections. */
@@ -93,18 +92,6 @@ public:
const ReadPreferenceSetting& readPref,
ExhaustMode exhaustMode) override;
- /** throws userassertion "no primary found" */
- std::unique_ptr<DBClientCursor> query_DEPRECATED(
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- int limit = 0,
- int nToSkip = 0,
- const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- int batchSize = 0,
- boost::optional<BSONObj> readConcernObj = boost::none) override;
-
void insert(const std::string& ns,
BSONObj obj,
bool ordered = true,
diff --git a/src/mongo/client/dbclient_rs_test.cpp b/src/mongo/client/dbclient_rs_test.cpp
index 2bbbc78858a..7053d8fe623 100644
--- a/src/mongo/client/dbclient_rs_test.cpp
+++ b/src/mongo/client/dbclient_rs_test.cpp
@@ -152,57 +152,16 @@ void assertNodeSelected(MockReplicaSet* replSet, ReadPreference rp, StringData h
assertOneOfNodesSelected(replSet, rp, std::vector<std::string>{host.toString()});
}
-/**
- * Runs a find operation against 'replConn' using both the modern 'find()' API and the deprecated
- * API. In both cases, verifies the results by passing the resulting cursor to 'assertionFunc'.
- *
- * The operation is a simple find command against the given NamespaceString with no arguments other
- * than 'readPref'.
- */
-void assertWithBothQueryApis(DBClientReplicaSet& replConn,
- const NamespaceString& nss,
- ReadPreference readPref,
- std::function<void(std::unique_ptr<DBClientCursor>)> assertionFunc) {
- std::unique_ptr<DBClientCursor> cursor =
- replConn.find(FindCommandRequest{nss}, ReadPreferenceSetting{readPref});
- assertionFunc(std::move(cursor));
-
- client_deprecated::Query readPrefHolder;
- readPrefHolder.readPref(readPref, BSONArray{});
- cursor = replConn.query_DEPRECATED(nss, BSONObj{}, readPrefHolder);
- assertionFunc(std::move(cursor));
-}
-
-/**
- * Runs a find operation against 'replConn' using both the modern 'find()' API and the deprecated
- * API. In both cases, verifies that the find operation throws an exception.
- *
- * The operation is a simple find command against the given NamespaceString with no arguments other
- * than 'readPref'.
- */
-void assertBothQueryApisThrow(DBClientReplicaSet& replConn,
- const NamespaceString& nss,
- ReadPreference readPref) {
- ASSERT_THROWS(replConn.find(FindCommandRequest{nss}, ReadPreferenceSetting{readPref}),
- AssertionException);
-
- client_deprecated::Query readPrefHolder;
- readPrefHolder.readPref(readPref, BSONArray{});
- ASSERT_THROWS(replConn.query_DEPRECATED(nss, BSONObj{}, readPrefHolder), AssertionException);
-}
-
TEST_F(BasicRS, QueryPrimary) {
MockReplicaSet* replSet = getReplSet();
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
// Note: IdentityNS contains the name of the server.
- assertWithBothQueryApis(replConn,
- NamespaceString{IdentityNS},
- ReadPreference::PrimaryOnly,
- [&](std::unique_ptr<DBClientCursor> cursor) {
- BSONObj doc = cursor->next();
- ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str());
- });
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ auto cursor =
+ replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::PrimaryOnly});
+ BSONObj doc = cursor->next();
+ ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str());
}
TEST_F(BasicRS, CommandPrimary) {
@@ -214,14 +173,11 @@ TEST_F(BasicRS, QuerySecondaryOnly) {
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
// Note: IdentityNS contains the name of the server.
- assertWithBothQueryApis(replConn,
- NamespaceString{IdentityNS},
- ReadPreference::SecondaryOnly,
- [&](std::unique_ptr<DBClientCursor> cursor) {
- BSONObj doc = cursor->next();
- ASSERT_EQUALS(replSet->getSecondaries().front(),
- doc[HostField.name()].str());
- });
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ auto cursor =
+ replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::SecondaryOnly});
+ BSONObj doc = cursor->next();
+ ASSERT_EQUALS(replSet->getSecondaries().front(), doc[HostField.name()].str());
}
TEST_F(BasicRS, CommandSecondaryOnly) {
@@ -234,13 +190,11 @@ TEST_F(BasicRS, QueryPrimaryPreferred) {
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
// Note: IdentityNS contains the name of the server.
- assertWithBothQueryApis(replConn,
- NamespaceString{IdentityNS},
- ReadPreference::PrimaryPreferred,
- [&](std::unique_ptr<DBClientCursor> cursor) {
- BSONObj doc = cursor->next();
- ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str());
- });
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ auto cursor =
+ replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::PrimaryPreferred});
+ BSONObj doc = cursor->next();
+ ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str());
}
TEST_F(BasicRS, CommandPrimaryPreferred) {
@@ -252,14 +206,11 @@ TEST_F(BasicRS, QuerySecondaryPreferred) {
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
// Note: IdentityNS contains the name of the server.
- assertWithBothQueryApis(replConn,
- NamespaceString{IdentityNS},
- ReadPreference::SecondaryPreferred,
- [&](std::unique_ptr<DBClientCursor> cursor) {
- BSONObj doc = cursor->next();
- ASSERT_EQUALS(replSet->getSecondaries().front(),
- doc[HostField.name()].str());
- });
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ auto cursor = replConn.find(std::move(findCmd),
+ ReadPreferenceSetting{ReadPreference::SecondaryPreferred});
+ BSONObj doc = cursor->next();
+ ASSERT_EQUALS(replSet->getSecondaries().front(), doc[HostField.name()].str());
}
TEST_F(BasicRS, CommandSecondaryPreferred) {
@@ -319,7 +270,10 @@ TEST_F(AllNodesDown, QueryPrimary) {
MockReplicaSet* replSet = getReplSet();
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
- assertBothQueryApisThrow(replConn, NamespaceString{IdentityNS}, ReadPreference::PrimaryOnly);
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ ASSERT_THROWS(
+ replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::PrimaryOnly}),
+ AssertionException);
}
TEST_F(AllNodesDown, CommandPrimary) {
@@ -330,7 +284,10 @@ TEST_F(AllNodesDown, QuerySecondaryOnly) {
MockReplicaSet* replSet = getReplSet();
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
- assertBothQueryApisThrow(replConn, NamespaceString{IdentityNS}, ReadPreference::SecondaryOnly);
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ ASSERT_THROWS(
+ replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::SecondaryOnly}),
+ AssertionException);
}
TEST_F(AllNodesDown, CommandSecondaryOnly) {
@@ -341,8 +298,10 @@ TEST_F(AllNodesDown, QueryPrimaryPreferred) {
MockReplicaSet* replSet = getReplSet();
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
- assertBothQueryApisThrow(
- replConn, NamespaceString{IdentityNS}, ReadPreference::PrimaryPreferred);
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ ASSERT_THROWS(
+ replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::PrimaryPreferred}),
+ AssertionException);
}
TEST_F(AllNodesDown, CommandPrimaryPreferred) {
@@ -353,8 +312,10 @@ TEST_F(AllNodesDown, QuerySecondaryPreferred) {
MockReplicaSet* replSet = getReplSet();
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
- assertBothQueryApisThrow(
- replConn, NamespaceString{IdentityNS}, ReadPreference::SecondaryPreferred);
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ ASSERT_THROWS(replConn.find(std::move(findCmd),
+ ReadPreferenceSetting{ReadPreference::SecondaryPreferred}),
+ AssertionException);
}
TEST_F(AllNodesDown, CommandSecondaryPreferred) {
@@ -365,7 +326,9 @@ TEST_F(AllNodesDown, QueryNearest) {
MockReplicaSet* replSet = getReplSet();
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
- assertBothQueryApisThrow(replConn, NamespaceString{IdentityNS}, ReadPreference::Nearest);
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ ASSERT_THROWS(replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::Nearest}),
+ AssertionException);
}
TEST_F(AllNodesDown, CommandNearest) {
@@ -409,7 +372,10 @@ TEST_F(PrimaryDown, QueryPrimary) {
MockReplicaSet* replSet = getReplSet();
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
- assertBothQueryApisThrow(replConn, NamespaceString{IdentityNS}, ReadPreference::PrimaryOnly);
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ ASSERT_THROWS(
+ replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::PrimaryOnly}),
+ AssertionException);
}
TEST_F(PrimaryDown, CommandPrimary) {
@@ -421,14 +387,11 @@ TEST_F(PrimaryDown, QuerySecondaryOnly) {
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
// Note: IdentityNS contains the name of the server.
- assertWithBothQueryApis(replConn,
- NamespaceString{IdentityNS},
- ReadPreference::SecondaryOnly,
- [&](std::unique_ptr<DBClientCursor> cursor) {
- BSONObj doc = cursor->next();
- ASSERT_EQUALS(replSet->getSecondaries().front(),
- doc[HostField.name()].str());
- });
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ auto cursor =
+ replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::SecondaryOnly});
+ BSONObj doc = cursor->next();
+ ASSERT_EQUALS(replSet->getSecondaries().front(), doc[HostField.name()].str());
}
TEST_F(PrimaryDown, CommandSecondaryOnly) {
@@ -441,14 +404,11 @@ TEST_F(PrimaryDown, QueryPrimaryPreferred) {
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
// Note: IdentityNS contains the name of the server.
- assertWithBothQueryApis(replConn,
- NamespaceString{IdentityNS},
- ReadPreference::PrimaryPreferred,
- [&](std::unique_ptr<DBClientCursor> cursor) {
- BSONObj doc = cursor->next();
- ASSERT_EQUALS(replSet->getSecondaries().front(),
- doc[HostField.name()].str());
- });
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ auto cursor =
+ replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::PrimaryPreferred});
+ BSONObj doc = cursor->next();
+ ASSERT_EQUALS(replSet->getSecondaries().front(), doc[HostField.name()].str());
}
TEST_F(PrimaryDown, CommandPrimaryPreferred) {
@@ -461,14 +421,11 @@ TEST_F(PrimaryDown, QuerySecondaryPreferred) {
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
// Note: IdentityNS contains the name of the server.
- assertWithBothQueryApis(replConn,
- NamespaceString{IdentityNS},
- ReadPreference::SecondaryPreferred,
- [&](std::unique_ptr<DBClientCursor> cursor) {
- BSONObj doc = cursor->next();
- ASSERT_EQUALS(replSet->getSecondaries().front(),
- doc[HostField.name()].str());
- });
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ auto cursor = replConn.find(std::move(findCmd),
+ ReadPreferenceSetting{ReadPreference::SecondaryPreferred});
+ BSONObj doc = cursor->next();
+ ASSERT_EQUALS(replSet->getSecondaries().front(), doc[HostField.name()].str());
}
TEST_F(PrimaryDown, CommandSecondaryPreferred) {
@@ -480,14 +437,10 @@ TEST_F(PrimaryDown, Nearest) {
MockReplicaSet* replSet = getReplSet();
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
- assertWithBothQueryApis(replConn,
- NamespaceString{IdentityNS},
- ReadPreference::Nearest,
- [&](std::unique_ptr<DBClientCursor> cursor) {
- BSONObj doc = cursor->next();
- ASSERT_EQUALS(replSet->getSecondaries().front(),
- doc[HostField.name()].str());
- });
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ auto cursor = replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::Nearest});
+ BSONObj doc = cursor->next();
+ ASSERT_EQUALS(replSet->getSecondaries().front(), doc[HostField.name()].str());
}
/**
@@ -529,13 +482,11 @@ TEST_F(SecondaryDown, QueryPrimary) {
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
// Note: IdentityNS contains the name of the server.
- assertWithBothQueryApis(replConn,
- NamespaceString{IdentityNS},
- ReadPreference::PrimaryOnly,
- [&](std::unique_ptr<DBClientCursor> cursor) {
- BSONObj doc = cursor->next();
- ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str());
- });
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ auto cursor =
+ replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::PrimaryOnly});
+ BSONObj doc = cursor->next();
+ ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str());
}
TEST_F(SecondaryDown, CommandPrimary) {
@@ -546,7 +497,10 @@ TEST_F(SecondaryDown, QuerySecondaryOnly) {
MockReplicaSet* replSet = getReplSet();
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
- assertBothQueryApisThrow(replConn, NamespaceString{IdentityNS}, ReadPreference::SecondaryOnly);
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ ASSERT_THROWS(
+ replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::SecondaryOnly}),
+ AssertionException);
}
TEST_F(SecondaryDown, CommandSecondaryOnly) {
@@ -558,13 +512,11 @@ TEST_F(SecondaryDown, QueryPrimaryPreferred) {
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
// Note: IdentityNS contains the name of the server.
- assertWithBothQueryApis(replConn,
- NamespaceString{IdentityNS},
- ReadPreference::PrimaryPreferred,
- [&](std::unique_ptr<DBClientCursor> cursor) {
- BSONObj doc = cursor->next();
- ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str());
- });
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ auto cursor =
+ replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::PrimaryPreferred});
+ BSONObj doc = cursor->next();
+ ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str());
}
TEST_F(SecondaryDown, CommandPrimaryPreferred) {
@@ -575,13 +527,11 @@ TEST_F(SecondaryDown, QuerySecondaryPreferred) {
MockReplicaSet* replSet = getReplSet();
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
- assertWithBothQueryApis(replConn,
- NamespaceString{IdentityNS},
- ReadPreference::SecondaryPreferred,
- [&](std::unique_ptr<DBClientCursor> cursor) {
- BSONObj doc = cursor->next();
- ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str());
- });
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ auto cursor = replConn.find(std::move(findCmd),
+ ReadPreferenceSetting{ReadPreference::SecondaryPreferred});
+ BSONObj doc = cursor->next();
+ ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str());
}
TEST_F(SecondaryDown, CommandSecondaryPreferred) {
@@ -592,13 +542,10 @@ TEST_F(SecondaryDown, QueryNearest) {
MockReplicaSet* replSet = getReplSet();
DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData());
- assertWithBothQueryApis(replConn,
- NamespaceString{IdentityNS},
- ReadPreference::Nearest,
- [&](std::unique_ptr<DBClientCursor> cursor) {
- BSONObj doc = cursor->next();
- ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str());
- });
+ FindCommandRequest findCmd{NamespaceString{IdentityNS}};
+ auto cursor = replConn.find(std::move(findCmd), ReadPreferenceSetting{ReadPreference::Nearest});
+ BSONObj doc = cursor->next();
+ ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str());
}
TEST_F(SecondaryDown, CommandNearest) {
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index ea0817030cc..f06e89c7c3d 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -510,6 +510,14 @@ env.Library(
)
env.Library(
+ target='change_streams_cluster_parameter',
+ source=['change_streams_cluster_parameter.idl', 'change_streams_cluster_parameter.cpp'],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/idl/cluster_server_parameter',
+ ],
+)
+
+env.Library(
target='change_stream_change_collection_manager',
source=[
'change_stream_change_collection_manager.cpp',
@@ -2508,6 +2516,7 @@ env.Library(
'$BUILD_DIR/mongo/client/clientdriver_minimal',
'$BUILD_DIR/mongo/db/change_stream_change_collection_manager',
'$BUILD_DIR/mongo/db/change_stream_options_manager',
+ '$BUILD_DIR/mongo/db/change_streams_cluster_parameter',
'$BUILD_DIR/mongo/db/pipeline/change_stream_expired_pre_image_remover',
'$BUILD_DIR/mongo/idl/cluster_server_parameter',
'$BUILD_DIR/mongo/idl/cluster_server_parameter_op_observer',
@@ -2680,6 +2689,7 @@ if wiredtiger:
source=[
'cancelable_operation_context_test.cpp',
'catalog_raii_test.cpp',
+ 'change_streams_cluster_parameter_test.cpp',
'client_strand_test.cpp',
'client_context_test.cpp',
'collection_index_usage_tracker_test.cpp',
@@ -2755,6 +2765,7 @@ if wiredtiger:
'$BUILD_DIR/mongo/db/catalog/import_collection_oplog_entry',
'$BUILD_DIR/mongo/db/catalog/index_build_entry_idl',
'$BUILD_DIR/mongo/db/catalog/local_oplog_info',
+ '$BUILD_DIR/mongo/db/change_streams_cluster_parameter',
'$BUILD_DIR/mongo/db/mongohasher',
'$BUILD_DIR/mongo/db/pipeline/change_stream_expired_pre_image_remover',
'$BUILD_DIR/mongo/db/query/common_query_enums_and_helpers',
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index a02d9350e93..2cae97495a4 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -537,6 +537,7 @@ env.Library(
'$BUILD_DIR/mongo/db/query/query_plan_cache',
'$BUILD_DIR/mongo/db/query/query_planner',
'$BUILD_DIR/mongo/db/update_index_data',
+ 'collection',
],
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/base',
diff --git a/src/mongo/db/catalog/throttle_cursor_test.cpp b/src/mongo/db/catalog/throttle_cursor_test.cpp
index 8de3f08fbeb..02999c2a739 100644
--- a/src/mongo/db/catalog/throttle_cursor_test.cpp
+++ b/src/mongo/db/catalog/throttle_cursor_test.cpp
@@ -67,7 +67,6 @@ public:
void setMaxMbPerSec(int maxMbPerSec);
Date_t getTime();
- int64_t getDifferenceInMillis(Date_t start, Date_t end);
SortedDataInterfaceThrottleCursor getIdIndex(const CollectionPtr& coll);
std::unique_ptr<DataThrottle> _dataThrottle;
@@ -114,10 +113,6 @@ Date_t ThrottleCursorTest::getTime() {
return operationContext()->getServiceContext()->getFastClockSource()->now();
}
-int64_t ThrottleCursorTest::getDifferenceInMillis(Date_t start, Date_t end) {
- return end.toMillisSinceEpoch() - start.toMillisSinceEpoch();
-}
-
SortedDataInterfaceThrottleCursor ThrottleCursorTest::getIdIndex(const CollectionPtr& coll) {
const IndexDescriptor* idDesc = coll->getIndexCatalog()->findIdIndex(operationContext());
const IndexCatalogEntry* idEntry = coll->getIndexCatalog()->getEntry(idDesc);
@@ -156,7 +151,7 @@ TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOff) {
Date_t end = getTime();
ASSERT_EQ(numRecords, 20);
- ASSERT_EQ(getDifferenceInMillis(start, end), kTickDelay * numRecords + kTickDelay);
+ ASSERT_EQ(end - start, Milliseconds(kTickDelay * numRecords + kTickDelay));
}
TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOn) {
@@ -187,7 +182,7 @@ TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOn) {
Date_t end = getTime();
ASSERT_EQ(numRecords, 10);
- ASSERT_TRUE(getDifferenceInMillis(start, end) >= 5000);
+ ASSERT_GTE(end - start, Milliseconds(5000));
}
// Using a throttle with a limit of 5MB per second, all operations should take at least 1
@@ -207,7 +202,7 @@ TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOn) {
Date_t end = getTime();
ASSERT_EQ(numRecords, 10);
- ASSERT_TRUE(getDifferenceInMillis(start, end) >= 1000);
+ ASSERT_GTE(end - start, Milliseconds(1000));
}
}
@@ -239,7 +234,7 @@ TEST_F(ThrottleCursorTestFastClock, TestSeekableRecordThrottleCursorOnLargeDocs1
Date_t end = getTime();
ASSERT_EQ(scanRecords, 0);
- ASSERT_GTE(getDifferenceInMillis(start, end), 10 * 1000);
+ ASSERT_GTE(end - start, Milliseconds(10 * 1000));
}
TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOnLargeDocs5MBps) {
@@ -270,7 +265,7 @@ TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOnLargeDocs5MBps) {
Date_t end = getTime();
ASSERT_EQ(scanRecords, 0);
- ASSERT_GTE(getDifferenceInMillis(start, end), 2000);
+ ASSERT_GTE(end - start, Milliseconds(2000));
}
TEST_F(ThrottleCursorTest, TestSortedDataInterfaceThrottleCursorOff) {
@@ -297,7 +292,7 @@ TEST_F(ThrottleCursorTest, TestSortedDataInterfaceThrottleCursorOff) {
Date_t end = getTime();
ASSERT_EQ(numRecords, 10);
- ASSERT_EQ(getDifferenceInMillis(start, end), kTickDelay * numRecords + kTickDelay);
+ ASSERT_EQ(end - start, Milliseconds(kTickDelay * numRecords + kTickDelay));
}
TEST_F(ThrottleCursorTest, TestSortedDataInterfaceThrottleCursorOn) {
@@ -327,7 +322,7 @@ TEST_F(ThrottleCursorTest, TestSortedDataInterfaceThrottleCursorOn) {
Date_t end = getTime();
ASSERT_EQ(numRecords, 10);
- ASSERT_TRUE(getDifferenceInMillis(start, end) >= 5000);
+ ASSERT_GTE(end - start, Milliseconds(5000));
}
// Using a throttle with a limit of 5MB per second, all operations should take at least 1
@@ -347,7 +342,7 @@ TEST_F(ThrottleCursorTest, TestSortedDataInterfaceThrottleCursorOn) {
Date_t end = getTime();
ASSERT_EQ(numRecords, 10);
- ASSERT_TRUE(getDifferenceInMillis(start, end) >= 1000);
+ ASSERT_GTE(end - start, Milliseconds(1000));
}
}
@@ -390,7 +385,7 @@ TEST_F(ThrottleCursorTest, TestMixedCursorsWithSharedThrottleOff) {
Date_t end = getTime();
ASSERT_EQ(numRecords, 30);
- ASSERT_EQ(getDifferenceInMillis(start, end), kTickDelay * numRecords + kTickDelay);
+ ASSERT_EQ(end - start, Milliseconds(kTickDelay * numRecords + kTickDelay));
}
TEST_F(ThrottleCursorTest, TestMixedCursorsWithSharedThrottleOn) {
@@ -425,7 +420,7 @@ TEST_F(ThrottleCursorTest, TestMixedCursorsWithSharedThrottleOn) {
Date_t end = getTime();
ASSERT_EQ(numRecords, 20);
- ASSERT_TRUE(getDifferenceInMillis(start, end) >= 5000);
+ ASSERT_GTE(end - start, Milliseconds(5000));
}
// Using a throttle with a limit of 5MB per second, all operations should take at least 2
@@ -447,7 +442,7 @@ TEST_F(ThrottleCursorTest, TestMixedCursorsWithSharedThrottleOn) {
Date_t end = getTime();
ASSERT_EQ(numRecords, 20);
- ASSERT_TRUE(getDifferenceInMillis(start, end) >= 2000);
+ ASSERT_GTE(end - start, Milliseconds(2000));
}
}
diff --git a/src/mongo/db/change_stream_change_collection_manager.cpp b/src/mongo/db/change_stream_change_collection_manager.cpp
index e2872e3d815..d76d197c505 100644
--- a/src/mongo/db/change_stream_change_collection_manager.cpp
+++ b/src/mongo/db/change_stream_change_collection_manager.cpp
@@ -71,7 +71,9 @@ public:
* collection when the 'write()' method is called.
*/
void add(const TenantId& tenantId, InsertStatement insertStatement) {
- _tenantStatementsMap[tenantId].push_back(std::move(insertStatement));
+ if (_shouldAddEntry(insertStatement)) {
+ _tenantStatementsMap[tenantId].push_back(std::move(insertStatement));
+ }
}
/**
@@ -112,6 +114,30 @@ public:
}
private:
+ bool _shouldAddEntry(const InsertStatement& insertStatement) {
+ auto& oplogDoc = insertStatement.doc;
+
+        // TODO SERVER-65950 retrieve the tenant from the oplog.
+ // TODO SERVER-67170 avoid inspecting the oplog BSON object.
+
+ if (auto nssFieldElem = oplogDoc[repl::OplogEntry::kNssFieldName];
+ nssFieldElem && nssFieldElem.String() == "config.$cmd"_sd) {
+ if (auto objectFieldElem = oplogDoc[repl::OplogEntry::kObjectFieldName]) {
+                // The oplog entry might be a drop command on the change collection. If the drop
+                // request targets a change collection that has already been deleted, do not
+                // attempt to write to it. This scenario is possible because 'WriteUnitOfWork'
+                // stages the changes, and by the time the staged 'CollectionImpl::insertDocuments'
+                // change is committed, the collection object might already have been deleted.
+ if (auto dropFieldElem = objectFieldElem["drop"_sd]) {
+ return dropFieldElem.String() != NamespaceString::kChangeCollectionName;
+ }
+ }
+ }
+
+ return true;
+ }
+
// Maps inserts statements for each tenant.
stdx::unordered_map<TenantId, std::vector<InsertStatement>, TenantId::Hasher>
_tenantStatementsMap;
diff --git a/src/mongo/db/change_streams_cluster_parameter.cpp b/src/mongo/db/change_streams_cluster_parameter.cpp
new file mode 100644
index 00000000000..c0ac9577f2e
--- /dev/null
+++ b/src/mongo/db/change_streams_cluster_parameter.cpp
@@ -0,0 +1,62 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
+
+#include "mongo/db/change_streams_cluster_parameter.h"
+
+#include "mongo/base/status.h"
+#include "mongo/db/change_streams_cluster_parameter_gen.h"
+#include "mongo/logv2/log.h"
+namespace mongo {
+
+Status validateChangeStreamsClusterParameter(
+ const ChangeStreamsClusterParameterStorage& clusterParameter) {
+ LOGV2_DEBUG(6594801,
+ 1,
+ "Validating change streams cluster parameter",
+ "enabled"_attr = clusterParameter.getEnabled(),
+ "expireAfterSeconds"_attr = clusterParameter.getExpireAfterSeconds());
+ if (clusterParameter.getEnabled()) {
+ if (clusterParameter.getExpireAfterSeconds() <= 0) {
+ return Status(ErrorCodes::BadValue,
+ "Expected a positive integer for 'expireAfterSeconds' field if 'enabled' "
+ "field is true");
+ }
+ } else {
+ if (clusterParameter.getExpireAfterSeconds() != 0) {
+ return Status(
+ ErrorCodes::BadValue,
+ "Expected a zero value for 'expireAfterSeconds' if 'enabled' field is false");
+ }
+ }
+ return Status::OK();
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/change_streams_cluster_parameter.h b/src/mongo/db/change_streams_cluster_parameter.h
new file mode 100644
index 00000000000..ebeedaa0e8b
--- /dev/null
+++ b/src/mongo/db/change_streams_cluster_parameter.h
@@ -0,0 +1,42 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/base/status.h"
+namespace mongo {
+
+class ChangeStreamsClusterParameterStorage;
+
+/**
+ * Validates 'changeStreams' cluster-wide parameter.
+ */
+Status validateChangeStreamsClusterParameter(
+ const ChangeStreamsClusterParameterStorage& clusterParameter);
+} // namespace mongo
diff --git a/src/mongo/db/ops/new_write_error_exception_format_feature_flag.idl b/src/mongo/db/change_streams_cluster_parameter.idl
index f5fb71095b0..74563d47752 100644
--- a/src/mongo/db/ops/new_write_error_exception_format_feature_flag.idl
+++ b/src/mongo/db/change_streams_cluster_parameter.idl
@@ -27,15 +27,38 @@
#
global:
- cpp_namespace: "mongo::feature_flags"
+ cpp_namespace: "mongo"
+ cpp_includes:
+ - "mongo/db/change_streams_cluster_parameter.h"
imports:
- - "mongo/idl/basic_types.idl"
+ - "mongo/idl/basic_types.idl"
+ - "mongo/idl/cluster_server_parameter.idl"
-feature_flags:
- featureFlagNewWriteErrorExceptionFormat:
- description: Feature flag for enabling the new write error format which avoids serialising
- StaleShardVersion with the information of StaleConfig.
- cpp_varname: gFeatureFlagNewWriteErrorExceptionFormat
- default: true
- version: 6.0
+structs:
+ ChangeStreamsClusterParameterStorage:
+ description: "A specification for the 'changeStreams' cluster-wide configuration parameter
+ type."
+ inline_chained_structs: true
+ chained_structs:
+ ClusterServerParameter: clusterServerParameter
+ fields:
+ enabled:
+ description: "Enable or disable change streams."
+ type: bool
+ default: false
+ expireAfterSeconds:
+ description: "The number of seconds to retain the change events. This value will be a
+ non-zero positive value if the change stream is enabled and a zero value if the change
+ stream is disabled."
+ type: safeInt64
+ default: 0
+
+server_parameters:
+ changeStreams:
+ description: "The cluster-wide configuration parameter for the change stream in the serverless."
+ set_at: cluster
+ cpp_vartype: ChangeStreamsClusterParameterStorage
+ cpp_varname: gChangeStreamsClusterParameter
+ validator:
+ callback: validateChangeStreamsClusterParameter
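The IDL above registers 'changeStreams' as a cluster-wide ('set_at: cluster') server parameter backed
by ChangeStreamsClusterParameterStorage, with validateChangeStreamsClusterParameter wired in as the
validator callback. A minimal sketch of the configurations that validator accepts and rejects, using
the IDL-generated setters that also appear in the unit tests below (illustrative only; the wrapper
function name is hypothetical and this snippet is not part of the diff):

    #include "mongo/db/change_streams_cluster_parameter.h"
    #include "mongo/db/change_streams_cluster_parameter_gen.h"
    #include "mongo/util/assert_util.h"

    namespace mongo {
    // Hypothetical helper, for illustration only.
    void changeStreamsParameterValidationSketch() {
        ChangeStreamsClusterParameterStorage p;  // IDL defaults: enabled=false, expireAfterSeconds=0
        invariant(validateChangeStreamsClusterParameter(p).isOK());   // disabled + 0: accepted

        p.setEnabled(true);
        p.setExpireAfterSeconds(3600);
        invariant(validateChangeStreamsClusterParameter(p).isOK());   // enabled + positive: accepted

        p.setExpireAfterSeconds(0);
        invariant(!validateChangeStreamsClusterParameter(p).isOK());  // enabled + 0: rejected (BadValue)
    }
    }  // namespace mongo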
diff --git a/src/mongo/db/change_streams_cluster_parameter_test.cpp b/src/mongo/db/change_streams_cluster_parameter_test.cpp
new file mode 100644
index 00000000000..80ef8d71da7
--- /dev/null
+++ b/src/mongo/db/change_streams_cluster_parameter_test.cpp
@@ -0,0 +1,78 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/change_streams_cluster_parameter.h"
+#include "mongo/db/change_streams_cluster_parameter_gen.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace {
+
+
+TEST(ValidateChangeStreamsClusterParameter, EnabledWithSuccess) {
+ ChangeStreamsClusterParameterStorage changeStreamClusterParam;
+ changeStreamClusterParam.setEnabled(true);
+ changeStreamClusterParam.setExpireAfterSeconds(3600);
+ auto result = validateChangeStreamsClusterParameter(changeStreamClusterParam);
+ ASSERT_TRUE(result.isOK());
+}
+
+TEST(ValidateChangeStreamsClusterParameter, EnabledWithNonPositiveExpireAfterSeconds) {
+ ChangeStreamsClusterParameterStorage changeStreamClusterParam;
+ changeStreamClusterParam.setEnabled(true);
+ changeStreamClusterParam.setExpireAfterSeconds(0);
+ auto resultZero = validateChangeStreamsClusterParameter(changeStreamClusterParam);
+ ASSERT_EQ(resultZero.code(), ErrorCodes::BadValue);
+
+ changeStreamClusterParam.setExpireAfterSeconds(-1);
+ auto resultNegative = validateChangeStreamsClusterParameter(changeStreamClusterParam);
+ ASSERT_EQ(resultNegative.code(), ErrorCodes::BadValue);
+}
+
+TEST(ValidateChangeStreamsClusterParameter, DisabledWithSuccess) {
+ ChangeStreamsClusterParameterStorage changeStreamClusterParam;
+ changeStreamClusterParam.setEnabled(false);
+ auto resultDefault = validateChangeStreamsClusterParameter(changeStreamClusterParam);
+ ASSERT_TRUE(resultDefault.isOK());
+
+ changeStreamClusterParam.setExpireAfterSeconds(0);
+ auto resultZero = validateChangeStreamsClusterParameter(changeStreamClusterParam);
+    ASSERT_TRUE(resultZero.isOK());
+}
+
+TEST(ValidateChangeStreamsClusterParameter, DisabledWithNonZeroExpireAfterSeconds) {
+ ChangeStreamsClusterParameterStorage changeStreamClusterParam;
+ changeStreamClusterParam.setEnabled(false);
+ changeStreamClusterParam.setExpireAfterSeconds(1);
+ auto result = validateChangeStreamsClusterParameter(changeStreamClusterParam);
+ ASSERT_EQ(result.code(), ErrorCodes::BadValue);
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 7c2ed6583d4..a6f394b481d 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -90,11 +90,11 @@ BSONObj Cloner::_getIdIndexSpec(const std::list<BSONObj>& indexSpecs) {
Cloner::Cloner() {}
-struct Cloner::Fun {
- Fun(OperationContext* opCtx, const std::string& dbName)
+struct Cloner::BatchHandler {
+ BatchHandler(OperationContext* opCtx, const std::string& dbName)
: lastLog(0), opCtx(opCtx), _dbName(dbName) {}
- void operator()(DBClientCursorBatchIterator& i) {
+ void operator()(DBClientCursor& cursor) {
boost::optional<Lock::DBLock> dbLock;
dbLock.emplace(opCtx, _dbName, MODE_X);
uassert(ErrorCodes::NotWritablePrimary,
@@ -128,7 +128,7 @@ struct Cloner::Fun {
});
}
- while (i.moreInCurrentBatch()) {
+ while (cursor.moreInCurrentBatch()) {
if (numSeen % 128 == 127) {
time_t now = time(nullptr);
if (now - lastLog >= 60) {
@@ -164,7 +164,7 @@ struct Cloner::Fun {
collection);
}
- BSONObj tmp = i.nextSafe();
+ BSONObj tmp = cursor.nextSafe();
/* assure object is valid. note this will slow us down a little. */
// We allow cloning of collections containing decimal data even if decimal is disabled.
@@ -245,23 +245,24 @@ void Cloner::_copy(OperationContext* opCtx,
logAttrs(nss),
"conn_getServerAddress"_attr = conn->getServerAddress());
- Fun f(opCtx, toDBName);
- f.numSeen = 0;
- f.nss = nss;
- f.from_options = from_opts;
- f.from_id_index = from_id_index;
- f.saveLast = time(nullptr);
-
- int options = QueryOption_NoCursorTimeout | QueryOption_Exhaust;
-
- conn->query_DEPRECATED(std::function<void(DBClientCursorBatchIterator&)>(f),
- nss,
- BSONObj{} /* filter */,
- client_deprecated::Query() /* querySettings */,
- nullptr,
- options,
- 0 /* batchSize */,
- repl::ReadConcernArgs::kLocal);
+ BatchHandler batchHandler{opCtx, toDBName};
+ batchHandler.numSeen = 0;
+ batchHandler.nss = nss;
+ batchHandler.from_options = from_opts;
+ batchHandler.from_id_index = from_id_index;
+ batchHandler.saveLast = time(nullptr);
+
+ FindCommandRequest findCmd{nss};
+ findCmd.setNoCursorTimeout(true);
+ findCmd.setReadConcern(repl::ReadConcernArgs::kLocal);
+ auto cursor = conn->find(std::move(findCmd),
+ ReadPreferenceSetting{ReadPreference::SecondaryPreferred},
+ ExhaustMode::kOn);
+
+ // Process the results of the cursor in batches.
+ while (cursor->more()) {
+ batchHandler(*cursor);
+ }
}
void Cloner::_copyIndexes(OperationContext* opCtx,
diff --git a/src/mongo/db/cloner.h b/src/mongo/db/cloner.h
index 8d1d512fe1f..5cbb4d76337 100644
--- a/src/mongo/db/cloner.h
+++ b/src/mongo/db/cloner.h
@@ -104,7 +104,7 @@ private:
const std::list<BSONObj>& from_indexes,
DBClientBase* conn);
- struct Fun;
+ struct BatchHandler;
};
} // namespace mongo
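The Cloner changes above follow the same migration pattern as the DBClientReplicaSet changes earlier
in this diff: callers of the removed query_DEPRECATED() overloads are rewritten against find(), which
takes a FindCommandRequest plus an explicit ReadPreferenceSetting and ExhaustMode. A minimal sketch of
that pattern, assuming a DBClientBase-style connection 'conn' and a NamespaceString 'nss' (both
placeholders, not part of this diff):

    FindCommandRequest findRequest{nss};
    findRequest.setFilter(BSON("x" << 1));   // formerly the 'filter' argument
    findRequest.setNoCursorTimeout(true);    // formerly QueryOption_NoCursorTimeout
    auto cursor = conn->find(std::move(findRequest),
                             ReadPreferenceSetting{ReadPreference::SecondaryPreferred},
                             ExhaustMode::kOn);  // formerly QueryOption_Exhaust
    while (cursor->more()) {
        BSONObj doc = cursor->nextSafe();
        // process 'doc'
    }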
diff --git a/src/mongo/db/commands/cqf/cqf_aggregate.cpp b/src/mongo/db/commands/cqf/cqf_aggregate.cpp
index 26f6bcf5cff..516a3f9ca2e 100644
--- a/src/mongo/db/commands/cqf/cqf_aggregate.cpp
+++ b/src/mongo/db/commands/cqf/cqf_aggregate.cpp
@@ -184,12 +184,16 @@ static opt::unordered_map<std::string, optimizer::IndexDefinition> buildIndexSpe
// TODO: simplify expression.
- PartialSchemaReqConversion conversion = convertExprToPartialSchemaReq(exprABT);
- if (!conversion._success || conversion._hasEmptyInterval) {
+ auto conversion = convertExprToPartialSchemaReq(exprABT, true /*isFilterContext*/);
+ if (!conversion || conversion->_hasEmptyInterval) {
// Unsatisfiable partial index filter?
continue;
}
- partialIndexReqMap = std::move(conversion._reqMap);
+ tassert(6624257,
+ "Should not be seeing a partial index filter where we need to over-approximate",
+ !conversion->_retainPredicate);
+
+ partialIndexReqMap = std::move(conversion->_reqMap);
}
// For now we assume distribution is Centralized.
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index f040bd9eea3..5deb5ecd339 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -99,7 +99,7 @@ void removePlanCacheEntriesByIndexFilterKeys(const stdx::unordered_set<uint32_t>
sbe::PlanCache* planCache) {
planCache->removeIf([&](const sbe::PlanCacheKey& key, const sbe::PlanCacheEntry& entry) {
return indexFilterKeys.contains(entry.indexFilterKey) &&
- key.getCollectionUuid() == collectionUuid;
+ key.getMainCollectionState().uuid == collectionUuid;
});
}
} // namespace
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 4ef229c36f4..3495ee127d6 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -40,7 +40,7 @@ namespace {
static const NamespaceString nss{"test.collection"_sd};
-PlanCacheKey makeKey(const CanonicalQuery& cq) {
+PlanCacheKey makeClassicKey(const CanonicalQuery& cq) {
CollectionMock coll(nss);
return plan_cache_key_factory::make<PlanCacheKey>(cq, &coll);
}
@@ -106,7 +106,7 @@ TEST(PlanCacheCommandsTest, CanCanonicalizeWithValidQuery) {
plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {b: 3, a: 4}}"));
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> equivQuery = std::move(statusWithCQ.getValue());
- ASSERT_EQUALS(makeKey(*query), makeKey(*equivQuery));
+ ASSERT_EQUALS(makeClassicKey(*query), makeClassicKey(*equivQuery));
}
TEST(PlanCacheCommandsTest, SortQueryResultsInDifferentPlanCacheKeyFromUnsorted) {
@@ -124,7 +124,7 @@ TEST(PlanCacheCommandsTest, SortQueryResultsInDifferentPlanCacheKeyFromUnsorted)
opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> sortQuery = std::move(statusWithCQ.getValue());
- ASSERT_NOT_EQUALS(makeKey(*query), makeKey(*sortQuery));
+ ASSERT_NOT_EQUALS(makeClassicKey(*query), makeClassicKey(*sortQuery));
}
// Regression test for SERVER-17158.
@@ -143,7 +143,7 @@ TEST(PlanCacheCommandsTest, SortsAreProperlyDelimitedInPlanCacheKey) {
opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> sortQuery2 = std::move(statusWithCQ.getValue());
- ASSERT_NOT_EQUALS(makeKey(*sortQuery1), makeKey(*sortQuery2));
+ ASSERT_NOT_EQUALS(makeClassicKey(*sortQuery1), makeClassicKey(*sortQuery2));
}
TEST(PlanCacheCommandsTest, ProjectQueryResultsInDifferentPlanCacheKeyFromUnprojected) {
@@ -160,7 +160,7 @@ TEST(PlanCacheCommandsTest, ProjectQueryResultsInDifferentPlanCacheKeyFromUnproj
opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> projectionQuery = std::move(statusWithCQ.getValue());
- ASSERT_NOT_EQUALS(makeKey(*query), makeKey(*projectionQuery));
+ ASSERT_NOT_EQUALS(makeClassicKey(*query), makeClassicKey(*projectionQuery));
}
} // namespace
diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp
index 6230b6da99d..42053578913 100644
--- a/src/mongo/db/commands/run_aggregate.cpp
+++ b/src/mongo/db/commands/run_aggregate.cpp
@@ -1031,7 +1031,7 @@ Status runAggregate(OperationContext* opCtx,
// yet.
invariant(ctx);
Explain::explainStages(explainExecutor,
- ctx->getCollection(),
+ collections,
*(expCtx->explain),
BSON("optimizedPipeline" << true),
cmdObj,
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index d1bfe34e501..63bfeb73a03 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -1461,8 +1461,11 @@ UsersInfoReply CmdUMCTyped<UsersInfoCommand, UMCInfoParams>::Invocation::typedRu
CommandHelpers::appendSimpleCommandStatus(bodyBuilder, true);
bodyBuilder.doneFast();
auto response = CursorResponse::parseFromBSONThrowing(replyBuilder.releaseBody());
- DBClientCursor cursor(
- &client, response.getNSS(), response.getCursorId(), 0, 0, response.releaseBatch());
+ DBClientCursor cursor(&client,
+ response.getNSS(),
+ response.getCursorId(),
+ false /*isExhaust*/,
+ response.releaseBatch());
while (cursor.more()) {
users.push_back(cursor.next().getOwned());
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index ddc53c40db1..688577f8e28 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -804,6 +804,14 @@ const CollectionPtr& AutoGetCollectionForReadMaybeLockFree::getCollection() cons
}
}
+bool AutoGetCollectionForReadMaybeLockFree::isAnySecondaryNamespaceAViewOrSharded() const {
+ if (_autoGet) {
+ return _autoGet->isAnySecondaryNamespaceAViewOrSharded();
+ } else {
+ return _autoGetLockFree->isAnySecondaryNamespaceAViewOrSharded();
+ }
+}
+
template <typename AutoGetCollectionForReadType>
AutoGetCollectionForReadCommandBase<AutoGetCollectionForReadType>::
AutoGetCollectionForReadCommandBase(
diff --git a/src/mongo/db/db_raii.h b/src/mongo/db/db_raii.h
index 117a9eba220..63bdf8c621d 100644
--- a/src/mongo/db/db_raii.h
+++ b/src/mongo/db/db_raii.h
@@ -311,6 +311,7 @@ public:
const CollectionPtr& getCollection() const;
const ViewDefinition* getView() const;
const NamespaceString& getNss() const;
+ bool isAnySecondaryNamespaceAViewOrSharded() const;
private:
boost::optional<AutoGetCollectionForRead> _autoGet;
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 1db8860dc2e..0dbb0c4a405 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -46,6 +46,7 @@
#include "mongo/db/query/classic_plan_cache.h"
#include "mongo/db/query/collection_query_info.h"
#include "mongo/db/query/explain.h"
+#include "mongo/db/query/multiple_collection_accessor.h"
#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_ranker.h"
#include "mongo/db/query/plan_ranker_util.h"
@@ -280,8 +281,12 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
}
}
- plan_cache_util::updatePlanCache(
- expCtx()->opCtx, collection(), _cachingMode, *_query, std::move(ranking), _candidates);
+ plan_cache_util::updatePlanCache(expCtx()->opCtx,
+ MultipleCollectionAccessor(collection()),
+ _cachingMode,
+ *_query,
+ std::move(ranking),
+ _candidates);
return Status::OK();
}
diff --git a/src/mongo/db/exec/plan_cache_util.cpp b/src/mongo/db/exec/plan_cache_util.cpp
index 85d5c823849..a3fc5ff19d1 100644
--- a/src/mongo/db/exec/plan_cache_util.cpp
+++ b/src/mongo/db/exec/plan_cache_util.cpp
@@ -74,17 +74,17 @@ void logNotCachingNoData(std::string&& solution) {
} // namespace log_detail
void updatePlanCache(OperationContext* opCtx,
- const CollectionPtr& collection,
+ const MultipleCollectionAccessor& collections,
const CanonicalQuery& query,
const QuerySolution& solution,
const sbe::PlanStage& root,
const stage_builder::PlanStageData& data) {
- // TODO SERVER-61507: Integration between lowering parts of aggregation pipeline into the find
- // subsystem and the new SBE cache isn't implemented yet. Remove cq->pipeline().empty() check
- // once it's implemented.
- if (shouldCacheQuery(query) && collection && query.pipeline().empty() &&
+ // TODO SERVER-61507: Remove canUseSbePlanCache check once $group pushdown is
+ // integrated with SBE plan cache.
+ if (shouldCacheQuery(query) && collections.getMainCollection() &&
+ canonical_query_encoder::canUseSbePlanCache(query) &&
feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV()) {
- auto key = plan_cache_key_factory::make<sbe::PlanCacheKey>(query, collection);
+ auto key = plan_cache_key_factory::make(query, collections);
auto plan = std::make_unique<sbe::CachedSbePlan>(root.clone(), data);
plan->indexFilterApplied = solution.indexFilterApplied;
sbe::getPlanCache(opCtx).setPinned(
diff --git a/src/mongo/db/exec/plan_cache_util.h b/src/mongo/db/exec/plan_cache_util.h
index 630458cbcd4..2fb16d8be89 100644
--- a/src/mongo/db/exec/plan_cache_util.h
+++ b/src/mongo/db/exec/plan_cache_util.h
@@ -32,6 +32,7 @@
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/collection_query_info.h"
+#include "mongo/db/query/multiple_collection_accessor.h"
#include "mongo/db/query/plan_cache_debug_info.h"
#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_explainer_factory.h"
@@ -98,7 +99,7 @@ plan_cache_debug_info::DebugInfoSBE buildDebugInfo(const QuerySolution* solution
template <typename PlanStageType, typename ResultType, typename Data>
void updatePlanCache(
OperationContext* opCtx,
- const CollectionPtr& collection,
+ const MultipleCollectionAccessor& collections,
PlanCachingMode cachingMode,
const CanonicalQuery& query,
std::unique_ptr<plan_ranker::PlanRankingDecision> ranking,
@@ -183,6 +184,7 @@ void updatePlanCache(
callbacks{query, buildDebugInfoFn};
winningPlan.solution->cacheData->indexFilterApplied =
winningPlan.solution->indexFilterApplied;
+ auto& collection = collections.getMainCollection();
uassertStatusOK(CollectionQueryInfo::get(collection)
.getPlanCache()
->set(plan_cache_key_factory::make<PlanCacheKey>(query, collection),
@@ -195,10 +197,10 @@ void updatePlanCache(
if (winningPlan.solution->cacheData != nullptr) {
if constexpr (std::is_same_v<PlanStageType, std::unique_ptr<sbe::PlanStage>>) {
- // TODO SERVER-61507: Integration between lowering parts of aggregation pipeline
- // into the find subsystem and the new SBE cache isn't implemented yet.
+ // TODO SERVER-61507: Remove canUseSbePlanCache check once $group pushdown
+ // is integrated with SBE plan cache.
if (feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV() &&
- query.pipeline().empty()) {
+ canonical_query_encoder::canUseSbePlanCache(query)) {
tassert(6142201,
"The winning CandidatePlan should contain the original plan",
winningPlan.clonedPlan);
@@ -215,16 +217,16 @@ void updatePlanCache(
plan_cache_debug_info::DebugInfoSBE>
callbacks{query, buildDebugInfoFn};
uassertStatusOK(sbe::getPlanCache(opCtx).set(
- plan_cache_key_factory::make<sbe::PlanCacheKey>(query, collection),
+ plan_cache_key_factory::make(query, collections),
std::move(cachedPlan),
*rankingDecision,
opCtx->getServiceContext()->getPreciseClockSource()->now(),
&callbacks,
boost::none /* worksGrowthCoefficient */));
} else {
- // TODO(SERVER-61507, SERVER-64882): Fall back to use the classic plan cache.
- // Remove this branch after "gFeatureFlagSbePlanCache" is removed and lowering
- // parts of pipeline is integrated with SBE cache.
+ // TODO(SERVER-64882, SERVER-61507): Fall back to use the classic plan cache.
+ // Remove this branch after "gFeatureFlagSbePlanCache" is removed and $group
+ // pushdown is integrated with SBE plan cache.
cacheClassicPlan();
}
} else {
@@ -245,7 +247,7 @@ void updatePlanCache(
* the cache, the plan immediately becomes "active".
*/
void updatePlanCache(OperationContext* opCtx,
- const CollectionPtr& collection,
+ const MultipleCollectionAccessor& collections,
const CanonicalQuery& query,
const QuerySolution& solution,
const sbe::PlanStage& root,
diff --git a/src/mongo/db/exec/sbe/SConscript b/src/mongo/db/exec/sbe/SConscript
index 6a2503d7f26..6ee97450f2b 100644
--- a/src/mongo/db/exec/sbe/SConscript
+++ b/src/mongo/db/exec/sbe/SConscript
@@ -223,6 +223,7 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/query/collation/collator_interface_mock',
'$BUILD_DIR/mongo/db/service_context_d_test_fixture',
'$BUILD_DIR/mongo/db/service_context_test_fixture',
+ '$BUILD_DIR/mongo/util/pcre_wrapper',
'sbe_plan_stage_test',
],
)
diff --git a/src/mongo/db/exec/sbe/sbe_test.cpp b/src/mongo/db/exec/sbe/sbe_test.cpp
index 323368a5334..5a577f02462 100644
--- a/src/mongo/db/exec/sbe/sbe_test.cpp
+++ b/src/mongo/db/exec/sbe/sbe_test.cpp
@@ -31,6 +31,7 @@
#include "mongo/db/exec/sbe/values/value.h"
#include "mongo/db/exec/sbe/vm/vm.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/pcre.h"
namespace mongo::sbe {
@@ -421,6 +422,85 @@ TEST(SBEVM, ConvertBinDataToBsonObj) {
namespace {
+// The hex representation of memory addresses in the output of CodeFragment::toString() differs on
+// Linux and Windows machines, so 'kAddrPattern' is used to cover both cases.
+static const std::string kLinuxAddrPattern{"(0x[a-f0-9]+)"};
+static const std::string kWindowsAddrPattern{"([A-F0-9]+)"};
+static const std::string kAddrPattern{"(" + kLinuxAddrPattern + "|" + kWindowsAddrPattern + ")"};
+
+// The beginning of the output from CodeFragment::toString() gives a range of the addresses that
+// 'pcPointer' will traverse.
+static const std::string kPcPointerRangePattern{"(\\[" + kAddrPattern + ")-(" + kAddrPattern +
+ ")\\])"};
+
+/**
+ * Creates a pcre pattern to match a single instruction in the output of
+ * CodeFragment::toString(). The instruction's arguments must be passed as one comma-separated
+ * string; pass an empty string for instructions that take no arguments.
+ */
+std::string instrPattern(std::string op, std::string args) {
+ return "(" + kAddrPattern + ": " + op + "\\(" + args + "\\); )";
+}
+} // namespace
+
+TEST(SBEVM, CodeFragmentToString) {
+ {
+ vm::CodeFragment code;
+ std::string toStringPattern{kPcPointerRangePattern + "( )"};
+
+ code.appendDiv();
+ toStringPattern += instrPattern("div", "");
+ code.appendMul();
+ toStringPattern += instrPattern("mul", "");
+ code.appendAdd();
+ toStringPattern += instrPattern("add", "");
+
+ std::string instrs = code.toString();
+
+ static const pcre::Regex validToStringOutput{toStringPattern};
+
+ ASSERT_TRUE(!!validToStringOutput.matchView(instrs));
+ }
+}
+
+TEST(SBEVM, CodeFragmentToStringArgs) {
+ {
+ vm::CodeFragment code;
+ std::string toStringPattern{kAddrPattern};
+
+ code.appendFillEmpty(vm::Instruction::True);
+ toStringPattern += instrPattern("fillEmptyConst", "k: True");
+ code.appendFillEmpty(vm::Instruction::Null);
+ toStringPattern += instrPattern("fillEmptyConst", "k: Null");
+ code.appendFillEmpty(vm::Instruction::False);
+ toStringPattern += instrPattern("fillEmptyConst", "k: False");
+
+ code.appendTraverseP(0xAA);
+ auto offsetP = 0xAA - code.instrs().size();
+ toStringPattern += instrPattern("traversePConst", "offset: " + std::to_string(offsetP));
+ code.appendTraverseF(0xBB, vm::Instruction::True);
+ auto offsetF = 0xBB - code.instrs().size();
+ toStringPattern +=
+ instrPattern("traverseFConst", "k: True, offset: " + std::to_string(offsetF));
+
+ auto [tag, val] = value::makeNewString("Hello world!");
+ value::ValueGuard guard{tag, val};
+ code.appendGetField(tag, val);
+ toStringPattern += instrPattern("getFieldConst", "value: \"Hello world!\"");
+
+ code.appendAdd();
+ toStringPattern += instrPattern("add", "");
+
+ std::string instrs = code.toString();
+
+ static const pcre::Regex validToStringOutput{toStringPattern};
+
+ ASSERT_TRUE(!!validToStringOutput.matchView(instrs));
+ }
+}
+
+namespace {
+
/**
* Fills bytes after the null terminator in the string with 'pattern'.
*
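For orientation, the regex constants and instrPattern() above are built to match toString() output of roughly this shape (the addresses are made up; only the structure matters):

    [0x55d1c0a3e010-0x55d1c0a3e02b] 0x55d1c0a3e010: div(); 0x55d1c0a3e011: mul(); 0x55d1c0a3e012: add(); 

so, for example, instrPattern("fillEmptyConst", "k: True") is intended to match a fragment such as "0x55d1c0a3e013: fillEmptyConst(k: True); ".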
diff --git a/src/mongo/db/exec/sbe/stages/branch.cpp b/src/mongo/db/exec/sbe/stages/branch.cpp
index bec12b12ee2..adbbd533273 100644
--- a/src/mongo/db/exec/sbe/stages/branch.cpp
+++ b/src/mongo/db/exec/sbe/stages/branch.cpp
@@ -42,8 +42,9 @@ BranchStage::BranchStage(std::unique_ptr<PlanStage> inputThen,
value::SlotVector inputThenVals,
value::SlotVector inputElseVals,
value::SlotVector outputVals,
- PlanNodeId planNodeId)
- : PlanStage("branch"_sd, planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("branch"_sd, planNodeId, participateInTrialRunTracking),
_filter(std::move(filter)),
_inputThenVals(std::move(inputThenVals)),
_inputElseVals(std::move(inputElseVals)),
@@ -61,7 +62,8 @@ std::unique_ptr<PlanStage> BranchStage::clone() const {
_inputThenVals,
_inputElseVals,
_outputVals,
- _commonStats.nodeId);
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void BranchStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/branch.h b/src/mongo/db/exec/sbe/stages/branch.h
index 67b5af8a517..df813e762a4 100644
--- a/src/mongo/db/exec/sbe/stages/branch.h
+++ b/src/mongo/db/exec/sbe/stages/branch.h
@@ -52,7 +52,8 @@ public:
value::SlotVector inputThenVals,
value::SlotVector inputElseVals,
value::SlotVector outputVals,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/bson_scan.cpp b/src/mongo/db/exec/sbe/stages/bson_scan.cpp
index c340071ba0e..3a4c3b50512 100644
--- a/src/mongo/db/exec/sbe/stages/bson_scan.cpp
+++ b/src/mongo/db/exec/sbe/stages/bson_scan.cpp
@@ -42,8 +42,9 @@ BSONScanStage::BSONScanStage(const char* bsonBegin,
boost::optional<value::SlotId> recordSlot,
std::vector<std::string> fields,
value::SlotVector vars,
- PlanNodeId planNodeId)
- : PlanStage("bsonscan"_sd, planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("bsonscan"_sd, planNodeId, participateInTrialRunTracking),
_bsonBegin(bsonBegin),
_bsonEnd(bsonEnd),
_recordSlot(recordSlot),
@@ -52,8 +53,13 @@ BSONScanStage::BSONScanStage(const char* bsonBegin,
_bsonCurrent(bsonBegin) {}
std::unique_ptr<PlanStage> BSONScanStage::clone() const {
- return std::make_unique<BSONScanStage>(
- _bsonBegin, _bsonEnd, _recordSlot, _fields, _vars, _commonStats.nodeId);
+ return std::make_unique<BSONScanStage>(_bsonBegin,
+ _bsonEnd,
+ _recordSlot,
+ _fields,
+ _vars,
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void BSONScanStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/bson_scan.h b/src/mongo/db/exec/sbe/stages/bson_scan.h
index 7804bcd4149..79238f695a2 100644
--- a/src/mongo/db/exec/sbe/stages/bson_scan.h
+++ b/src/mongo/db/exec/sbe/stages/bson_scan.h
@@ -51,7 +51,8 @@ public:
boost::optional<value::SlotId> recordSlot,
std::vector<std::string> fields,
value::SlotVector vars,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/check_bounds.cpp b/src/mongo/db/exec/sbe/stages/check_bounds.cpp
index 483e9f50260..bc62b089005 100644
--- a/src/mongo/db/exec/sbe/stages/check_bounds.cpp
+++ b/src/mongo/db/exec/sbe/stages/check_bounds.cpp
@@ -39,8 +39,9 @@ CheckBoundsStage::CheckBoundsStage(std::unique_ptr<PlanStage> input,
value::SlotId inKeySlot,
value::SlotId inRecordIdSlot,
value::SlotId outSlot,
- PlanNodeId planNodeId)
- : PlanStage{"chkbounds"_sd, planNodeId},
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage{"chkbounds"_sd, planNodeId, participateInTrialRunTracking},
_params{std::move(params)},
_inKeySlot{inKeySlot},
_inRecordIdSlot{inRecordIdSlot},
@@ -49,8 +50,13 @@ CheckBoundsStage::CheckBoundsStage(std::unique_ptr<PlanStage> input,
}
std::unique_ptr<PlanStage> CheckBoundsStage::clone() const {
- return std::make_unique<CheckBoundsStage>(
- _children[0]->clone(), _params, _inKeySlot, _inRecordIdSlot, _outSlot, _commonStats.nodeId);
+ return std::make_unique<CheckBoundsStage>(_children[0]->clone(),
+ _params,
+ _inKeySlot,
+ _inRecordIdSlot,
+ _outSlot,
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void CheckBoundsStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/check_bounds.h b/src/mongo/db/exec/sbe/stages/check_bounds.h
index 29f52faa523..dbdf87938f7 100644
--- a/src/mongo/db/exec/sbe/stages/check_bounds.h
+++ b/src/mongo/db/exec/sbe/stages/check_bounds.h
@@ -76,7 +76,8 @@ public:
value::SlotId inKeySlot,
value::SlotId inRecordIdSlot,
value::SlotId outSlot,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/co_scan.cpp b/src/mongo/db/exec/sbe/stages/co_scan.cpp
index 73e89a5e87e..9666d03cf01 100644
--- a/src/mongo/db/exec/sbe/stages/co_scan.cpp
+++ b/src/mongo/db/exec/sbe/stages/co_scan.cpp
@@ -34,11 +34,14 @@
#include "mongo/db/exec/sbe/expressions/expression.h"
namespace mongo::sbe {
-CoScanStage::CoScanStage(PlanNodeId planNodeId, PlanYieldPolicy* yieldPolicy)
- : PlanStage("coscan"_sd, yieldPolicy, planNodeId) {}
+CoScanStage::CoScanStage(PlanNodeId planNodeId,
+ PlanYieldPolicy* yieldPolicy,
+ bool participateInTrialRunTracking)
+ : PlanStage("coscan"_sd, yieldPolicy, planNodeId, participateInTrialRunTracking) {}
std::unique_ptr<PlanStage> CoScanStage::clone() const {
- return std::make_unique<CoScanStage>(_commonStats.nodeId);
+ return std::make_unique<CoScanStage>(
+ _commonStats.nodeId, _yieldPolicy, _participateInTrialRunTracking);
}
void CoScanStage::prepare(CompileCtx& ctx) {}
value::SlotAccessor* CoScanStage::getAccessor(CompileCtx& ctx, value::SlotId slot) {
diff --git a/src/mongo/db/exec/sbe/stages/co_scan.h b/src/mongo/db/exec/sbe/stages/co_scan.h
index 4625b636a14..1f8c8d5404d 100644
--- a/src/mongo/db/exec/sbe/stages/co_scan.h
+++ b/src/mongo/db/exec/sbe/stages/co_scan.h
@@ -42,7 +42,9 @@ namespace mongo::sbe {
*/
class CoScanStage final : public PlanStage {
public:
- explicit CoScanStage(PlanNodeId, PlanYieldPolicy* yieldPolicy = nullptr);
+ explicit CoScanStage(PlanNodeId,
+ PlanYieldPolicy* yieldPolicy = nullptr,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/column_scan.cpp b/src/mongo/db/exec/sbe/stages/column_scan.cpp
index 8058307a916..24f769fa2c7 100644
--- a/src/mongo/db/exec/sbe/stages/column_scan.cpp
+++ b/src/mongo/db/exec/sbe/stages/column_scan.cpp
@@ -59,8 +59,9 @@ ColumnScanStage::ColumnScanStage(UUID collectionUuid,
std::vector<std::unique_ptr<EExpression>> pathExprs,
value::SlotId rowStoreSlot,
PlanYieldPolicy* yieldPolicy,
- PlanNodeId nodeId)
- : PlanStage("columnscan"_sd, yieldPolicy, nodeId),
+ PlanNodeId nodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("columnscan"_sd, yieldPolicy, nodeId, participateInTrialRunTracking),
_collUuid(collectionUuid),
_columnIndexName(columnIndexName),
_fieldSlots(std::move(fieldSlots)),
@@ -89,7 +90,8 @@ std::unique_ptr<PlanStage> ColumnScanStage::clone() const {
std::move(pathExprs),
_rowStoreSlot,
_yieldPolicy,
- _commonStats.nodeId);
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void ColumnScanStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/column_scan.h b/src/mongo/db/exec/sbe/stages/column_scan.h
index d00d4641171..1efeef25bca 100644
--- a/src/mongo/db/exec/sbe/stages/column_scan.h
+++ b/src/mongo/db/exec/sbe/stages/column_scan.h
@@ -53,7 +53,8 @@ public:
std::vector<std::unique_ptr<EExpression>> pathExprs,
value::SlotId internalSlot,
PlanYieldPolicy* yieldPolicy,
- PlanNodeId nodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/exchange.cpp b/src/mongo/db/exec/sbe/stages/exchange.cpp
index 8cd7b065559..fdbb6531913 100644
--- a/src/mongo/db/exec/sbe/stages/exchange.cpp
+++ b/src/mongo/db/exec/sbe/stages/exchange.cpp
@@ -171,8 +171,9 @@ ExchangeConsumer::ExchangeConsumer(std::unique_ptr<PlanStage> input,
ExchangePolicy policy,
std::unique_ptr<EExpression> partition,
std::unique_ptr<EExpression> orderLess,
- PlanNodeId planNodeId)
- : PlanStage("exchange"_sd, planNodeId) {
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("exchange"_sd, planNodeId, participateInTrialRunTracking) {
_children.emplace_back(std::move(input));
_state = std::make_shared<ExchangeState>(
numOfProducers, std::move(fields), policy, std::move(partition), std::move(orderLess));
@@ -186,13 +187,16 @@ ExchangeConsumer::ExchangeConsumer(std::unique_ptr<PlanStage> input,
uassert(5922202, "partition expression must not be present", !_state->partitionExpr());
}
}
-ExchangeConsumer::ExchangeConsumer(std::shared_ptr<ExchangeState> state, PlanNodeId planNodeId)
- : PlanStage("exchange"_sd, planNodeId), _state(state) {
+ExchangeConsumer::ExchangeConsumer(std::shared_ptr<ExchangeState> state,
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("exchange"_sd, planNodeId, participateInTrialRunTracking), _state(state) {
_tid = _state->addConsumer(this);
_orderPreserving = _state->isOrderPreserving();
}
std::unique_ptr<PlanStage> ExchangeConsumer::clone() const {
- return std::make_unique<ExchangeConsumer>(_state, _commonStats.nodeId);
+ return std::make_unique<ExchangeConsumer>(
+ _state, _commonStats.nodeId, _participateInTrialRunTracking);
}
void ExchangeConsumer::prepare(CompileCtx& ctx) {
for (size_t idx = 0; idx < _state->fields().size(); ++idx) {
@@ -486,8 +490,9 @@ void ExchangeProducer::closePipes() {
ExchangeProducer::ExchangeProducer(std::unique_ptr<PlanStage> input,
std::shared_ptr<ExchangeState> state,
- PlanNodeId planNodeId)
- : PlanStage("exchangep"_sd, planNodeId), _state(state) {
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("exchangep"_sd, planNodeId, participateInTrialRunTracking), _state(state) {
_children.emplace_back(std::move(input));
_tid = _state->addProducer(this);
diff --git a/src/mongo/db/exec/sbe/stages/exchange.h b/src/mongo/db/exec/sbe/stages/exchange.h
index b94b4968f66..15928cd50fb 100644
--- a/src/mongo/db/exec/sbe/stages/exchange.h
+++ b/src/mongo/db/exec/sbe/stages/exchange.h
@@ -261,9 +261,12 @@ public:
ExchangePolicy policy,
std::unique_ptr<EExpression> partition,
std::unique_ptr<EExpression> orderLess,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
- ExchangeConsumer(std::shared_ptr<ExchangeState> state, PlanNodeId planNodeId);
+ ExchangeConsumer(std::shared_ptr<ExchangeState> state,
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
@@ -311,7 +314,8 @@ class ExchangeProducer final : public PlanStage {
public:
ExchangeProducer(std::unique_ptr<PlanStage> input,
std::shared_ptr<ExchangeState> state,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
static void start(OperationContext* opCtx,
CompileCtx& ctx,
diff --git a/src/mongo/db/exec/sbe/stages/filter.h b/src/mongo/db/exec/sbe/stages/filter.h
index 2120be1c062..059dd1c7ab4 100644
--- a/src/mongo/db/exec/sbe/stages/filter.h
+++ b/src/mongo/db/exec/sbe/stages/filter.h
@@ -58,16 +58,21 @@ class FilterStage final : public PlanStage {
public:
FilterStage(std::unique_ptr<PlanStage> input,
std::unique_ptr<EExpression> filter,
- PlanNodeId planNodeId)
- : PlanStage(IsConst ? "cfilter"_sd : (IsEof ? "efilter" : "filter"_sd), planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true)
+ : PlanStage(IsConst ? "cfilter"_sd : (IsEof ? "efilter" : "filter"_sd),
+ planNodeId,
+ participateInTrialRunTracking),
_filter(std::move(filter)) {
static_assert(!IsEof || !IsConst);
_children.emplace_back(std::move(input));
}
std::unique_ptr<PlanStage> clone() const final {
- return std::make_unique<FilterStage<IsConst, IsEof>>(
- _children[0]->clone(), _filter->clone(), _commonStats.nodeId);
+ return std::make_unique<FilterStage<IsConst, IsEof>>(_children[0]->clone(),
+ _filter->clone(),
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void prepare(CompileCtx& ctx) final {
diff --git a/src/mongo/db/exec/sbe/stages/hash_agg.cpp b/src/mongo/db/exec/sbe/stages/hash_agg.cpp
index 1dcbc500ec8..f930d4b5e95 100644
--- a/src/mongo/db/exec/sbe/stages/hash_agg.cpp
+++ b/src/mongo/db/exec/sbe/stages/hash_agg.cpp
@@ -47,8 +47,9 @@ HashAggStage::HashAggStage(std::unique_ptr<PlanStage> input,
bool optimizedClose,
boost::optional<value::SlotId> collatorSlot,
bool allowDiskUse,
- PlanNodeId planNodeId)
- : PlanStage("group"_sd, planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("group"_sd, planNodeId, participateInTrialRunTracking),
_gbs(std::move(gbs)),
_aggs(std::move(aggs)),
_collatorSlot(collatorSlot),
@@ -74,7 +75,8 @@ std::unique_ptr<PlanStage> HashAggStage::clone() const {
_optimizedClose,
_collatorSlot,
_allowDiskUse,
- _commonStats.nodeId);
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void HashAggStage::doSaveState(bool relinquishCursor) {
diff --git a/src/mongo/db/exec/sbe/stages/hash_agg.h b/src/mongo/db/exec/sbe/stages/hash_agg.h
index 19fbca9d1c7..d200c4b9c3d 100644
--- a/src/mongo/db/exec/sbe/stages/hash_agg.h
+++ b/src/mongo/db/exec/sbe/stages/hash_agg.h
@@ -75,7 +75,8 @@ public:
bool optimizedClose,
boost::optional<value::SlotId> collatorSlot,
bool allowDiskUse,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/hash_join.cpp b/src/mongo/db/exec/sbe/stages/hash_join.cpp
index 86675029c0e..bad53262acb 100644
--- a/src/mongo/db/exec/sbe/stages/hash_join.cpp
+++ b/src/mongo/db/exec/sbe/stages/hash_join.cpp
@@ -44,8 +44,9 @@ HashJoinStage::HashJoinStage(std::unique_ptr<PlanStage> outer,
value::SlotVector innerCond,
value::SlotVector innerProjects,
boost::optional<value::SlotId> collatorSlot,
- PlanNodeId planNodeId)
- : PlanStage("hj"_sd, planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("hj"_sd, planNodeId, participateInTrialRunTracking),
_outerCond(std::move(outerCond)),
_outerProjects(std::move(outerProjects)),
_innerCond(std::move(innerCond)),
@@ -68,7 +69,8 @@ std::unique_ptr<PlanStage> HashJoinStage::clone() const {
_innerCond,
_innerProjects,
_collatorSlot,
- _commonStats.nodeId);
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void HashJoinStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/hash_join.h b/src/mongo/db/exec/sbe/stages/hash_join.h
index ed4781116d9..a3997074db0 100644
--- a/src/mongo/db/exec/sbe/stages/hash_join.h
+++ b/src/mongo/db/exec/sbe/stages/hash_join.h
@@ -66,7 +66,8 @@ public:
value::SlotVector innerCond,
value::SlotVector innerProjects,
boost::optional<value::SlotId> collatorSlot,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/hash_lookup.cpp b/src/mongo/db/exec/sbe/stages/hash_lookup.cpp
index a65f2f8bd89..16e61d68630 100644
--- a/src/mongo/db/exec/sbe/stages/hash_lookup.cpp
+++ b/src/mongo/db/exec/sbe/stages/hash_lookup.cpp
@@ -47,8 +47,9 @@ HashLookupStage::HashLookupStage(std::unique_ptr<PlanStage> outer,
value::SlotVector innerProjects,
value::SlotMap<std::unique_ptr<EExpression>> innerAggs,
boost::optional<value::SlotId> collatorSlot,
- PlanNodeId planNodeId)
- : PlanStage("hash_lookup"_sd, planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("hash_lookup"_sd, planNodeId, participateInTrialRunTracking),
_outerCond(outerCond),
_innerCond(innerCond),
_innerProjects(innerProjects),
@@ -72,7 +73,8 @@ std::unique_ptr<PlanStage> HashLookupStage::clone() const {
_innerProjects,
std::move(innerAggs),
_collatorSlot,
- _commonStats.nodeId);
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void HashLookupStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/hash_lookup.h b/src/mongo/db/exec/sbe/stages/hash_lookup.h
index 2e3f0b34816..611c5603606 100644
--- a/src/mongo/db/exec/sbe/stages/hash_lookup.h
+++ b/src/mongo/db/exec/sbe/stages/hash_lookup.h
@@ -86,7 +86,8 @@ public:
value::SlotVector innerProjects,
value::SlotMap<std::unique_ptr<EExpression>> innerAggs,
boost::optional<value::SlotId> collatorSlot,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/ix_scan.cpp b/src/mongo/db/exec/sbe/stages/ix_scan.cpp
index bfad6d9a2ae..2029ac4d356 100644
--- a/src/mongo/db/exec/sbe/stages/ix_scan.cpp
+++ b/src/mongo/db/exec/sbe/stages/ix_scan.cpp
@@ -50,8 +50,12 @@ IndexScanStage::IndexScanStage(UUID collUuid,
boost::optional<value::SlotId> seekKeySlotLow,
boost::optional<value::SlotId> seekKeySlotHigh,
PlanYieldPolicy* yieldPolicy,
- PlanNodeId nodeId)
- : PlanStage(seekKeySlotLow ? "ixseek"_sd : "ixscan"_sd, yieldPolicy, nodeId),
+ PlanNodeId nodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage(seekKeySlotLow ? "ixseek"_sd : "ixscan"_sd,
+ yieldPolicy,
+ nodeId,
+ participateInTrialRunTracking),
_collUuid(collUuid),
_indexName(indexName),
_forward(forward),
@@ -81,7 +85,8 @@ std::unique_ptr<PlanStage> IndexScanStage::clone() const {
_seekKeySlotLow,
_seekKeySlotHigh,
_yieldPolicy,
- _commonStats.nodeId);
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void IndexScanStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/ix_scan.h b/src/mongo/db/exec/sbe/stages/ix_scan.h
index ce00ef17128..da61cb544ec 100644
--- a/src/mongo/db/exec/sbe/stages/ix_scan.h
+++ b/src/mongo/db/exec/sbe/stages/ix_scan.h
@@ -83,7 +83,8 @@ public:
boost::optional<value::SlotId> seekKeySlotLow,
boost::optional<value::SlotId> seekKeySlotHigh,
PlanYieldPolicy* yieldPolicy,
- PlanNodeId nodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/limit_skip.cpp b/src/mongo/db/exec/sbe/stages/limit_skip.cpp
index 359355582ac..8343f56ca96 100644
--- a/src/mongo/db/exec/sbe/stages/limit_skip.cpp
+++ b/src/mongo/db/exec/sbe/stages/limit_skip.cpp
@@ -37,8 +37,9 @@ namespace mongo::sbe {
LimitSkipStage::LimitSkipStage(std::unique_ptr<PlanStage> input,
boost::optional<long long> limit,
boost::optional<long long> skip,
- PlanNodeId planNodeId)
- : PlanStage(!skip ? "limit"_sd : "limitskip"_sd, planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage(!skip ? "limit"_sd : "limitskip"_sd, planNodeId, participateInTrialRunTracking),
_limit(limit),
_skip(skip),
_current(0),
@@ -51,7 +52,7 @@ LimitSkipStage::LimitSkipStage(std::unique_ptr<PlanStage> input,
std::unique_ptr<PlanStage> LimitSkipStage::clone() const {
return std::make_unique<LimitSkipStage>(
- _children[0]->clone(), _limit, _skip, _commonStats.nodeId);
+ _children[0]->clone(), _limit, _skip, _commonStats.nodeId, _participateInTrialRunTracking);
}
void LimitSkipStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/limit_skip.h b/src/mongo/db/exec/sbe/stages/limit_skip.h
index f0f62b34239..7fc366a2174 100644
--- a/src/mongo/db/exec/sbe/stages/limit_skip.h
+++ b/src/mongo/db/exec/sbe/stages/limit_skip.h
@@ -50,7 +50,8 @@ public:
LimitSkipStage(std::unique_ptr<PlanStage> input,
boost::optional<long long> limit,
boost::optional<long long> skip,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/loop_join.cpp b/src/mongo/db/exec/sbe/stages/loop_join.cpp
index 6c49f2e700a..3df5e179a09 100644
--- a/src/mongo/db/exec/sbe/stages/loop_join.cpp
+++ b/src/mongo/db/exec/sbe/stages/loop_join.cpp
@@ -41,7 +41,8 @@ LoopJoinStage::LoopJoinStage(std::unique_ptr<PlanStage> outer,
value::SlotVector outerProjects,
value::SlotVector outerCorrelated,
std::unique_ptr<EExpression> predicate,
- PlanNodeId nodeId)
+ PlanNodeId nodeId,
+ bool participateInTrialRunTracking)
: LoopJoinStage(std::move(outer),
std::move(inner),
std::move(outerProjects),
@@ -49,7 +50,8 @@ LoopJoinStage::LoopJoinStage(std::unique_ptr<PlanStage> outer,
value::SlotVector{},
std::move(predicate),
JoinType::Inner,
- nodeId) {}
+ nodeId,
+ participateInTrialRunTracking) {}
LoopJoinStage::LoopJoinStage(std::unique_ptr<PlanStage> outer,
std::unique_ptr<PlanStage> inner,
@@ -58,8 +60,9 @@ LoopJoinStage::LoopJoinStage(std::unique_ptr<PlanStage> outer,
value::SlotVector innerProjects,
std::unique_ptr<EExpression> predicate,
JoinType joinType,
- PlanNodeId nodeId)
- : PlanStage("nlj"_sd, nodeId),
+ PlanNodeId nodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("nlj"_sd, nodeId, participateInTrialRunTracking),
_outerProjects(std::move(outerProjects)),
_outerCorrelated(std::move(outerCorrelated)),
_innerProjects(std::move(innerProjects)),
@@ -80,7 +83,8 @@ std::unique_ptr<PlanStage> LoopJoinStage::clone() const {
_innerProjects,
_predicate ? _predicate->clone() : nullptr,
_joinType,
- _commonStats.nodeId);
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void LoopJoinStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/loop_join.h b/src/mongo/db/exec/sbe/stages/loop_join.h
index 076655bca4c..c69010071fd 100644
--- a/src/mongo/db/exec/sbe/stages/loop_join.h
+++ b/src/mongo/db/exec/sbe/stages/loop_join.h
@@ -63,7 +63,8 @@ public:
value::SlotVector outerProjects,
value::SlotVector outerCorrelated,
std::unique_ptr<EExpression> predicate,
- PlanNodeId nodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
LoopJoinStage(std::unique_ptr<PlanStage> outer,
std::unique_ptr<PlanStage> inner,
@@ -72,7 +73,8 @@ public:
value::SlotVector innerProjects,
std::unique_ptr<EExpression> predicate,
JoinType joinType,
- PlanNodeId nodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/makeobj.cpp b/src/mongo/db/exec/sbe/stages/makeobj.cpp
index cefd83d0035..0c84fde3083 100644
--- a/src/mongo/db/exec/sbe/stages/makeobj.cpp
+++ b/src/mongo/db/exec/sbe/stages/makeobj.cpp
@@ -46,8 +46,11 @@ MakeObjStageBase<O>::MakeObjStageBase(std::unique_ptr<PlanStage> input,
value::SlotVector projectVars,
bool forceNewObject,
bool returnOldObject,
- PlanNodeId planNodeId)
- : PlanStage(O == MakeObjOutputType::object ? "mkobj"_sd : "mkbson"_sd, planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage(O == MakeObjOutputType::object ? "mkobj"_sd : "mkbson"_sd,
+ planNodeId,
+ participateInTrialRunTracking),
_objSlot(objSlot),
_rootSlot(rootSlot),
_fieldBehavior(fieldBehavior),
@@ -95,7 +98,8 @@ std::unique_ptr<PlanStage> MakeObjStageBase<O>::clone() const {
_projectVars,
_forceNewObject,
_returnOldObject,
- _commonStats.nodeId);
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
template <MakeObjOutputType O>
diff --git a/src/mongo/db/exec/sbe/stages/makeobj.h b/src/mongo/db/exec/sbe/stages/makeobj.h
index 1f2dc183d97..3034470b95a 100644
--- a/src/mongo/db/exec/sbe/stages/makeobj.h
+++ b/src/mongo/db/exec/sbe/stages/makeobj.h
@@ -87,7 +87,8 @@ public:
value::SlotVector projectVars,
bool forceNewObject,
bool returnOldObject,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
/**
* A convenience constructor that takes a set instead of a vector for 'fields' and
diff --git a/src/mongo/db/exec/sbe/stages/merge_join.cpp b/src/mongo/db/exec/sbe/stages/merge_join.cpp
index 170227e0575..d6f03af7502 100644
--- a/src/mongo/db/exec/sbe/stages/merge_join.cpp
+++ b/src/mongo/db/exec/sbe/stages/merge_join.cpp
@@ -76,8 +76,9 @@ MergeJoinStage::MergeJoinStage(std::unique_ptr<PlanStage> outer,
value::SlotVector innerKeys,
value::SlotVector innerProjects,
std::vector<value::SortDirection> sortDirs,
- PlanNodeId planNodeId)
- : PlanStage("mj"_sd, planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("mj"_sd, planNodeId, participateInTrialRunTracking),
_outerKeys(std::move(outerKeys)),
_outerProjects(std::move(outerProjects)),
_innerKeys(std::move(innerKeys)),
@@ -104,7 +105,8 @@ std::unique_ptr<PlanStage> MergeJoinStage::clone() const {
_innerKeys,
_innerProjects,
_dirs,
- _commonStats.nodeId);
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void MergeJoinStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/merge_join.h b/src/mongo/db/exec/sbe/stages/merge_join.h
index b0f61cd677c..ff94784ac0d 100644
--- a/src/mongo/db/exec/sbe/stages/merge_join.h
+++ b/src/mongo/db/exec/sbe/stages/merge_join.h
@@ -62,7 +62,8 @@ public:
value::SlotVector innerKeys,
value::SlotVector innerProjects,
std::vector<value::SortDirection> sortDirs,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/project.cpp b/src/mongo/db/exec/sbe/stages/project.cpp
index 736110bc83a..c534c5c8cdc 100644
--- a/src/mongo/db/exec/sbe/stages/project.cpp
+++ b/src/mongo/db/exec/sbe/stages/project.cpp
@@ -37,8 +37,10 @@ namespace mongo {
namespace sbe {
ProjectStage::ProjectStage(std::unique_ptr<PlanStage> input,
value::SlotMap<std::unique_ptr<EExpression>> projects,
- PlanNodeId nodeId)
- : PlanStage("project"_sd, nodeId), _projects(std::move(projects)) {
+ PlanNodeId nodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("project"_sd, nodeId, participateInTrialRunTracking),
+ _projects(std::move(projects)) {
_children.emplace_back(std::move(input));
}
@@ -47,8 +49,10 @@ std::unique_ptr<PlanStage> ProjectStage::clone() const {
for (auto& [k, v] : _projects) {
projects.emplace(k, v->clone());
}
- return std::make_unique<ProjectStage>(
- _children[0]->clone(), std::move(projects), _commonStats.nodeId);
+ return std::make_unique<ProjectStage>(_children[0]->clone(),
+ std::move(projects),
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void ProjectStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/project.h b/src/mongo/db/exec/sbe/stages/project.h
index 1754dd7d2a9..bf4e169c8c9 100644
--- a/src/mongo/db/exec/sbe/stages/project.h
+++ b/src/mongo/db/exec/sbe/stages/project.h
@@ -47,7 +47,8 @@ class ProjectStage final : public PlanStage {
public:
ProjectStage(std::unique_ptr<PlanStage> input,
value::SlotMap<std::unique_ptr<EExpression>> projects,
- PlanNodeId nodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/scan.cpp b/src/mongo/db/exec/sbe/stages/scan.cpp
index 678d3f84ef9..fbbc3a9ae0d 100644
--- a/src/mongo/db/exec/sbe/stages/scan.cpp
+++ b/src/mongo/db/exec/sbe/stages/scan.cpp
@@ -56,8 +56,10 @@ ScanStage::ScanStage(UUID collectionUuid,
PlanYieldPolicy* yieldPolicy,
PlanNodeId nodeId,
ScanCallbacks scanCallbacks,
- bool useRandomCursor)
- : PlanStage(seekKeySlot ? "seek"_sd : "scan"_sd, yieldPolicy, nodeId),
+ bool useRandomCursor,
+ bool participateInTrialRunTracking)
+ : PlanStage(
+ seekKeySlot ? "seek"_sd : "scan"_sd, yieldPolicy, nodeId, participateInTrialRunTracking),
_collUuid(collectionUuid),
_recordSlot(recordSlot),
_recordIdSlot(recordIdSlot),
@@ -98,7 +100,9 @@ std::unique_ptr<PlanStage> ScanStage::clone() const {
_forward,
_yieldPolicy,
_commonStats.nodeId,
- _scanCallbacks);
+ _scanCallbacks,
+ _useRandomCursor,
+ _participateInTrialRunTracking);
}
void ScanStage::prepare(CompileCtx& ctx) {
@@ -592,8 +596,9 @@ ParallelScanStage::ParallelScanStage(UUID collectionUuid,
value::SlotVector vars,
PlanYieldPolicy* yieldPolicy,
PlanNodeId nodeId,
- ScanCallbacks callbacks)
- : PlanStage("pscan"_sd, yieldPolicy, nodeId),
+ ScanCallbacks callbacks,
+ bool participateInTrialRunTracking)
+ : PlanStage("pscan"_sd, yieldPolicy, nodeId, participateInTrialRunTracking),
_collUuid(collectionUuid),
_recordSlot(recordSlot),
_recordIdSlot(recordIdSlot),
@@ -621,8 +626,9 @@ ParallelScanStage::ParallelScanStage(const std::shared_ptr<ParallelState>& state
value::SlotVector vars,
PlanYieldPolicy* yieldPolicy,
PlanNodeId nodeId,
- ScanCallbacks callbacks)
- : PlanStage("pscan"_sd, yieldPolicy, nodeId),
+ ScanCallbacks callbacks,
+ bool participateInTrialRunTracking)
+ : PlanStage("pscan"_sd, yieldPolicy, nodeId, participateInTrialRunTracking),
_collUuid(collectionUuid),
_recordSlot(recordSlot),
_recordIdSlot(recordIdSlot),
@@ -650,7 +656,8 @@ std::unique_ptr<PlanStage> ParallelScanStage::clone() const {
_vars,
_yieldPolicy,
_commonStats.nodeId,
- _scanCallbacks);
+ _scanCallbacks,
+ _participateInTrialRunTracking);
}
void ParallelScanStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/scan.h b/src/mongo/db/exec/sbe/stages/scan.h
index 37462ac5e14..ed138f6302e 100644
--- a/src/mongo/db/exec/sbe/stages/scan.h
+++ b/src/mongo/db/exec/sbe/stages/scan.h
@@ -108,7 +108,8 @@ public:
PlanYieldPolicy* yieldPolicy,
PlanNodeId nodeId,
ScanCallbacks scanCallbacks,
- bool useRandomCursor = false);
+ bool useRandomCursor = false,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
@@ -227,7 +228,8 @@ public:
value::SlotVector vars,
PlanYieldPolicy* yieldPolicy,
PlanNodeId nodeId,
- ScanCallbacks callbacks);
+ ScanCallbacks callbacks,
+ bool participateInTrialRunTracking = true);
ParallelScanStage(const std::shared_ptr<ParallelState>& state,
const UUID& collectionUuid,
@@ -241,7 +243,8 @@ public:
value::SlotVector vars,
PlanYieldPolicy* yieldPolicy,
PlanNodeId nodeId,
- ScanCallbacks callbacks);
+ ScanCallbacks callbacks,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/sort.cpp b/src/mongo/db/exec/sbe/stages/sort.cpp
index 5acf73afe8d..0968b0bea68 100644
--- a/src/mongo/db/exec/sbe/stages/sort.cpp
+++ b/src/mongo/db/exec/sbe/stages/sort.cpp
@@ -55,8 +55,9 @@ SortStage::SortStage(std::unique_ptr<PlanStage> input,
size_t limit,
size_t memoryLimit,
bool allowDiskUse,
- PlanNodeId planNodeId)
- : PlanStage("sort"_sd, planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("sort"_sd, planNodeId, participateInTrialRunTracking),
_obs(std::move(obs)),
_dirs(std::move(dirs)),
_vals(std::move(vals)),
@@ -80,7 +81,8 @@ std::unique_ptr<PlanStage> SortStage::clone() const {
_specificStats.limit,
_specificStats.maxMemoryUsageBytes,
_allowDiskUse,
- _commonStats.nodeId);
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void SortStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/sort.h b/src/mongo/db/exec/sbe/stages/sort.h
index 2bfc9e1d9fb..dda9716b75b 100644
--- a/src/mongo/db/exec/sbe/stages/sort.h
+++ b/src/mongo/db/exec/sbe/stages/sort.h
@@ -70,7 +70,8 @@ public:
size_t limit,
size_t memoryLimit,
bool allowDiskUse,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
~SortStage();
diff --git a/src/mongo/db/exec/sbe/stages/sorted_merge.cpp b/src/mongo/db/exec/sbe/stages/sorted_merge.cpp
index f0a648f38ad..39cee407a00 100644
--- a/src/mongo/db/exec/sbe/stages/sorted_merge.cpp
+++ b/src/mongo/db/exec/sbe/stages/sorted_merge.cpp
@@ -41,8 +41,9 @@ SortedMergeStage::SortedMergeStage(PlanStage::Vector inputStages,
std::vector<value::SortDirection> dirs,
std::vector<value::SlotVector> inputVals,
value::SlotVector outputVals,
- PlanNodeId planNodeId)
- : PlanStage("smerge"_sd, planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("smerge"_sd, planNodeId, participateInTrialRunTracking),
_inputKeys(std::move(inputKeys)),
_dirs(std::move(dirs)),
_inputVals(std::move(inputVals)),
@@ -69,8 +70,13 @@ std::unique_ptr<PlanStage> SortedMergeStage::clone() const {
for (auto& child : _children) {
inputStages.emplace_back(child->clone());
}
- return std::make_unique<SortedMergeStage>(
- std::move(inputStages), _inputKeys, _dirs, _inputVals, _outputVals, _commonStats.nodeId);
+ return std::make_unique<SortedMergeStage>(std::move(inputStages),
+ _inputKeys,
+ _dirs,
+ _inputVals,
+ _outputVals,
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void SortedMergeStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/sorted_merge.h b/src/mongo/db/exec/sbe/stages/sorted_merge.h
index 3b87e4c8849..436ddfce080 100644
--- a/src/mongo/db/exec/sbe/stages/sorted_merge.h
+++ b/src/mongo/db/exec/sbe/stages/sorted_merge.h
@@ -61,7 +61,8 @@ public:
// Each element of 'inputVals' must be the same size as 'outputVals'.
std::vector<value::SlotVector> inputVals,
value::SlotVector outputVals,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/spool.cpp b/src/mongo/db/exec/sbe/stages/spool.cpp
index 4550f569b09..47ca744962c 100644
--- a/src/mongo/db/exec/sbe/stages/spool.cpp
+++ b/src/mongo/db/exec/sbe/stages/spool.cpp
@@ -35,14 +35,20 @@ namespace mongo::sbe {
SpoolEagerProducerStage::SpoolEagerProducerStage(std::unique_ptr<PlanStage> input,
SpoolId spoolId,
value::SlotVector vals,
- PlanNodeId planNodeId)
- : PlanStage{"espool"_sd, planNodeId}, _spoolId{spoolId}, _vals{std::move(vals)} {
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage{"espool"_sd, planNodeId, participateInTrialRunTracking},
+ _spoolId{spoolId},
+ _vals{std::move(vals)} {
_children.emplace_back(std::move(input));
}
std::unique_ptr<PlanStage> SpoolEagerProducerStage::clone() const {
- return std::make_unique<SpoolEagerProducerStage>(
- _children[0]->clone(), _spoolId, _vals, _commonStats.nodeId);
+ return std::make_unique<SpoolEagerProducerStage>(_children[0]->clone(),
+ _spoolId,
+ _vals,
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void SpoolEagerProducerStage::prepare(CompileCtx& ctx) {
@@ -171,8 +177,9 @@ SpoolLazyProducerStage::SpoolLazyProducerStage(std::unique_ptr<PlanStage> input,
SpoolId spoolId,
value::SlotVector vals,
std::unique_ptr<EExpression> predicate,
- PlanNodeId planNodeId)
- : PlanStage{"lspool"_sd, planNodeId},
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage{"lspool"_sd, planNodeId, participateInTrialRunTracking},
_spoolId{spoolId},
_vals{std::move(vals)},
_predicate{std::move(predicate)} {
@@ -180,8 +187,12 @@ SpoolLazyProducerStage::SpoolLazyProducerStage(std::unique_ptr<PlanStage> input,
}
std::unique_ptr<PlanStage> SpoolLazyProducerStage::clone() const {
- return std::make_unique<SpoolLazyProducerStage>(
- _children[0]->clone(), _spoolId, _vals, _predicate->clone(), _commonStats.nodeId);
+ return std::make_unique<SpoolLazyProducerStage>(_children[0]->clone(),
+ _spoolId,
+ _vals,
+ _predicate->clone(),
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void SpoolLazyProducerStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/spool.h b/src/mongo/db/exec/sbe/stages/spool.h
index a2dd6f81657..09a453e0e0e 100644
--- a/src/mongo/db/exec/sbe/stages/spool.h
+++ b/src/mongo/db/exec/sbe/stages/spool.h
@@ -56,7 +56,8 @@ public:
SpoolEagerProducerStage(std::unique_ptr<PlanStage> input,
SpoolId spoolId,
value::SlotVector vals,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
@@ -109,7 +110,8 @@ public:
SpoolId spoolId,
value::SlotVector vals,
std::unique_ptr<EExpression> predicate,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
@@ -165,13 +167,17 @@ private:
template <bool IsStack>
class SpoolConsumerStage final : public PlanStage {
public:
- SpoolConsumerStage(SpoolId spoolId, value::SlotVector vals, PlanNodeId planNodeId)
- : PlanStage{IsStack ? "sspool"_sd : "cspool"_sd, planNodeId},
+ SpoolConsumerStage(SpoolId spoolId,
+ value::SlotVector vals,
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true)
+ : PlanStage{IsStack ? "sspool"_sd : "cspool"_sd, planNodeId, participateInTrialRunTracking},
_spoolId{spoolId},
_vals{std::move(vals)} {}
std::unique_ptr<PlanStage> clone() const {
- return std::make_unique<SpoolConsumerStage<IsStack>>(_spoolId, _vals, _commonStats.nodeId);
+ return std::make_unique<SpoolConsumerStage<IsStack>>(
+ _spoolId, _vals, _commonStats.nodeId, _participateInTrialRunTracking);
}
void prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/stages.h b/src/mongo/db/exec/sbe/stages/stages.h
index 59f6746a005..02dd6ae62fb 100644
--- a/src/mongo/db/exec/sbe/stages/stages.h
+++ b/src/mongo/db/exec/sbe/stages/stages.h
@@ -254,7 +254,9 @@ protected:
template <typename T>
class CanTrackStats {
public:
- CanTrackStats(StringData stageType, PlanNodeId nodeId) : _commonStats(stageType, nodeId) {}
+ CanTrackStats(StringData stageType, PlanNodeId nodeId, bool participateInTrialRunTracking)
+ : _commonStats(stageType, nodeId),
+ _participateInTrialRunTracking(participateInTrialRunTracking) {}
/**
* Returns a tree of stats. If the stage has any children it must propagate the request for
@@ -414,6 +416,12 @@ protected:
CommonStats _commonStats;
+ // Flag which determines whether this node and its children can participate in trial run
+ // tracking. A stage and its children are not eligible for trial run tracking when they are
+ // planned deterministically (that is, the amount of work they perform is independent of
+ // other parts of the tree which are multiplanned).
+ bool _participateInTrialRunTracking{true};
+
private:
/**
* In general, accessors can be accessed only after getNext returns a row. It is most definitely
@@ -422,14 +430,6 @@ private:
* that feature is retired we can then simply revisit all stages and simplify them.
*/
bool _slotsAccessible{false};
-
- /**
- * Flag which determines whether this node and its children can participate in trial run
- * tracking. A stage and its children are not eligible for trial run tracking when they are
- * planned deterministically (that is, the amount of work they perform is independent of
- * other parts of the tree which are multiplanned).
- */
- bool _participateInTrialRunTracking{true};
};
/**
@@ -496,10 +496,15 @@ class PlanStage : public CanSwitchOperationContext<PlanStage>,
public:
using Vector = absl::InlinedVector<std::unique_ptr<PlanStage>, 2>;
- PlanStage(StringData stageType, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId)
- : CanTrackStats{stageType, nodeId}, CanInterrupt{yieldPolicy} {}
+ PlanStage(StringData stageType,
+ PlanYieldPolicy* yieldPolicy,
+ PlanNodeId nodeId,
+ bool participateInTrialRunTracking)
+ : CanTrackStats{stageType, nodeId, participateInTrialRunTracking},
+ CanInterrupt{yieldPolicy} {}
- PlanStage(StringData stageType, PlanNodeId nodeId) : PlanStage(stageType, nullptr, nodeId) {}
+ PlanStage(StringData stageType, PlanNodeId nodeId, bool participateInTrialRunTracking)
+ : PlanStage(stageType, nullptr, nodeId, participateInTrialRunTracking) {}
virtual ~PlanStage() = default;
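Every stage constructor above threads the new flag through to CanTrackStats, and it defaults to true, so existing callers compile unchanged. A builder that knows a subtree is planned deterministically can opt it out explicitly; a minimal sketch, assuming a hypothetical child stage and node id:

    // Build a limit-1 subtree whose work is independent of which candidate plan wins,
    // so it should not contribute to trial run statistics.
    auto subtree = std::make_unique<sbe::LimitSkipStage>(
        std::move(childStage),   // hypothetical child PlanStage
        1LL /* limit */,
        boost::none /* skip */,
        kNodeId,                 // hypothetical PlanNodeId
        false /* participateInTrialRunTracking */);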
diff --git a/src/mongo/db/exec/sbe/stages/traverse.cpp b/src/mongo/db/exec/sbe/stages/traverse.cpp
index d1e0a040b3e..654a1a160fa 100644
--- a/src/mongo/db/exec/sbe/stages/traverse.cpp
+++ b/src/mongo/db/exec/sbe/stages/traverse.cpp
@@ -42,8 +42,9 @@ TraverseStage::TraverseStage(std::unique_ptr<PlanStage> outer,
std::unique_ptr<EExpression> foldExpr,
std::unique_ptr<EExpression> finalExpr,
PlanNodeId planNodeId,
- boost::optional<size_t> nestedArraysDepth)
- : PlanStage("traverse"_sd, planNodeId),
+ boost::optional<size_t> nestedArraysDepth,
+ bool participateInTrialRunTracking)
+ : PlanStage("traverse"_sd, planNodeId, participateInTrialRunTracking),
_inField(inField),
_outField(outField),
_outFieldInner(outFieldInner),
@@ -69,7 +70,8 @@ std::unique_ptr<PlanStage> TraverseStage::clone() const {
_fold ? _fold->clone() : nullptr,
_final ? _final->clone() : nullptr,
_commonStats.nodeId,
- _nestedArraysDepth);
+ _nestedArraysDepth,
+ _participateInTrialRunTracking);
}
void TraverseStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/traverse.h b/src/mongo/db/exec/sbe/stages/traverse.h
index 2b3fee33a47..09e5dc3dfcf 100644
--- a/src/mongo/db/exec/sbe/stages/traverse.h
+++ b/src/mongo/db/exec/sbe/stages/traverse.h
@@ -74,7 +74,8 @@ public:
std::unique_ptr<EExpression> foldExpr,
std::unique_ptr<EExpression> finalExpr,
PlanNodeId planNodeId,
- boost::optional<size_t> nestedArraysDepth);
+ boost::optional<size_t> nestedArraysDepth,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/union.cpp b/src/mongo/db/exec/sbe/stages/union.cpp
index a661e6c579f..2fd6d0b4fc5 100644
--- a/src/mongo/db/exec/sbe/stages/union.cpp
+++ b/src/mongo/db/exec/sbe/stages/union.cpp
@@ -38,8 +38,9 @@ namespace mongo::sbe {
UnionStage::UnionStage(PlanStage::Vector inputStages,
std::vector<value::SlotVector> inputVals,
value::SlotVector outputVals,
- PlanNodeId planNodeId)
- : PlanStage("union"_sd, planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("union"_sd, planNodeId, participateInTrialRunTracking),
_inputVals{std::move(inputVals)},
_outputVals{std::move(outputVals)} {
_children = std::move(inputStages);
@@ -57,8 +58,11 @@ std::unique_ptr<PlanStage> UnionStage::clone() const {
for (auto& child : _children) {
inputStages.emplace_back(child->clone());
}
- return std::make_unique<UnionStage>(
- std::move(inputStages), _inputVals, _outputVals, _commonStats.nodeId);
+ return std::make_unique<UnionStage>(std::move(inputStages),
+ _inputVals,
+ _outputVals,
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void UnionStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/union.h b/src/mongo/db/exec/sbe/stages/union.h
index 2ec0ec73df9..b21d5e6caf5 100644
--- a/src/mongo/db/exec/sbe/stages/union.h
+++ b/src/mongo/db/exec/sbe/stages/union.h
@@ -53,7 +53,8 @@ public:
UnionStage(PlanStage::Vector inputStages,
std::vector<value::SlotVector> inputVals,
value::SlotVector outputVals,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/unique.cpp b/src/mongo/db/exec/sbe/stages/unique.cpp
index 355927ff912..c88fa9ab43e 100644
--- a/src/mongo/db/exec/sbe/stages/unique.cpp
+++ b/src/mongo/db/exec/sbe/stages/unique.cpp
@@ -37,13 +37,15 @@ namespace mongo {
namespace sbe {
UniqueStage::UniqueStage(std::unique_ptr<PlanStage> input,
value::SlotVector keys,
- PlanNodeId planNodeId)
- : PlanStage("unique"_sd, planNodeId), _keySlots(keys) {
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("unique"_sd, planNodeId, participateInTrialRunTracking), _keySlots(keys) {
_children.emplace_back(std::move(input));
}
std::unique_ptr<PlanStage> UniqueStage::clone() const {
- return std::make_unique<UniqueStage>(_children[0]->clone(), _keySlots, _commonStats.nodeId);
+ return std::make_unique<UniqueStage>(
+ _children[0]->clone(), _keySlots, _commonStats.nodeId, _participateInTrialRunTracking);
}
void UniqueStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/unique.h b/src/mongo/db/exec/sbe/stages/unique.h
index 1165743a0cc..c344cd09d24 100644
--- a/src/mongo/db/exec/sbe/stages/unique.h
+++ b/src/mongo/db/exec/sbe/stages/unique.h
@@ -53,7 +53,10 @@ namespace mongo::sbe {
*/
class UniqueStage final : public PlanStage {
public:
- UniqueStage(std::unique_ptr<PlanStage> input, value::SlotVector keys, PlanNodeId planNodeId);
+ UniqueStage(std::unique_ptr<PlanStage> input,
+ value::SlotVector keys,
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/stages/unwind.cpp b/src/mongo/db/exec/sbe/stages/unwind.cpp
index b4c5e225adc..7ad10eecb23 100644
--- a/src/mongo/db/exec/sbe/stages/unwind.cpp
+++ b/src/mongo/db/exec/sbe/stages/unwind.cpp
@@ -40,8 +40,9 @@ UnwindStage::UnwindStage(std::unique_ptr<PlanStage> input,
value::SlotId outField,
value::SlotId outIndex,
bool preserveNullAndEmptyArrays,
- PlanNodeId planNodeId)
- : PlanStage("unwind"_sd, planNodeId),
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking)
+ : PlanStage("unwind"_sd, planNodeId, participateInTrialRunTracking),
_inField(inField),
_outField(outField),
_outIndex(outIndex),
@@ -59,7 +60,8 @@ std::unique_ptr<PlanStage> UnwindStage::clone() const {
_outField,
_outIndex,
_preserveNullAndEmptyArrays,
- _commonStats.nodeId);
+ _commonStats.nodeId,
+ _participateInTrialRunTracking);
}
void UnwindStage::prepare(CompileCtx& ctx) {
diff --git a/src/mongo/db/exec/sbe/stages/unwind.h b/src/mongo/db/exec/sbe/stages/unwind.h
index 049fee4a069..57b28d9c1cf 100644
--- a/src/mongo/db/exec/sbe/stages/unwind.h
+++ b/src/mongo/db/exec/sbe/stages/unwind.h
@@ -52,7 +52,8 @@ public:
value::SlotId outField,
value::SlotId outIndex,
bool preserveNullAndEmptyArrays,
- PlanNodeId planNodeId);
+ PlanNodeId planNodeId,
+ bool participateInTrialRunTracking = true);
std::unique_ptr<PlanStage> clone() const final;
diff --git a/src/mongo/db/exec/sbe/vm/vm.cpp b/src/mongo/db/exec/sbe/vm/vm.cpp
index 3aae133219f..4f9329e7ed6 100644
--- a/src/mongo/db/exec/sbe/vm/vm.cpp
+++ b/src/mongo/db/exec/sbe/vm/vm.cpp
@@ -27,7 +27,6 @@
* it in the license file.
*/
-
#include "mongo/platform/basic.h"
#include "mongo/db/exec/sbe/expressions/expression.h"
@@ -213,17 +212,13 @@ std::string CodeFragment::toString() const {
case Instruction::cmp3w:
case Instruction::collCmp3w:
case Instruction::fillEmpty:
- case Instruction::fillEmptyConst:
case Instruction::getField:
- case Instruction::getFieldConst:
case Instruction::getElement:
case Instruction::getArraySize:
case Instruction::collComparisonKey:
case Instruction::getFieldOrElement:
case Instruction::traverseP:
- case Instruction::traversePConst:
case Instruction::traverseF:
- case Instruction::traverseFConst:
case Instruction::setField:
case Instruction::aggSum:
case Instruction::aggMin:
@@ -251,9 +246,15 @@ std::string CodeFragment::toString() const {
break;
}
// Instructions with a single integer argument.
+ case Instruction::pushLocalLambda:
+ case Instruction::traversePConst: {
+ auto offset = readFromMemory<int>(pcPointer);
+ pcPointer += sizeof(offset);
+ ss << "offset: " << offset;
+ break;
+ }
case Instruction::pushLocalVal:
- case Instruction::pushMoveLocalVal:
- case Instruction::pushLocalLambda: {
+ case Instruction::pushMoveLocalVal: {
auto arg = readFromMemory<int>(pcPointer);
pcPointer += sizeof(arg);
ss << "arg: " << arg;
@@ -268,6 +269,21 @@ std::string CodeFragment::toString() const {
break;
}
// Instructions with other kinds of arguments.
+ case Instruction::traverseFConst: {
+ auto k = readFromMemory<Instruction::Constants>(pcPointer);
+ pcPointer += sizeof(k);
+ auto offset = readFromMemory<int>(pcPointer);
+ pcPointer += sizeof(offset);
+ ss << "k: " << Instruction::toStringConstants(k) << ", offset: " << offset;
+ break;
+ }
+ case Instruction::fillEmptyConst: {
+ auto k = readFromMemory<Instruction::Constants>(pcPointer);
+ pcPointer += sizeof(k);
+ ss << "k: " << Instruction::toStringConstants(k);
+ break;
+ }
+ case Instruction::getFieldConst:
case Instruction::pushConstVal: {
auto tag = readFromMemory<value::TypeTags>(pcPointer);
pcPointer += sizeof(tag);
diff --git a/src/mongo/db/exec/sbe/vm/vm.h b/src/mongo/db/exec/sbe/vm/vm.h
index 801a139e5b8..2fec8265bfd 100644
--- a/src/mongo/db/exec/sbe/vm/vm.h
+++ b/src/mongo/db/exec/sbe/vm/vm.h
@@ -332,6 +332,19 @@ struct Instruction {
False,
};
+ static const char* toStringConstants(Constants k) {
+ switch (k) {
+ case Null:
+ return "Null";
+ case True:
+ return "True";
+ case False:
+ return "False";
+ default:
+ return "unknown";
+ }
+ }
+
// Make sure that values in this arrays are always in-sync with the enum.
static int stackOffset[];
@@ -777,6 +790,9 @@ public:
void fixup(int offset);
+ // For printing from an interactive debugger.
+ std::string toString() const;
+
private:
void appendSimpleInstruction(Instruction::Tags tag);
auto allocateSpace(size_t size) {
@@ -789,9 +805,6 @@ private:
void copyCodeAndFixup(CodeFragment&& from);
private:
- // For printing from an interactive debugger.
- std::string toString() const;
-
absl::InlinedVector<uint8_t, 16> _instrs;
/**
diff --git a/src/mongo/db/exec/write_stage_common.cpp b/src/mongo/db/exec/write_stage_common.cpp
index 0a1ed4179aa..3d885d9d50e 100644
--- a/src/mongo/db/exec/write_stage_common.cpp
+++ b/src/mongo/db/exec/write_stage_common.cpp
@@ -46,15 +46,6 @@
namespace mongo {
-namespace {
-
-bool computeIsStandaloneOrPrimary(OperationContext* opCtx) {
- const auto replCoord{repl::ReplicationCoordinator::get(opCtx)};
- return replCoord->canAcceptWritesForDatabase(opCtx, "admin");
-}
-
-} // namespace
-
namespace write_stage_common {
PreWriteFilter::PreWriteFilter(OperationContext* opCtx, NamespaceString nss)
@@ -65,14 +56,23 @@ PreWriteFilter::PreWriteFilter(OperationContext* opCtx, NamespaceString nss)
return fcv.isVersionInitialized() &&
feature_flags::gFeatureFlagNoChangeStreamEventsDueToOrphans.isEnabled(fcv);
}()),
- _isStandaloneOrPrimary(computeIsStandaloneOrPrimary(_opCtx)) {}
+ _skipFiltering([&] {
+ // Always allow writes on replica sets.
+ if (serverGlobalParams.clusterRole == ClusterRole::None) {
+ return true;
+ }
+
+ // Always allow writes on standalone and secondary nodes.
+ const auto replCoord{repl::ReplicationCoordinator::get(opCtx)};
+ return !replCoord->canAcceptWritesForDatabase(opCtx, NamespaceString::kAdminDb);
+ }()) {}
PreWriteFilter::Action PreWriteFilter::computeAction(const Document& doc) {
// Skip the checks if the Filter is not enabled.
if (!_isEnabled)
return Action::kWrite;
- if (!_isStandaloneOrPrimary) {
+ if (_skipFiltering) {
// Secondaries do not apply any filtering logic as the primary already did.
return Action::kWrite;
}
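The new _skipFiltering member folds the removed computeIsStandaloneOrPrimary() helper and the cluster-role check into a single value computed at construction. The same decision written out as a free function, purely for illustration (no such helper exists in the tree):

    bool wouldSkipPreWriteFiltering(OperationContext* opCtx) {
        // Orphaned documents only exist in sharded clusters, so non-sharded
        // deployments have nothing to filter.
        if (serverGlobalParams.clusterRole == ClusterRole::None) {
            return true;
        }
        // Secondaries also skip the filter; the primary has already applied it.
        const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
        return !replCoord->canAcceptWritesForDatabase(opCtx, NamespaceString::kAdminDb);
    }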
diff --git a/src/mongo/db/exec/write_stage_common.h b/src/mongo/db/exec/write_stage_common.h
index 3eff70da081..5628822efff 100644
--- a/src/mongo/db/exec/write_stage_common.h
+++ b/src/mongo/db/exec/write_stage_common.h
@@ -80,7 +80,7 @@ private:
OperationContext* _opCtx;
NamespaceString _nss;
const bool _isEnabled;
- const bool _isStandaloneOrPrimary;
+ const bool _skipFiltering;
std::unique_ptr<ShardFilterer> _shardFilterer;
};
diff --git a/src/mongo/db/fle_crud.cpp b/src/mongo/db/fle_crud.cpp
index 2525091cb30..c9ee8496652 100644
--- a/src/mongo/db/fle_crud.cpp
+++ b/src/mongo/db/fle_crud.cpp
@@ -1490,7 +1490,6 @@ std::vector<BSONObj> FLEQueryInterfaceImpl::findDocuments(const NamespaceString&
BSONObj filter) {
FindCommandRequest find(nss);
find.setFilter(filter);
- find.setSingleBatch(true);
// Throws on error
return _txnClient.exhaustiveFind(find).get();
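Dropping setSingleBatch(true) is presumably what lets exhaustiveFind() live up to its name: with a single-batch request the server closes the cursor after the first batch, whereas exhaustiveFind() is meant to drain it. An illustrative caller, with a made-up filter:

    FindCommandRequest find(nss);
    find.setFilter(BSON("fieldA" << 1));  // hypothetical filter
    // exhaustiveFind() follows the cursor to completion, returning every match.
    std::vector<BSONObj> docs = _txnClient.exhaustiveFind(find).get();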
diff --git a/src/mongo/db/geo/geoparser.cpp b/src/mongo/db/geo/geoparser.cpp
index 57e2fbee611..893d7832b18 100644
--- a/src/mongo/db/geo/geoparser.cpp
+++ b/src/mongo/db/geo/geoparser.cpp
@@ -52,16 +52,21 @@ namespace mongo {
namespace dps = ::mongo::dotted_path_support;
static Status parseFlatPoint(const BSONElement& elem, Point* out, bool allowAddlFields = false) {
- if (!elem.isABSONObj())
- return BAD_VALUE("Point must be an array or object");
+ if (!elem.isABSONObj()) {
+ return BAD_VALUE("Point must be an array or object, instead got type "
+ << typeName(elem.type()));
+ }
+
BSONObjIterator it(elem.Obj());
BSONElement x = it.next();
if (!x.isNumber()) {
- return BAD_VALUE("Point must only contain numeric elements");
+ return BAD_VALUE("Point must only contain numeric elements, instead got type "
+ << typeName(x.type()));
}
BSONElement y = it.next();
if (!y.isNumber()) {
- return BAD_VALUE("Point must only contain numeric elements");
+ return BAD_VALUE("Point must only contain numeric elements, instead got type "
+ << typeName(y.type()));
}
if (!allowAddlFields && it.more()) {
return BAD_VALUE("Point must only contain two numeric elements");
@@ -86,7 +91,7 @@ static Status coordToPoint(double lng, double lat, S2Point* out) {
// We don't rely on drem to clean up non-sane points. We just don't let them become
// spherical.
if (!isValidLngLat(lng, lat))
- return BAD_VALUE("longitude/latitude is out of bounds, lng: " << lng << " lat: " << lat);
+ return BAD_VALUE("Longitude/latitude is out of bounds, lng: " << lng << " lat: " << lat);
// Note that it's (lat, lng) for S2 but (lng, lat) for MongoDB.
S2LatLng ll = S2LatLng::FromDegrees(lat, lng).Normalized();
// This shouldn't happen since we should only have valid lng/lats.
@@ -101,7 +106,8 @@ static Status coordToPoint(double lng, double lat, S2Point* out) {
static Status parseGeoJSONCoordinate(const BSONElement& elem, S2Point* out) {
if (Array != elem.type()) {
- return BAD_VALUE("GeoJSON coordinates must be an array");
+ return BAD_VALUE("GeoJSON coordinates must be an array, instead got type "
+ << typeName(elem.type()));
}
Point p;
// GeoJSON allows extra elements, e.g. altitude.
@@ -116,7 +122,8 @@ static Status parseGeoJSONCoordinate(const BSONElement& elem, S2Point* out) {
// "coordinates": [ [100.0, 0.0], [101.0, 1.0] ]
static Status parseArrayOfCoordinates(const BSONElement& elem, vector<S2Point>* out) {
if (Array != elem.type()) {
- return BAD_VALUE("GeoJSON coordinates must be an array of coordinates");
+ return BAD_VALUE("GeoJSON coordinates must be an array of coordinates, instead got type "
+ << typeName(elem.type()));
}
BSONObjIterator it(elem.Obj());
// Iterate all coordinates in array
@@ -146,7 +153,8 @@ static Status isLoopClosed(const vector<S2Point>& loop, const BSONElement loopEl
}
if (loop[0] != loop[loop.size() - 1]) {
- return BAD_VALUE("Loop is not closed: " << loopElt.toString(false));
+ return BAD_VALUE("Loop is not closed, first vertex does not equal last vertex: "
+ << loopElt.toString(false));
}
return Status::OK();
@@ -156,7 +164,8 @@ static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem,
bool skipValidation,
S2Polygon* out) {
if (Array != elem.type()) {
- return BAD_VALUE("Polygon coordinates must be an array");
+ return BAD_VALUE("Polygon coordinates must be an array, instead got type "
+ << typeName(elem.type()));
}
std::vector<std::unique_ptr<S2Loop>> loops;
@@ -184,8 +193,9 @@ static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem,
// At least 3 vertices.
if (points.size() < 3) {
- return BAD_VALUE(
- "Loop must have at least 3 different vertices: " << coordinateElt.toString(false));
+ return BAD_VALUE("Loop must have at least 3 different vertices, "
+ << points.size() << " unique vertices were provided: "
+ << coordinateElt.toString(false));
}
loops.push_back(std::make_unique<S2Loop>(points));
@@ -266,15 +276,17 @@ static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem,
}
static Status parseBigSimplePolygonCoordinates(const BSONElement& elem, BigSimplePolygon* out) {
- if (Array != elem.type())
- return BAD_VALUE("Coordinates of polygon must be an array");
+ if (Array != elem.type()) {
+ return BAD_VALUE("Coordinates of polygon must be an array, instead got type "
+ << typeName(elem.type()));
+ }
const vector<BSONElement>& coordinates = elem.Array();
// Only one loop is allowed in a BigSimplePolygon
if (coordinates.size() != 1) {
- return BAD_VALUE(
- "Only one simple loop is allowed in a big polygon: " << elem.toString(false));
+ return BAD_VALUE("Only one simple loop is allowed in a big polygon, instead provided "
+ << coordinates.size() << " loops: " << elem.toString(false));
}
vector<S2Point> exteriorVertices;
@@ -297,7 +309,9 @@ static Status parseBigSimplePolygonCoordinates(const BSONElement& elem, BigSimpl
// At least 3 vertices.
if (exteriorVertices.size() < 3) {
- return BAD_VALUE("Loop must have at least 3 different vertices: " << elem.toString(false));
+ return BAD_VALUE("Loop must have at least 3 different vertices, "
+ << exteriorVertices.size()
+ << " unique vertices were provided: " << elem.toString(false));
}
std::unique_ptr<S2Loop> loop(new S2Loop(exteriorVertices));
@@ -326,8 +340,10 @@ static Status parseGeoJSONCRS(const BSONObj& obj, CRS* crs, bool allowStrictSphe
return Status::OK();
}
- if (!crsElt.isABSONObj())
- return BAD_VALUE("GeoJSON CRS must be an object");
+ if (!crsElt.isABSONObj()) {
+ return BAD_VALUE("GeoJSON CRS must be an object, instead got type "
+ << typeName(crsElt.type()));
+ }
BSONObj crsObj = crsElt.embeddedObject();
// "type": "name"
@@ -336,17 +352,22 @@ static Status parseGeoJSONCRS(const BSONObj& obj, CRS* crs, bool allowStrictSphe
// "properties"
BSONElement propertiesElt = crsObj["properties"];
- if (!propertiesElt.isABSONObj())
- return BAD_VALUE("CRS must have field \"properties\" which is an object");
+ if (!propertiesElt.isABSONObj()) {
+ return BAD_VALUE("CRS must have field \"properties\" which is an object, instead got type "
+ << typeName(propertiesElt.type()));
+ }
BSONObj propertiesObj = propertiesElt.embeddedObject();
- if (String != propertiesObj["name"].type())
- return BAD_VALUE("In CRS, \"properties.name\" must be a string");
+ if (String != propertiesObj["name"].type()) {
+ return BAD_VALUE("In CRS, \"properties.name\" must be a string, instead got type "
+ << typeName(propertiesObj["name"].type()));
+ }
+
const string& name = propertiesObj["name"].String();
if (CRS_CRS84 == name || CRS_EPSG_4326 == name) {
*crs = SPHERE;
} else if (CRS_STRICT_WINDING == name) {
if (!allowStrictSphere) {
- return BAD_VALUE("Strict winding order is only supported by polygon");
+ return BAD_VALUE("Strict winding order CRS is only supported by polygon");
}
*crs = STRICT_SPHERE;
} else {
@@ -369,8 +390,8 @@ static Status parseGeoJSONLineCoordinates(const BSONElement& elem,
eraseDuplicatePoints(&vertices);
if (!skipValidation) {
if (vertices.size() < 2)
- return BAD_VALUE(
- "GeoJSON LineString must have at least 2 vertices: " << elem.toString(false));
+ return BAD_VALUE("GeoJSON LineString must have at least 2 vertices, instead got "
+ << vertices.size() << " vertices: " << elem.toString(false));
string err;
if (!S2Polyline::IsValid(vertices, &err))
@@ -384,9 +405,10 @@ static Status parseGeoJSONLineCoordinates(const BSONElement& elem,
// Parse legacy point or GeoJSON point, used by geo near.
// Only stored legacy points allow additional fields.
Status parsePoint(const BSONElement& elem, PointWithCRS* out, bool allowAddlFields) {
- if (!elem.isABSONObj())
- return BAD_VALUE("Point must be an array or object");
-
+ if (!elem.isABSONObj()) {
+ return BAD_VALUE("Point must be an array or object, instead got type "
+ << typeName(elem.type()));
+ }
BSONObj obj = elem.Obj();
// location: [1, 2] or location: {x: 1, y:2}
if (Array == elem.type() || obj.firstElement().isNumber()) {
@@ -439,7 +461,8 @@ Status GeoParser::parseLegacyPolygon(const BSONObj& obj, PolygonWithCRS* out) {
points.push_back(p);
}
if (points.size() < 3)
- return BAD_VALUE("Polygon must have at least 3 points");
+ return BAD_VALUE("Polygon must have at least 3 points, instead got " << points.size()
+ << " vertices");
out->oldPolygon.init(points);
out->crs = FLAT;
return Status::OK();
@@ -461,7 +484,7 @@ Status GeoParser::parseGeoJSONPoint(const BSONObj& obj, PointWithCRS* out) {
// Projection
out->crs = FLAT;
if (!ShapeProjection::supportsProject(*out, SPHERE))
- return BAD_VALUE("longitude/latitude is out of bounds, lng: " << out->oldPoint.x << " lat: "
+ return BAD_VALUE("Longitude/latitude is out of bounds, lng: " << out->oldPoint.x << " lat: "
<< out->oldPoint.y);
ShapeProjection::projectInto(out, SPHERE);
return Status::OK();
@@ -534,8 +557,11 @@ Status GeoParser::parseMultiLine(const BSONObj& obj, bool skipValidation, MultiL
return status;
BSONElement coordElt = dps::extractElementAtPath(obj, GEOJSON_COORDINATES);
- if (Array != coordElt.type())
- return BAD_VALUE("MultiLineString coordinates must be an array");
+ if (Array != coordElt.type()) {
+ return BAD_VALUE("MultiLineString coordinates must be an array, instead got type "
+ << typeName(coordElt.type()));
+ }
+
out->lines.clear();
auto& lines = out->lines;
@@ -564,9 +590,10 @@ Status GeoParser::parseMultiPolygon(const BSONObj& obj,
return status;
BSONElement coordElt = dps::extractElementAtPath(obj, GEOJSON_COORDINATES);
- if (Array != coordElt.type())
- return BAD_VALUE("MultiPolygon coordinates must be an array");
-
+ if (Array != coordElt.type()) {
+ return BAD_VALUE("MultiPolygon coordinates must be an array, instead got type "
+ << typeName(coordElt.type()));
+ }
out->polygons.clear();
auto& polygons = out->polygons;
@@ -597,11 +624,11 @@ Status GeoParser::parseLegacyCenter(const BSONObj& obj, CapWithCRS* out) {
BSONElement radius = objIt.next();
// radius >= 0 and is not NaN
if (!radius.isNumber() || !(radius.number() >= 0))
- return BAD_VALUE("radius must be a non-negative number");
+ return BAD_VALUE("Radius must be a non-negative number: " << radius.toString(false));
// No more
if (objIt.more())
- return BAD_VALUE("Only 2 fields allowed for circular region");
+ return BAD_VALUE("Only 2 fields allowed for circular region, but more were provided");
out->circle.radius = radius.number();
out->crs = FLAT;
@@ -627,13 +654,15 @@ Status GeoParser::parseCenterSphere(const BSONObj& obj, CapWithCRS* out) {
// Radius
BSONElement radiusElt = objIt.next();
// radius >= 0 and is not NaN
- if (!radiusElt.isNumber() || !(radiusElt.number() >= 0))
- return BAD_VALUE("radius must be a non-negative number");
+ if (!radiusElt.isNumber() || !(radiusElt.number() >= 0)) {
+ return BAD_VALUE("Radius must be a non-negative number: " << radiusElt.toString(false));
+ }
+
double radius = radiusElt.number();
// No more elements
if (objIt.more())
- return BAD_VALUE("Only 2 fields allowed for circular region");
+ return BAD_VALUE("Only 2 fields allowed for circular region, but more were provided");
out->cap = S2Cap::FromAxisAngle(centerPoint, S1Angle::Radians(radius));
out->circle.radius = radius;
@@ -656,16 +685,20 @@ Status GeoParser::parseGeometryCollection(const BSONObj& obj,
bool skipValidation,
GeometryCollection* out) {
BSONElement coordElt = dps::extractElementAtPath(obj, GEOJSON_GEOMETRIES);
- if (Array != coordElt.type())
- return BAD_VALUE("GeometryCollection geometries must be an array");
-
+ if (Array != coordElt.type()) {
+ return BAD_VALUE("GeometryCollection geometries must be an array, instead got type "
+ << typeName(coordElt.type()));
+ }
const vector<BSONElement>& geometries = coordElt.Array();
if (0 == geometries.size())
return BAD_VALUE("GeometryCollection geometries must have at least 1 element");
for (size_t i = 0; i < geometries.size(); ++i) {
if (Object != geometries[i].type())
- return BAD_VALUE("Element " << i << " of \"geometries\" is not an object");
+ return BAD_VALUE("Element " << i
+ << " of \"geometries\" must be an object, instead got type "
+ << typeName(geometries[i].type()) << ": "
+ << geometries[i].toString(false));
const BSONObj& geoObj = geometries[i].Obj();
GeoJSONType type = parseGeoJSONType(geoObj);
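
Note: the geoparser changes above follow one pattern; every early return now reports the offending BSON type (via typeName) and, where helpful, the offending count or value, so a failed shape parse tells the user what was actually supplied. Below is a minimal, self-contained sketch of that error-reporting style. The Value type and typeName helper here are stand-ins for illustration, not MongoDB's BSONElement API.

#include <iostream>
#include <string>
#include <variant>

// Stand-in for a dynamically typed field value.
using Value = std::variant<double, std::string, bool>;

// Stand-in for typeName(elem.type()).
std::string typeName(const Value& v) {
    switch (v.index()) {
        case 0: return "double";
        case 1: return "string";
        default: return "bool";
    }
}

// Returns an empty string on success (standing in for Status::OK()), or an
// error message that names the actual type, mirroring the pattern above.
std::string parsePointComponent(const Value& v, double* out) {
    if (!std::holds_alternative<double>(v)) {
        return "Point must only contain numeric elements, instead got type " + typeName(v);
    }
    *out = std::get<double>(v);
    return "";
}

int main() {
    double x;
    std::cout << parsePointComponent(Value{std::string{"40"}}, &x) << "\n";
}
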
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index dd17cf16877..633ab3ce8ce 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -269,6 +269,7 @@ bool NamespaceString::mustBeAppliedInOwnOplogBatch() const {
return isSystemDotViews() || isServerConfigurationCollection() || isPrivilegeCollection() ||
_ns == kDonorReshardingOperationsNamespace.ns() ||
_ns == kForceOplogBatchBoundaryNamespace.ns() ||
+ _ns == kTenantMigrationDonorsNamespace.ns() ||
_ns == kTenantMigrationRecipientsNamespace.ns() || _ns == kConfigsvrShardsNamespace.ns();
}
diff --git a/src/mongo/db/ops/SConscript b/src/mongo/db/ops/SConscript
index 0b736897acc..983698e5060 100644
--- a/src/mongo/db/ops/SConscript
+++ b/src/mongo/db/ops/SConscript
@@ -34,7 +34,6 @@ env.Library(
env.Library(
target='write_ops_parsers',
source=[
- 'new_write_error_exception_format_feature_flag.idl',
'write_ops.cpp',
'write_ops.idl',
],
diff --git a/src/mongo/db/ops/write_ops.cpp b/src/mongo/db/ops/write_ops.cpp
index 54cef4d3d2a..92d0478a541 100644
--- a/src/mongo/db/ops/write_ops.cpp
+++ b/src/mongo/db/ops/write_ops.cpp
@@ -30,7 +30,6 @@
#include "mongo/db/ops/write_ops.h"
#include "mongo/db/dbmessage.h"
-#include "mongo/db/ops/new_write_error_exception_format_feature_flag_gen.h"
#include "mongo/db/pipeline/aggregation_request_helper.h"
#include "mongo/db/update/update_oplog_entry_serialization.h"
#include "mongo/db/update/update_oplog_entry_version.h"
@@ -295,18 +294,6 @@ WriteError WriteError::parse(const BSONObj& obj) {
auto code = ErrorCodes::Error(obj[WriteError::kCodeFieldName].Int());
auto errmsg = obj[WriteError::kErrmsgFieldName].valueStringDataSafe();
- // At least up to FCV 5.x, the write commands operation used to convert StaleConfig errors
- // into StaleShardVersion and store the extra info of StaleConfig in a sub-field called
- // "errInfo".
- //
- // TODO (SERVER-64449): This special parsing should be removed in the stable version
- // following the resolution of this ticket.
- if (code == ErrorCodes::OBSOLETE_StaleShardVersion) {
- return Status(ErrorCodes::StaleConfig,
- std::move(errmsg),
- obj[WriteError::kErrInfoFieldName].Obj());
- }
-
// All remaining errors have the error stored at the same level as the code and errmsg (in
// the same way that Status is serialised as part of regular command response)
return Status(code, std::move(errmsg), obj);
@@ -319,28 +306,10 @@ BSONObj WriteError::serialize() const {
BSONObjBuilder errBuilder;
errBuilder.append(WriteError::kIndexFieldName, _index);
- // At least up to FCV 5.x, the write commands operation used to convert StaleConfig errors into
- // StaleShardVersion and store the extra info of StaleConfig in a sub-field called "errInfo".
- // This logic preserves this for backwards compatibility.
- //
- // TODO (SERVER-64449): This special serialisation should be removed in the stable version
- // following the resolution of this ticket.
- if (_status == ErrorCodes::StaleConfig &&
- !feature_flags::gFeatureFlagNewWriteErrorExceptionFormat.isEnabled(
- serverGlobalParams.featureCompatibility)) {
- errBuilder.append(WriteError::kCodeFieldName,
- int32_t(ErrorCodes::OBSOLETE_StaleShardVersion));
- errBuilder.append(WriteError::kErrmsgFieldName, _status.reason());
- auto extraInfo = _status.extraInfo();
- invariant(extraInfo);
- BSONObjBuilder extraInfoBuilder(errBuilder.subobjStart(WriteError::kErrInfoFieldName));
- extraInfo->serialize(&extraInfoBuilder);
- } else {
- errBuilder.append(WriteError::kCodeFieldName, int32_t(_status.code()));
- errBuilder.append(WriteError::kErrmsgFieldName, _status.reason());
- if (auto extraInfo = _status.extraInfo()) {
- extraInfo->serialize(&errBuilder);
- }
+ errBuilder.append(WriteError::kCodeFieldName, int32_t(_status.code()));
+ errBuilder.append(WriteError::kErrmsgFieldName, _status.reason());
+ if (auto extraInfo = _status.extraInfo()) {
+ extraInfo->serialize(&errBuilder);
}
return errBuilder.obj();
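
Note: with the feature-flag-gated compatibility branch removed, WriteError::serialize always emits the error as { index, code, errmsg, <extra info fields at the same level> }, matching how Status is serialized in regular command responses, and WriteError::parse drops the corresponding OBSOLETE_StaleShardVersion/errInfo special case. The now-unused new_write_error_exception_format_feature_flag.idl is likewise dropped from the write_ops_parsers library above.
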
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 33d1cd596b8..8e02cf04ec7 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -452,8 +452,13 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
opCtx,
wholeOp.getNamespace(),
fixLockModeForSystemDotViewsChanges(wholeOp.getNamespace(), MODE_IX));
- if (*collection)
+ checkCollectionUUIDMismatch(opCtx,
+ wholeOp.getNamespace(),
+ collection->getCollection(),
+ wholeOp.getCollectionUUID());
+ if (*collection) {
break;
+ }
if (source == OperationSource::kTimeseriesInsert) {
assertTimeseriesBucketsCollectionNotFound(wholeOp.getNamespace());
@@ -499,11 +504,6 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
if (shouldProceedWithBatchInsert) {
try {
if (!collection->getCollection()->isCapped() && !inTxn && batch.size() > 1) {
- checkCollectionUUIDMismatch(opCtx,
- wholeOp.getNamespace(),
- collection->getCollection(),
- wholeOp.getCollectionUUID());
-
// First try doing it all together. If all goes well, this is all we need to do.
// See Collection::_insertDocuments for why we do all capped inserts one-at-a-time.
lastOpFixer->startingOp();
@@ -546,10 +546,6 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
// Transactions are not allowed to operate on capped collections.
uassertStatusOK(
checkIfTransactionOnCappedColl(opCtx, collection->getCollection()));
- checkCollectionUUIDMismatch(opCtx,
- wholeOp.getNamespace(),
- collection->getCollection(),
- wholeOp.getCollectionUUID());
lastOpFixer->startingOp();
insertDocuments(opCtx,
collection->getCollection(),
@@ -798,6 +794,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
boost::optional<AutoGetCollection> collection;
while (true) {
collection.emplace(opCtx, ns, fixLockModeForSystemDotViewsChanges(ns, MODE_IX));
+ checkCollectionUUIDMismatch(opCtx, ns, collection->getCollection(), opCollectionUUID);
if (*collection) {
break;
}
@@ -862,8 +859,6 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
uassertStatusOK(checkIfTransactionOnCappedColl(opCtx, coll));
}
- checkCollectionUUIDMismatch(opCtx, ns, collection->getCollection(), opCollectionUUID);
-
const ExtensionsCallbackReal extensionsCallback(opCtx, &updateRequest->getNamespaceString());
ParsedUpdate parsedUpdate(opCtx, updateRequest, extensionsCallback, forgoOpCounterIncrements);
uassertStatusOK(parsedUpdate.parseRequest());
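
Note: in write_ops_exec.cpp the checkCollectionUUIDMismatch call now runs immediately after each AutoGetCollection acquisition, before the code even checks whether the collection exists, both in insertBatchAndHandleErrors and in performSingleUpdateOp. The calls that used to sit inside the batched insert, document-at-a-time insert, and update paths are removed, so a request carrying collectionUUID reports a mismatch regardless of which execution path is taken or whether the target namespace exists.
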
diff --git a/src/mongo/db/pipeline/abt/pipeline_test.cpp b/src/mongo/db/pipeline/abt/pipeline_test.cpp
index 34f9d486d32..694047d6683 100644
--- a/src/mongo/db/pipeline/abt/pipeline_test.cpp
+++ b/src/mongo/db/pipeline/abt/pipeline_test.cpp
@@ -2326,13 +2326,15 @@ TEST(ABTTranslate, PartialIndex) {
// The expression matches the pipeline.
// By default the constant is translated as "int32".
- auto conversionResult = convertExprToPartialSchemaReq(make<EvalFilter>(
- make<PathGet>("b",
- make<PathTraverse>(make<PathCompare>(Operations::Eq, Constant::int32(2)))),
- make<Variable>(scanProjName)));
- ASSERT_TRUE(conversionResult._success);
- ASSERT_FALSE(conversionResult._hasEmptyInterval);
- ASSERT_FALSE(conversionResult._retainPredicate);
+ auto conversionResult = convertExprToPartialSchemaReq(
+ make<EvalFilter>(
+ make<PathGet>(
+ "b", make<PathTraverse>(make<PathCompare>(Operations::Eq, Constant::int32(2)))),
+ make<Variable>(scanProjName)),
+ true /*isFilterContext*/);
+ ASSERT_TRUE(conversionResult.has_value());
+ ASSERT_FALSE(conversionResult->_hasEmptyInterval);
+ ASSERT_FALSE(conversionResult->_retainPredicate);
Metadata metadata = {
{{scanDefName,
@@ -2341,7 +2343,7 @@ TEST(ABTTranslate, PartialIndex) {
IndexDefinition{{{makeIndexPath("a"), CollationOp::Ascending}},
true /*multiKey*/,
{DistributionType::Centralized},
- std::move(conversionResult._reqMap)}}}}}}};
+ std::move(conversionResult->_reqMap)}}}}}}};
ABT translated = translatePipeline(
metadata, "[{$match: {'a': 3, 'b': 2}}]", scanProjName, scanDefName, prefixId);
@@ -2394,13 +2396,15 @@ TEST(ABTTranslate, PartialIndexNegative) {
ProjectionName scanProjName = prefixId.getNextId("scan");
// The expression does not match the pipeline.
- auto conversionResult = convertExprToPartialSchemaReq(make<EvalFilter>(
- make<PathGet>("b",
- make<PathTraverse>(make<PathCompare>(Operations::Eq, Constant::int32(2)))),
- make<Variable>(scanProjName)));
- ASSERT_TRUE(conversionResult._success);
- ASSERT_FALSE(conversionResult._hasEmptyInterval);
- ASSERT_FALSE(conversionResult._retainPredicate);
+ auto conversionResult = convertExprToPartialSchemaReq(
+ make<EvalFilter>(
+ make<PathGet>(
+ "b", make<PathTraverse>(make<PathCompare>(Operations::Eq, Constant::int32(2)))),
+ make<Variable>(scanProjName)),
+ true /*isFilterContext*/);
+ ASSERT_TRUE(conversionResult.has_value());
+ ASSERT_FALSE(conversionResult->_hasEmptyInterval);
+ ASSERT_FALSE(conversionResult->_retainPredicate);
Metadata metadata = {
{{scanDefName,
@@ -2409,7 +2413,7 @@ TEST(ABTTranslate, PartialIndexNegative) {
IndexDefinition{{{makeIndexPath("a"), CollationOp::Ascending}},
true /*multiKey*/,
{DistributionType::Centralized},
- std::move(conversionResult._reqMap)}}}}}}};
+ std::move(conversionResult->_reqMap)}}}}}}};
ABT translated = translatePipeline(
metadata, "[{$match: {'a': 3, 'b': 3}}]", scanProjName, scanDefName, prefixId);
diff --git a/src/mongo/db/pipeline/aggregation_context_fixture.h b/src/mongo/db/pipeline/aggregation_context_fixture.h
index 76cc01a40c4..e7595382094 100644
--- a/src/mongo/db/pipeline/aggregation_context_fixture.h
+++ b/src/mongo/db/pipeline/aggregation_context_fixture.h
@@ -33,6 +33,7 @@
#include <memory>
#include "mongo/db/concurrency/locker_noop_client_observer.h"
+#include "mongo/db/pipeline/document_source.h"
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/unittest/temp_dir.h"
@@ -76,6 +77,14 @@ private:
boost::intrusive_ptr<ExpressionContextForTest> _expCtx;
};
+// A custom deleter which disposes of a DocumentSource before deleting it, once the owning pointer goes out of scope.
+struct DocumentSourceDeleter {
+ void operator()(DocumentSource* docSource) {
+ docSource->dispose();
+ delete docSource;
+ }
+};
+
class ServerlessAggregationContextFixture : public AggregationContextFixture {
public:
ServerlessAggregationContextFixture()
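
Note: the DocumentSourceDeleter above lets test fixtures hand out smart-pointer-owned stages whose dispose() is guaranteed to run, which is what removes the manual dispose() calls in the $lookup and $unionWith tests later in this diff. Below is a minimal, self-contained sketch of the pattern; Widget and its dispose() are stand-ins, not MongoDB types.

#include <iostream>
#include <memory>

struct Widget {
    void dispose() { std::cout << "disposed\n"; }
};

// Custom deleter: run the cleanup hook, then free the object.
struct WidgetDeleter {
    void operator()(Widget* w) const {
        w->dispose();
        delete w;
    }
};

int main() {
    std::unique_ptr<Widget, WidgetDeleter> w(new Widget(), WidgetDeleter());
    // dispose() and delete run automatically when 'w' goes out of scope,
    // so callers no longer need to remember a manual dispose() call.
}
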
diff --git a/src/mongo/db/pipeline/change_stream_pre_image_helpers.cpp b/src/mongo/db/pipeline/change_stream_pre_image_helpers.cpp
index f153a30818f..e4cbb6032ae 100644
--- a/src/mongo/db/pipeline/change_stream_pre_image_helpers.cpp
+++ b/src/mongo/db/pipeline/change_stream_pre_image_helpers.cpp
@@ -32,35 +32,49 @@
#include "mongo/db/pipeline/change_stream_pre_image_helpers.h"
+#include "mongo/base/error_codes.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/concurrency/lock_manager_defs.h"
#include "mongo/db/concurrency/locker.h"
-#include "mongo/db/dbhelpers.h"
+#include "mongo/db/curop.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context.h"
#include "mongo/util/assert_util.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
-
namespace mongo {
void writeToChangeStreamPreImagesCollection(OperationContext* opCtx,
const ChangeStreamPreImage& preImage) {
- const auto collectionNamespace = NamespaceString::kChangeStreamPreImagesNamespace;
+ tassert(6646200,
+ "Expected to be executed in a write unit of work",
+ opCtx->lockState()->inAWriteUnitOfWork());
tassert(5869404,
str::stream() << "Invalid pre-image document applyOpsIndex: "
<< preImage.getId().getApplyOpsIndex(),
preImage.getId().getApplyOpsIndex() >= 0);
- // This lock acquisition can block on a stronger lock held by another operation modifying the
- // pre-images collection. There are no known cases where an operation holding an exclusive lock
- // on the pre-images collection also waits for oplog visibility.
+ // This lock acquisition can block on a stronger lock held by another operation modifying
+ // the pre-images collection. There are no known cases where an operation holding an
+ // exclusive lock on the pre-images collection also waits for oplog visibility.
AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState());
- AutoGetCollection preimagesCollectionRaii(opCtx, collectionNamespace, LockMode::MODE_IX);
- UpdateResult res = Helpers::upsert(opCtx, collectionNamespace.toString(), preImage.toBSON());
+ AutoGetCollection preImagesCollectionRaii(
+ opCtx, NamespaceString::kChangeStreamPreImagesNamespace, LockMode::MODE_IX);
+ auto& changeStreamPreImagesCollection = preImagesCollectionRaii.getCollection();
+ tassert(6646201,
+ "The change stream pre-images collection is not present",
+ changeStreamPreImagesCollection);
+
+ // Inserts into the change stream pre-images collection are not replicated.
+ repl::UnreplicatedWritesBlock unreplicatedWritesBlock{opCtx};
+ const auto insertionStatus = changeStreamPreImagesCollection->insertDocument(
+ opCtx, InsertStatement{preImage.toBSON()}, &CurOp::get(opCtx)->debug());
tassert(5868601,
- str::stream() << "Failed to insert a new document into the pre-images collection: ts: "
- << preImage.getId().getTs().toString()
- << ", applyOpsIndex: " << preImage.getId().getApplyOpsIndex(),
- !res.existing && !res.upsertedId.isEmpty());
+ str::stream() << "Attempted to insert a duplicate document into the pre-images "
+ "collection. Pre-image id: "
+ << preImage.getId().toBSON().toString(),
+ insertionStatus != ErrorCodes::DuplicateKey);
+ uassertStatusOK(insertionStatus);
}
} // namespace mongo
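
Note: writeToChangeStreamPreImagesCollection now tasserts that it is already inside a write unit of work, takes the pre-images collection in MODE_IX and tasserts that it exists, and inserts the pre-image document directly with replication suppressed via UnreplicatedWritesBlock, instead of routing the write through Helpers::upsert. A DuplicateKey result trips tassert 5868601, since each pre-image id is expected to be unique, and any other failure surfaces through uassertStatusOK.
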
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index b98af917d99..c992288a0e4 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/pipeline/document_source_cursor.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/db_raii.h"
#include "mongo/db/exec/document_value/document.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/query/collection_query_info.h"
@@ -225,15 +226,20 @@ Value DocumentSourceCursor::serialize(boost::optional<ExplainOptions::Verbosity>
{
auto opCtx = pExpCtx->opCtx;
- auto lockMode = getLockModeForQuery(opCtx, _exec->nss());
- AutoGetDb dbLock(opCtx, _exec->nss().db(), lockMode);
- Lock::CollectionLock collLock(opCtx, _exec->nss(), lockMode);
- auto collection = dbLock.getDb()
- ? CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, _exec->nss())
- : nullptr;
+ auto secondaryNssList = _exec->getSecondaryNamespaces();
+ AutoGetCollectionForReadMaybeLockFree readLock(opCtx,
+ _exec->nss(),
+ AutoGetCollectionViewMode::kViewsForbidden,
+ Date_t::max(),
+ secondaryNssList);
+ MultipleCollectionAccessor collections(opCtx,
+ &readLock.getCollection(),
+ readLock.getNss(),
+ readLock.isAnySecondaryNamespaceAViewOrSharded(),
+ secondaryNssList);
Explain::explainStages(_exec.get(),
- collection,
+ collections,
verbosity.get(),
_execStatus,
_winningPlanTrialStats,
diff --git a/src/mongo/db/pipeline/document_source_lookup_test.cpp b/src/mongo/db/pipeline/document_source_lookup_test.cpp
index 82a0b6bbd61..aae4d7beef5 100644
--- a/src/mongo/db/pipeline/document_source_lookup_test.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup_test.cpp
@@ -82,6 +82,13 @@ public:
}
};
+auto makeLookUpFromBson(BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& expCtx) {
+ auto docSource = DocumentSourceLookUp::createFromBson(elem, expCtx);
+ auto lookup = static_cast<DocumentSourceLookUp*>(docSource.detach());
+ return std::unique_ptr<DocumentSourceLookUp, DocumentSourceDeleter>(lookup,
+ DocumentSourceDeleter());
+}
+
// A 'let' variable defined in a $lookup stage is expected to be available to all sub-pipelines. For
// sub-pipelines below the immediate one, they are passed along via the ExpressionContext. This test
// confirms that variables defined in the ExpressionContext are captured by the $lookup stage.
@@ -869,9 +876,7 @@ TEST_F(DocumentSourceLookUpTest, ShouldPropagatePauses) {
{"foreignField", "_id"_sd},
{"as", "foreignDocs"_sd}}}}
.toBson();
- auto parsed = DocumentSourceLookUp::createFromBson(lookupSpec.firstElement(), expCtx);
- auto lookup = static_cast<DocumentSourceLookUp*>(parsed.get());
-
+ auto lookup = makeLookUpFromBson(lookupSpec.firstElement(), expCtx);
lookup->setSource(mockLocalSource.get());
auto next = lookup->getNext();
@@ -890,7 +895,6 @@ TEST_F(DocumentSourceLookUpTest, ShouldPropagatePauses) {
ASSERT_TRUE(lookup->getNext().isEOF());
ASSERT_TRUE(lookup->getNext().isEOF());
- lookup->dispose();
}
TEST_F(DocumentSourceLookUpTest, ShouldPropagatePausesWhileUnwinding) {
@@ -905,6 +909,14 @@ TEST_F(DocumentSourceLookUpTest, ShouldPropagatePausesWhileUnwinding) {
expCtx->mongoProcessInterface =
std::make_shared<MockMongoInterface>(std::move(mockForeignContents));
+ // Mock its input, pausing every other result.
+ auto mockLocalSource =
+ DocumentSourceMock::createForTest({Document{{"foreignId", 0}},
+ DocumentSource::GetNextResult::makePauseExecution(),
+ Document{{"foreignId", 1}},
+ DocumentSource::GetNextResult::makePauseExecution()},
+ expCtx);
+
// Set up the $lookup stage.
auto lookupSpec = Document{{"$lookup",
Document{{"from", fromNs.coll()},
@@ -912,21 +924,13 @@ TEST_F(DocumentSourceLookUpTest, ShouldPropagatePausesWhileUnwinding) {
{"foreignField", "_id"_sd},
{"as", "foreignDoc"_sd}}}}
.toBson();
- auto parsed = DocumentSourceLookUp::createFromBson(lookupSpec.firstElement(), expCtx);
- auto lookup = static_cast<DocumentSourceLookUp*>(parsed.get());
+ auto lookup = makeLookUpFromBson(lookupSpec.firstElement(), expCtx);
const bool preserveNullAndEmptyArrays = false;
const boost::optional<std::string> includeArrayIndex = boost::none;
lookup->setUnwindStage(DocumentSourceUnwind::create(
expCtx, "foreignDoc", preserveNullAndEmptyArrays, includeArrayIndex));
- // Mock its input, pausing every other result.
- auto mockLocalSource =
- DocumentSourceMock::createForTest({Document{{"foreignId", 0}},
- DocumentSource::GetNextResult::makePauseExecution(),
- Document{{"foreignId", 1}},
- DocumentSource::GetNextResult::makePauseExecution()},
- expCtx);
lookup->setSource(mockLocalSource.get());
auto next = lookup->getNext();
@@ -945,7 +949,6 @@ TEST_F(DocumentSourceLookUpTest, ShouldPropagatePausesWhileUnwinding) {
ASSERT_TRUE(lookup->getNext().isEOF());
ASSERT_TRUE(lookup->getNext().isEOF());
- lookup->dispose();
}
TEST_F(DocumentSourceLookUpTest, LookupReportsAsFieldIsModified) {
@@ -961,14 +964,12 @@ TEST_F(DocumentSourceLookUpTest, LookupReportsAsFieldIsModified) {
{"foreignField", "_id"_sd},
{"as", "foreignDocs"_sd}}}}
.toBson();
- auto parsed = DocumentSourceLookUp::createFromBson(lookupSpec.firstElement(), expCtx);
- auto lookup = static_cast<DocumentSourceLookUp*>(parsed.get());
+ auto lookup = makeLookUpFromBson(lookupSpec.firstElement(), expCtx);
auto modifiedPaths = lookup->getModifiedPaths();
ASSERT(modifiedPaths.type == DocumentSource::GetModPathsReturn::Type::kFiniteSet);
ASSERT_EQ(1U, modifiedPaths.paths.size());
ASSERT_EQ(1U, modifiedPaths.paths.count("foreignDocs"));
- lookup->dispose();
}
TEST_F(DocumentSourceLookUpTest, LookupReportsFieldsModifiedByAbsorbedUnwind) {
@@ -984,8 +985,7 @@ TEST_F(DocumentSourceLookUpTest, LookupReportsFieldsModifiedByAbsorbedUnwind) {
{"foreignField", "_id"_sd},
{"as", "foreignDoc"_sd}}}}
.toBson();
- auto parsed = DocumentSourceLookUp::createFromBson(lookupSpec.firstElement(), expCtx);
- auto lookup = static_cast<DocumentSourceLookUp*>(parsed.get());
+ auto lookup = makeLookUpFromBson(lookupSpec.firstElement(), expCtx);
const bool preserveNullAndEmptyArrays = false;
const boost::optional<std::string> includeArrayIndex = std::string("arrIndex");
@@ -997,7 +997,6 @@ TEST_F(DocumentSourceLookUpTest, LookupReportsFieldsModifiedByAbsorbedUnwind) {
ASSERT_EQ(2U, modifiedPaths.paths.size());
ASSERT_EQ(1U, modifiedPaths.paths.count("foreignDoc"));
ASSERT_EQ(1U, modifiedPaths.paths.count("arrIndex"));
- lookup->dispose();
}
BSONObj sequentialCacheStageObj(const StringData status = "kBuilding"_sd,
diff --git a/src/mongo/db/pipeline/document_source_union_with_test.cpp b/src/mongo/db/pipeline/document_source_union_with_test.cpp
index 04f440fa91a..05e0feb7baa 100644
--- a/src/mongo/db/pipeline/document_source_union_with_test.cpp
+++ b/src/mongo/db/pipeline/document_source_union_with_test.cpp
@@ -60,6 +60,19 @@ using MockMongoInterface = StubLookupSingleDocumentProcessInterface;
// This provides access to getExpCtx(), but we'll use a different name for this test suite.
using DocumentSourceUnionWithTest = AggregationContextFixture;
+auto makeUnion(const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ std::unique_ptr<Pipeline, PipelineDeleter> pipeline) {
+ return std::unique_ptr<DocumentSourceUnionWith, DocumentSourceDeleter>(
+ new DocumentSourceUnionWith(expCtx, std::move(pipeline)), DocumentSourceDeleter());
+}
+
+auto makeUnionFromBson(BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& expCtx) {
+ auto docSource = DocumentSourceUnionWith::createFromBson(elem, expCtx);
+ auto unionWith = static_cast<DocumentSourceUnionWith*>(docSource.detach());
+ return std::unique_ptr<DocumentSourceUnionWith, DocumentSourceDeleter>(unionWith,
+ DocumentSourceDeleter());
+}
+
TEST_F(DocumentSourceUnionWithTest, BasicSerialUnions) {
const auto docs = std::array{Document{{"a", 1}}, Document{{"b", 1}}, Document{{"c", 1}}};
const auto mock = DocumentSourceMock::createForTest(docs[0], getExpCtx());
@@ -69,19 +82,19 @@ TEST_F(DocumentSourceUnionWithTest, BasicSerialUnions) {
mockCtxOne->mongoProcessInterface = std::make_unique<MockMongoInterface>(mockDequeOne);
const auto mockCtxTwo = getExpCtx()->copyWith({});
mockCtxTwo->mongoProcessInterface = std::make_unique<MockMongoInterface>(mockDequeTwo);
- auto unionWithOne = DocumentSourceUnionWith(
- mockCtxOne,
- Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{}, getExpCtx()));
- auto unionWithTwo = DocumentSourceUnionWith(
- mockCtxTwo,
- Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{}, getExpCtx()));
- unionWithOne.setSource(mock.get());
- unionWithTwo.setSource(&unionWithOne);
+ auto unionWithOne =
+ makeUnion(mockCtxOne,
+ Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{}, getExpCtx()));
+ auto unionWithTwo =
+ makeUnion(mockCtxTwo,
+ Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{}, getExpCtx()));
+ unionWithOne->setSource(mock.get());
+ unionWithTwo->setSource(unionWithOne.get());
auto comparator = DocumentComparator();
auto results = comparator.makeUnorderedDocumentSet();
for (auto& doc [[maybe_unused]] : docs) {
- auto next = unionWithTwo.getNext();
+ auto next = unionWithTwo->getNext();
ASSERT_TRUE(next.isAdvanced());
const auto [ignored, inserted] = results.insert(next.releaseDocument());
ASSERT_TRUE(inserted);
@@ -89,12 +102,9 @@ TEST_F(DocumentSourceUnionWithTest, BasicSerialUnions) {
for (const auto& doc : docs)
ASSERT_TRUE(results.find(doc) != results.end());
- ASSERT_TRUE(unionWithTwo.getNext().isEOF());
- ASSERT_TRUE(unionWithTwo.getNext().isEOF());
- ASSERT_TRUE(unionWithTwo.getNext().isEOF());
-
- unionWithOne.dispose();
- unionWithTwo.dispose();
+ ASSERT_TRUE(unionWithTwo->getNext().isEOF());
+ ASSERT_TRUE(unionWithTwo->getNext().isEOF());
+ ASSERT_TRUE(unionWithTwo->getNext().isEOF());
}
TEST_F(DocumentSourceUnionWithTest, BasicNestedUnions) {
@@ -109,16 +119,16 @@ TEST_F(DocumentSourceUnionWithTest, BasicNestedUnions) {
auto unionWithOne = make_intrusive<DocumentSourceUnionWith>(
mockCtxOne,
Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{}, getExpCtx()));
- auto unionWithTwo = DocumentSourceUnionWith(
- mockCtxTwo,
- Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{unionWithOne},
- getExpCtx()));
- unionWithTwo.setSource(mock.get());
+ auto unionWithTwo =
+ makeUnion(mockCtxTwo,
+ Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{unionWithOne},
+ getExpCtx()));
+ unionWithTwo->setSource(mock.get());
auto comparator = DocumentComparator();
auto results = comparator.makeUnorderedDocumentSet();
for (auto& doc [[maybe_unused]] : docs) {
- auto next = unionWithTwo.getNext();
+ auto next = unionWithTwo->getNext();
ASSERT_TRUE(next.isAdvanced());
const auto [ignored, inserted] = results.insert(next.releaseDocument());
ASSERT_TRUE(inserted);
@@ -126,11 +136,9 @@ TEST_F(DocumentSourceUnionWithTest, BasicNestedUnions) {
for (const auto& doc : docs)
ASSERT_TRUE(results.find(doc) != results.end());
- ASSERT_TRUE(unionWithTwo.getNext().isEOF());
- ASSERT_TRUE(unionWithTwo.getNext().isEOF());
- ASSERT_TRUE(unionWithTwo.getNext().isEOF());
-
- unionWithTwo.dispose();
+ ASSERT_TRUE(unionWithTwo->getNext().isEOF());
+ ASSERT_TRUE(unionWithTwo->getNext().isEOF());
+ ASSERT_TRUE(unionWithTwo->getNext().isEOF());
}
TEST_F(DocumentSourceUnionWithTest, UnionsWithNonEmptySubPipelines) {
@@ -145,19 +153,19 @@ TEST_F(DocumentSourceUnionWithTest, UnionsWithNonEmptySubPipelines) {
mockCtxTwo->mongoProcessInterface = std::make_unique<MockMongoInterface>(mockDequeTwo);
const auto filter = DocumentSourceMatch::create(BSON("d" << 1), mockCtxOne);
const auto proj = DocumentSourceAddFields::create(BSON("d" << 1), mockCtxTwo);
- auto unionWithOne = DocumentSourceUnionWith(
+ auto unionWithOne = makeUnion(
mockCtxOne,
Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{filter}, getExpCtx()));
- auto unionWithTwo = DocumentSourceUnionWith(
+ auto unionWithTwo = makeUnion(
mockCtxTwo,
Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{proj}, getExpCtx()));
- unionWithOne.setSource(mock.get());
- unionWithTwo.setSource(&unionWithOne);
+ unionWithOne->setSource(mock.get());
+ unionWithTwo->setSource(unionWithOne.get());
auto comparator = DocumentComparator();
auto results = comparator.makeUnorderedDocumentSet();
for (auto& doc [[maybe_unused]] : outputDocs) {
- auto next = unionWithTwo.getNext();
+ auto next = unionWithTwo->getNext();
ASSERT_TRUE(next.isAdvanced());
const auto [ignored, inserted] = results.insert(next.releaseDocument());
ASSERT_TRUE(inserted);
@@ -165,12 +173,9 @@ TEST_F(DocumentSourceUnionWithTest, UnionsWithNonEmptySubPipelines) {
for (const auto& doc : outputDocs)
ASSERT_TRUE(results.find(doc) != results.end());
- ASSERT_TRUE(unionWithTwo.getNext().isEOF());
- ASSERT_TRUE(unionWithTwo.getNext().isEOF());
- ASSERT_TRUE(unionWithTwo.getNext().isEOF());
-
- unionWithOne.dispose();
- unionWithTwo.dispose();
+ ASSERT_TRUE(unionWithTwo->getNext().isEOF());
+ ASSERT_TRUE(unionWithTwo->getNext().isEOF());
+ ASSERT_TRUE(unionWithTwo->getNext().isEOF());
}
TEST_F(DocumentSourceUnionWithTest, SerializeAndParseWithPipeline) {
@@ -315,26 +320,23 @@ TEST_F(DocumentSourceUnionWithTest, PropagatePauses) {
mockCtxOne->mongoProcessInterface = std::make_unique<MockMongoInterface>(mockDequeOne);
const auto mockCtxTwo = getExpCtx()->copyWith({});
mockCtxTwo->mongoProcessInterface = std::make_unique<MockMongoInterface>(mockDequeTwo);
- auto unionWithOne = DocumentSourceUnionWith(
- mockCtxOne,
- Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{}, getExpCtx()));
- auto unionWithTwo = DocumentSourceUnionWith(
- mockCtxTwo,
- Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{}, getExpCtx()));
- unionWithOne.setSource(mock.get());
- unionWithTwo.setSource(&unionWithOne);
-
- ASSERT_TRUE(unionWithTwo.getNext().isAdvanced());
- ASSERT_TRUE(unionWithTwo.getNext().isPaused());
- ASSERT_TRUE(unionWithTwo.getNext().isAdvanced());
- ASSERT_TRUE(unionWithTwo.getNext().isPaused());
-
- ASSERT_TRUE(unionWithTwo.getNext().isEOF());
- ASSERT_TRUE(unionWithTwo.getNext().isEOF());
- ASSERT_TRUE(unionWithTwo.getNext().isEOF());
-
- unionWithOne.dispose();
- unionWithTwo.dispose();
+ auto unionWithOne =
+ makeUnion(mockCtxOne,
+ Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{}, getExpCtx()));
+ auto unionWithTwo =
+ makeUnion(mockCtxTwo,
+ Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{}, getExpCtx()));
+ unionWithOne->setSource(mock.get());
+ unionWithTwo->setSource(unionWithOne.get());
+
+ ASSERT_TRUE(unionWithTwo->getNext().isAdvanced());
+ ASSERT_TRUE(unionWithTwo->getNext().isPaused());
+ ASSERT_TRUE(unionWithTwo->getNext().isAdvanced());
+ ASSERT_TRUE(unionWithTwo->getNext().isPaused());
+
+ ASSERT_TRUE(unionWithTwo->getNext().isEOF());
+ ASSERT_TRUE(unionWithTwo->getNext().isEOF());
+ ASSERT_TRUE(unionWithTwo->getNext().isEOF());
}
TEST_F(DocumentSourceUnionWithTest, ReturnEOFAfterBeingDisposed) {
@@ -406,10 +408,10 @@ TEST_F(DocumentSourceUnionWithTest, RespectsViewDefinition) {
expCtx->mongoProcessInterface =
std::make_shared<MockMongoInterface>(std::move(mockForeignContents));
- auto bson = BSON("$unionWith" << nsToUnionWith.coll());
- auto unionWith = DocumentSourceUnionWith::createFromBson(bson.firstElement(), expCtx);
const auto localMock =
DocumentSourceMock::createForTest({Document{{"_id"_sd, "local"_sd}}}, getExpCtx());
+ auto bson = BSON("$unionWith" << nsToUnionWith.coll());
+ auto unionWith = makeUnionFromBson(bson.firstElement(), expCtx);
unionWith->setSource(localMock.get());
auto result = unionWith->getNext();
@@ -421,8 +423,6 @@ TEST_F(DocumentSourceUnionWithTest, RespectsViewDefinition) {
ASSERT_DOCUMENT_EQ(result.getDocument(), (Document{{"_id"_sd, 2}}));
ASSERT_TRUE(unionWith->getNext().isEOF());
-
- unionWith->dispose();
}
TEST_F(DocumentSourceUnionWithTest, ConcatenatesViewDefinitionToPipeline) {
@@ -445,7 +445,7 @@ TEST_F(DocumentSourceUnionWithTest, ConcatenatesViewDefinitionToPipeline) {
"coll" << viewNsToUnionWith.coll() << "pipeline"
<< BSON_ARRAY(fromjson(
"{$set: {originalId: '$_id', _id: {$add: [1, '$_id']}}}"))));
- auto unionWith = DocumentSourceUnionWith::createFromBson(bson.firstElement(), expCtx);
+ auto unionWith = makeUnionFromBson(bson.firstElement(), expCtx);
unionWith->setSource(localMock.get());
auto result = unionWith->getNext();
@@ -459,8 +459,6 @@ TEST_F(DocumentSourceUnionWithTest, ConcatenatesViewDefinitionToPipeline) {
ASSERT_DOCUMENT_EQ(result.getDocument(), (Document{{"_id"_sd, 3}, {"originalId"_sd, 2}}));
ASSERT_TRUE(unionWith->getNext().isEOF());
-
- unionWith->dispose();
}
TEST_F(DocumentSourceUnionWithTest, RejectUnionWhenDepthLimitIsExceeded) {
@@ -482,9 +480,9 @@ TEST_F(DocumentSourceUnionWithTest, RejectUnionWhenDepthLimitIsExceeded) {
}
TEST_F(DocumentSourceUnionWithTest, ConstraintsWithoutPipelineAreCorrect) {
- auto emptyUnion = DocumentSourceUnionWith(
- getExpCtx(),
- Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{}, getExpCtx()));
+ auto emptyUnion =
+ makeUnion(getExpCtx(),
+ Pipeline::create(std::list<boost::intrusive_ptr<DocumentSource>>{}, getExpCtx()));
StageConstraints defaultConstraints(StageConstraints::StreamType::kStreaming,
StageConstraints::PositionRequirement::kNone,
StageConstraints::HostTypeRequirement::kAnyShard,
@@ -493,9 +491,7 @@ TEST_F(DocumentSourceUnionWithTest, ConstraintsWithoutPipelineAreCorrect) {
StageConstraints::TransactionRequirement::kNotAllowed,
StageConstraints::LookupRequirement::kAllowed,
StageConstraints::UnionRequirement::kAllowed);
- ASSERT_TRUE(emptyUnion.constraints(Pipeline::SplitState::kUnsplit) == defaultConstraints);
-
- emptyUnion.dispose();
+ ASSERT_TRUE(emptyUnion->constraints(Pipeline::SplitState::kUnsplit) == defaultConstraints);
}
TEST_F(DocumentSourceUnionWithTest, ConstraintsWithMixedSubPipelineAreCorrect) {
diff --git a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
index e0ac0211ef2..2fecb18bebe 100644
--- a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
@@ -559,7 +559,8 @@ std::vector<BSONObj> CommonMongodProcessInterface::getMatchingPlanCacheEntryStat
collVersion = collQueryInfo.getPlanCacheInvalidatorVersion()](
const sbe::PlanCacheKey& key) {
// Only fetch plan cache entries with keys matching given UUID and collectionVersion.
- return uuid == key.getCollectionUuid() && collVersion == key.getCollectionVersion();
+ return uuid == key.getMainCollectionState().uuid &&
+ collVersion == key.getMainCollectionState().version;
};
auto planCacheEntriesSBE =
diff --git a/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp b/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp
index a0e9bd5e572..a8ca2a48896 100644
--- a/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp
+++ b/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/pipeline/aggregation_context_fixture.h"
#include "mongo/db/pipeline/process_interface/standalone_process_interface.h"
#include "mongo/unittest/unittest.h"
@@ -67,7 +65,7 @@ TEST_F(ProcessInterfaceStandaloneTest,
FailsToEnsureFieldsUniqueIfTargetCollectionVersionIsSpecifiedOnMongos) {
auto expCtx = getExpCtx();
auto targetCollectionVersion =
- boost::make_optional(ChunkVersion(0, 0, OID::gen(), Timestamp(1, 1)));
+ boost::make_optional(ChunkVersion({OID::gen(), Timestamp(1, 1)}, {0, 0}));
auto processInterface = makeProcessInterface();
// Test that 'targetCollectionVersion' is not accepted if not from mongos.
@@ -90,7 +88,7 @@ TEST_F(ProcessInterfaceStandaloneTest,
TEST_F(ProcessInterfaceStandaloneTest, FailsToEnsureFieldsUniqueIfJoinFieldsAreNotSentFromMongos) {
auto expCtx = getExpCtx();
auto targetCollectionVersion =
- boost::make_optional(ChunkVersion(0, 0, OID::gen(), Timestamp(1, 1)));
+ boost::make_optional(ChunkVersion({OID::gen(), Timestamp(1, 1)}, {0, 0}));
auto processInterface = makeProcessInterface();
expCtx->fromMongos = true;
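
Note: the test updates here and in sharded_union_test.cpp below reflect the new ChunkVersion construction order used throughout this patch: the old ChunkVersion(major, minor, epoch, timestamp) form becomes ChunkVersion({epoch, timestamp}, {major, minor}), so ChunkVersion(0, 0, OID::gen(), Timestamp(1, 1)) is now written ChunkVersion({OID::gen(), Timestamp(1, 1)}, {0, 0}).
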
diff --git a/src/mongo/db/pipeline/sharded_union_test.cpp b/src/mongo/db/pipeline/sharded_union_test.cpp
index 79863fc7f14..a8d15b8dbbe 100644
--- a/src/mongo/db/pipeline/sharded_union_test.cpp
+++ b/src/mongo/db/pipeline/sharded_union_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/exec/document_value/document_value_test_util.h"
#include "mongo/db/pipeline/document_source_group.h"
#include "mongo/db/pipeline/document_source_match.h"
@@ -163,10 +161,12 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
onCommand([&](const executor::RemoteCommandRequest& request) {
OID epoch{OID::gen()};
Timestamp timestamp{1, 0};
- return createErrorCursorResponse(Status{
- StaleConfigInfo(
- kTestAggregateNss, ChunkVersion(1, 0, epoch, timestamp), boost::none, ShardId{"0"}),
- "Mock error: shard version mismatch"});
+ return createErrorCursorResponse(
+ Status{StaleConfigInfo(kTestAggregateNss,
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ boost::none,
+ ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
// Mock the expected config server queries.
@@ -175,7 +175,7 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
const Timestamp timestamp(1, 1);
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
@@ -246,10 +246,12 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
OID epoch{OID::gen()};
Timestamp timestamp{1, 0};
- return createErrorCursorResponse(Status{
- StaleConfigInfo(
- kTestAggregateNss, ChunkVersion(1, 0, epoch, timestamp), boost::none, ShardId{"0"}),
- "Mock error: shard version mismatch"});
+ return createErrorCursorResponse(
+ Status{StaleConfigInfo(kTestAggregateNss,
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ boost::none,
+ ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
// Mock the expected config server queries. Update the distribution as if a chunk [0, 10] was
@@ -259,7 +261,7 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
const Timestamp timestamp(1, 1);
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
@@ -337,23 +339,27 @@ TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNo
Timestamp timestamp{1, 1};
onCommand([&](const executor::RemoteCommandRequest& request) {
- return createErrorCursorResponse(Status{
- StaleConfigInfo(
- kTestAggregateNss, ChunkVersion(1, 0, epoch, timestamp), boost::none, ShardId{"0"}),
- "Mock error: shard version mismatch"});
+ return createErrorCursorResponse(
+ Status{StaleConfigInfo(kTestAggregateNss,
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ boost::none,
+ ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
onCommand([&](const executor::RemoteCommandRequest& request) {
- return createErrorCursorResponse(Status{
- StaleConfigInfo(
- kTestAggregateNss, ChunkVersion(1, 0, epoch, timestamp), boost::none, ShardId{"0"}),
- "Mock error: shard version mismatch"});
+ return createErrorCursorResponse(
+ Status{StaleConfigInfo(kTestAggregateNss,
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ boost::none,
+ ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
// Mock the expected config server queries. Update the distribution so that all chunks are on
// the same shard.
const UUID uuid = UUID::gen();
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(
cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), shardKeyPattern.getKeyPattern().globalMax()},
@@ -412,7 +418,7 @@ TEST_F(ShardedUnionTest, IncorporatesViewDefinitionAndRetriesWhenViewErrorReceiv
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
const Timestamp timestamp(1, 1);
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index c4d2de8fcb0..6449c5241fc 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -538,10 +538,11 @@ std::string CanonicalQuery::toStringShort() const {
}
CanonicalQuery::QueryShapeString CanonicalQuery::encodeKey() const {
- // TODO SERVER-61507: remove '_pipeline.empty()' check. Canonical queries with pushed down
- // $group/$lookup stages are not SBE-compatible until SERVER-61507 is complete.
+ // TODO SERVER-61507: remove 'canUseSbePlanCache' check. Canonical queries with pushed
+ // down $group stages are not compatible with the SBE plan cache until SERVER-61507 is complete.
return (feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV() &&
- !_forceClassicEngine && _sbeCompatible && _pipeline.empty())
+ !_forceClassicEngine && _sbeCompatible &&
+ canonical_query_encoder::canUseSbePlanCache(*this))
? canonical_query_encoder::encodeSBE(*this)
: canonical_query_encoder::encode(*this);
}
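
Note: CanonicalQuery::encodeKey now gates SBE key generation on canonical_query_encoder::canUseSbePlanCache rather than requiring the pushed-down pipeline to be empty. As the helper added later in this patch shows, that currently means every pushed-down stage must be a $lookup, so $lookup-only pipelines can participate in the SBE plan cache while $group pushdown still falls back to the classic key.
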
diff --git a/src/mongo/db/query/canonical_query_encoder.cpp b/src/mongo/db/query/canonical_query_encoder.cpp
index 2013c8a635e..11b1a99479a 100644
--- a/src/mongo/db/query/canonical_query_encoder.cpp
+++ b/src/mongo/db/query/canonical_query_encoder.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/matcher/expression_text_noop.h"
#include "mongo/db/matcher/expression_where.h"
#include "mongo/db/matcher/expression_where_noop.h"
+#include "mongo/db/pipeline/document_source_lookup.h"
#include "mongo/db/query/analyze_regex.h"
#include "mongo/db/query/projection.h"
#include "mongo/db/query/query_feature_flags_gen.h"
@@ -86,6 +87,7 @@ const char kEncodeProjectionRequirementSeparator = '-';
const char kEncodeRegexFlagsSeparator = '/';
const char kEncodeSortSection = '~';
const char kEncodeEngineSection = '@';
+const char kEncodePipelineSection = '^';
// These special bytes are used in the encoding of auto-parameterized match expressions in the SBE
// plan cache key.
@@ -135,6 +137,7 @@ void encodeUserString(StringData s, BuilderType* builder) {
case kEncodeEngineSection:
case kEncodeParamMarker:
case kEncodeConstantLiteralMarker:
+ case kEncodePipelineSection:
case '\\':
if constexpr (hasAppendChar<BuilderType>) {
builder->appendChar('\\');
@@ -431,6 +434,26 @@ void encodeCollation(const CollatorInterface* collation, StringBuilder* keyBuild
// not be stable between versions.
}
+void encodePipeline(const std::vector<std::unique_ptr<InnerPipelineStageInterface>>& pipeline,
+ BufBuilder* bufBuilder) {
+ bufBuilder->appendChar(kEncodePipelineSection);
+ for (auto& stage : pipeline) {
+ std::vector<Value> serializedArray;
+ if (auto lookupStage = dynamic_cast<DocumentSourceLookUp*>(stage->documentSource())) {
+ lookupStage->serializeToArray(serializedArray, boost::none);
+ tassert(6443201,
+ "$lookup stage isn't serialized to a single bson object",
+ serializedArray.size() == 1 && serializedArray[0].getType() == Object);
+ const auto bson = serializedArray[0].getDocument().toBson();
+ bufBuilder->appendBuf(bson.objdata(), bson.objsize());
+ } else {
+ tasserted(6443200,
+ str::stream() << "Pipeline stage cannot be encoded in plan cache key: "
+ << stage->documentSource()->getSourceName());
+ }
+ }
+}
+
template <class RegexIterator>
void encodeRegexFlagsForMatch(RegexIterator first, RegexIterator last, StringBuilder* keyBuilder) {
// We sort the flags, so that queries with the same regex flags in different orders will have
@@ -1085,6 +1108,8 @@ std::string encodeSBE(const CanonicalQuery& cq) {
encodeFindCommandRequest(cq.getFindCommandRequest(), &bufBuilder);
+ encodePipeline(cq.pipeline(), &bufBuilder);
+
return base64::encode(StringData(bufBuilder.buf(), bufBuilder.len()));
}
@@ -1106,5 +1131,14 @@ CanonicalQuery::IndexFilterKey encodeForIndexFilters(const CanonicalQuery& cq) {
uint32_t computeHash(StringData key) {
return SimpleStringDataComparator::kInstance.hash(key);
}
+
+bool canUseSbePlanCache(const CanonicalQuery& cq) {
+ for (auto& stage : cq.pipeline()) {
+ if (StringData{stage->documentSource()->getSourceName()} != "$lookup") {
+ return false;
+ }
+ }
+ return true;
+}
} // namespace canonical_query_encoder
} // namespace mongo
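
Note: two details above work together. encodePipeline writes a '^' section marker (kEncodePipelineSection) followed by each pushed-down $lookup stage serialized to BSON, and encodeUserString gains '^' in its escaping switch so user-supplied strings appended elsewhere in the key cannot be mistaken for that section boundary. Below is a minimal, self-contained sketch of the marker-plus-escaping idea; the names are stand-ins, not the encoder's real helpers.

#include <iostream>
#include <string>

static const char kPipelineSection = '^';

// Escape marker bytes (and the escape character itself) inside user data
// before appending it to the flat key string.
void appendEscaped(const std::string& s, std::string* key) {
    for (char c : s) {
        if (c == kPipelineSection || c == '\\') {
            key->push_back('\\');
        }
        key->push_back(c);
    }
}

int main() {
    std::string key = "find-part";
    key.push_back(kPipelineSection);   // real section boundary
    appendEscaped("as^field", &key);   // user data containing the marker byte
    std::cout << key << "\n";          // prints: find-part^as\^field
}
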
diff --git a/src/mongo/db/query/canonical_query_encoder.h b/src/mongo/db/query/canonical_query_encoder.h
index 3164ddbec67..4bfbb68c2f2 100644
--- a/src/mongo/db/query/canonical_query_encoder.h
+++ b/src/mongo/db/query/canonical_query_encoder.h
@@ -68,5 +68,11 @@ CanonicalQuery::IndexFilterKey encodeForIndexFilters(const CanonicalQuery& cq);
* Returns a hash of the given key (produced from either a QueryShapeString or a PlanCacheKey).
*/
uint32_t computeHash(StringData key);
+
+/**
+ * Returns whether a plan generated from this query can be stored in the SBE plan cache.
+ */
+bool canUseSbePlanCache(const CanonicalQuery& cq);
+
} // namespace canonical_query_encoder
} // namespace mongo
diff --git a/src/mongo/db/query/canonical_query_encoder_test.cpp b/src/mongo/db/query/canonical_query_encoder_test.cpp
index 486b4f2d14f..3394e048be8 100644
--- a/src/mongo/db/query/canonical_query_encoder_test.cpp
+++ b/src/mongo/db/query/canonical_query_encoder_test.cpp
@@ -29,10 +29,11 @@
#include "mongo/db/query/canonical_query_encoder.h"
-#include "mongo/db/catalog/collection_mock.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
+#include "mongo/db/pipeline/document_source.h"
#include "mongo/db/pipeline/expression_context_for_test.h"
+#include "mongo/db/pipeline/inner_pipeline_stage_impl.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/query_test_service_context.h"
@@ -46,10 +47,17 @@ namespace {
using std::unique_ptr;
static const NamespaceString nss("testdb.testcoll");
+static const NamespaceString foreignNss("testdb.foreigncoll");
-PlanCacheKey makeKey(const CanonicalQuery& cq) {
- CollectionMock coll(nss);
- return plan_cache_key_factory::make<PlanCacheKey>(cq, &coll);
+std::vector<std::unique_ptr<InnerPipelineStageInterface>> parsePipeline(
+ const boost::intrusive_ptr<ExpressionContext> expCtx, const std::vector<BSONObj>& rawPipeline) {
+ auto pipeline = Pipeline::parse(rawPipeline, expCtx);
+
+ std::vector<std::unique_ptr<InnerPipelineStageInterface>> stages;
+ for (auto&& source : pipeline->getSources()) {
+ stages.emplace_back(std::make_unique<InnerPipelineStageImpl>(source));
+ }
+ return stages;
}
/**
@@ -59,7 +67,8 @@ unique_ptr<CanonicalQuery> canonicalize(BSONObj query,
BSONObj sort,
BSONObj proj,
BSONObj collation,
- std::unique_ptr<FindCommandRequest> findCommand = nullptr) {
+ std::unique_ptr<FindCommandRequest> findCommand = nullptr,
+ std::vector<BSONObj> pipelineObj = {}) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
@@ -70,14 +79,26 @@ unique_ptr<CanonicalQuery> canonicalize(BSONObj query,
findCommand->setSort(sort.getOwned());
findCommand->setProjection(proj.getOwned());
findCommand->setCollation(collation.getOwned());
- const boost::intrusive_ptr<ExpressionContext> expCtx;
+
+ const auto expCtx = make_intrusive<ExpressionContextForTest>(opCtx.get(), nss);
+ expCtx->addResolvedNamespaces({foreignNss});
+ if (!findCommand->getCollation().isEmpty()) {
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
+ ->makeFromBSON(findCommand->getCollation());
+ ASSERT_OK(statusWithCollator.getStatus());
+ expCtx->setCollator(std::move(statusWithCollator.getValue()));
+ }
+ auto pipeline = parsePipeline(expCtx, pipelineObj);
+
auto statusWithCQ =
CanonicalQuery::canonicalize(opCtx.get(),
std::move(findCommand),
false,
expCtx,
ExtensionsCallbackNoop(),
- MatchExpressionParser::kAllowAllSpecialFeatures);
+ MatchExpressionParser::kAllowAllSpecialFeatures,
+ ProjectionPolicies::findProjectionPolicies(),
+ std::move(pipeline));
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -115,13 +136,14 @@ void testComputeSBEKey(BSONObj query,
BSONObj sort,
BSONObj proj,
std::string expectedStr,
- std::unique_ptr<FindCommandRequest> findCommand = nullptr) {
+ std::unique_ptr<FindCommandRequest> findCommand = nullptr,
+ std::vector<BSONObj> pipelineObj = {}) {
BSONObj collation;
unique_ptr<CanonicalQuery> cq(
- canonicalize(query, sort, proj, collation, std::move(findCommand)));
+ canonicalize(query, sort, proj, collation, std::move(findCommand), std::move(pipelineObj)));
cq->setSbeCompatible(true);
- auto key = makeKey(*cq);
- ASSERT_EQUALS(key.toString(), expectedStr);
+ const auto key = canonical_query_encoder::encodeSBE(*cq);
+ ASSERT_EQUALS(key, expectedStr);
}
void testComputeKey(const char* queryStr,
@@ -135,12 +157,14 @@ void testComputeSBEKey(const char* queryStr,
const char* sortStr,
const char* projStr,
std::string expectedStr,
- std::unique_ptr<FindCommandRequest> findCommand = nullptr) {
+ std::unique_ptr<FindCommandRequest> findCommand = nullptr,
+ std::vector<BSONObj> pipelineObj = {}) {
testComputeSBEKey(fromjson(queryStr),
fromjson(sortStr),
fromjson(projStr),
expectedStr,
- std::move(findCommand));
+ std::move(findCommand),
+ std::move(pipelineObj));
}
TEST(CanonicalQueryEncoderTest, ComputeKey) {
@@ -262,8 +286,6 @@ TEST(CanonicalQueryEncoderTest, ComputeKeyEscaped) {
// Cache keys for $geoWithin queries with legacy and GeoJSON coordinates should
// not be the same.
TEST(CanonicalQueryEncoderTest, ComputeKeyGeoWithin) {
- PlanCache planCache(5000);
-
// Legacy coordinates.
unique_ptr<CanonicalQuery> cqLegacy(
canonicalize("{a: {$geoWithin: "
@@ -273,7 +295,8 @@ TEST(CanonicalQueryEncoderTest, ComputeKeyGeoWithin) {
canonicalize("{a: {$geoWithin: "
"{$geometry: {type: 'Polygon', coordinates: "
"[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
- ASSERT_NOT_EQUALS(makeKey(*cqLegacy), makeKey(*cqNew));
+ ASSERT_NOT_EQUALS(canonical_query_encoder::encode(*cqLegacy),
+ canonical_query_encoder::encode(*cqNew));
}
// GEO_NEAR cache keys should include information on geometry and CRS in addition
@@ -395,85 +418,87 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) {
// SBE must be enabled in order to generate SBE plan cache keys.
RAIIServerParameterControllerForTest controllerSBE("internalQueryForceClassicEngine", false);
- // TODO SERVER-61314: Remove when featureFlagSbePlanCache is removed.
RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbePlanCache", true);
- testComputeSBEKey("{}", "{}", "{}", "YW4ABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA=");
+ testComputeSBEKey("{}", "{}", "{}", "YW4ABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe");
testComputeSBEKey(
"{$or: [{a: 1}, {b: 2}]}",
"{}",
"{}",
- "b3IAW2VxAGE/AAAAACxlcQBiPwEAAABdBQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA=");
+ "b3IAW2VxAGE/AAAAACxlcQBiPwEAAABdBQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe");
testComputeSBEKey(
- "{a: 1}", "{}", "{}", "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA=");
+ "{a: 1}", "{}", "{}", "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe");
testComputeSBEKey(
- "{b: 1}", "{}", "{}", "ZXEAYj8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA=");
+ "{b: 1}", "{}", "{}", "ZXEAYj8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe");
testComputeSBEKey(
"{a: 1, b: 1, c: 1}",
"{}",
"{}",
- "YW4AW2VxAGE/AAAAACxlcQBiPwEAAAAsZXEAYz8CAAAAXQUAAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA");
+ "YW4AW2VxAGE/"
+ "AAAAACxlcQBiPwEAAAAsZXEAYz8CAAAAXQUAAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg==");
// With sort
- testComputeSBEKey("{}", "{a: 1}", "{}", "YW4ABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA=");
+ testComputeSBEKey("{}", "{a: 1}", "{}", "YW4ABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe");
testComputeSBEKey(
- "{}", "{a: -1}", "{}", "YW4ABQAAAAB+ZGEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA=");
+ "{}", "{a: -1}", "{}", "YW4ABQAAAAB+ZGEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe");
testComputeSBEKey(
- "{a: 1}", "{a: 1}", "{}", "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA=");
+ "{a: 1}", "{a: 1}", "{}", "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe");
// With projection
testComputeSBEKey("{a: 1}",
"{a: 1}",
"{a: 1}",
- "ZXEAYT8AAAAADAAAABBhAAEAAAAAfmFhAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA");
- testComputeSBEKey(
- "{}", "{a: 1}", "{a: 1}", "YW4ADAAAABBhAAEAAAAAfmFhAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA");
+ "ZXEAYT8AAAAADAAAABBhAAEAAAAAfmFhAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg==");
+ testComputeSBEKey("{}",
+ "{a: 1}",
+ "{a: 1}",
+ "YW4ADAAAABBhAAEAAAAAfmFhAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg==");
testComputeSBEKey("{}",
"{a: 1}",
"{a: 1, b: [{$const: 1}]}",
"YW4AKAAAABBhAAEAAAAEYgAZAAAAAzAAEQAAABAkY29uc3QAAQAAAAAAAH5hYQAAAAAAAAAAbm5u"
- "bgUAAAAABQAAAAAFAAAAAA==");
+ "bgUAAAAABQAAAAAFAAAAAF4=");
testComputeSBEKey(
- "{}", "{}", "{a: 1}", "YW4ADAAAABBhAAEAAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA");
+ "{}", "{}", "{a: 1}", "YW4ADAAAABBhAAEAAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg==");
testComputeSBEKey(
- "{}", "{}", "{a: true}", "YW4ACQAAAAhhAAEAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA");
+ "{}", "{}", "{a: true}", "YW4ACQAAAAhhAAEAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg==");
testComputeSBEKey(
- "{}", "{}", "{a: false}", "YW4ACQAAAAhhAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA");
+ "{}", "{}", "{a: false}", "YW4ACQAAAAhhAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg==");
// With FindCommandRequest
auto findCommand = std::make_unique<FindCommandRequest>(nss);
testComputeSBEKey("{a: 1}",
"{a: 1}",
"{}",
- "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAAA=",
+ "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe",
std::move(findCommand));
findCommand = std::make_unique<FindCommandRequest>(nss);
findCommand->setAllowDiskUse(true);
testComputeSBEKey("{a: 1}",
"{a: 1}",
"{}",
- "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAHRubm4FAAAAAAUAAAAABQAAAAA=",
+ "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAHRubm4FAAAAAAUAAAAABQAAAABe",
std::move(findCommand));
findCommand = std::make_unique<FindCommandRequest>(nss);
findCommand->setAllowDiskUse(false);
testComputeSBEKey("{a: 1}",
"{a: 1}",
"{}",
- "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAGZubm4FAAAAAAUAAAAABQAAAAA=",
+ "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAGZubm4FAAAAAAUAAAAABQAAAABe",
std::move(findCommand));
findCommand = std::make_unique<FindCommandRequest>(nss);
findCommand->setReturnKey(true);
testComputeSBEKey("{a: 1}",
"{a: 1}",
"{}",
- "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG50bm4FAAAAAAUAAAAABQAAAAA=",
+ "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG50bm4FAAAAAAUAAAAABQAAAABe",
std::move(findCommand));
findCommand = std::make_unique<FindCommandRequest>(nss);
findCommand->setRequestResumeToken(false);
testComputeSBEKey("{a: 1}",
"{a: 1}",
"{}",
- "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5uZm4FAAAAAAUAAAAABQAAAAA=",
+ "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5uZm4FAAAAAAUAAAAABQAAAABe",
std::move(findCommand));
findCommand = std::make_unique<FindCommandRequest>(nss);
@@ -481,7 +506,7 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) {
testComputeSBEKey("{a: 1}",
"{a: 1}",
"{}",
- "ZXEAYT8AAAAABQAAAAB+YWEKAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA",
+ "ZXEAYT8AAAAABQAAAAB+YWEKAAAAAAAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg==",
std::move(findCommand));
findCommand = std::make_unique<FindCommandRequest>(nss);
@@ -489,7 +514,7 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) {
testComputeSBEKey("{a: 1}",
"{a: 1}",
"{}",
- "ZXEAYT8AAAAABQAAAAB+YWEAAAAACgAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAA",
+ "ZXEAYT8AAAAABQAAAAB+YWEAAAAACgAAAAAAAABubm5uBQAAAAAFAAAAAAUAAAAAXg==",
std::move(findCommand));
findCommand = std::make_unique<FindCommandRequest>(nss);
@@ -497,14 +522,14 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) {
testComputeSBEKey("{a: 1}",
"{a: 1}",
"{}",
- "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAwAAAAQYQABAAAAAAUAAAAA",
+ "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAwAAAAQYQABAAAAAAUAAAAAXg==",
std::move(findCommand));
findCommand = std::make_unique<FindCommandRequest>(nss);
findCommand->setMax(mongo::fromjson("{ a : 1 }"));
testComputeSBEKey("{a: 1}",
"{a: 1}",
"{}",
- "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAADAAAABBhAAEAAAAA",
+ "ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAAUAAAAADAAAABBhAAEAAAAAXg==",
std::move(findCommand));
findCommand = std::make_unique<FindCommandRequest>(nss);
findCommand->setRequestResumeToken(true);
@@ -515,9 +540,74 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) {
"{a: 1}",
"{}",
"{}",
- "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5udG4YAAAAEiRyZWNvcmRJZAABAAAAAAAAAAAFAAAAAAUAAAAA",
+ "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5udG4YAAAAEiRyZWNvcmRJZAABAAAAAAAAAAAFAAAAAAUAAAAAXg==",
std::move(findCommand));
}
+TEST(CanonicalQueryEncoderTest, ComputeKeySBEWithPipeline) {
+ // SBE must be enabled in order to generate SBE plan cache keys.
+ RAIIServerParameterControllerForTest controllerSBE("internalQueryForceClassicEngine", false);
+
+ RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbePlanCache", true);
+
+ auto getLookupBson = [](StringData localField, StringData foreignField, StringData asField) {
+ return BSON("$lookup" << BSON("from" << foreignNss.coll() << "localField" << localField
+ << "foreignField" << foreignField << "as" << asField));
+ };
+
+ // No pipeline stage.
+ testComputeSBEKey("{a: 1}",
+ "{}",
+ "{}",
+ "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABe",
+ nullptr,
+ {});
+
+ // Different $lookup stage options.
+ testComputeSBEKey(
+ "{a: 1}",
+ "{}",
+ "{}",
+ "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABeWgAAAAMkbG9va3VwAEwAAAACZnJvbQAMAA"
+ "AAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAACAAAAYQACZm9yZWlnbkZpZWxkAAIAAABiAAAA",
+ nullptr,
+ {getLookupBson("a", "b", "as")});
+ testComputeSBEKey("{a: 1}",
+ "{}",
+ "{}",
+ "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABeWwAAAAMkbG9va3VwAE0A"
+ "AAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAADAAAAYTEAAmZv"
+ "cmVpZ25GaWVsZAACAAAAYgAAAA==",
+ nullptr,
+ {getLookupBson("a1", "b", "as")});
+ testComputeSBEKey("{a: 1}",
+ "{}",
+ "{}",
+ "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABeWwAAAAMkbG9va3VwAE0A"
+ "AAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAACAAAAYQACZm9y"
+ "ZWlnbkZpZWxkAAMAAABiMQAAAA==",
+ nullptr,
+ {getLookupBson("a", "b1", "as")});
+ testComputeSBEKey("{a: 1}",
+ "{}",
+ "{}",
+ "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABeWwAAAAMkbG9va3VwAE0A"
+ "AAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAQAAABhczEAAmxvY2FsRmllbGQAAgAAAGEAAmZv"
+ "cmVpZ25GaWVsZAACAAAAYgAAAA==",
+ nullptr,
+ {getLookupBson("a", "b", "as1")});
+
+ // Multiple $lookup stages.
+ testComputeSBEKey("{a: 1}",
+ "{}",
+ "{}",
+ "ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAAUAAAAABQAAAABeWgAAAAMkbG9va3VwAEwA"
+ "AAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAACAAAAYQACZm9y"
+ "ZWlnbkZpZWxkAAIAAABiAAAAXQAAAAMkbG9va3VwAE8AAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwA"
+ "AmFzAAQAAABhczEAAmxvY2FsRmllbGQAAwAAAGExAAJmb3JlaWduRmllbGQAAwAAAGIxAAAA",
+ nullptr,
+ {getLookupBson("a", "b", "as"), getLookupBson("a1", "b1", "as1")});
+}
+
} // namespace
} // namespace mongo
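For orientation on the ComputeKeySBEWithPipeline cases above: getLookupBson("a", "b", "as") builds a $lookup stage of the usual shape

    {$lookup: {from: <foreign collection>, localField: "a", foreignField: "b", as: "as"}}

and the stage's BSON appears to be folded into the key after the find-command portion (the shared prefix in the expected strings), which is why each variation of localField, foreignField, and as expects a distinct encoded string. The <foreign collection> placeholder stands for foreignNss.coll(); the exact byte layout of the suffix is not reproduced here.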
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 414badb8332..568c3da9fe0 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -48,6 +48,7 @@
#include "mongo/db/query/collection_query_info.h"
#include "mongo/db/query/explain_common.h"
#include "mongo/db/query/get_executor.h"
+#include "mongo/db/query/multiple_collection_accessor.h"
#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/plan_executor_impl.h"
@@ -79,7 +80,7 @@ namespace {
* - 'out' is a builder for the explain output.
*/
void generatePlannerInfo(PlanExecutor* exec,
- const CollectionPtr& collection,
+ const MultipleCollectionAccessor& collections,
BSONObj extraInfo,
BSONObjBuilder* out) {
BSONObjBuilder plannerBob(out->subobjStart("queryPlanner"));
@@ -91,22 +92,23 @@ void generatePlannerInfo(PlanExecutor* exec,
bool indexFilterSet = false;
boost::optional<uint32_t> queryHash;
boost::optional<uint32_t> planCacheKeyHash;
- if (collection && exec->getCanonicalQuery()) {
+ const auto& mainCollection = collections.getMainCollection();
+ if (mainCollection && exec->getCanonicalQuery()) {
const QuerySettings* querySettings =
- QuerySettingsDecoration::get(collection->getSharedDecorations());
+ QuerySettingsDecoration::get(mainCollection->getSharedDecorations());
if (exec->getCanonicalQuery()->isSbeCompatible() &&
feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV() &&
!exec->getCanonicalQuery()->getForceClassicEngine() &&
- // TODO(SERVER-61507): Remove pipeline check once lowered pipelines are integrated with
- // SBE plan cache.
- exec->getCanonicalQuery()->pipeline().empty()) {
- const auto planCacheKeyInfo = plan_cache_key_factory::make<sbe::PlanCacheKey>(
- *exec->getCanonicalQuery(), collection);
+ // TODO SERVER-61507: remove canUseSbePlanCache check when $group pushdown is
+ // integrated with SBE plan cache.
+ canonical_query_encoder::canUseSbePlanCache(*exec->getCanonicalQuery())) {
+ const auto planCacheKeyInfo =
+ plan_cache_key_factory::make(*exec->getCanonicalQuery(), collections);
planCacheKeyHash = planCacheKeyInfo.planCacheKeyHash();
queryHash = planCacheKeyInfo.queryHash();
} else {
- const auto planCacheKeyInfo =
- plan_cache_key_factory::make<PlanCacheKey>(*exec->getCanonicalQuery(), collection);
+ const auto planCacheKeyInfo = plan_cache_key_factory::make<PlanCacheKey>(
+ *exec->getCanonicalQuery(), mainCollection);
planCacheKeyHash = planCacheKeyInfo.planCacheKeyHash();
queryHash = planCacheKeyInfo.queryHash();
}
@@ -310,7 +312,7 @@ void appendBasicPlanCacheEntryInfoToBSON(const EntryType& entry, BSONObjBuilder*
} // namespace
void Explain::explainStages(PlanExecutor* exec,
- const CollectionPtr& collection,
+ const MultipleCollectionAccessor& collections,
ExplainOptions::Verbosity verbosity,
Status executePlanStatus,
boost::optional<PlanExplainer::PlanStatsDetails> winningPlanTrialStats,
@@ -325,7 +327,7 @@ void Explain::explainStages(PlanExecutor* exec,
out->appendElements(explainVersionToBson(explainer.getVersion()));
if (verbosity >= ExplainOptions::Verbosity::kQueryPlanner) {
- generatePlannerInfo(exec, collection, extraInfo, out);
+ generatePlannerInfo(exec, collections, extraInfo, out);
}
if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
@@ -364,7 +366,7 @@ void Explain::explainPipeline(PlanExecutor* exec,
}
void Explain::explainStages(PlanExecutor* exec,
- const CollectionPtr& collection,
+ const MultipleCollectionAccessor& collections,
ExplainOptions::Verbosity verbosity,
BSONObj extraInfo,
const BSONObj& command,
@@ -372,9 +374,10 @@ void Explain::explainStages(PlanExecutor* exec,
auto&& explainer = exec->getPlanExplainer();
auto winningPlanTrialStats = explainer.getWinningPlanTrialStats();
Status executePlanStatus = Status::OK();
- const CollectionPtr* collectionPtr = &collection;
+ const MultipleCollectionAccessor* collectionsPtr = &collections;
// If we need execution stats, then run the plan in order to gather the stats.
+ const MultipleCollectionAccessor emptyCollections;
if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
try {
executePlan(exec);
@@ -386,12 +389,12 @@ void Explain::explainStages(PlanExecutor* exec,
// then the collection may no longer be valid. We conservatively set our collection pointer
// to null in case it is invalid.
if (!executePlanStatus.isOK() && executePlanStatus != ErrorCodes::NoQueryExecutionPlans) {
- collectionPtr = &CollectionPtr::null;
+ collectionsPtr = &emptyCollections;
}
}
explainStages(exec,
- *collectionPtr,
+ *collectionsPtr,
verbosity,
executePlanStatus,
winningPlanTrialStats,
@@ -403,6 +406,15 @@ void Explain::explainStages(PlanExecutor* exec,
explain_common::generateServerParameters(out);
}
+void Explain::explainStages(PlanExecutor* exec,
+ const CollectionPtr& collection,
+ ExplainOptions::Verbosity verbosity,
+ BSONObj extraInfo,
+ const BSONObj& command,
+ BSONObjBuilder* out) {
+ explainStages(exec, MultipleCollectionAccessor(collection), verbosity, extraInfo, command, out);
+}
+
void Explain::planCacheEntryToBSON(const PlanCacheEntry& entry, BSONObjBuilder* out) {
out->append("version", "1");
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index d41dd3a1725..1dcabdeb7e3 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -39,6 +39,7 @@ namespace mongo {
class Collection;
class CollectionPtr;
+class MultipleCollectionAccessor;
class OperationContext;
class PlanExecutorPipeline;
struct PlanSummaryStats;
@@ -77,15 +78,26 @@ public:
BSONObj extraInfo,
const BSONObj& command,
BSONObjBuilder* out);
+
+ /**
+ * Similar to the above function, but takes multiple collections in order to support
+ * aggregation that involves more than one collection (e.g. $lookup).
+ */
+ static void explainStages(PlanExecutor* exec,
+ const MultipleCollectionAccessor& collections,
+ ExplainOptions::Verbosity verbosity,
+ BSONObj extraInfo,
+ const BSONObj& command,
+ BSONObjBuilder* out);
+
/**
* Adds "queryPlanner" and "executionStats" (if requested in verbosity) fields to 'out'. Unlike
* the other overload of explainStages() above, this one does not add the "serverInfo" section.
*
* - 'exec' is the stage tree for the operation being explained.
- * - 'collection' is the relevant collection. During this call it may be required to execute the
- * plan to collect statistics. If the PlanExecutor uses 'kLockExternally' lock policy, the
- * caller should hold at least an IS lock on the collection the that the query runs on, even if
- * 'collection' parameter is nullptr.
+ * - 'collections' are the relevant main and secondary collections (e.g. for $lookup). If the
+ * PlanExecutor uses the 'kLockExternally' lock policy, the caller should hold the necessary
+ * db_raii objects for the involved collections.
* - 'verbosity' is the verbosity level of the explain.
* - 'extraInfo' specifies additional information to include into the output.
* - 'executePlanStatus' is the status returned after executing the query (Status::OK if the
@@ -97,7 +109,7 @@ public:
*/
static void explainStages(
PlanExecutor* exec,
- const CollectionPtr& collection,
+ const MultipleCollectionAccessor& collections,
ExplainOptions::Verbosity verbosity,
Status executePlanStatus,
boost::optional<PlanExplainer::PlanStatsDetails> winningPlanTrialStats,
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 5c22beab210..6c77f43ae1a 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -647,7 +647,7 @@ public:
_cq->setCollator(mainColl->getDefaultCollator()->clone());
}
- auto planCacheKey = plan_cache_key_factory::make<KeyType>(*_cq, mainColl);
+ auto planCacheKey = buildPlanCacheKey();
// Fill in some opDebug information, unless it has already been filled by an outer pipeline.
OpDebug& opDebug = CurOp::get(_opCtx)->debug();
if (!opDebug.queryHash) {
@@ -743,6 +743,11 @@ protected:
virtual PlanStageType buildExecutableTree(const QuerySolution& solution) const = 0;
/**
+ * Constructs the plan cache key.
+ */
+ virtual KeyType buildPlanCacheKey() const = 0;
+
+ /**
* Either constructs a PlanStage tree from a cached plan (if exists in the plan cache), or
* constructs a "id hack" PlanStage tree. Returns nullptr if no cached plan or id hack plan can
* be constructed.
@@ -879,6 +884,10 @@ protected:
return result;
}
+ PlanCacheKey buildPlanCacheKey() const {
+ return plan_cache_key_factory::make<PlanCacheKey>(*_cq, _collection);
+ }
+
std::unique_ptr<ClassicPrepareExecutionResult> buildCachedPlan(
const PlanCacheKey& planCacheKey) final {
initializePlannerParamsIfNeeded();
@@ -1083,13 +1092,17 @@ protected:
return result;
}
+ sbe::PlanCacheKey buildPlanCacheKey() const {
+ return plan_cache_key_factory::make(*_cq, _collections);
+ }
+
std::unique_ptr<SlotBasedPrepareExecutionResult> buildCachedPlan(
const sbe::PlanCacheKey& planCacheKey) final {
if (shouldCacheQuery(*_cq)) {
- // TODO SERVER-61507: remove _cq->pipeline().empty() check when $group pushdown is
+ // TODO SERVER-61507: remove canUseSbePlanCache check when $group pushdown is
// integrated with SBE plan cache.
if (!feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV() ||
- !_cq->pipeline().empty()) {
+ !canonical_query_encoder::canUseSbePlanCache(*_cq)) {
// If the feature flag is off, we first try to build an "id hack" plan because the
// id hack plans are not cached in the classic cache. We then fall back to use the
// classic plan cache.
@@ -1346,18 +1359,19 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getSlotBasedExe
// No need for runtime planning, just use the constructed plan stage tree.
invariant(solutions.size() == 1);
invariant(roots.size() == 1);
- if (!cq->pipeline().empty()) {
- // Need to extend the solution with the agg pipeline and rebuild the execution tree.
- solutions[0] = QueryPlanner::extendWithAggPipeline(
- *cq,
- std::move(solutions[0]),
- fillOutSecondaryCollectionsInformation(opCtx, collections, cq.get()));
- roots[0] = helper.buildExecutableTree(*(solutions[0]));
- }
auto&& [root, data] = roots[0];
+
if (!planningResult->recoveredPinnedCacheEntry()) {
- plan_cache_util::updatePlanCache(
- opCtx, collections.getMainCollection(), *cq, *solutions[0], *root, data);
+ if (!cq->pipeline().empty()) {
+ // Need to extend the solution with the agg pipeline and rebuild the execution tree.
+ solutions[0] = QueryPlanner::extendWithAggPipeline(
+ *cq,
+ std::move(solutions[0]),
+ fillOutSecondaryCollectionsInformation(opCtx, collections, cq.get()));
+ roots[0] = helper.buildExecutableTree(*(solutions[0]));
+ }
+
+ plan_cache_util::updatePlanCache(opCtx, collections, *cq, *solutions[0], *root, data);
}
// Prepare the SBE tree for execution.
diff --git a/src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp b/src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp
index fd6bf9e1e40..4ecaf2c0795 100644
--- a/src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp
+++ b/src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp
@@ -624,16 +624,17 @@ static void convertFilterToSargableNode(ABT::reference_type node,
return;
}
- PartialSchemaReqConversion conversion = convertExprToPartialSchemaReq(filterNode.getFilter());
- if (!conversion._success) {
+ auto conversion =
+ convertExprToPartialSchemaReq(filterNode.getFilter(), true /*isFilterContext*/);
+ if (!conversion) {
return;
}
- if (conversion._hasEmptyInterval) {
+ if (conversion->_hasEmptyInterval) {
addEmptyValueScanNode(ctx);
return;
}
- for (const auto& entry : conversion._reqMap) {
+ for (const auto& entry : conversion->_reqMap) {
uassert(6624111,
"Filter partial schema requirement must contain a variable name.",
!entry.first._projectionName.empty());
@@ -648,29 +649,29 @@ static void convertFilterToSargableNode(ABT::reference_type node,
// If in substitution mode, disallow retaining original predicate. If in exploration mode, only
// allow retaining the original predicate and if we have at least one index available.
if constexpr (isSubstitution) {
- if (conversion._retainPredicate) {
+ if (conversion->_retainPredicate) {
return;
}
- } else if (!conversion._retainPredicate || scanDef.getIndexDefs().empty()) {
+ } else if (!conversion->_retainPredicate || scanDef.getIndexDefs().empty()) {
return;
}
bool hasEmptyInterval = false;
auto candidateIndexMap = computeCandidateIndexMap(ctx.getPrefixId(),
indexingAvailability.getScanProjection(),
- conversion._reqMap,
+ conversion->_reqMap,
scanDef,
hasEmptyInterval);
if (hasEmptyInterval) {
addEmptyValueScanNode(ctx);
} else {
- ABT sargableNode = make<SargableNode>(std::move(conversion._reqMap),
+ ABT sargableNode = make<SargableNode>(std::move(conversion->_reqMap),
std::move(candidateIndexMap),
IndexReqTarget::Complete,
filterNode.getChild());
- if (conversion._retainPredicate) {
+ if (conversion->_retainPredicate) {
const GroupIdType childGroupId =
filterNode.getChild().cast<MemoLogicalDelegatorNode>()->getGroupId();
if (childGroupId == indexingAvailability.getScanGroupId()) {
@@ -813,22 +814,24 @@ struct SubstituteConvert<EvaluationNode> {
}
// We still want to extract sargable nodes from EvalNode to use for PhysicalScans.
- PartialSchemaReqConversion conversion =
- convertExprToPartialSchemaReq(evalNode.getProjection());
+ auto conversion =
+ convertExprToPartialSchemaReq(evalNode.getProjection(), false /*isFilterContext*/);
+ if (!conversion) {
+ return;
+ }
uassert(6624165,
"Should not be getting retainPredicate set for EvalNodes",
- !conversion._retainPredicate);
-
- if (!conversion._success || conversion._reqMap.size() != 1) {
+ !conversion->_retainPredicate);
+ if (conversion->_reqMap.size() != 1) {
// For evaluation nodes we expect to create a single entry.
return;
}
- if (conversion._hasEmptyInterval) {
+ if (conversion->_hasEmptyInterval) {
addEmptyValueScanNode(ctx);
return;
}
- for (auto& entry : conversion._reqMap) {
+ for (auto& entry : conversion->_reqMap) {
PartialSchemaRequirement& req = entry.second;
req.setBoundProjectionName(evalNode.getProjectionName());
@@ -842,12 +845,12 @@ struct SubstituteConvert<EvaluationNode> {
bool hasEmptyInterval = false;
auto candidateIndexMap = computeCandidateIndexMap(
- ctx.getPrefixId(), scanProjName, conversion._reqMap, scanDef, hasEmptyInterval);
+ ctx.getPrefixId(), scanProjName, conversion->_reqMap, scanDef, hasEmptyInterval);
if (hasEmptyInterval) {
addEmptyValueScanNode(ctx);
} else {
- ABT newNode = make<SargableNode>(std::move(conversion._reqMap),
+ ABT newNode = make<SargableNode>(std::move(conversion->_reqMap),
std::move(candidateIndexMap),
IndexReqTarget::Complete,
evalNode.getChild());
diff --git a/src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp b/src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp
index 6f6f6c743ed..58cbf9dee2b 100644
--- a/src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp
+++ b/src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp
@@ -4310,13 +4310,15 @@ TEST(PhysRewriter, PartialIndex1) {
// TODO: Test cases where partial filter bound is a range which subsumes the query
// requirement
// TODO: (e.g. half open interval)
- auto conversionResult = convertExprToPartialSchemaReq(make<EvalFilter>(
- make<PathGet>("b",
- make<PathTraverse>(make<PathCompare>(Operations::Eq, Constant::int64(2)))),
- make<Variable>("root")));
- ASSERT_TRUE(conversionResult._success);
- ASSERT_FALSE(conversionResult._hasEmptyInterval);
- ASSERT_FALSE(conversionResult._retainPredicate);
+ auto conversionResult = convertExprToPartialSchemaReq(
+ make<EvalFilter>(
+ make<PathGet>(
+ "b", make<PathTraverse>(make<PathCompare>(Operations::Eq, Constant::int64(2)))),
+ make<Variable>("root")),
+ true /*isFilterContext*/);
+ ASSERT_TRUE(conversionResult.has_value());
+ ASSERT_FALSE(conversionResult->_hasEmptyInterval);
+ ASSERT_FALSE(conversionResult->_retainPredicate);
OptPhaseManager phaseManager(
{OptPhaseManager::OptPhase::MemoSubstitutionPhase,
@@ -4329,7 +4331,7 @@ TEST(PhysRewriter, PartialIndex1) {
IndexDefinition{{{makeIndexPath("a"), CollationOp::Ascending}},
true /*isMultiKey*/,
{DistributionType::Centralized},
- std::move(conversionResult._reqMap)}}}}}}},
+ std::move(conversionResult->_reqMap)}}}}}}},
{true /*debugMode*/, 2 /*debugLevel*/, DebugInfo::kIterationLimitForTests});
ABT optimized = rootNode;
@@ -4387,13 +4389,15 @@ TEST(PhysRewriter, PartialIndex2) {
ABT rootNode =
make<RootNode>(ProjectionRequirement{ProjectionNameVector{"root"}}, std::move(filterANode));
- auto conversionResult = convertExprToPartialSchemaReq(make<EvalFilter>(
- make<PathGet>("a",
- make<PathTraverse>(make<PathCompare>(Operations::Eq, Constant::int64(3)))),
- make<Variable>("root")));
- ASSERT_TRUE(conversionResult._success);
- ASSERT_FALSE(conversionResult._hasEmptyInterval);
- ASSERT_FALSE(conversionResult._retainPredicate);
+ auto conversionResult = convertExprToPartialSchemaReq(
+ make<EvalFilter>(
+ make<PathGet>(
+ "a", make<PathTraverse>(make<PathCompare>(Operations::Eq, Constant::int64(3)))),
+ make<Variable>("root")),
+ true /*isFilterContext*/);
+ ASSERT_TRUE(conversionResult.has_value());
+ ASSERT_FALSE(conversionResult->_hasEmptyInterval);
+ ASSERT_FALSE(conversionResult->_retainPredicate);
OptPhaseManager phaseManager(
{OptPhaseManager::OptPhase::MemoSubstitutionPhase,
@@ -4406,7 +4410,7 @@ TEST(PhysRewriter, PartialIndex2) {
IndexDefinition{{{makeIndexPath("a"), CollationOp::Ascending}},
true /*isMultiKey*/,
{DistributionType::Centralized},
- std::move(conversionResult._reqMap)}}}}}}},
+ std::move(conversionResult->_reqMap)}}}}}}},
{true /*debugMode*/, 2 /*debugLevel*/, DebugInfo::kIterationLimitForTests});
ABT optimized = rootNode;
@@ -4462,13 +4466,15 @@ TEST(PhysRewriter, PartialIndexReject) {
ABT rootNode =
make<RootNode>(ProjectionRequirement{ProjectionNameVector{"root"}}, std::move(filterBNode));
- auto conversionResult = convertExprToPartialSchemaReq(make<EvalFilter>(
- make<PathGet>("b",
- make<PathTraverse>(make<PathCompare>(Operations::Eq, Constant::int64(4)))),
- make<Variable>("root")));
- ASSERT_TRUE(conversionResult._success);
- ASSERT_FALSE(conversionResult._hasEmptyInterval);
- ASSERT_FALSE(conversionResult._retainPredicate);
+ auto conversionResult = convertExprToPartialSchemaReq(
+ make<EvalFilter>(
+ make<PathGet>(
+ "b", make<PathTraverse>(make<PathCompare>(Operations::Eq, Constant::int64(4)))),
+ make<Variable>("root")),
+ true /*isFilterContext*/);
+ ASSERT_TRUE(conversionResult.has_value());
+ ASSERT_FALSE(conversionResult->_hasEmptyInterval);
+ ASSERT_FALSE(conversionResult->_retainPredicate);
OptPhaseManager phaseManager(
{OptPhaseManager::OptPhase::MemoSubstitutionPhase,
@@ -4481,7 +4487,7 @@ TEST(PhysRewriter, PartialIndexReject) {
IndexDefinition{{{makeIndexPath("a"), CollationOp::Ascending}},
true /*isMultiKey*/,
{DistributionType::Centralized},
- std::move(conversionResult._reqMap)}}}}}}},
+ std::move(conversionResult->_reqMap)}}}}}}},
{true /*debugMode*/, 2 /*debugLevel*/, DebugInfo::kIterationLimitForTests});
ABT optimized = rootNode;
diff --git a/src/mongo/db/query/optimizer/rewrites/const_eval.cpp b/src/mongo/db/query/optimizer/rewrites/const_eval.cpp
index 89bfe74551f..0278e20700e 100644
--- a/src/mongo/db/query/optimizer/rewrites/const_eval.cpp
+++ b/src/mongo/db/query/optimizer/rewrites/const_eval.cpp
@@ -86,7 +86,7 @@ void ConstEval::removeUnusedEvalNodes() {
// TODO: consider caching.
// TODO: consider deriving IndexingAvailability.
if (!_disableSargableInlining ||
- !convertExprToPartialSchemaReq(k->getProjection())._success) {
+ !convertExprToPartialSchemaReq(k->getProjection(), false /*isFilterContext*/)) {
// Schedule node inlining as there is exactly one reference.
_singleRef.emplace(v.front());
_changed = true;
diff --git a/src/mongo/db/query/optimizer/utils/utils.cpp b/src/mongo/db/query/optimizer/utils/utils.cpp
index 42476964493..da4be863228 100644
--- a/src/mongo/db/query/optimizer/utils/utils.cpp
+++ b/src/mongo/db/query/optimizer/utils/utils.cpp
@@ -340,18 +340,8 @@ VariableNameSetType collectVariableReferences(const ABT& n) {
return NodeVariableTracker::collect(n);
}
-PartialSchemaReqConversion::PartialSchemaReqConversion()
- : _success(false),
- _bound(),
- _reqMap(),
- _hasIntersected(false),
- _hasTraversed(false),
- _hasEmptyInterval(false),
- _retainPredicate(false) {}
-
PartialSchemaReqConversion::PartialSchemaReqConversion(PartialSchemaRequirements reqMap)
- : _success(true),
- _bound(),
+ : _bound(),
_reqMap(std::move(reqMap)),
_hasIntersected(false),
_hasTraversed(false),
@@ -359,8 +349,7 @@ PartialSchemaReqConversion::PartialSchemaReqConversion(PartialSchemaRequirements
_retainPredicate(false) {}
PartialSchemaReqConversion::PartialSchemaReqConversion(ABT bound)
- : _success(true),
- _bound(std::move(bound)),
+ : _bound(std::move(bound)),
_reqMap(),
_hasIntersected(false),
_hasTraversed(false),
@@ -372,23 +361,24 @@ PartialSchemaReqConversion::PartialSchemaReqConversion(ABT bound)
*/
class PartialSchemaReqConverter {
public:
- PartialSchemaReqConverter() = default;
+ using ResultType = boost::optional<PartialSchemaReqConversion>;
- PartialSchemaReqConversion handleEvalPathAndEvalFilter(PartialSchemaReqConversion pathResult,
- PartialSchemaReqConversion inputResult) {
- if (!pathResult._success || !inputResult._success) {
+ PartialSchemaReqConverter(const bool isFilterContext) : _isFilterContext(isFilterContext) {}
+
+ ResultType handleEvalPathAndEvalFilter(ResultType pathResult, ResultType inputResult) {
+ if (!pathResult || !inputResult) {
return {};
}
- if (pathResult._bound.has_value() || !inputResult._bound.has_value() ||
- !inputResult._reqMap.empty()) {
+ if (pathResult->_bound.has_value() || !inputResult->_bound.has_value() ||
+ !inputResult->_reqMap.empty()) {
return {};
}
- if (auto boundPtr = inputResult._bound->cast<Variable>(); boundPtr != nullptr) {
+ if (auto boundPtr = inputResult->_bound->cast<Variable>(); boundPtr != nullptr) {
const ProjectionName& boundVarName = boundPtr->name();
PartialSchemaRequirements newMap;
- for (auto& [key, req] : pathResult._reqMap) {
+ for (auto& [key, req] : pathResult->_reqMap) {
if (!key._projectionName.empty()) {
return {};
}
@@ -396,40 +386,40 @@ public:
}
PartialSchemaReqConversion result{std::move(newMap)};
- result._hasEmptyInterval = pathResult._hasEmptyInterval;
- result._retainPredicate = pathResult._retainPredicate;
+ result._hasEmptyInterval = pathResult->_hasEmptyInterval;
+ result._retainPredicate = pathResult->_retainPredicate;
return result;
}
return {};
}
- PartialSchemaReqConversion transport(const ABT& n,
- const EvalPath& evalPath,
- PartialSchemaReqConversion pathResult,
- PartialSchemaReqConversion inputResult) {
+ ResultType transport(const ABT& n,
+ const EvalPath& evalPath,
+ ResultType pathResult,
+ ResultType inputResult) {
return handleEvalPathAndEvalFilter(std::move(pathResult), std::move(inputResult));
}
- PartialSchemaReqConversion transport(const ABT& n,
- const EvalFilter& evalFilter,
- PartialSchemaReqConversion pathResult,
- PartialSchemaReqConversion inputResult) {
+ ResultType transport(const ABT& n,
+ const EvalFilter& evalFilter,
+ ResultType pathResult,
+ ResultType inputResult) {
return handleEvalPathAndEvalFilter(std::move(pathResult), std::move(inputResult));
}
- static PartialSchemaReqConversion handleComposition(const bool isMultiplicative,
- PartialSchemaReqConversion leftResult,
- PartialSchemaReqConversion rightResult) {
- if (!leftResult._success || !rightResult._success) {
+ static ResultType handleComposition(const bool isMultiplicative,
+ ResultType leftResult,
+ ResultType rightResult) {
+ if (!leftResult || !rightResult) {
return {};
}
- if (leftResult._bound.has_value() || rightResult._bound.has_value()) {
+ if (leftResult->_bound.has_value() || rightResult->_bound.has_value()) {
return {};
}
- auto& leftReqMap = leftResult._reqMap;
- auto& rightReqMap = rightResult._reqMap;
+ auto& leftReqMap = leftResult->_reqMap;
+ auto& rightReqMap = rightResult->_reqMap;
if (isMultiplicative) {
{
ProjectionRenames projectionRenames;
@@ -441,7 +431,7 @@ public:
}
}
- if (!leftResult._hasTraversed && !rightResult._hasTraversed) {
+ if (!leftResult->_hasTraversed && !rightResult->_hasTraversed) {
// Intersect intervals only if we have not traversed. E.g. (-inf, 90) ^ (70, +inf)
// becomes (70, 90).
for (auto& [key, req] : leftReqMap) {
@@ -449,7 +439,7 @@ public:
if (newIntervals) {
req.getIntervals() = std::move(newIntervals.get());
} else {
- leftResult._hasEmptyInterval = true;
+ leftResult->_hasEmptyInterval = true;
break;
}
}
@@ -458,7 +448,7 @@ public:
return {};
}
- leftResult._hasIntersected = true;
+ leftResult->_hasIntersected = true;
return leftResult;
}
@@ -537,32 +527,40 @@ public:
rightPath.is<PathIdentity>()) {
// leftPath = Id, rightPath = Traverse Id.
combineIntervalsDNF(false /*intersect*/, leftIntervals, newInterval);
- leftResult._retainPredicate = true;
+ leftResult->_retainPredicate = true;
return leftResult;
} else if (const auto rightTraversePtr = rightPath.cast<PathTraverse>();
rightTraversePtr != nullptr && rightTraversePtr->getPath().is<PathIdentity>() &&
leftPath.is<PathIdentity>()) {
// leftPath = Traverse Id, rightPath = Id.
combineIntervalsDNF(false /*intersect*/, rightIntervals, newInterval);
- rightResult._retainPredicate = true;
+ rightResult->_retainPredicate = true;
return rightResult;
}
return {};
}
- PartialSchemaReqConversion transport(const ABT& n,
- const PathComposeM& pathComposeM,
- PartialSchemaReqConversion leftResult,
- PartialSchemaReqConversion rightResult) {
+ ResultType transport(const ABT& n,
+ const PathComposeM& pathComposeM,
+ ResultType leftResult,
+ ResultType rightResult) {
+ if (!_isFilterContext) {
+ return {};
+ }
+
return handleComposition(
true /*isMultiplicative*/, std::move(leftResult), std::move(rightResult));
}
- PartialSchemaReqConversion transport(const ABT& n,
- const PathComposeA& pathComposeA,
- PartialSchemaReqConversion leftResult,
- PartialSchemaReqConversion rightResult) {
+ ResultType transport(const ABT& n,
+ const PathComposeA& pathComposeA,
+ ResultType leftResult,
+ ResultType rightResult) {
+ if (!_isFilterContext) {
+ return {};
+ }
+
const auto& path1 = pathComposeA.getPath1();
const auto& path2 = pathComposeA.getPath2();
const auto& eqNull = make<PathCompare>(Operations::Eq, Constant::null());
@@ -574,9 +572,9 @@ public:
auto intervalExpr = IntervalReqExpr::makeSingularDNF(IntervalRequirement{
{true /*inclusive*/, Constant::null()}, {true /*inclusive*/, Constant::null()}});
- return {PartialSchemaRequirements{
+ return {{PartialSchemaRequirements{
{PartialSchemaKey{},
- PartialSchemaRequirement{"" /*boundProjectionName*/, std::move(intervalExpr)}}}};
+ PartialSchemaRequirement{"" /*boundProjectionName*/, std::move(intervalExpr)}}}}};
}
return handleComposition(
@@ -584,19 +582,18 @@ public:
}
template <class T>
- static PartialSchemaReqConversion handleGetAndTraverse(const ABT& n,
- PartialSchemaReqConversion inputResult) {
- if (!inputResult._success) {
+ static ResultType handleGetAndTraverse(const ABT& n, ResultType inputResult) {
+ if (!inputResult) {
return {};
}
- if (inputResult._bound.has_value()) {
+ if (inputResult->_bound.has_value()) {
return {};
}
// New map has keys with appended paths.
PartialSchemaRequirements newMap;
- for (auto& entry : inputResult._reqMap) {
+ for (auto& entry : inputResult->_reqMap) {
if (!entry.first._projectionName.empty()) {
return {};
}
@@ -611,41 +608,39 @@ public:
newMap.emplace(PartialSchemaKey{"", std::move(path)}, std::move(entry.second));
}
- inputResult._reqMap = std::move(newMap);
+ inputResult->_reqMap = std::move(newMap);
return inputResult;
}
- PartialSchemaReqConversion transport(const ABT& n,
- const PathGet& pathGet,
- PartialSchemaReqConversion inputResult) {
+ ResultType transport(const ABT& n, const PathGet& pathGet, ResultType inputResult) {
return handleGetAndTraverse<PathGet>(n, std::move(inputResult));
}
- PartialSchemaReqConversion transport(const ABT& n,
- const PathTraverse& pathTraverse,
- PartialSchemaReqConversion inputResult) {
- if (inputResult._reqMap.size() > 1) {
+ ResultType transport(const ABT& n, const PathTraverse& pathTraverse, ResultType inputResult) {
+ if (!inputResult) {
+ return {};
+ }
+ if (inputResult->_reqMap.size() > 1) {
// Cannot append traverse if we have more than one requirement.
return {};
}
- PartialSchemaReqConversion result =
- handleGetAndTraverse<PathTraverse>(n, std::move(inputResult));
- result._hasTraversed = true;
+ auto result = handleGetAndTraverse<PathTraverse>(n, std::move(inputResult));
+ if (result) {
+ result->_hasTraversed = true;
+ }
return result;
}
- PartialSchemaReqConversion transport(const ABT& n,
- const PathCompare& pathCompare,
- PartialSchemaReqConversion inputResult) {
- if (!inputResult._success) {
+ ResultType transport(const ABT& n, const PathCompare& pathCompare, ResultType inputResult) {
+ if (!inputResult) {
return {};
}
- if (!inputResult._bound.has_value() || !inputResult._reqMap.empty()) {
+ if (!inputResult->_bound.has_value() || !inputResult->_reqMap.empty()) {
return {};
}
- const auto& bound = inputResult._bound;
+ const auto& bound = inputResult->_bound;
bool lowBoundInclusive = false;
boost::optional<ABT> lowBound;
bool highBoundInclusive = false;
@@ -681,51 +676,53 @@ public:
auto intervalExpr = IntervalReqExpr::makeSingularDNF(IntervalRequirement{
{lowBoundInclusive, std::move(lowBound)}, {highBoundInclusive, std::move(highBound)}});
- return {PartialSchemaRequirements{
+ return {{PartialSchemaRequirements{
{PartialSchemaKey{},
- PartialSchemaRequirement{"" /*boundProjectionName*/, std::move(intervalExpr)}}}};
+ PartialSchemaRequirement{"" /*boundProjectionName*/, std::move(intervalExpr)}}}}};
}
- PartialSchemaReqConversion transport(const ABT& n, const PathIdentity& pathIdentity) {
- return {PartialSchemaRequirements{{{}, {}}}};
+ ResultType transport(const ABT& n, const PathIdentity& pathIdentity) {
+ return {{PartialSchemaRequirements{{{}, {}}}}};
}
- PartialSchemaReqConversion transport(const ABT& n, const Constant& c) {
+ ResultType transport(const ABT& n, const Constant& c) {
if (c.isNull()) {
// Cannot create bounds with just NULL.
return {};
}
- return {n};
+ return {{n}};
}
template <typename T, typename... Ts>
- PartialSchemaReqConversion transport(const ABT& n, const T& node, Ts&&...) {
+ ResultType transport(const ABT& n, const T& node, Ts&&...) {
if constexpr (std::is_base_of_v<ExpressionSyntaxSort, T>) {
// We allow expressions to participate in bounds.
- return {n};
+ return {{n}};
}
// General case. Reject conversion.
return {};
}
- PartialSchemaReqConversion convert(const ABT& input) {
+ ResultType convert(const ABT& input) {
return algebra::transport<true>(input, *this);
}
+
+private:
+ const bool _isFilterContext;
};
-PartialSchemaReqConversion convertExprToPartialSchemaReq(const ABT& expr) {
- PartialSchemaReqConverter converter;
- PartialSchemaReqConversion result = converter.convert(expr);
- if (result._reqMap.empty()) {
- result._success = false;
- return result;
+boost::optional<PartialSchemaReqConversion> convertExprToPartialSchemaReq(
+ const ABT& expr, const bool isFilterContext) {
+ PartialSchemaReqConverter converter(isFilterContext);
+ auto result = converter.convert(expr);
+ if (!result || result->_reqMap.empty()) {
+ return {};
}
- for (const auto& entry : result._reqMap) {
+ for (const auto& entry : result->_reqMap) {
if (entry.first.emptyPath() && isIntervalReqFullyOpenDNF(entry.second.getIntervals())) {
// We need to determine either path or interval (or both).
- result._success = false;
- return result;
+ return {};
}
}
return result;
diff --git a/src/mongo/db/query/optimizer/utils/utils.h b/src/mongo/db/query/optimizer/utils/utils.h
index 473e2bbbd70..d3164d10db6 100644
--- a/src/mongo/db/query/optimizer/utils/utils.h
+++ b/src/mongo/db/query/optimizer/utils/utils.h
@@ -154,13 +154,9 @@ private:
};
struct PartialSchemaReqConversion {
- PartialSchemaReqConversion();
PartialSchemaReqConversion(PartialSchemaRequirements reqMap);
PartialSchemaReqConversion(ABT bound);
- // Is our current bottom-up conversion successful. If not shortcut to top.
- bool _success;
-
// If set, contains a Constant or Variable bound of an (yet unknown) interval.
boost::optional<ABT> _bound;
@@ -185,9 +181,11 @@ struct PartialSchemaReqConversion {
/**
* Takes an expression that comes from a Filter or Evaluation node, and attempts to convert
* to a PartialSchemaReqConversion. This is done independent of the availability of indexes.
- * Essentially this means to extract intervals over paths whenever possible.
+ * Essentially this means to extract intervals over paths whenever possible. If the conversion is
+ * not possible, an empty result is returned.
*/
-PartialSchemaReqConversion convertExprToPartialSchemaReq(const ABT& expr);
+boost::optional<PartialSchemaReqConversion> convertExprToPartialSchemaReq(const ABT& expr,
+ bool isFilterContext);
bool intersectPartialSchemaReq(PartialSchemaRequirements& target,
const PartialSchemaRequirements& source,
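The new signature replaces the old '_success' member with an engaged/empty boost::optional, and every caller in this patch now tests the optional instead of a flag. A minimal standalone sketch of that calling convention (illustrative names, not the real optimizer types):

#include <boost/optional.hpp>
#include <string>

struct ConversionSketch {
    std::string reqMap;  // stand-in for PartialSchemaRequirements
};

// An empty optional now plays the role of the removed '_success == false' state.
boost::optional<ConversionSketch> convertSketch(const std::string& expr) {
    if (expr.empty()) {
        return {};  // conversion rejected
    }
    return ConversionSketch{expr};
}

void callerSketch(const std::string& expr) {
    auto conversion = convertSketch(expr);
    if (!conversion) {
        return;  // previously: if (!conversion._success) return;
    }
    // ... use conversion->reqMap, mirroring the conversion->_reqMap accesses above.
}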
diff --git a/src/mongo/db/query/plan_cache_key_factory.cpp b/src/mongo/db/query/plan_cache_key_factory.cpp
index 6b154b29105..b330fa5ccd6 100644
--- a/src/mongo/db/query/plan_cache_key_factory.cpp
+++ b/src/mongo/db/query/plan_cache_key_factory.cpp
@@ -89,12 +89,6 @@ PlanCacheKeyInfo makePlanCacheKeyInfo(const CanonicalQuery& query,
return PlanCacheKeyInfo(shapeString, indexabilityKeyBuilder.str());
}
-PlanCacheKey make(const CanonicalQuery& query,
- const CollectionPtr& collection,
- PlanCacheKeyTag<PlanCacheKey>) {
- return {makePlanCacheKeyInfo(query, collection)};
-}
-
namespace {
/**
* Returns the highest index commit timestamp associated with an index on 'collection' that is
@@ -129,24 +123,62 @@ boost::optional<Timestamp> computeNewestVisibleIndexTimestamp(OperationContext*
return currentNewestVisible.isNull() ? boost::optional<Timestamp>{} : currentNewestVisible;
}
+
+sbe::PlanCacheKeyCollectionState computeCollectionState(OperationContext* opCtx,
+ const CollectionPtr& collection,
+ bool isSecondaryColl) {
+ boost::optional<sbe::PlanCacheKeyShardingEpoch> keyShardingEpoch;
+ // We don't version secondary collections in the current shard versioning protocol. Also, since
+ // currently we only push down $lookup to SBE when secondary collections (and main collection)
+ // are unsharded, it's OK to not encode the sharding information here.
+ if (!isSecondaryColl) {
+ const auto shardVersion{
+ OperationShardingState::get(opCtx).getShardVersion(collection->ns())};
+ if (shardVersion) {
+ keyShardingEpoch =
+ sbe::PlanCacheKeyShardingEpoch{shardVersion->epoch(), shardVersion->getTimestamp()};
+ }
+ }
+ return {collection->uuid(),
+ CollectionQueryInfo::get(collection).getPlanCacheInvalidatorVersion(),
+ plan_cache_detail::computeNewestVisibleIndexTimestamp(opCtx, collection),
+ keyShardingEpoch};
+}
} // namespace
+PlanCacheKey make(const CanonicalQuery& query,
+ const CollectionPtr& collection,
+ PlanCacheKeyTag<PlanCacheKey> tag) {
+ return {plan_cache_detail::makePlanCacheKeyInfo(query, collection)};
+}
+
sbe::PlanCacheKey make(const CanonicalQuery& query,
const CollectionPtr& collection,
- PlanCacheKeyTag<sbe::PlanCacheKey>) {
- OperationContext* opCtx = query.getOpCtx();
- auto collectionVersion = CollectionQueryInfo::get(collection).getPlanCacheInvalidatorVersion();
- const auto shardVersion{OperationShardingState::get(opCtx).getShardVersion(collection->ns())};
- const auto keyShardingEpoch = shardVersion
- ? boost::make_optional(
- sbe::PlanCacheKeyShardingEpoch{shardVersion->epoch(), shardVersion->getTimestamp()})
- : boost::none;
-
- return {makePlanCacheKeyInfo(query, collection),
- collection->uuid(),
- collectionVersion,
- computeNewestVisibleIndexTimestamp(opCtx, collection),
- keyShardingEpoch};
+ PlanCacheKeyTag<sbe::PlanCacheKey> tag) {
+ return plan_cache_key_factory::make(query, MultipleCollectionAccessor(collection));
}
} // namespace plan_cache_detail
+
+namespace plan_cache_key_factory {
+sbe::PlanCacheKey make(const CanonicalQuery& query, const MultipleCollectionAccessor& collections) {
+ OperationContext* opCtx = query.getOpCtx();
+ auto mainCollectionState = plan_cache_detail::computeCollectionState(
+ opCtx, collections.getMainCollection(), false /* isSecondaryColl */);
+ std::vector<sbe::PlanCacheKeyCollectionState> secondaryCollectionStates;
+ secondaryCollectionStates.reserve(collections.getSecondaryCollections().size());
+ // We always use the collection order saved in MultipleCollectionAccessor to populate the plan
+ // cache key, which is ordered by the secondary collection namespaces.
+ for (auto& [_, collection] : collections.getSecondaryCollections()) {
+ if (collection) {
+ secondaryCollectionStates.emplace_back(plan_cache_detail::computeCollectionState(
+ opCtx, collection, true /* isSecondaryColl */));
+ }
+ }
+
+ return {plan_cache_detail::makePlanCacheKeyInfo(query, collections.getMainCollection()),
+ std::move(mainCollectionState),
+ std::move(secondaryCollectionStates)};
+}
+} // namespace plan_cache_key_factory
+
} // namespace mongo
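A standalone sketch of how the secondary-collection states are assembled deterministically (plain std types; the grounded parts are the namespace ordering and the null-collection skip noted in the comments above, while the concrete container type is an assumption):

#include <cstddef>
#include <map>
#include <optional>
#include <string>
#include <vector>

struct CollectionStateSketch {
    std::size_t uuidHash;  // stand-in for the collection UUID
    std::size_t version;   // plan cache invalidator version
};

// Iterating a map keyed by namespace yields a stable, namespace-sorted order, so the same
// query over the same collections always produces the same sequence of secondary states.
std::vector<CollectionStateSketch> secondaryStatesSketch(
    const std::map<std::string, std::optional<CollectionStateSketch>>& secondaries) {
    std::vector<CollectionStateSketch> states;
    states.reserve(secondaries.size());
    for (const auto& [nss, state] : secondaries) {
        if (state) {  // mirrors the 'if (collection)' null check above
            states.push_back(*state);
        }
    }
    return states;
}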
diff --git a/src/mongo/db/query/plan_cache_key_factory.h b/src/mongo/db/query/plan_cache_key_factory.h
index 8d811793211..663297093c7 100644
--- a/src/mongo/db/query/plan_cache_key_factory.h
+++ b/src/mongo/db/query/plan_cache_key_factory.h
@@ -52,14 +52,14 @@ template <typename KeyType>
struct PlanCacheKeyTag {};
/**
- * Creates a key for the classic plan cache from the canonical query and collection instances.
+ * Creates a key for the classic plan cache from the canonical query and a single collection.
*/
PlanCacheKey make(const CanonicalQuery& query,
const CollectionPtr& collection,
PlanCacheKeyTag<PlanCacheKey> tag);
/**
- * Creates a key for the SBE plan cache from the canonical query and collection instances.
+ * Similar to above, but for the SBE plan cache key.
*/
sbe::PlanCacheKey make(const CanonicalQuery& query,
const CollectionPtr& collection,
@@ -77,5 +77,12 @@ template <typename Key>
Key make(const CanonicalQuery& query, const CollectionPtr& collection) {
return plan_cache_detail::make(query, collection, plan_cache_detail::PlanCacheKeyTag<Key>{});
}
+
+/**
+ * Similar to the above, but makes an SBE plan cache key for agg queries that might involve
+ * multiple collections.
+ */
+sbe::PlanCacheKey make(const CanonicalQuery& query, const MultipleCollectionAccessor& collections);
+
} // namespace plan_cache_key_factory
} // namespace mongo
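For reference, these are the two call shapes the rest of the patch uses (taken from get_executor.cpp and sbe_cached_solution_planner.cpp above):

    // Classic plan cache key, single collection:
    auto classicKey = plan_cache_key_factory::make<PlanCacheKey>(*_cq, _collection);
    // SBE plan cache key over the main and secondary collections:
    auto sbeKey = plan_cache_key_factory::make(*_cq, _collections);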
diff --git a/src/mongo/db/query/query_request_helper.h b/src/mongo/db/query/query_request_helper.h
index bfbfbca8ec1..4edad47e067 100644
--- a/src/mongo/db/query/query_request_helper.h
+++ b/src/mongo/db/query/query_request_helper.h
@@ -40,14 +40,13 @@
namespace mongo {
-class QueryMessage;
class Status;
template <typename T>
class StatusWith;
/**
- * Parses the QueryMessage or find command received from the user and makes the various fields
- * more easily accessible.
+ * Parses the find command received from the user and makes the various fields more easily
+ * accessible.
*/
namespace query_request_helper {
diff --git a/src/mongo/db/query/sbe_cached_solution_planner.cpp b/src/mongo/db/query/sbe_cached_solution_planner.cpp
index 5f1b8f008d6..0ecd5ba50f5 100644
--- a/src/mongo/db/query/sbe_cached_solution_planner.cpp
+++ b/src/mongo/db/query/sbe_cached_solution_planner.cpp
@@ -53,10 +53,17 @@ CandidatePlans CachedSolutionPlanner::plan(
// If the cached plan is accepted we'd like to keep the results from the trials even if there
// are parts of agg pipelines being lowered into SBE, so we run the trial with the extended
- // plan. This works because TrialRunTracker, attached to HashAgg stage, tracks as "results" the
- // results of its child stage. Thus, we can use the number of reads the plan was cached with
- // during multiplanning even though multiplanning ran trials of pre-extended plans.
- if (!_cq.pipeline().empty()) {
+ // plan. This works because TrialRunTracker, attached to HashAgg stage in $group queries, tracks
+ // as "results" the results of its child stage. For $lookup queries, the TrialRunTracker will
+ // only track the number of reads from the local side. Thus, we can use the number of reads the
+ // plan was cached with during multiplanning even though multiplanning ran trials of
+ // pre-extended plans.
+ //
+ // TODO SERVER-61507: Remove canUseSbePlanCache check once $group pushdown is integrated with
+ // SBE plan cache.
+ if (!_cq.pipeline().empty() &&
+ !(feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV() &&
+ canonical_query_encoder::canUseSbePlanCache(_cq))) {
_yieldPolicy->clearRegisteredPlans();
auto secondaryCollectionsInfo =
fillOutSecondaryCollectionsInformation(_opCtx, _collections, &_cq);
@@ -184,7 +191,7 @@ CandidatePlans CachedSolutionPlanner::replan(bool shouldCache, std::string reaso
cache->deactivate(plan_cache_key_factory::make<mongo::PlanCacheKey>(_cq, mainColl));
if (feature_flags::gFeatureFlagSbePlanCache.isEnabledAndIgnoreFCV()) {
auto&& sbePlanCache = sbe::getPlanCache(_opCtx);
- sbePlanCache.deactivate(plan_cache_key_factory::make<sbe::PlanCacheKey>(_cq, mainColl));
+ sbePlanCache.deactivate(plan_cache_key_factory::make(_cq, _collections));
}
}
diff --git a/src/mongo/db/query/sbe_multi_planner.cpp b/src/mongo/db/query/sbe_multi_planner.cpp
index b9966e74683..c4ba4f7efad 100644
--- a/src/mongo/db/query/sbe_multi_planner.cpp
+++ b/src/mongo/db/query/sbe_multi_planner.cpp
@@ -130,13 +130,13 @@ CandidatePlans MultiPlanner::finalizeExecutionPlans(
winner.root->open(false);
}
- // Writes a cache entry for the winning plan to the plan cache if possible.
- plan_cache_util::updatePlanCache(_opCtx,
- _collections.getMainCollection(),
- _cachingMode,
- _cq,
- std::move(decision),
- candidates);
+ // If there is a pushed down pipeline that cannot use the SBE plan cache, write a cache entry
+ // before extending the pipeline.
+ // TODO SERVER-61507: Remove this block once $group pushdown is integrated with SBE plan cache.
+ if (!canonical_query_encoder::canUseSbePlanCache(_cq)) {
+ plan_cache_util::updatePlanCache(
+ _opCtx, _collections, _cachingMode, _cq, std::move(decision), candidates);
+ }
// Extend the winning candidate with the agg pipeline and rebuild the execution tree. Because
// the trial was done with find-only part of the query, we cannot reuse the results. The
@@ -152,10 +152,16 @@ CandidatePlans MultiPlanner::finalizeExecutionPlans(
// The winner might have been replanned. So, pass through the replanning reason to the new
// plan.
data.replanReason = std::move(winner.data.replanReason);
+
+ // We need to clone the plan here for the plan cache to use. The clone will be stored in the
+ // cache prior to preparation, whereas the original copy of the tree will be prepared and
+ // used to execute this query.
+ auto clonedPlan = std::make_pair(rootStage->clone(), stage_builder::PlanStageData(data));
stage_builder::prepareSlotBasedExecutableTree(
_opCtx, rootStage.get(), &data, _cq, _collections, _yieldPolicy);
candidates[winnerIdx] = sbe::plan_ranker::CandidatePlan{
std::move(solution), std::move(rootStage), std::move(data)};
+ candidates[winnerIdx].clonedPlan.emplace(std::move(clonedPlan));
candidates[winnerIdx].root->open(false);
if (_cq.getExplain()) {
@@ -173,6 +179,16 @@ CandidatePlans MultiPlanner::finalizeExecutionPlans(
}
}
+ // If the pipeline can use the SBE plan cache, or there is no pushed down pipeline, write a cache
+ // entry after extending the pipeline.
+ // TODO SERVER-61507: Remove canUseSbePlanCache check once $group pushdown is
+ // integrated with SBE plan cache.
+ if (canonical_query_encoder::canUseSbePlanCache(_cq)) {
+ // Writes a cache entry for the winning plan to the plan cache if possible.
+ plan_cache_util::updatePlanCache(
+ _opCtx, _collections, _cachingMode, _cq, std::move(decision), candidates);
+ }
+
return {std::move(candidates), winnerIdx};
}
} // namespace mongo::sbe
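A minimal standalone sketch of the clone-before-prepare ordering introduced above (generic stand-in types, not the real SBE classes; preparation changes the tree for execution, which is why the copy destined for the cache is taken first):

#include <memory>
#include <utility>

struct PlanSketch {
    bool prepared = false;
    std::unique_ptr<PlanSketch> clone() const {
        return std::make_unique<PlanSketch>(*this);
    }
};

void prepareForExecution(PlanSketch& plan) {
    plan.prepared = true;  // stands in for stage_builder::prepareSlotBasedExecutableTree()
}

std::pair<std::unique_ptr<PlanSketch>, PlanSketch> finalizeSketch(PlanSketch winner) {
    auto forCache = winner.clone();  // the unprepared copy goes to the plan cache
    prepareForExecution(winner);     // the original is prepared and used to run the query
    return {std::move(forCache), std::move(winner)};
}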
diff --git a/src/mongo/db/query/sbe_plan_cache.cpp b/src/mongo/db/query/sbe_plan_cache.cpp
index 0d7a90e9ed5..bbd6db6418a 100644
--- a/src/mongo/db/query/sbe_plan_cache.cpp
+++ b/src/mongo/db/query/sbe_plan_cache.cpp
@@ -160,8 +160,17 @@ void clearPlanCacheEntriesWith(ServiceContext* serviceCtx,
sbe::getPlanCache(serviceCtx)
.removeIf([&collectionUuid, collectionVersion](const PlanCacheKey& key,
const sbe::PlanCacheEntry& entry) {
- return key.getCollectionVersion() == collectionVersion &&
- key.getCollectionUuid() == collectionUuid;
+ if (key.getMainCollectionState().version == collectionVersion &&
+ key.getMainCollectionState().uuid == collectionUuid) {
+ return true;
+ }
+ for (auto& collectionState : key.getSecondaryCollectionStates()) {
+ if (collectionState.version == collectionVersion &&
+ collectionState.uuid == collectionUuid) {
+ return true;
+ }
+ }
+ return false;
});
LOGV2_DEBUG(6006600,
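A standalone sketch of the widened invalidation predicate above: an entry is now removed when either the main collection state or any secondary collection state matches the (uuid, version) pair being cleared (simple stand-in types, not the real key classes):

#include <cstddef>
#include <vector>

struct StateSketch {
    std::size_t uuid;     // stand-in for the collection UUID
    std::size_t version;  // plan cache invalidator version
};

bool shouldRemoveSketch(const StateSketch& main,
                        const std::vector<StateSketch>& secondaries,
                        std::size_t uuid,
                        std::size_t version) {
    if (main.uuid == uuid && main.version == version) {
        return true;
    }
    for (const auto& s : secondaries) {
        if (s.uuid == uuid && s.version == version) {
            return true;
        }
    }
    return false;
}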
diff --git a/src/mongo/db/query/sbe_plan_cache.h b/src/mongo/db/query/sbe_plan_cache.h
index 6e7853fa817..b33488ade0f 100644
--- a/src/mongo/db/query/sbe_plan_cache.h
+++ b/src/mongo/db/query/sbe_plan_cache.h
@@ -56,35 +56,91 @@ struct PlanCacheKeyShardingEpoch {
Timestamp ts;
};
+struct PlanCacheKeyCollectionState {
+ bool operator==(const PlanCacheKeyCollectionState& other) const {
+ return other.uuid == uuid && other.version == version &&
+ other.newestVisibleIndexTimestamp == newestVisibleIndexTimestamp &&
+ other.shardVersion == shardVersion;
+ }
+
+ size_t hashCode() const {
+ size_t hash = UUID::Hash{}(uuid);
+ boost::hash_combine(hash, version);
+ if (newestVisibleIndexTimestamp) {
+ boost::hash_combine(hash, newestVisibleIndexTimestamp->asULL());
+ }
+ if (shardVersion) {
+ shardVersion->epoch.hash_combine(hash);
+ boost::hash_combine(hash, shardVersion->ts.asULL());
+ }
+ return hash;
+ }
+
+ UUID uuid;
+
+ // There is a special collection versioning scheme associated with the SBE plan cache. Whenever
+ // an action against a collection is made which should invalidate the plan cache entries for the
+ // collection -- in particular index builds and drops -- the version number is incremented.
+ // Readers specify the version number that they are reading at so that they only pick up cache
+ // entries with the right set of indexes.
+ //
+ // We also clean up all cache entries for a particular (collectionUuid, versionNumber) pair when
+ // all readers seeing this version of the collection have drained.
+ size_t version;
+
+ // The 'version' value alone is not currently sufficient to ensure that the indexes
+ // visible to the reader are consistent with the indexes present in the cache entry. The reason
+ // is that all readers see the latest copy-on-write version of the 'Collection' object, even
+ // though they are allowed to read at an older timestamp, potentially at a time before an index
+ // build completed.
+ //
+ // To solve this problem, we incorporate the timestamp of the newest index visible to the reader
+ // into the plan cache key. This ensures that the set of indexes visible to the reader match
+ // those present in the plan cache entry, preventing a situation where the plan cache entry
+ // reflects a newer version of the index catalog than the one visible to the reader.
+ //
+ // In the future, this could instead be solved with point-in-time catalog lookups.
+ boost::optional<Timestamp> newestVisibleIndexTimestamp;
+
+ // Ensures that a cached SBE plan cannot be reused if the collection has since become sharded or
+ // changed its shard key. The cached plan may no longer be valid after sharding or shard key
+ // refining, since the structure of the plan depends on whether the collection is sharded and,
+ // if it is sharded, on the shard key.
+ const boost::optional<PlanCacheKeyShardingEpoch> shardVersion;
+};
+
/**
* Represents the "key" used in the PlanCache mapping from query shape -> query plan.
*/
class PlanCacheKey {
public:
PlanCacheKey(PlanCacheKeyInfo&& info,
- UUID collectionUuid,
- size_t collectionVersion,
- boost::optional<Timestamp> newestVisibleIndexTimestamp,
- boost::optional<PlanCacheKeyShardingEpoch> shardVersion)
+ PlanCacheKeyCollectionState mainCollectionState,
+ std::vector<PlanCacheKeyCollectionState> secondaryCollectionStates)
: _info{std::move(info)},
- _collectionUuid{collectionUuid},
- _collectionVersion{collectionVersion},
- _newestVisibleIndexTimestamp{newestVisibleIndexTimestamp},
- _shardVersion{shardVersion} {}
+ _mainCollectionState{std::move(mainCollectionState)},
+ _secondaryCollectionStates{std::move(secondaryCollectionStates)} {
+ // For secondary collections, we don't encode shard version in the key since we don't shard
+ // version these collections. This is OK because we only push down $lookup queries to SBE
+ // when involved collections are unsharded.
+ for (const auto& collState : _secondaryCollectionStates) {
+ tassert(6443202,
+ "Secondary collections should not encode shard version in plan cache key",
+ collState.shardVersion == boost::none);
+ }
+ }
- const UUID& getCollectionUuid() const {
- return _collectionUuid;
+ const PlanCacheKeyCollectionState& getMainCollectionState() const {
+ return _mainCollectionState;
}
- size_t getCollectionVersion() const {
- return _collectionVersion;
+ const std::vector<PlanCacheKeyCollectionState>& getSecondaryCollectionStates() const {
+ return _secondaryCollectionStates;
}
bool operator==(const PlanCacheKey& other) const {
- return other._collectionVersion == _collectionVersion &&
- other._collectionUuid == _collectionUuid &&
- other._newestVisibleIndexTimestamp == _newestVisibleIndexTimestamp &&
- other._info == _info && other._shardVersion == _shardVersion;
+ return other._info == _info && other._mainCollectionState == _mainCollectionState &&
+ other._secondaryCollectionStates == _secondaryCollectionStates;
}
bool operator!=(const PlanCacheKey& other) const {
@@ -97,14 +153,9 @@ public:
uint32_t planCacheKeyHash() const {
size_t hash = _info.planCacheKeyHash();
- boost::hash_combine(hash, UUID::Hash{}(_collectionUuid));
- boost::hash_combine(hash, _collectionVersion);
- if (_newestVisibleIndexTimestamp) {
- boost::hash_combine(hash, _newestVisibleIndexTimestamp->asULL());
- }
- if (_shardVersion) {
- _shardVersion->epoch.hash_combine(hash);
- boost::hash_combine(hash, _shardVersion->ts.asULL());
+ boost::hash_combine(hash, _mainCollectionState.hashCode());
+ for (auto& collectionState : _secondaryCollectionStates) {
+ boost::hash_combine(hash, collectionState.hashCode());
}
return hash;
}
@@ -117,37 +168,12 @@ private:
// Contains the actual encoding of the query shape as well as the index discriminators.
const PlanCacheKeyInfo _info;
- const UUID _collectionUuid;
-
- // There is a special collection versioning scheme associated with the SBE plan cache. Whenever
- // an action against a collection is made which should invalidate the plan cache entries for the
- // collection -- in particular index builds and drops -- the version number is incremented.
- // Readers specify the version number that they are reading at so that they only pick up cache
- // entries with the right set of indexes.
- //
- // We also clean up all cache entries for a particular (collectionUuid, versionNumber) pair when
- // all readers seeing this version of the collection have drained.
- const size_t _collectionVersion;
-
- // The '_collectionVersion' is not currently sufficient in order to ensure that the indexes
- // visible to the reader are consistent with the indexes present in the cache entry. The reason
- // is that all readers see the latest copy-on-write version of the 'Collection' object, even
- // though they are allowed to read at an older timestamp, potentially at a time before an index
- // build completed.
- //
- // To solve this problem, we incorporate the timestamp of the newest index visible to the reader
- // into the plan cache key. This ensures that the set of indexes visible to the reader match
- // those present in the plan cache entry, preventing a situation where the plan cache entry
- // reflects a newer version of the index catalog than the one visible to the reader.
- //
- // In the future, this could instead be solved with point-in-time catalog lookups.
- const boost::optional<Timestamp> _newestVisibleIndexTimestamp;
+ const PlanCacheKeyCollectionState _mainCollectionState;
- // Ensures that a cached SBE plan cannot be reused if the collection has since become sharded or
- // changed its shard key. The cached plan may no longer be valid after sharding or shard key
- // refining since the structure of the plan depends on whether the collection is sharded, and if
- // sharded depends on the shard key.
- const boost::optional<PlanCacheKeyShardingEpoch> _shardVersion;
+ // To make sure the plan cache key matches, the secondary collection states need to be passed
+ // in a defined order. Currently, we use the collection order stored in
+ // MultipleCollectionAccessor, which is ordered by the collection namespaces.
+ const std::vector<PlanCacheKeyCollectionState> _secondaryCollectionStates;
};
class PlanCacheKeyHasher {
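The key change above is that the plan cache key now carries one state per involved collection and folds each state's hash into the overall key hash. A minimal sketch of that combining pattern, outside the MongoDB tree and with simplified stand-in types (CollState, uuidHash and combinedKeyHash are illustrative, not the real PlanCacheKeyCollectionState API):

#include <boost/functional/hash.hpp>
#include <cstddef>
#include <vector>

// Simplified stand-in for PlanCacheKeyCollectionState: only the fields needed to
// show how per-collection hashes are combined.
struct CollState {
    size_t uuidHash;  // stand-in for UUID::Hash{}(uuid)
    size_t version;   // SBE plan cache collection version

    size_t hashCode() const {
        size_t h = uuidHash;
        boost::hash_combine(h, version);
        return h;
    }
};

// Mirrors the shape of PlanCacheKey::planCacheKeyHash(): start from the query
// shape hash, then fold in the main collection followed by each secondary
// collection in its fixed (namespace-sorted) order.
size_t combinedKeyHash(size_t queryShapeHash,
                       const CollState& mainColl,
                       const std::vector<CollState>& secondaryColls) {
    size_t hash = queryShapeHash;
    boost::hash_combine(hash, mainColl.hashCode());
    for (const auto& coll : secondaryColls) {
        boost::hash_combine(hash, coll.hashCode());
    }
    return hash;
}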
diff --git a/src/mongo/db/query/sbe_sub_planner.cpp b/src/mongo/db/query/sbe_sub_planner.cpp
index e5e714ad3aa..c6ce37cb434 100644
--- a/src/mongo/db/query/sbe_sub_planner.cpp
+++ b/src/mongo/db/query/sbe_sub_planner.cpp
@@ -116,8 +116,9 @@ CandidatePlans SubPlanner::plan(
// TODO SERVER-61507: do it unconditionally when $group pushdown is integrated with the SBE plan
// cache.
- if (_cq.pipeline().empty()) {
- plan_cache_util::updatePlanCache(_opCtx, mainColl, _cq, *compositeSolution, *root, data);
+ if (canonical_query_encoder::canUseSbePlanCache(_cq)) {
+ plan_cache_util::updatePlanCache(
+ _opCtx, _collections, _cq, *compositeSolution, *root, data);
}
return {makeVector(plan_ranker::CandidatePlan{
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 7cae9ba2b01..4887982c95c 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -31,6 +31,7 @@
#include "mongo/db/repl/apply_ops.h"
#include "mongo/bson/util/bson_extract.h"
+#include "mongo/client/client_deprecated.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_holder.h"
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index dd261037b08..e380fbe6238 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -317,38 +317,43 @@ BaseCloner::AfterStageBehavior CollectionCloner::setupIndexBuildersForUnfinished
}
void CollectionCloner::runQuery() {
- // Non-resumable query.
- client_deprecated::Query query;
+ FindCommandRequest findCmd{_sourceDbAndUuid};
if (_resumeToken) {
// Resume the query from where we left off.
LOGV2_DEBUG(21133, 1, "Collection cloner will resume the last successful query");
- query.requestResumeToken(true).resumeAfter(_resumeToken.get());
+ findCmd.setRequestResumeToken(true);
+ findCmd.setResumeAfter(_resumeToken.get());
} else {
// New attempt at a resumable query.
LOGV2_DEBUG(21134, 1, "Collection cloner will run a new query");
- query.requestResumeToken(true);
+ findCmd.setRequestResumeToken(true);
}
- query.hint(BSON("$natural" << 1));
+
+ findCmd.setHint(BSON("$natural" << 1));
+ findCmd.setNoCursorTimeout(true);
+ findCmd.setReadConcern(ReadConcernArgs::kLocal);
+ if (_collectionClonerBatchSize) {
+ findCmd.setBatchSize(_collectionClonerBatchSize);
+ }
+
+ ExhaustMode exhaustMode = collectionClonerUsesExhaust ? ExhaustMode::kOn : ExhaustMode::kOff;
// We reset this every time we retry or resume a query.
// We distinguish the first batch from the rest so that we only store the remote cursor id
// the first time we get it.
_firstBatchOfQueryRound = true;
- getClient()->query_DEPRECATED(
- [this](DBClientCursorBatchIterator& iter) { handleNextBatch(iter); },
- _sourceDbAndUuid,
- BSONObj{},
- query,
- nullptr /* fieldsToReturn */,
- QueryOption_NoCursorTimeout | QueryOption_SecondaryOk |
- (collectionClonerUsesExhaust ? QueryOption_Exhaust : 0),
- _collectionClonerBatchSize,
- ReadConcernArgs::kLocal);
+ auto cursor = getClient()->find(
+ std::move(findCmd), ReadPreferenceSetting{ReadPreference::SecondaryPreferred}, exhaustMode);
+
+ // Process the results of the cursor one batch at a time.
+ while (cursor->more()) {
+ handleNextBatch(*cursor);
+ }
}
-void CollectionCloner::handleNextBatch(DBClientCursorBatchIterator& iter) {
+void CollectionCloner::handleNextBatch(DBClientCursor& cursor) {
{
stdx::lock_guard<InitialSyncSharedData> lk(*getSharedData());
if (!getSharedData()->getStatus(lk).isOK()) {
@@ -370,15 +375,15 @@ void CollectionCloner::handleNextBatch(DBClientCursorBatchIterator& iter) {
if (_firstBatchOfQueryRound) {
// Store the cursorId of the remote cursor.
- _remoteCursorId = iter.getCursorId();
+ _remoteCursorId = cursor.getCursorId();
}
_firstBatchOfQueryRound = false;
{
stdx::lock_guard<Latch> lk(_mutex);
_stats.receivedBatches++;
- while (iter.moreInCurrentBatch()) {
- _documentsToInsert.emplace_back(iter.nextSafe());
+ while (cursor.moreInCurrentBatch()) {
+ _documentsToInsert.emplace_back(cursor.nextSafe());
}
}
@@ -394,7 +399,7 @@ void CollectionCloner::handleNextBatch(DBClientCursorBatchIterator& iter) {
}
// Store the resume token for this batch.
- _resumeToken = iter.getPostBatchResumeToken();
+ _resumeToken = cursor.getPostBatchResumeToken();
initialSyncHangCollectionClonerAfterHandlingBatchResponse.executeIf(
[&](const BSONObj&) {
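With the deprecated callback API gone, the cloner drives the cursor directly: runQuery() advances it batch by batch and handleNextBatch() drains only the buffered documents. A sketch of that two-level loop (drainCursor is an illustrative helper, not part of the tree; the DBClientCursor calls are the ones used above):

#include <vector>

#include "mongo/bson/bsonobj.h"
#include "mongo/client/dbclient_cursor.h"

namespace mongo {
// Illustrative helper: drain a cursor one batch at a time, as runQuery() and
// handleNextBatch() now do between them.
void drainCursor(DBClientCursor& cursor, std::vector<BSONObj>& out) {
    while (cursor.more()) {                    // issues a getMore when the current batch is exhausted
        while (cursor.moreInCurrentBatch()) {  // consumes only documents already buffered client-side
            out.emplace_back(cursor.nextSafe());
        }
    }
}
}  // namespace mongo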
diff --git a/src/mongo/db/repl/collection_cloner.h b/src/mongo/db/repl/collection_cloner.h
index 80d8a9d72bc..085c6abdb3f 100644
--- a/src/mongo/db/repl/collection_cloner.h
+++ b/src/mongo/db/repl/collection_cloner.h
@@ -207,10 +207,10 @@ private:
AfterStageBehavior setupIndexBuildersForUnfinishedIndexesStage();
/**
- * Put all results from a query batch into a buffer to be inserted, and schedule
- * it to be inserted.
+ * Put all results from a query batch into a buffer to be inserted, and schedule it to be
+ * inserted.
*/
- void handleNextBatch(DBClientCursorBatchIterator& iter);
+ void handleNextBatch(DBClientCursor& cursor);
/**
* Called whenever there is a new batch of documents ready from the DBClientConnection.
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index e9ca22da35c..575035711e0 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -623,8 +623,6 @@ void OplogApplierImpl::_deriveOpsAndFillWriterVectors(
LogicalSessionIdMap<std::vector<OplogEntry*>> partialTxnOps;
CachedCollectionProperties collPropertiesCache;
- // Used to serialize writes to the tenant migrations donor and recipient namespaces.
- boost::optional<uint32_t> tenantMigrationsWriterId;
for (auto&& op : *ops) {
// If the operation's optime is before or the same as the beginApplyingOpTime we don't want
// to apply it, so don't include it in writerVectors.
@@ -706,19 +704,6 @@ void OplogApplierImpl::_deriveOpsAndFillWriterVectors(
continue;
}
- // Writes to the tenant migration namespaces must be serialized to preserve the order of
- // migration and access blocker states.
- if (op.getNss() == NamespaceString::kTenantMigrationDonorsNamespace ||
- op.getNss() == NamespaceString::kTenantMigrationRecipientsNamespace) {
- auto writerId = OplogApplierUtils::addToWriterVector(
- opCtx, &op, writerVectors, &collPropertiesCache, tenantMigrationsWriterId);
- if (!tenantMigrationsWriterId) {
- tenantMigrationsWriterId.emplace(writerId);
- } else {
- invariant(writerId == *tenantMigrationsWriterId);
- }
- continue;
- }
OplogApplierUtils::addToWriterVector(opCtx, &op, writerVectors, &collPropertiesCache);
}
}
diff --git a/src/mongo/db/repl/oplog_applier_impl_test.cpp b/src/mongo/db/repl/oplog_applier_impl_test.cpp
index 9485fc8ccc2..b734004bb28 100644
--- a/src/mongo/db/repl/oplog_applier_impl_test.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl_test.cpp
@@ -2644,42 +2644,6 @@ TEST_F(OplogApplierImplWithSlowAutoAdvancingClockTest, DoNotLogNonSlowOpApplicat
ASSERT_EQUALS(0, countTextFormatLogLinesContaining(expected.str()));
}
-TEST_F(OplogApplierImplTest, SerializeOplogApplicationOfWritesToTenantMigrationNamespaces) {
- auto writerPool = makeReplWriterPool();
- NoopOplogApplierObserver observer;
- TrackOpsAppliedApplier oplogApplier(
- nullptr, // executor
- nullptr, // oplogBuffer
- &observer,
- ReplicationCoordinator::get(_opCtx.get()),
- getConsistencyMarkers(),
- getStorageInterface(),
- repl::OplogApplier::Options(repl::OplogApplication::Mode::kSecondary),
- writerPool.get());
-
- const auto donorNss = NamespaceString::kTenantMigrationDonorsNamespace;
- const auto recipientNss = NamespaceString::kTenantMigrationRecipientsNamespace;
-
- std::vector<OplogEntry> opsToApply;
- opsToApply.push_back(
- makeDeleteDocumentOplogEntry({Timestamp(Seconds(2), 0), 1LL}, donorNss, BSON("_id" << 2)));
- opsToApply.push_back(makeInsertDocumentOplogEntry(
- {Timestamp(Seconds(3), 0), 1LL}, recipientNss, BSON("_id" << 3)));
- opsToApply.push_back(makeDeleteDocumentOplogEntry(
- {Timestamp(Seconds(4), 0), 1LL}, recipientNss, BSON("_id" << 3)));
- opsToApply.push_back(
- makeInsertDocumentOplogEntry({Timestamp(Seconds(5), 0), 1LL}, donorNss, BSON("_id" << 4)));
-
- ASSERT_OK(oplogApplier.applyOplogBatch(_opCtx.get(), opsToApply));
- const auto applied = oplogApplier.getOperationsApplied();
- ASSERT_EQ(4U, applied.size());
- ASSERT_BSONOBJ_EQ(opsToApply[0].getEntry().toBSON(), applied[0].getEntry().toBSON());
- ASSERT_BSONOBJ_EQ(opsToApply[1].getEntry().toBSON(), applied[1].getEntry().toBSON());
- ASSERT_BSONOBJ_EQ(opsToApply[2].getEntry().toBSON(), applied[2].getEntry().toBSON());
- ASSERT_BSONOBJ_EQ(opsToApply[3].getEntry().toBSON(), applied[3].getEntry().toBSON());
-}
-
-
class OplogApplierImplTxnTableTest : public OplogApplierImplTest {
public:
void setUp() override {
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index ea7e73a4033..6ec6c9778de 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -265,12 +265,8 @@ OpTime OplogFetcher::getLastOpTimeFetched_forTest() const {
return _getLastOpTimeFetched();
}
-BSONObj OplogFetcher::getFindQueryFilter_forTest() const {
- return _makeFindQueryFilter();
-}
-
-client_deprecated::Query OplogFetcher::getFindQuerySettings_forTest(long long findTimeout) const {
- return _makeFindQuerySettings(findTimeout);
+FindCommandRequest OplogFetcher::makeFindCmdRequest_forTest(long long findTimeout) const {
+ return _makeFindCmdRequest(findTimeout);
}
Milliseconds OplogFetcher::getAwaitDataTimeout_forTest() const {
@@ -584,46 +580,56 @@ AggregateCommandRequest OplogFetcher::_makeAggregateCommandRequest(long long max
return aggRequest;
}
-BSONObj OplogFetcher::_makeFindQueryFilter() const {
- BSONObjBuilder queryBob;
-
- auto lastOpTimeFetched = _getLastOpTimeFetched();
- BSONObjBuilder filterBob;
- filterBob.append("ts", BSON("$gte" << lastOpTimeFetched.getTimestamp()));
- // Handle caller-provided filter.
- if (!_config.queryFilter.isEmpty()) {
- filterBob.append(
- "$or",
- BSON_ARRAY(_config.queryFilter << BSON("ts" << lastOpTimeFetched.getTimestamp())));
+FindCommandRequest OplogFetcher::_makeFindCmdRequest(long long findTimeout) const {
+ FindCommandRequest findCmd{_nss};
+
+ // Construct the find command's filter and set it on the 'FindCommandRequest'.
+ {
+ BSONObjBuilder queryBob;
+
+ auto lastOpTimeFetched = _getLastOpTimeFetched();
+ BSONObjBuilder filterBob;
+ filterBob.append("ts", BSON("$gte" << lastOpTimeFetched.getTimestamp()));
+ // Handle caller-provided filter.
+ if (!_config.queryFilter.isEmpty()) {
+ filterBob.append(
+ "$or",
+ BSON_ARRAY(_config.queryFilter << BSON("ts" << lastOpTimeFetched.getTimestamp())));
+ }
+ findCmd.setFilter(filterBob.obj());
+ }
+
+ findCmd.setTailable(true);
+ findCmd.setAwaitData(true);
+ findCmd.setMaxTimeMS(findTimeout);
+
+ if (_config.batchSize) {
+ findCmd.setBatchSize(_config.batchSize);
}
- return filterBob.obj();
-}
-client_deprecated::Query OplogFetcher::_makeFindQuerySettings(long long findTimeout) const {
- auto query = client_deprecated::Query().maxTimeMS(findTimeout);
if (_config.requestResumeToken) {
- query.hint(BSON("$natural" << 1)).requestResumeToken(true);
+ findCmd.setHint(BSON("$natural" << 1));
+ findCmd.setRequestResumeToken(true);
}
auto lastCommittedWithCurrentTerm =
_dataReplicatorExternalState->getCurrentTermAndLastCommittedOpTime();
auto term = lastCommittedWithCurrentTerm.value;
if (term != OpTime::kUninitializedTerm) {
- query.term(term);
+ findCmd.setTerm(term);
}
if (_config.queryReadConcern.isEmpty()) {
// This ensures that the sync source waits for all earlier oplog writes to be visible.
// Since Timestamp(0, 0) isn't allowed, Timestamp(0, 1) is the minimal we can use.
- query.readConcern(BSON("level"
- << "local"
- << "afterClusterTime" << Timestamp(0, 1)));
+ findCmd.setReadConcern(BSON("level"
+ << "local"
+ << "afterClusterTime" << Timestamp(0, 1)));
} else {
// Caller-provided read concern.
- query.appendElements(_config.queryReadConcern.toBSON());
+ findCmd.setReadConcern(_config.queryReadConcern.toBSONInner());
}
-
- return query;
+ return findCmd;
}
Status OplogFetcher::_createNewCursor(bool initialFind) {
@@ -651,17 +657,9 @@ Status OplogFetcher::_createNewCursor(bool initialFind) {
}
_cursor = std::move(ret.getValue());
} else {
+ auto findCmd = _makeFindCmdRequest(maxTimeMs);
_cursor = std::make_unique<DBClientCursor>(
- _conn.get(),
- _nss,
- _makeFindQueryFilter(),
- _makeFindQuerySettings(maxTimeMs),
- 0 /* limit */,
- 0 /* nToSkip */,
- nullptr /* fieldsToReturn */,
- QueryOption_CursorTailable | QueryOption_AwaitData |
- (oplogFetcherUsesExhaust ? QueryOption_Exhaust : 0),
- _config.batchSize);
+ _conn.get(), std::move(findCmd), ReadPreferenceSetting{}, oplogFetcherUsesExhaust);
}
_firstBatch = true;
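For reference, a standalone sketch of the filter shape that _makeFindCmdRequest() assembles above (makeOplogFilter is an illustrative helper; the builder calls mirror the hunk):

#include "mongo/bson/timestamp.h"
#include "mongo/db/jsobj.h"

namespace mongo {
// Without a caller-provided filter the request filter is {ts: {$gte: <lastFetched>}}.
// With one, the $or keeps the resume point visible even when the caller's predicate
// would otherwise exclude it.
BSONObj makeOplogFilter(Timestamp lastFetchedTs, const BSONObj& callerFilter) {
    BSONObjBuilder filterBob;
    filterBob.append("ts", BSON("$gte" << lastFetchedTs));
    if (!callerFilter.isEmpty()) {
        filterBob.append("$or",
                         BSON_ARRAY(callerFilter << BSON("ts" << lastFetchedTs)));
    }
    return filterBob.obj();
}
}  // namespace mongo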
diff --git a/src/mongo/db/repl/oplog_fetcher.h b/src/mongo/db/repl/oplog_fetcher.h
index 07cdf982b38..2147eb9ebde 100644
--- a/src/mongo/db/repl/oplog_fetcher.h
+++ b/src/mongo/db/repl/oplog_fetcher.h
@@ -275,8 +275,7 @@ public:
/**
* Returns the `find` query run on the sync source's oplog.
*/
- BSONObj getFindQueryFilter_forTest() const;
- client_deprecated::Query getFindQuerySettings_forTest(long long findTimeout) const;
+ FindCommandRequest makeFindCmdRequest_forTest(long long findTimeout) const;
/**
* Returns the OpTime of the last oplog entry fetched and processed.
@@ -387,11 +386,9 @@ private:
/**
* This function will create the `find` query to issue to the sync source. It is provided with
- * whether this is the initial attempt to create the `find` query to determine what the find
- * timeout should be.
+ * the value to use as the "maxTimeMS" for the find command.
*/
- BSONObj _makeFindQueryFilter() const;
- client_deprecated::Query _makeFindQuerySettings(long long findTimeout) const;
+ FindCommandRequest _makeFindCmdRequest(long long findTimeout) const;
/**
* Gets the next batch from the exhaust cursor.
diff --git a/src/mongo/db/repl/oplog_fetcher_test.cpp b/src/mongo/db/repl/oplog_fetcher_test.cpp
index e98039a0f8a..adc09da1300 100644
--- a/src/mongo/db/repl/oplog_fetcher_test.cpp
+++ b/src/mongo/db/repl/oplog_fetcher_test.cpp
@@ -806,19 +806,25 @@ TEST_F(OplogFetcherTest,
auto oplogFetcher = makeOplogFetcher();
auto findTimeout = durationCount<Milliseconds>(oplogFetcher->getInitialFindMaxTime_forTest());
- auto filter = oplogFetcher->getFindQueryFilter_forTest();
+ auto findCmdRequest = oplogFetcher->makeFindCmdRequest_forTest(findTimeout);
+
+ auto filter = findCmdRequest.getFilter();
ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.getTimestamp())), filter);
- auto queryObj =
- (oplogFetcher->getFindQuerySettings_forTest(findTimeout)).getFullSettingsDeprecated();
- ASSERT_EQUALS(60000, queryObj.getIntField("$maxTimeMS"));
+ auto maxTimeMS = findCmdRequest.getMaxTimeMS();
+ ASSERT(maxTimeMS);
+ ASSERT_EQUALS(60000, *maxTimeMS);
- ASSERT_EQUALS(mongo::BSONType::Object, queryObj["readConcern"].type());
+ auto readConcern = findCmdRequest.getReadConcern();
+ ASSERT(readConcern);
ASSERT_BSONOBJ_EQ(BSON("level"
<< "local"
<< "afterClusterTime" << Timestamp(0, 1)),
- queryObj["readConcern"].Obj());
- ASSERT_EQUALS(dataReplicatorExternalState->currentTerm, queryObj["term"].numberLong());
+ *readConcern);
+
+ auto term = findCmdRequest.getTerm();
+ ASSERT(term);
+ ASSERT_EQUALS(dataReplicatorExternalState->currentTerm, *term);
}
TEST_F(OplogFetcherTest,
@@ -826,21 +832,26 @@ TEST_F(OplogFetcherTest,
dataReplicatorExternalState->currentTerm = OpTime::kUninitializedTerm;
auto oplogFetcher = makeOplogFetcher();
- auto filter = oplogFetcher->getFindQueryFilter_forTest();
- ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.getTimestamp())), filter);
-
// Test that the correct maxTimeMS is set if we are retrying the 'find' query.
auto findTimeout = durationCount<Milliseconds>(oplogFetcher->getRetriedFindMaxTime_forTest());
- auto queryObj =
- (oplogFetcher->getFindQuerySettings_forTest(findTimeout)).getFullSettingsDeprecated();
- ASSERT_EQUALS(2000, queryObj.getIntField("$maxTimeMS"));
+ auto findCmdRequest = oplogFetcher->makeFindCmdRequest_forTest(findTimeout);
- ASSERT_EQUALS(mongo::BSONType::Object, queryObj["readConcern"].type());
+ auto filter = findCmdRequest.getFilter();
+ ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.getTimestamp())), filter);
+
+ auto maxTimeMS = findCmdRequest.getMaxTimeMS();
+ ASSERT(maxTimeMS);
+ ASSERT_EQUALS(2000, *maxTimeMS);
+
+ auto readConcern = findCmdRequest.getReadConcern();
+ ASSERT(readConcern);
ASSERT_BSONOBJ_EQ(BSON("level"
<< "local"
<< "afterClusterTime" << Timestamp(0, 1)),
- queryObj["readConcern"].Obj());
- ASSERT_FALSE(queryObj.hasField("term"));
+ *readConcern);
+
+ auto term = findCmdRequest.getTerm();
+ ASSERT(!term);
}
TEST_F(
diff --git a/src/mongo/db/repl/tenant_collection_cloner.cpp b/src/mongo/db/repl/tenant_collection_cloner.cpp
index 0635903d48d..165538954bd 100644
--- a/src/mongo/db/repl/tenant_collection_cloner.cpp
+++ b/src/mongo/db/repl/tenant_collection_cloner.cpp
@@ -474,35 +474,42 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::queryStage() {
}
void TenantCollectionCloner::runQuery() {
- const BSONObj& filter = _lastDocId.isEmpty()
- ? BSONObj{} // Use $expr and the aggregation version of $gt to avoid type bracketing.
- : BSON("$expr" << BSON("$gt" << BSON_ARRAY("$_id" << _lastDocId["_id"])));
-
- auto query = _collectionOptions.clusteredIndex
- // RecordIds are _id values and has no separate _id index
- ? client_deprecated::Query().hint(BSON("$natural" << 1))
- : client_deprecated::Query().hint(BSON("_id" << 1));
-
- // Any errors that are thrown here (including NamespaceNotFound) will be handled on the stage
- // level.
- getClient()->query_DEPRECATED(
- [this](DBClientCursorBatchIterator& iter) { handleNextBatch(iter); },
- _sourceDbAndUuid,
- filter,
- query,
- nullptr /* fieldsToReturn */,
- QueryOption_NoCursorTimeout | QueryOption_SecondaryOk |
- (collectionClonerUsesExhaust ? QueryOption_Exhaust : 0),
- _collectionClonerBatchSize,
- ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner());
+ FindCommandRequest findCmd{_sourceDbAndUuid};
+
+ findCmd.setFilter(
+ _lastDocId.isEmpty()
+ ? BSONObj{} // Use $expr and the aggregation version of $gt to avoid type bracketing.
+ : BSON("$expr" << BSON("$gt" << BSON_ARRAY("$_id" << _lastDocId["_id"]))));
+
+ if (_collectionOptions.clusteredIndex) {
+ findCmd.setHint(BSON("$natural" << 1));
+ } else {
+ findCmd.setHint(BSON("_id" << 1));
+ }
+
+ findCmd.setNoCursorTimeout(true);
+ findCmd.setReadConcern(ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner());
+ if (_collectionClonerBatchSize) {
+ findCmd.setBatchSize(_collectionClonerBatchSize);
+ }
+
+ ExhaustMode exhaustMode = collectionClonerUsesExhaust ? ExhaustMode::kOn : ExhaustMode::kOff;
+
+ auto cursor = getClient()->find(
+ std::move(findCmd), ReadPreferenceSetting{ReadPreference::SecondaryPreferred}, exhaustMode);
+
+ // Process the results of the cursor one batch at a time.
+ while (cursor->more()) {
+ handleNextBatch(*cursor);
+ }
}
-void TenantCollectionCloner::handleNextBatch(DBClientCursorBatchIterator& iter) {
+void TenantCollectionCloner::handleNextBatch(DBClientCursor& cursor) {
{
stdx::lock_guard<Latch> lk(_mutex);
_stats.receivedBatches++;
- while (iter.moreInCurrentBatch()) {
- _documentsToInsert.emplace_back(iter.nextSafe());
+ while (cursor.moreInCurrentBatch()) {
+ _documentsToInsert.emplace_back(cursor.nextSafe());
}
}
diff --git a/src/mongo/db/repl/tenant_collection_cloner.h b/src/mongo/db/repl/tenant_collection_cloner.h
index b9c22928917..12bd9bbb832 100644
--- a/src/mongo/db/repl/tenant_collection_cloner.h
+++ b/src/mongo/db/repl/tenant_collection_cloner.h
@@ -209,10 +209,10 @@ private:
AfterStageBehavior queryStage();
/**
- * Put all results from a query batch into a buffer to be inserted, and schedule
- * it to be inserted.
+ * Put all results from a query batch into a buffer to be inserted, and schedule it to be
+ * inserted.
*/
- void handleNextBatch(DBClientCursorBatchIterator& iter);
+ void handleNextBatch(DBClientCursor& cursor);
/**
* Called whenever there is a new batch of documents ready from the DBClientConnection.
diff --git a/src/mongo/db/repl/tenant_file_cloner.cpp b/src/mongo/db/repl/tenant_file_cloner.cpp
index 83ae3c65fc8..b909039eed1 100644
--- a/src/mongo/db/repl/tenant_file_cloner.cpp
+++ b/src/mongo/db/repl/tenant_file_cloner.cpp
@@ -188,8 +188,7 @@ void TenantFileCloner::runQuery() {
getClient(), std::move(aggRequest), true /* secondaryOk */, useExhaust));
try {
while (cursor->more()) {
- DBClientCursorBatchIterator iter(*cursor);
- handleNextBatch(iter);
+ handleNextBatch(*cursor);
}
} catch (const DBException& e) {
// We cannot continue after an error when processing exhaust cursors. Instead we must
@@ -207,7 +206,7 @@ void TenantFileCloner::runQuery() {
}
}
-void TenantFileCloner::handleNextBatch(DBClientCursorBatchIterator& iter) {
+void TenantFileCloner::handleNextBatch(DBClientCursor& cursor) {
LOGV2_DEBUG(6113307,
3,
"TenantFileCloner handleNextBatch",
@@ -215,7 +214,7 @@ void TenantFileCloner::handleNextBatch(DBClientCursorBatchIterator& iter) {
"backupId"_attr = _backupId,
"remoteFile"_attr = _remoteFileName,
"fileOffset"_attr = getFileOffset(),
- "moreInCurrentBatch"_attr = iter.moreInCurrentBatch());
+ "moreInCurrentBatch"_attr = cursor.moreInCurrentBatch());
{
stdx::lock_guard<TenantMigrationSharedData> lk(*getSharedData());
if (!getSharedData()->getStatus(lk).isOK()) {
@@ -225,11 +224,11 @@ void TenantFileCloner::handleNextBatch(DBClientCursorBatchIterator& iter) {
str::stream() << message << ": " << getSharedData()->getStatus(lk));
}
}
- while (iter.moreInCurrentBatch()) {
+ while (cursor.moreInCurrentBatch()) {
stdx::lock_guard<Latch> lk(_mutex);
_stats.receivedBatches++;
- while (iter.moreInCurrentBatch()) {
- _dataToWrite.emplace_back(iter.nextSafe());
+ while (cursor.moreInCurrentBatch()) {
+ _dataToWrite.emplace_back(cursor.nextSafe());
}
}
diff --git a/src/mongo/db/repl/tenant_file_cloner.h b/src/mongo/db/repl/tenant_file_cloner.h
index 90e37946224..27ff89fbc3a 100644
--- a/src/mongo/db/repl/tenant_file_cloner.h
+++ b/src/mongo/db/repl/tenant_file_cloner.h
@@ -160,7 +160,7 @@ private:
/**
* Put all results from a query batch into a buffer, and schedule it to be written to disk.
*/
- void handleNextBatch(DBClientCursorBatchIterator& iter);
+ void handleNextBatch(DBClientCursor& cursor);
/**
* Called whenever there is a new batch of documents ready from the DBClientConnection.
diff --git a/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp b/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp
index 79fa28313f4..26f8d75ba3c 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp
@@ -288,11 +288,11 @@ void TenantMigrationRecipientOpObserver::onDelete(OperationContext* opCtx,
if (nss == NamespaceString::kTenantMigrationRecipientsNamespace &&
!tenant_migration_access_blocker::inRecoveryMode(opCtx)) {
if (tenantIdToDeleteDecoration(opCtx)) {
+ auto tenantId = tenantIdToDeleteDecoration(opCtx).get();
LOGV2_INFO(8423337, "Removing expired 'multitenant migration' migration");
- opCtx->recoveryUnit()->onCommit([opCtx](boost::optional<Timestamp>) {
+ opCtx->recoveryUnit()->onCommit([opCtx, tenantId](boost::optional<Timestamp>) {
TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext())
- .remove(tenantIdToDeleteDecoration(opCtx).get(),
- TenantMigrationAccessBlocker::BlockerType::kRecipient);
+ .remove(tenantId, TenantMigrationAccessBlocker::BlockerType::kRecipient);
});
}
@@ -302,8 +302,7 @@ void TenantMigrationRecipientOpObserver::onDelete(OperationContext* opCtx,
"Removing expired 'shard merge' migration",
"migrationId"_attr = migrationId);
TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext())
- .removeRecipientAccessBlockersForMigration(
- migrationIdToDeleteDecoration(opCtx).get());
+ .removeRecipientAccessBlockersForMigration(migrationId);
repl::TenantFileImporterService::get(opCtx->getServiceContext())->reset(migrationId);
}
}
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 072247f2de5..96f4e84813a 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -55,6 +55,7 @@ env.Library(
'collection_critical_section_document.idl',
'collection_sharding_runtime.cpp',
'collection_sharding_state_factory_shard.cpp',
+ 'commit_chunk_migration.idl',
'config_server_op_observer.cpp',
'global_index_metrics.cpp',
'metadata_manager.cpp',
diff --git a/src/mongo/db/s/balancer/balance_stats_test.cpp b/src/mongo/db/s/balancer/balance_stats_test.cpp
index 9381e0a2da6..aa7b056ae34 100644
--- a/src/mongo/db/s/balancer/balance_stats_test.cpp
+++ b/src/mongo/db/s/balancer/balance_stats_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/oid.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/balancer/balance_stats.h"
@@ -79,7 +77,7 @@ private:
const Timestamp _timestamp{Timestamp(1, 1)};
const ShardId _shardPrimary{"dummyShardPrimary"};
const DatabaseVersion _dbVersion{UUID::gen(), _timestamp};
- ChunkVersion _nextVersion{1, 0, _epoch, _timestamp};
+ ChunkVersion _nextVersion{{_epoch, _timestamp}, {1, 0}};
};
TEST_F(BalanceStatsTest, SingleChunkNoZones) {
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 9710eae64c2..fc2c42a59c1 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -293,11 +293,11 @@ void Balancer::initiateBalancer(OperationContext* opCtx) {
void Balancer::interruptBalancer() {
stdx::lock_guard<Latch> scopedLock(_mutex);
- if (_state != kRunning)
+ if (_state != kRunning) {
return;
+ }
_state = kStopping;
- _thread.detach();
// Interrupt the balancer thread if it has been started. We are guaranteed that the operation
// context of that thread is still alive, because we hold the balancer mutex.
@@ -312,8 +312,10 @@ void Balancer::interruptBalancer() {
void Balancer::waitForBalancerToStop() {
stdx::unique_lock<Latch> scopedLock(_mutex);
-
_joinCond.wait(scopedLock, [this] { return _state == kStopped; });
+ if (_thread.joinable()) {
+ _thread.join();
+ }
}
void Balancer::joinCurrentRound(OperationContext* opCtx) {
@@ -612,12 +614,12 @@ void Balancer::_consumeActionStreamLoop() {
void Balancer::_mainThread() {
ON_BLOCK_EXIT([this] {
- stdx::lock_guard<Latch> scopedLock(_mutex);
-
- _state = kStopped;
+ {
+ stdx::lock_guard<Latch> scopedLock(_mutex);
+ _state = kStopped;
+ LOGV2_DEBUG(21855, 1, "Balancer thread terminated");
+ }
_joinCond.notify_all();
-
- LOGV2_DEBUG(21855, 1, "Balancer thread terminated");
});
Client::initThread("Balancer");
@@ -985,15 +987,6 @@ int Balancer::_moveChunks(OperationContext* opCtx,
return coll.getMaxChunkSizeBytes().value_or(balancerConfig->getMaxChunkSizeBytes());
}();
- if (serverGlobalParams.featureCompatibility.isLessThan(
- multiversion::FeatureCompatibilityVersion::kVersion_6_0)) {
- // TODO SERVER-65322 only use `moveRange` once v6.0 branches out
- MoveChunkSettings settings(maxChunkSizeBytes,
- balancerConfig->getSecondaryThrottle(),
- balancerConfig->waitForDelete());
- return _commandScheduler->requestMoveChunk(opCtx, migrateInfo, settings);
- }
-
MoveRangeRequestBase requestBase(migrateInfo.to);
requestBase.setWaitForDelete(balancerConfig->waitForDelete());
requestBase.setMin(migrateInfo.minKey);
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
index bf22d67619e..8b50d3d002f 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/commands.h"
#include "mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h"
#include "mongo/db/s/balancer/cluster_statistics_impl.h"
@@ -133,7 +131,7 @@ TEST_F(BalancerChunkSelectionTest, TagRangesOverlap) {
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
- ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
setUpDatabase(kDbName, kShardId0);
setUpCollection(kNamespace, collUUID, version);
@@ -192,7 +190,7 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
- ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
setUpDatabase(kDbName, kShardId0);
setUpCollection(kNamespace, collUUID, version);
@@ -251,7 +249,7 @@ TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeAutoSplitted
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
- ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
setUpDatabase(kDbName, kShardId0);
TypeCollectionTimeseriesFields tsFields;
@@ -302,7 +300,7 @@ TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeBalanced) {
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
- ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
setUpDatabase(kDbName, kShardId0);
TypeCollectionTimeseriesFields tsFields;
diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
index 678e5f63f9f..72e86413aa9 100644
--- a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/s/balancer/balancer_commands_scheduler.h"
#include "mongo/db/s/balancer/balancer_commands_scheduler_impl.h"
@@ -65,7 +63,7 @@ public:
chunk.setMax(BSON("x" << min + 10));
chunk.setJumbo(false);
chunk.setShard(shardId);
- chunk.setVersion(ChunkVersion(1, 1, OID::gen(), Timestamp(10)));
+ chunk.setVersion(ChunkVersion({OID::gen(), Timestamp(10)}, {1, 1}));
return chunk;
}
@@ -76,7 +74,7 @@ public:
kUuid,
BSON("x" << min),
BSON("x" << min + 10),
- ChunkVersion(1, 1, OID::gen(), Timestamp(10)),
+ ChunkVersion({OID::gen(), Timestamp(10)}, {1, 1}),
MoveChunkRequest::ForceJumbo::kDoNotForce);
}
@@ -234,7 +232,7 @@ TEST_F(BalancerCommandsSchedulerTest, SuccessfulMergeChunkCommand) {
_scheduler.start(operationContext(), getMigrationRecoveryDefaultValues());
ChunkRange range(BSON("x" << 0), BSON("x" << 20));
- ChunkVersion version(1, 1, OID::gen(), Timestamp(10));
+ ChunkVersion version({OID::gen(), Timestamp(10)}, {1, 1});
auto futureResponse =
_scheduler.requestMergeChunks(operationContext(), kNss, kShardId0, range, version);
ASSERT_OK(futureResponse.getNoThrow());
@@ -246,7 +244,7 @@ TEST_F(BalancerCommandsSchedulerTest, MergeChunkNonexistentShard) {
auto remoteResponsesFuture = setRemoteResponses();
_scheduler.start(operationContext(), getMigrationRecoveryDefaultValues());
ChunkRange range(BSON("x" << 0), BSON("x" << 20));
- ChunkVersion version(1, 1, OID::gen(), Timestamp(10));
+ ChunkVersion version({OID::gen(), Timestamp(10)}, {1, 1});
auto futureResponse = _scheduler.requestMergeChunks(
operationContext(), kNss, ShardId("nonexistent"), range, version);
auto shardNotFoundError = Status{ErrorCodes::ShardNotFound, "Shard nonexistent not found"};
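The mechanical change running through these tests is the ChunkVersion constructor: the (epoch, timestamp) collection generation is now grouped as the first argument and the {major, minor} pair as the second. A hedged before/after sketch with placeholder values (makeExampleChunkVersion is illustrative only):

#include "mongo/bson/oid.h"
#include "mongo/bson/timestamp.h"
#include "mongo/s/chunk_version.h"

namespace mongo {
ChunkVersion makeExampleChunkVersion() {
    const auto epoch = OID::gen();
    const Timestamp ts(10);
    // Old form (removed above): ChunkVersion(1, 1, epoch, ts);
    // New form: the {epoch, timestamp} generation comes first, then {major, minor}.
    return ChunkVersion({epoch, ts}, {1, 1});
}
}  // namespace mongo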
diff --git a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
index 607e57dab44..94b6e874cbf 100644
--- a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
+++ b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
@@ -30,6 +30,7 @@
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/s/balancer/cluster_chunks_resize_policy_impl.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
+
namespace mongo {
namespace {
@@ -37,7 +38,7 @@ class ClusterChunksResizePolicyTest : public ConfigServerTestFixture {
protected:
const NamespaceString kNss{"testDb.testColl"};
const UUID kUuid = UUID::gen();
- const ChunkVersion kCollectionVersion = ChunkVersion(1, 1, OID::gen(), Timestamp(10));
+ const ChunkVersion kCollectionVersion = ChunkVersion({OID::gen(), Timestamp(10)}, {1, 1});
const ShardId kShardId0 = ShardId("shard0");
const ShardId kShardId1 = ShardId("shard1");
diff --git a/src/mongo/db/s/balancer/type_migration_test.cpp b/src/mongo/db/s/balancer/type_migration_test.cpp
index f605983fe2c..610e150c963 100644
--- a/src/mongo/db/s/balancer/type_migration_test.cpp
+++ b/src/mongo/db/s/balancer/type_migration_test.cpp
@@ -27,12 +27,9 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/jsobj.h"
#include "mongo/db/s/balancer/type_migration.h"
#include "mongo/s/catalog/type_chunk.h"
-
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -48,7 +45,7 @@ const ShardId kToShard("shard0001");
const bool kWaitForDelete{true};
TEST(MigrationTypeTest, FromAndToBSONWithoutOptionalFields) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -68,7 +65,7 @@ TEST(MigrationTypeTest, FromAndToBSONWithoutOptionalFields) {
}
TEST(MigrationTypeTest, FromAndToBSONWitOptionalFields) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
const auto secondaryThrottle =
MigrationSecondaryThrottleOptions::createWithWriteConcern(WriteConcernOptions(
"majority", WriteConcernOptions::SyncMode::JOURNAL, Milliseconds(60000)));
@@ -94,7 +91,7 @@ TEST(MigrationTypeTest, FromAndToBSONWitOptionalFields) {
}
TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::min(), kMin);
@@ -111,7 +108,7 @@ TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
}
TEST(MigrationTypeTest, MissingRequiredMinField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -128,7 +125,7 @@ TEST(MigrationTypeTest, MissingRequiredMinField) {
}
TEST(MigrationTypeTest, MissingRequiredMaxField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -145,7 +142,7 @@ TEST(MigrationTypeTest, MissingRequiredMaxField) {
}
TEST(MigrationTypeTest, MissingRequiredFromShardField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -162,7 +159,7 @@ TEST(MigrationTypeTest, MissingRequiredFromShardField) {
}
TEST(MigrationTypeTest, MissingRequiredToShardField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index e2e3081b436..74dc6a9e655 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/catalog_raii.h"
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/operation_sharding_state.h"
@@ -79,7 +77,7 @@ protected:
boost::none,
true,
[&] {
- ChunkVersion version(1, 0, epoch, Timestamp(1, 1));
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunk1(uuid,
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index c9ed5d77272..4084fe8e9e2 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/base/status.h"
#include "mongo/db/range_arithmetic.h"
#include "mongo/db/s/collection_metadata.h"
@@ -62,7 +60,7 @@ CollectionMetadata makeCollectionMetadataImpl(
std::vector<ChunkType> allChunks;
auto nextMinKey = shardKeyPattern.globalMin();
- ChunkVersion version{1, 0, epoch, timestamp};
+ ChunkVersion version({epoch, timestamp}, {1, 0});
for (const auto& myNextChunk : thisShardsChunks) {
if (SimpleBSONObjComparator::kInstance.evaluate(nextMinKey < myNextChunk.first)) {
// Need to add a chunk to the other shard from nextMinKey to myNextChunk.first.
diff --git a/src/mongo/db/s/collmod_coordinator.cpp b/src/mongo/db/s/collmod_coordinator.cpp
index ecd23800b69..50e92b41571 100644
--- a/src/mongo/db/s/collmod_coordinator.cpp
+++ b/src/mongo/db/s/collmod_coordinator.cpp
@@ -77,7 +77,7 @@ bool hasTimeSeriesGranularityUpdate(const CollModRequest& request) {
CollModCoordinator::CollModCoordinator(ShardingDDLCoordinatorService* service,
const BSONObj& initialState)
- : RecoverableShardingDDLCoordinator(service, initialState),
+ : RecoverableShardingDDLCoordinator(service, "CollModCoordinator", initialState),
_request{_doc.getCollModRequest()} {}
void CollModCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
@@ -93,31 +93,9 @@ void CollModCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
SimpleBSONObjComparator::kInstance.evaluate(selfReq == otherReq));
}
-boost::optional<BSONObj> CollModCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
-
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
-
- const auto currPhase = [&]() {
- stdx::lock_guard l{_docMutex};
- return _doc.getPhase();
- }();
-
- cmdBob.appendElements(_request.toBSON());
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "CollModCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
- return bob.obj();
-}
+void CollModCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ cmdInfoBuilder->appendElements(_request.toBSON());
+};
void CollModCoordinator::_performNoopRetryableWriteOnParticipants(
OperationContext* opCtx, const std::shared_ptr<executor::TaskExecutor>& executor) {
@@ -208,9 +186,10 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
_saveCollectionInfoOnCoordinatorIfNecessary(opCtx);
if (_collInfo->isSharded) {
- _doc.setCollUUID(
- sharding_ddl_util::getCollectionUUID(opCtx, nss(), true /* allowViews */));
- sharding_ddl_util::stopMigrations(opCtx, nss(), _doc.getCollUUID());
+ _doc.setCollUUID(sharding_ddl_util::getCollectionUUID(
+ opCtx, _collInfo->nsForTargeting, true /* allowViews */));
+ sharding_ddl_util::stopMigrations(
+ opCtx, _collInfo->nsForTargeting, _doc.getCollUUID());
}
_saveShardingInfoOnCoordinatorIfNecessary(opCtx);
@@ -309,7 +288,8 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
CommandHelpers::appendSimpleCommandStatus(builder, ok, errmsg);
}
_result = builder.obj();
- sharding_ddl_util::resumeMigrations(opCtx, nss(), _doc.getCollUUID());
+ sharding_ddl_util::resumeMigrations(
+ opCtx, _collInfo->nsForTargeting, _doc.getCollUUID());
} else {
CollMod cmd(nss());
cmd.setCollModRequest(_request);
@@ -344,7 +324,8 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
auto* opCtx = opCtxHolder.get();
getForwardableOpMetadata().setOn(opCtx);
- sharding_ddl_util::resumeMigrations(opCtx, nss(), _doc.getCollUUID());
+ sharding_ddl_util::resumeMigrations(
+ opCtx, _collInfo->nsForTargeting, _doc.getCollUUID());
}
}
return status;
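The removed reportForCurrentOp() above suggests the common currentOp fields now come from the shared basicReportBuilder(), with appendCommandInfo() contributing only the command-specific part. A free-standing sketch of those common fields, grounded in the deleted block rather than in the actual base-class implementation (buildBasicCoordinatorReport is illustrative):

#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/namespace_string.h"

namespace mongo {
// Illustrative helper mirroring the fields the removed code appended; the real
// builder lives in RecoverableShardingDDLCoordinator.
BSONObj buildBasicCoordinatorReport(StringData coordinatorName,
                                    const NamespaceString& nss,
                                    StringData currentPhase) {
    BSONObjBuilder bob;
    bob.append("type", "op");
    bob.append("desc", coordinatorName);
    bob.append("op", "command");
    bob.append("ns", nss.toString());
    bob.append("currentPhase", currentPhase);
    bob.append("active", true);
    return bob.obj();
}
}  // namespace mongo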
diff --git a/src/mongo/db/s/collmod_coordinator.h b/src/mongo/db/s/collmod_coordinator.h
index 8c51b62e8ce..4b65502f78d 100644
--- a/src/mongo/db/s/collmod_coordinator.h
+++ b/src/mongo/db/s/collmod_coordinator.h
@@ -46,9 +46,7 @@ public:
void checkIfOptionsConflict(const BSONObj& doc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
/**
* Waits for the termination of the parent DDLCoordinator (so all the resources are liberated)
diff --git a/src/mongo/db/s/commit_chunk_migration.idl b/src/mongo/db/s/commit_chunk_migration.idl
new file mode 100644
index 00000000000..6484623cd5c
--- /dev/null
+++ b/src/mongo/db/s/commit_chunk_migration.idl
@@ -0,0 +1,85 @@
+
+# Copyright (C) 2019-present MongoDB, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the Server Side Public License, version 1,
+# as published by MongoDB, Inc.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Server Side Public License for more details.
+#
+# You should have received a copy of the Server Side Public License
+# along with this program. If not, see
+# <http://www.mongodb.com/licensing/server-side-public-license>.
+#
+# As a special exception, the copyright holders give permission to link the
+# code of portions of this program with the OpenSSL library under certain
+# conditions as described in each individual source file and distribute
+# linked combinations including the program with the OpenSSL library. You
+# must comply with the Server Side Public License in all respects for
+# all of the code used other than as permitted herein. If you modify file(s)
+# with this exception, you may extend this exception to your version of the
+# file(s), but you are not obligated to do so. If you do not wish to do so,
+# delete this exception statement from your version. If you delete this
+# exception statement from all source files in the program, then also delete
+# it in the license file.
+#
+
+
+global:
+ cpp_namespace: "mongo"
+
+imports:
+ - "mongo/idl/basic_types.idl"
+ - "mongo/s/sharding_types.idl"
+ - "mongo/s/chunk_version.idl"
+
+structs:
+ ConfigSvrCommitChunkMigrationResponse:
+ description: "Response of the _configsvrCommitChunkMigration command."
+ strict: false
+ fields:
+ shardVersion:
+ type: ChunkVersion
+ description: "Collection version at the end of the migration."
+
+ MigratedChunkType:
+ description: "ChunkType describing a migrated chunk"
+ strict: false
+ fields:
+ lastmod : ChunkVersion
+ min: object
+ max: object
+
+commands:
+ _configsvrCommitChunkMigration:
+ command_name: _configsvrCommitChunkMigration
+ cpp_name: CommitChunkMigrationRequest
+ description: "internal _configsvrCommitChunkMigration command for config server"
+ namespace: type
+ api_version: ""
+ type: namespacestring
+ strict: false
+ reply_type: ConfigSvrCommitChunkMigrationResponse
+ fields:
+ fromShard:
+ type: shard_id
+ description: "from shard name"
+
+ toShard:
+ type: shard_id
+ description: "to shard name"
+
+ migratedChunk:
+ type: MigratedChunkType
+ description: "ChunkType describing a migrated chunk"
+
+ fromShardCollectionVersion:
+ type: ChunkVersion
+ description: "{ shardVersionField: <version> }"
+
+ validAfter:
+ type: timestamp
+ description: "The time after which this chunk is at the new shard" \ No newline at end of file
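Putting the IDL fields together, the request document this file describes would look roughly like the following (serializeExampleCommitRequest and every value are placeholders; the real request is built through the IDL-generated CommitChunkMigrationRequest, not by hand):

#include "mongo/bson/timestamp.h"
#include "mongo/db/jsobj.h"

namespace mongo {
// Illustrative only: the rough wire shape implied by the fields above.
BSONObj serializeExampleCommitRequest() {
    return BSON("_configsvrCommitChunkMigration"
                << "test.coll"
                << "fromShard" << "shard0000"
                << "toShard" << "shard0001"
                << "migratedChunk"
                << BSON("lastmod" << BSONObj()  // ChunkVersion placeholder
                                  << "min" << BSON("x" << 0)
                                  << "max" << BSON("x" << 10))
                << "fromShardCollectionVersion" << BSONObj()  // ChunkVersion placeholder
                << "validAfter" << Timestamp(10, 0));
}
}  // namespace mongo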
diff --git a/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp b/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp
index fca007b684c..69c67d89dcb 100644
--- a/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp
+++ b/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp
@@ -187,43 +187,16 @@ void doDropOperation(const CompactStructuredEncryptionDataState& state) {
boost::optional<BSONObj> CompactStructuredEncryptionDataCoordinator::reportForCurrentOp(
MongoProcessInterface::CurrentOpConnectionsMode connMode,
MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder bob;
-
- CompactStructuredEncryptionDataPhaseEnum currPhase;
- std::string nss;
- std::string escNss;
- std::string eccNss;
- std::string ecoNss;
- std::string ecocNss;
- std::string ecocRenameUuid;
- std::string ecocUiid;
- std::string ecocRenameNss;
- {
- stdx::lock_guard l{_docMutex};
- currPhase = _doc.getPhase();
- nss = _doc.getId().getNss().ns();
- escNss = _doc.getEscNss().ns();
- eccNss = _doc.getEccNss().ns();
- ecoNss = _doc.getEcocNss().ns();
- ecocNss = _doc.getEcocNss().ns();
- ecocRenameUuid =
- _doc.getEcocRenameUuid() ? _doc.getEcocRenameUuid().value().toString() : "none";
- ecocUiid = _doc.getEcocUuid() ? _doc.getEcocUuid().value().toString() : "none";
- ecocRenameNss = _doc.getEcocRenameNss().ns();
- }
-
- bob.append("type", "op");
- bob.append("desc", "CompactStructuredEncryptionDataCoordinator");
- bob.append("op", "command");
- bob.append("nss", nss);
- bob.append("escNss", escNss);
- bob.append("eccNss", eccNss);
- bob.append("ecocNss", ecocNss);
- bob.append("ecocUuid", ecocUiid);
- bob.append("ecocRenameNss", ecocRenameNss);
- bob.append("ecocRenameUuid", ecocRenameUuid);
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
+ auto bob = basicReportBuilder();
+
+ stdx::lock_guard lg{_docMutex};
+ bob.append("escNss", _doc.getEscNss().ns());
+ bob.append("eccNss", _doc.getEccNss().ns());
+ bob.append("ecocNss", _doc.getEcocNss().ns());
+ bob.append("ecocUuid", _doc.getEcocUuid() ? _doc.getEcocUuid().value().toString() : "none");
+ bob.append("ecocRenameNss", _doc.getEcocRenameNss().ns());
+ bob.append("ecocRenameUuid",
+ _doc.getEcocRenameUuid() ? _doc.getEcocRenameUuid().value().toString() : "none");
return bob.obj();
}
diff --git a/src/mongo/db/s/compact_structured_encryption_data_coordinator.h b/src/mongo/db/s/compact_structured_encryption_data_coordinator.h
index b441ae3c156..b030e19910a 100644
--- a/src/mongo/db/s/compact_structured_encryption_data_coordinator.h
+++ b/src/mongo/db/s/compact_structured_encryption_data_coordinator.h
@@ -50,7 +50,8 @@ public:
CompactStructuredEncryptionDataCoordinator(ShardingDDLCoordinatorService* service,
const BSONObj& doc)
- : RecoverableShardingDDLCoordinator(service, doc) {}
+ : RecoverableShardingDDLCoordinator(
+ service, "CompactStructuredEncryptionDataCoordinator", doc) {}
boost::optional<BSONObj> reportForCurrentOp(
MongoProcessInterface::CurrentOpConnectionsMode connMode,
diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
index 9dcff9c96d0..a50f499662f 100644
--- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
@@ -31,12 +31,14 @@
#include "mongo/platform/basic.h"
#include "mongo/base/status_with.h"
+#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/s/chunk_move_write_concern_options.h"
+#include "mongo/db/s/commit_chunk_migration_gen.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -44,7 +46,6 @@
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
-#include "mongo/s/request_types/commit_chunk_migration_request_type.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
@@ -79,9 +80,23 @@ namespace {
* }
*
*/
-class ConfigSvrCommitChunkMigrationCommand : public BasicCommand {
+
+
+ChunkType toChunkType(const MigratedChunkType& migratedChunk) {
+
+ ChunkType chunk;
+ chunk.setMin(migratedChunk.getMin());
+ chunk.setMax(migratedChunk.getMax());
+ chunk.setVersion(migratedChunk.getLastmod());
+ return chunk;
+}
+
+
+class ConfigSvrCommitChunkMigrationCommand
+ : public TypedCommand<ConfigSvrCommitChunkMigrationCommand> {
public:
- ConfigSvrCommitChunkMigrationCommand() : BasicCommand("_configsvrCommitChunkMigration") {}
+ using Request = CommitChunkMigrationRequest;
+ using Response = ConfigSvrCommitChunkMigrationResponse;
bool skipApiVersionCheck() const override {
// Internal command (server to server).
@@ -100,51 +115,57 @@ public:
return true;
}
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
- return true;
- }
+ class Invocation : public InvocationBase {
+ public:
+ using InvocationBase::InvocationBase;
+
+ ConfigSvrCommitChunkMigrationResponse typedRun(OperationContext* opCtx) {
+
+ uassert(ErrorCodes::IllegalOperation,
+ "_configsvrClearJumboFlag can only be run on config servers",
+ serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+
+ // Set the operation context read concern level to local for reads into the config
+ // database.
+ repl::ReadConcernArgs::get(opCtx) =
+ repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
+
+ const NamespaceString nss = ns();
+ auto migratedChunk = toChunkType(request().getMigratedChunk());
- Status checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) const override {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::internal)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ StatusWith<BSONObj> chunkVersionResponse =
+ ShardingCatalogManager::get(opCtx)->commitChunkMigration(
+ opCtx,
+ nss,
+ migratedChunk,
+ request().getFromShardCollectionVersion().epoch(),
+ request().getFromShardCollectionVersion().getTimestamp(),
+ request().getFromShard(),
+ request().getToShard(),
+ request().getValidAfter());
+
+ auto chunkVersionObj = uassertStatusOK(chunkVersionResponse);
+
+ return Response{ChunkVersion::parse(chunkVersionObj[ChunkVersion::kShardVersionField])};
}
- return Status::OK();
- }
- std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return CommandHelpers::parseNsFullyQualified(cmdObj);
- }
+ private:
+ bool supportsWriteConcern() const override {
+ return true;
+ }
- bool run(OperationContext* opCtx,
- const std::string& dbName,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) override {
-
- // Set the operation context read concern level to local for reads into the config database.
- repl::ReadConcernArgs::get(opCtx) =
- repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
-
- const NamespaceString nss = NamespaceString(parseNs(dbName, cmdObj));
-
- auto commitRequest =
- uassertStatusOK(CommitChunkMigrationRequest::createFromCommand(nss, cmdObj));
-
- StatusWith<BSONObj> response = ShardingCatalogManager::get(opCtx)->commitChunkMigration(
- opCtx,
- nss,
- commitRequest.getMigratedChunk(),
- commitRequest.getCollectionEpoch(),
- commitRequest.getCollectionTimestamp(),
- commitRequest.getFromShard(),
- commitRequest.getToShard(),
- commitRequest.getValidAfter());
- uassertStatusOK(response.getStatus());
- result.appendElements(response.getValue());
- return true;
- }
+ NamespaceString ns() const override {
+ return request().getCommandParameter();
+ }
+
+ void doCheckAuthorization(OperationContext* opCtx) const override {
+ uassert(ErrorCodes::Unauthorized,
+ "Unauthorized",
+ AuthorizationSession::get(opCtx->getClient())
+ ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::internal));
+ }
+ };
} configsvrCommitChunkMigrationCommand;
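
The hunks above convert _configsvrCommitChunkMigration from a hand-rolled BasicCommand into the IDL-backed TypedCommand pattern, with the request and response types generated from commit_chunk_migration_gen.h. As a rough sketch of that shape only (the remaining Command overrides and the typedRun body are elided; nothing below is a verbatim excerpt from the tree):

class ConfigSvrCommitChunkMigrationCommand
    : public TypedCommand<ConfigSvrCommitChunkMigrationCommand> {
public:
    // Aliases onto the IDL-generated types; request parsing and response
    // serialization are supplied by the generated code.
    using Request = CommitChunkMigrationRequest;
    using Response = ConfigSvrCommitChunkMigrationResponse;

    class Invocation : public InvocationBase {
    public:
        using InvocationBase::InvocationBase;

        // The command body: runs on the config server and returns the new shard version.
        Response typedRun(OperationContext* opCtx);

    private:
        bool supportsWriteConcern() const override {
            return true;
        }
        NamespaceString ns() const override {
            // The target namespace is the command parameter itself.
            return request().getCommandParameter();
        }
        void doCheckAuthorization(OperationContext* opCtx) const override;
    };
};
// A namespace-scope instance (as at the end of the hunk above) registers the command.
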
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 1be2dd486fb..0b2ab1b0474 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/config/initial_split_policy.h"
#include "mongo/client/read_preference.h"
@@ -50,7 +47,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
@@ -73,17 +69,11 @@ void appendChunk(const SplitPolicyParams& params,
const BSONObj& min,
const BSONObj& max,
ChunkVersion* version,
- const Timestamp& creationTimestamp,
const ShardId& shardId,
std::vector<ChunkType>* chunks) {
- chunks->emplace_back(
- params.collectionUUID,
- ChunkRange(min, max),
- ChunkVersion(
- version->majorVersion(), version->minorVersion(), version->epoch(), creationTimestamp),
- shardId);
+ chunks->emplace_back(params.collectionUUID, ChunkRange(min, max), *version, shardId);
auto& chunk = chunks->back();
- chunk.setHistory({ChunkHistory(creationTimestamp, shardId)});
+ chunk.setHistory({ChunkHistory(version->getTimestamp(), shardId)});
version->incMinor();
}
@@ -238,7 +228,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::generateShardColle
finalSplitPoints.push_back(splitPoint);
}
- ChunkVersion version(1, 0, OID::gen(), validAfter);
+ ChunkVersion version({OID::gen(), validAfter}, {1, 0});
const auto& keyPattern(shardKeyPattern.getKeyPattern());
std::vector<ChunkType> chunks;
@@ -254,7 +244,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::generateShardColle
? params.primaryShardId
: allShardIds[(i / numContiguousChunksPerShard) % allShardIds.size()];
- appendChunk(params, min, max, &version, validAfter, shardId, &chunks);
+ appendChunk(params, min, max, &version, shardId, &chunks);
}
return {std::move(chunks)};
@@ -327,14 +317,13 @@ InitialSplitPolicy::ShardCollectionConfig SingleChunkOnPrimarySplitPolicy::creat
const auto currentTime = VectorClock::get(opCtx)->getTime();
const auto validAfter = currentTime.clusterTime().asTimestamp();
- ChunkVersion version(1, 0, OID::gen(), validAfter);
+ ChunkVersion version({OID::gen(), validAfter}, {1, 0});
const auto& keyPattern = shardKeyPattern.getKeyPattern();
std::vector<ChunkType> chunks;
appendChunk(params,
keyPattern.globalMin(),
keyPattern.globalMax(),
&version,
- validAfter,
params.primaryShardId,
&chunks);
@@ -421,19 +410,14 @@ InitialSplitPolicy::ShardCollectionConfig AbstractTagsBasedSplitPolicy::createFi
return shardIds[indx++ % shardIds.size()];
};
- ChunkVersion version(1, 0, OID::gen(), validAfter);
+ ChunkVersion version({OID::gen(), validAfter}, {1, 0});
auto lastChunkMax = keyPattern.globalMin();
std::vector<ChunkType> chunks;
for (const auto& tag : _tags) {
// Create a chunk for the hole [lastChunkMax, tag.getMinKey)
if (tag.getMinKey().woCompare(lastChunkMax) > 0) {
- appendChunk(params,
- lastChunkMax,
- tag.getMinKey(),
- &version,
- validAfter,
- nextShardIdForHole(),
- &chunks);
+ appendChunk(
+ params, lastChunkMax, tag.getMinKey(), &version, nextShardIdForHole(), &chunks);
}
// Create chunk for the actual tag - [tag.getMinKey, tag.getMaxKey)
const auto it = tagToShards.find(tag.getTag());
@@ -470,7 +454,7 @@ InitialSplitPolicy::ShardCollectionConfig AbstractTagsBasedSplitPolicy::createFi
const BSONObj max = (splitPointIdx == splitInfo.splitPoints.size())
? tag.getMaxKey()
: splitInfo.splitPoints[splitPointIdx];
- appendChunk(params, min, max, &version, validAfter, targetShard, &chunks);
+ appendChunk(params, min, max, &version, targetShard, &chunks);
}
}
lastChunkMax = tag.getMaxKey();
@@ -478,13 +462,8 @@ InitialSplitPolicy::ShardCollectionConfig AbstractTagsBasedSplitPolicy::createFi
// Create a chunk for the hole [lastChunkMax, MaxKey]
if (lastChunkMax.woCompare(keyPattern.globalMax()) < 0) {
- appendChunk(params,
- lastChunkMax,
- keyPattern.globalMax(),
- &version,
- validAfter,
- nextShardIdForHole(),
- &chunks);
+ appendChunk(
+ params, lastChunkMax, keyPattern.globalMax(), &version, nextShardIdForHole(), &chunks);
}
return {std::move(chunks)};
@@ -765,13 +744,13 @@ InitialSplitPolicy::ShardCollectionConfig ReshardingSplitPolicy::createFirstChun
const auto currentTime = VectorClock::get(opCtx)->getTime();
const auto validAfter = currentTime.clusterTime().asTimestamp();
- ChunkVersion version(1, 0, OID::gen(), validAfter);
+ ChunkVersion version({OID::gen(), validAfter}, {1, 0});
splitPoints.insert(keyPattern.globalMax());
for (const auto& splitPoint : splitPoints) {
auto bestShard = selectBestShard(
chunkDistribution, zoneInfo, zoneToShardMap, {lastChunkMax, splitPoint});
- appendChunk(params, lastChunkMax, splitPoint, &version, validAfter, bestShard, &chunks);
+ appendChunk(params, lastChunkMax, splitPoint, &version, bestShard, &chunks);
lastChunkMax = splitPoint;
chunkDistribution[bestShard]++;
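
The appendChunk() signature change and every ChunkVersion call site below follow the same pattern: the old positional constructor (major, minor, epoch, timestamp) gives way to one that takes the collection generation {epoch, timestamp} first and the {major, minor} counters second, so the chunk history timestamp can be read back from the version itself. A minimal fragment, assuming only what the call sites above show:

#include "mongo/s/chunk_version.h"

const OID epoch = OID::gen();
const Timestamp timestamp(1, 1);

// Old form, removed by this diff:
//   ChunkVersion version(1, 0, epoch, timestamp);

// New form: generation first, counters second.
ChunkVersion version({epoch, timestamp}, {1, 0});

// appendChunk() no longer takes a separate creationTimestamp; the history entry
// is derived from the version's own timestamp instead:
//   chunk.setHistory({ChunkHistory(version.getTimestamp(), shardId)});
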
diff --git a/src/mongo/db/s/config/initial_split_policy_test.cpp b/src/mongo/db/s/config/initial_split_policy_test.cpp
index 2eea0b6905f..9fc9a5576d0 100644
--- a/src/mongo/db/s/config/initial_split_policy_test.cpp
+++ b/src/mongo/db/s/config/initial_split_policy_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/json.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/s/config/initial_split_policy.h"
@@ -40,7 +37,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -208,7 +204,7 @@ public:
std::vector<ChunkType> chunks;
for (unsigned long i = 0; i < chunkRanges.size(); ++i) {
- ChunkVersion version(1, 0, OID::gen(), Timestamp(1, 1));
+ ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 0});
ChunkType chunk(_uuid, chunkRanges[i], version, shardIds[i]);
chunk.setHistory({ChunkHistory(timeStamp, shardIds[i])});
chunks.push_back(chunk);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
index 762961eaac3..9f883997a3d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/client/read_preference.h"
@@ -72,7 +70,7 @@ protected:
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- chunk.setVersion({12, 7, epoch, timestamp});
+ chunk.setVersion(ChunkVersion({epoch, timestamp}, {12, 7}));
chunk.setShard(_shardName);
chunk.setMin(jumboChunk().getMin());
chunk.setMax(jumboChunk().getMax());
@@ -81,7 +79,7 @@ protected:
ChunkType otherChunk;
otherChunk.setName(OID::gen());
otherChunk.setCollectionUUID(collUuid);
- otherChunk.setVersion({14, 7, epoch, timestamp});
+ otherChunk.setVersion(ChunkVersion({epoch, timestamp}, {14, 7}));
otherChunk.setShard(_shardName);
otherChunk.setMin(nonJumboChunk().getMin());
otherChunk.setMax(nonJumboChunk().getMax());
@@ -107,7 +105,7 @@ TEST_F(ClearJumboFlagTest, ClearJumboShouldBumpVersion) {
operationContext(), collUuid, jumboChunk().getMin(), collEpoch, collTimestamp));
ASSERT_FALSE(chunkDoc.getJumbo());
auto chunkVersion = chunkDoc.getVersion();
- ASSERT_EQ(ChunkVersion(15, 0, collEpoch, collTimestamp), chunkVersion);
+ ASSERT_EQ(ChunkVersion({collEpoch, collTimestamp}, {15, 0}), chunkVersion);
};
test(_nss2, Timestamp(42));
@@ -125,7 +123,7 @@ TEST_F(ClearJumboFlagTest, ClearJumboShouldNotBumpVersionIfChunkNotJumbo) {
auto chunkDoc = uassertStatusOK(getChunkDoc(
operationContext(), collUuid, nonJumboChunk().getMin(), collEpoch, collTimestamp));
ASSERT_FALSE(chunkDoc.getJumbo());
- ASSERT_EQ(ChunkVersion(14, 7, collEpoch, collTimestamp), chunkDoc.getVersion());
+ ASSERT_EQ(ChunkVersion({collEpoch, collTimestamp}, {14, 7}), chunkDoc.getVersion());
};
test(_nss2, Timestamp(42));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
index 20e8b2ecc6a..8921d0c2e8b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
@@ -95,7 +93,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoCollectionFoundReturnsSuccess) {
const auto requestedChunkType =
generateChunkType(_nss,
_collUuid,
- ChunkVersion(10, 2, OID::gen(), Timestamp(1, 1)),
+ ChunkVersion({OID::gen(), Timestamp(1, 1)}, {10, 2}),
ShardId(_shardName),
BSON("a" << 1),
BSON("a" << 10));
@@ -112,12 +110,13 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundRetu
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
// Min key is different.
@@ -140,12 +139,13 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundRetu
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
// Max key is different.
@@ -169,20 +169,22 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
const auto existingChunkType = requestedChunkType;
- const auto highestChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(20, 3, collEpoch, collTimestamp),
- ShardId("shard0001"),
- BSON("a" << 11),
- BSON("a" << 20));
+ const auto highestChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {20, 3}),
+ ShardId("shard0001"),
+ BSON("a" << 11),
+ BSON("a" << 20));
setupCollection(_nss, _keyPattern, {existingChunkType, highestChunkType});
ShardingCatalogManager::get(operationContext())
@@ -195,8 +197,8 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
assertChunkVersionWasBumpedTo(
existingChunkType,
getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp),
- ChunkVersion(
- highestChunkType.getVersion().majorVersion() + 1, 0, collEpoch, collTimestamp));
+ ChunkVersion({collEpoch, collTimestamp},
+ {highestChunkType.getVersion().majorVersion() + 1, 0}));
}
TEST_F(EnsureChunkVersionIsGreaterThanTest,
@@ -204,20 +206,22 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
const auto existingChunkType = requestedChunkType;
- const auto highestChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(20, 3, collEpoch, collTimestamp),
- ShardId("shard0001"),
- BSON("a" << 11),
- BSON("a" << 20));
+ const auto highestChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {20, 3}),
+ ShardId("shard0001"),
+ BSON("a" << 11),
+ BSON("a" << 20));
setupCollection(_nss, _keyPattern, {existingChunkType, highestChunkType});
ShardingCatalogManager::get(operationContext())
@@ -230,8 +234,8 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
assertChunkVersionWasBumpedTo(
existingChunkType,
getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp),
- ChunkVersion(
- highestChunkType.getVersion().majorVersion() + 1, 0, collEpoch, collTimestamp));
+ ChunkVersion({collEpoch, collTimestamp},
+ {highestChunkType.getVersion().majorVersion() + 1, 0}));
}
TEST_F(
@@ -240,15 +244,16 @@ TEST_F(
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
- existingChunkType.setVersion(ChunkVersion(11, 1, collEpoch, collTimestamp));
+ existingChunkType.setVersion(ChunkVersion({collEpoch, collTimestamp}, {11, 1}));
setupCollection(_nss, _keyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
@@ -269,15 +274,16 @@ TEST_F(
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
- existingChunkType.setVersion(ChunkVersion(11, 1, collEpoch, collTimestamp));
+ existingChunkType.setVersion(ChunkVersion({collEpoch, collTimestamp}, {11, 1}));
setupCollection(_nss, _keyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 15ae1c5eb5c..9d7e68c9a93 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/client/read_preference.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/logical_session_cache_noop.h"
@@ -85,7 +83,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(_shardId);
@@ -126,10 +124,9 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
ASSERT_EQ(collVersion, shardVersion);
// Check for increment on mergedChunk's minor version
- auto expectedShardVersion = ChunkVersion(origVersion.majorVersion(),
- origVersion.minorVersion() + 1,
- origVersion.epoch(),
- origVersion.getTimestamp());
+ auto expectedShardVersion =
+ ChunkVersion({origVersion.epoch(), origVersion.getTimestamp()},
+ {origVersion.majorVersion(), origVersion.minorVersion() + 1});
ASSERT_EQ(expectedShardVersion, shardVersion);
@@ -170,7 +167,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(_shardId);
@@ -251,7 +248,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
otherChunk.setName(OID::gen());
otherChunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 2});
chunk.setVersion(origVersion);
chunk.setShard(_shardId);
@@ -273,7 +270,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
ChunkRange rangeToBeMerged(chunk.getMin(), chunk2.getMax());
// Set up other chunk with competing version
- auto competingVersion = ChunkVersion(2, 1, collEpoch, collTimestamp);
+ auto competingVersion = ChunkVersion({collEpoch, collTimestamp}, {2, 1});
otherChunk.setVersion(competingVersion);
otherChunk.setShard(_shardId);
otherChunk.setMin(BSON("a" << 10));
@@ -334,7 +331,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 2});
chunk.setVersion(origVersion);
chunk.setShard(shardId);
@@ -415,7 +412,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
// Construct chunk to be merged
@@ -457,7 +454,7 @@ TEST_F(MergeChunkTest, NonMatchingUUIDsOfChunkAndRequestErrors) {
ChunkType chunk;
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(_shardId);
@@ -503,7 +500,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
ChunkRange rangeToBeMerged(chunkMin, chunkMax);
// Store a chunk that matches the range that will be requested
- auto mergedVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto mergedVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
mergedVersion.incMinor();
ChunkType mergedChunk;
mergedChunk.setVersion(mergedVersion);
@@ -559,7 +556,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
chunk1.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk1.setVersion(origVersion);
chunk1.setShard(_shardId);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index bcb5557ff53..9b9e48cfe0b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -80,7 +80,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -112,8 +112,9 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ASSERT_EQ(collVersion, shardVersion);
// Check for increment on mergedChunk's minor version
- auto expectedShardVersion = ChunkVersion(
- origVersion.majorVersion(), origVersion.minorVersion() + 2, collEpoch, collTimestamp);
+ auto expectedShardVersion =
+ ChunkVersion({collEpoch, collTimestamp},
+ {origVersion.majorVersion(), origVersion.minorVersion() + 2});
ASSERT_EQ(expectedShardVersion, shardVersion);
ASSERT_EQ(shardVersion, collVersion);
@@ -163,7 +164,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -255,7 +256,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
chunk2.setCollectionUUID(collUuid);
// set up first chunk
- auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 2});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -269,7 +270,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
splitPoints.push_back(chunkSplitPoint);
// set up second chunk (chunk2)
- auto competingVersion = ChunkVersion(2, 1, collEpoch, collTimestamp);
+ auto competingVersion = ChunkVersion({collEpoch, collTimestamp}, {2, 1});
chunk2.setVersion(competingVersion);
chunk2.setShard(ShardId(_shardName));
chunk2.setMin(BSON("a" << 10));
@@ -323,7 +324,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -361,7 +362,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -397,7 +398,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -433,7 +434,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -469,7 +470,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -506,7 +507,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -542,7 +543,7 @@ TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -587,7 +588,7 @@ TEST_F(SplitChunkTest, CantCommitSplitFromChunkSplitterDuringDefragmentation) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto version = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto version = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(version);
chunk.setShard(ShardId(_shardName));
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index 1f0ac4ece12..ccbad667d35 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -359,29 +359,8 @@ void broadcastDropCollection(OperationContext* opCtx,
} // namespace
-boost::optional<BSONObj> CreateCollectionCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
- cmdBob.appendElements(_request.toBSON());
-
- const auto currPhase = [&]() {
- stdx::lock_guard l{_docMutex};
- return _doc.getPhase();
- }();
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "CreateCollectionCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
- return bob.obj();
+void CreateCollectionCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ cmdInfoBuilder->appendElements(_request.toBSON());
}
void CreateCollectionCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
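
This file and the coordinator changes that follow all apply the same simplification: the per-coordinator reportForCurrentOp() overrides disappear, the coordinator's display name is passed through the base-class constructor, and each coordinator only contributes its command-specific fields via appendCommandInfo(). A sketch of the resulting per-coordinator surface (MyCoordinator and getMyRequest() are placeholders, not names from the tree; the assumption is that the shared base class now assembles the common currentOp fields):

// Inside a coordinator class deriving from the shared DDL coordinator base:

MyCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState)
    // The display name travels through the base constructor, which presumably uses
    // it when building the common currentOp document (type/desc/op/ns/active).
    : RecoverableShardingDDLCoordinator(service, "MyCoordinator", initialState),
      _request(_doc.getMyRequest()) {}

// Only the command-specific portion of the currentOp output is appended here; the
// boilerplate previously duplicated in every reportForCurrentOp() lives in the base.
void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override {
    cmdInfoBuilder->appendElements(_request.toBSON());
}
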
diff --git a/src/mongo/db/s/create_collection_coordinator.h b/src/mongo/db/s/create_collection_coordinator.h
index a6e7d0b9709..a1f8bbea4e8 100644
--- a/src/mongo/db/s/create_collection_coordinator.h
+++ b/src/mongo/db/s/create_collection_coordinator.h
@@ -47,7 +47,7 @@ public:
using Phase = CreateCollectionCoordinatorPhaseEnum;
CreateCollectionCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState)
- : RecoverableShardingDDLCoordinator(service, initialState),
+ : RecoverableShardingDDLCoordinator(service, "CreateCollectionCoordinator", initialState),
_request(_doc.getCreateCollectionRequest()),
_critSecReason(BSON("command"
<< "createCollection"
@@ -58,9 +58,7 @@ public:
void checkIfOptionsConflict(const BSONObj& coorDoc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
/**
* Waits for the termination of the parent DDLCoordinator (so all the resources are liberated)
diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp
index 720ac168b5b..fa1e2f4b84e 100644
--- a/src/mongo/db/s/drop_collection_coordinator.cpp
+++ b/src/mongo/db/s/drop_collection_coordinator.cpp
@@ -47,31 +47,6 @@
namespace mongo {
-boost::optional<BSONObj> DropCollectionCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
-
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
-
- const auto currPhase = [&]() {
- stdx::lock_guard l{_docMutex};
- return _doc.getPhase();
- }();
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "DropCollectionCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
- return bob.obj();
-}
-
DropReply DropCollectionCoordinator::dropCollectionLocally(OperationContext* opCtx,
const NamespaceString& nss) {
{
diff --git a/src/mongo/db/s/drop_collection_coordinator.h b/src/mongo/db/s/drop_collection_coordinator.h
index 93435c0dcf1..46b37d2a415 100644
--- a/src/mongo/db/s/drop_collection_coordinator.h
+++ b/src/mongo/db/s/drop_collection_coordinator.h
@@ -43,16 +43,12 @@ public:
using Phase = DropCollectionCoordinatorPhaseEnum;
DropCollectionCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState)
- : RecoverableShardingDDLCoordinator(service, initialState) {}
+ : RecoverableShardingDDLCoordinator(service, "DropCollectionCoordinator", initialState) {}
~DropCollectionCoordinator() = default;
void checkIfOptionsConflict(const BSONObj& doc) const override {}
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
-
/**
* Locally drops a collection, cleans its CollectionShardingRuntime metadata and refreshes the
* catalog cache.
diff --git a/src/mongo/db/s/drop_database_coordinator.cpp b/src/mongo/db/s/drop_database_coordinator.cpp
index 84d64665831..54b8ef1108e 100644
--- a/src/mongo/db/s/drop_database_coordinator.cpp
+++ b/src/mongo/db/s/drop_database_coordinator.cpp
@@ -144,30 +144,6 @@ void DropDatabaseCoordinator::_dropShardedCollection(
opCtx, nss, {primaryShardId}, **executor, getCurrentSession());
}
-boost::optional<BSONObj> DropDatabaseCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
-
- const auto currPhase = [&]() {
- stdx::lock_guard l{_docMutex};
- return _doc.getPhase();
- }();
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "DropDatabaseCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
- return bob.obj();
-}
-
void DropDatabaseCoordinator::_clearDatabaseInfoOnPrimary(OperationContext* opCtx) {
Lock::DBLock dbLock(opCtx, _dbName, MODE_X);
auto dss = DatabaseShardingState::get(opCtx, _dbName);
diff --git a/src/mongo/db/s/drop_database_coordinator.h b/src/mongo/db/s/drop_database_coordinator.h
index 62b3a418b8d..f70ea2981cb 100644
--- a/src/mongo/db/s/drop_database_coordinator.h
+++ b/src/mongo/db/s/drop_database_coordinator.h
@@ -43,15 +43,12 @@ public:
using Phase = DropDatabaseCoordinatorPhaseEnum;
DropDatabaseCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState)
- : RecoverableShardingDDLCoordinator(service, initialState), _dbName(nss().db()) {}
+ : RecoverableShardingDDLCoordinator(service, "DropDatabaseCoordinator", initialState),
+ _dbName(nss().db()) {}
~DropDatabaseCoordinator() = default;
void checkIfOptionsConflict(const BSONObj& doc) const override {}
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
-
private:
StringData serializePhase(const Phase& phase) const override {
return DropDatabaseCoordinatorPhase_serializer(phase);
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 2f39ef09147..a842e4cfe03 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include <boost/optional.hpp>
#include "mongo/bson/bsonobjbuilder.h"
@@ -93,7 +91,7 @@ protected:
boost::none,
boost::none /* chunkSizeBytes */,
true,
- {ChunkType{uuid, range, ChunkVersion(1, 0, epoch, Timestamp(1, 1)), kOtherShard}});
+ {ChunkType{uuid, range, ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}), kOtherShard}});
return CollectionMetadata(ChunkManager(kThisShard,
DatabaseVersion(UUID::gen(), Timestamp(1, 1)),
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index d82f5c1790c..a0fc3e650ee 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/read_concern.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/auto_split_vector.h"
+#include "mongo/db/s/commit_chunk_migration_gen.h"
#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
#include "mongo/db/s/migration_coordinator.h"
#include "mongo/db/s/migration_util.h"
@@ -59,7 +60,6 @@
#include "mongo/s/catalog_cache_loader.h"
#include "mongo/s/grid.h"
#include "mongo/s/pm2423_feature_flags_gen.h"
-#include "mongo/s/request_types/commit_chunk_migration_request_type.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/util/duration.h"
#include "mongo/util/elapsed_tracker.h"
@@ -557,20 +557,18 @@ void MigrationSourceManager::commitChunkMetadataOnConfig() {
{
const auto metadata = _getCurrentMetadataAndCheckEpoch();
- ChunkType migratedChunkType;
- migratedChunkType.setMin(*_args.getMin());
- migratedChunkType.setMax(*_args.getMax());
- migratedChunkType.setVersion(*_chunkVersion);
+ auto migratedChunk = MigratedChunkType(*_chunkVersion, *_args.getMin(), *_args.getMax());
const auto currentTime = VectorClock::get(_opCtx)->getTime();
- CommitChunkMigrationRequest::appendAsCommand(&builder,
- nss(),
- _args.getFromShard(),
- _args.getToShard(),
- migratedChunkType,
- metadata.getCollVersion(),
- currentTime.clusterTime().asTimestamp());
+ CommitChunkMigrationRequest request(nss(),
+ _args.getFromShard(),
+ _args.getToShard(),
+ migratedChunk,
+ metadata.getCollVersion(),
+ currentTime.clusterTime().asTimestamp());
+
+ request.serialize({}, &builder);
builder.append(kWriteConcernField, kMajorityWriteConcern.toBSON());
}
diff --git a/src/mongo/db/s/move_primary_coordinator.cpp b/src/mongo/db/s/move_primary_coordinator.cpp
index 32d7a6af9bf..863a4c17b9d 100644
--- a/src/mongo/db/s/move_primary_coordinator.cpp
+++ b/src/mongo/db/s/move_primary_coordinator.cpp
@@ -46,24 +46,10 @@
namespace mongo {
-boost::optional<BSONObj> MovePrimaryCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
- cmdBob.append("request", BSON(_doc.kToShardIdFieldName << _doc.getToShardId()));
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "MovePrimaryCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("active", true);
- return bob.obj();
-}
+void MovePrimaryCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ stdx::lock_guard lk{_docMutex};
+ cmdInfoBuilder->append("request", BSON(_doc.kToShardIdFieldName << _doc.getToShardId()));
+}
void MovePrimaryCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
// If we have two shard collections on the same namespace, then the arguments must be the same.
diff --git a/src/mongo/db/s/move_primary_coordinator.h b/src/mongo/db/s/move_primary_coordinator.h
index c8d01060a4a..80a1586e0a4 100644
--- a/src/mongo/db/s/move_primary_coordinator.h
+++ b/src/mongo/db/s/move_primary_coordinator.h
@@ -39,15 +39,13 @@ class MovePrimaryCoordinator final
: public ShardingDDLCoordinatorImpl<MovePrimaryCoordinatorDocument> {
public:
MovePrimaryCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState)
- : ShardingDDLCoordinatorImpl(service, initialState) {}
+ : ShardingDDLCoordinatorImpl(service, "MovePrimaryCoordinator", initialState) {}
~MovePrimaryCoordinator() = default;
void checkIfOptionsConflict(const BSONObj& coorDoc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
bool canAlwaysStartWhenUserWritesAreDisabled() const override {
return true;
diff --git a/src/mongo/db/s/operation_sharding_state_test.cpp b/src/mongo/db/s/operation_sharding_state_test.cpp
index 0c4732b51ab..9c275398f85 100644
--- a/src/mongo/db/s/operation_sharding_state_test.cpp
+++ b/src/mongo/db/s/operation_sharding_state_test.cpp
@@ -47,7 +47,7 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleDbVersion) {
}
TEST_F(OperationShardingStateTest, ScopedSetShardRoleShardVersion) {
- ChunkVersion shardVersion(1, 0, OID::gen(), Timestamp(1, 0));
+ ChunkVersion shardVersion({OID::gen(), Timestamp(1, 0)}, {1, 0});
ScopedSetShardRole scopedSetShardRole(operationContext(), kNss, shardVersion, boost::none);
auto& oss = OperationShardingState::get(operationContext());
@@ -58,13 +58,13 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleChangeShardVersionSameNames
auto& oss = OperationShardingState::get(operationContext());
{
- ChunkVersion shardVersion1(1, 0, OID::gen(), Timestamp(10, 0));
+ ChunkVersion shardVersion1({OID::gen(), Timestamp(10, 0)}, {1, 0});
ScopedSetShardRole scopedSetShardRole1(
operationContext(), kNss, shardVersion1, boost::none);
ASSERT_EQ(shardVersion1, *oss.getShardVersion(kNss));
}
{
- ChunkVersion shardVersion2(1, 0, OID::gen(), Timestamp(20, 0));
+ ChunkVersion shardVersion2({OID::gen(), Timestamp(20, 0)}, {1, 0});
ScopedSetShardRole scopedSetShardRole2(
operationContext(), kNss, shardVersion2, boost::none);
ASSERT_EQ(shardVersion2, *oss.getShardVersion(kNss));
@@ -72,8 +72,8 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleChangeShardVersionSameNames
}
TEST_F(OperationShardingStateTest, ScopedSetShardRoleRecursiveShardVersionDifferentNamespaces) {
- ChunkVersion shardVersion1(1, 0, OID::gen(), Timestamp(10, 0));
- ChunkVersion shardVersion2(1, 0, OID::gen(), Timestamp(20, 0));
+ ChunkVersion shardVersion1({OID::gen(), Timestamp(10, 0)}, {1, 0});
+ ChunkVersion shardVersion2({OID::gen(), Timestamp(20, 0)}, {1, 0});
ScopedSetShardRole scopedSetShardRole1(operationContext(), kNss, shardVersion1, boost::none);
ScopedSetShardRole scopedSetShardRole2(
diff --git a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
index a4786584ab2..b8d981bb4ce 100644
--- a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
+++ b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
@@ -86,7 +86,8 @@ void notifyChangeStreamsOnRefineCollectionShardKeyComplete(OperationContext* opC
RefineCollectionShardKeyCoordinator::RefineCollectionShardKeyCoordinator(
ShardingDDLCoordinatorService* service, const BSONObj& initialState)
- : RecoverableShardingDDLCoordinator(service, initialState),
+ : RecoverableShardingDDLCoordinator(
+ service, "RefineCollectionShardKeyCoordinator", initialState),
_request(_doc.getRefineCollectionShardKeyRequest()),
_newShardKey(_doc.getNewShardKey()) {}
@@ -102,23 +103,8 @@ void RefineCollectionShardKeyCoordinator::checkIfOptionsConflict(const BSONObj&
_request.toBSON() == otherDoc.getRefineCollectionShardKeyRequest().toBSON()));
}
-boost::optional<BSONObj> RefineCollectionShardKeyCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
- cmdBob.appendElements(_request.toBSON());
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "RefineCollectionShardKeyCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("active", true);
- return bob.obj();
+void RefineCollectionShardKeyCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ cmdInfoBuilder->appendElements(_request.toBSON());
}
ExecutorFuture<void> RefineCollectionShardKeyCoordinator::_runImpl(
diff --git a/src/mongo/db/s/refine_collection_shard_key_coordinator.h b/src/mongo/db/s/refine_collection_shard_key_coordinator.h
index 21a1a46d5fe..c461383e876 100644
--- a/src/mongo/db/s/refine_collection_shard_key_coordinator.h
+++ b/src/mongo/db/s/refine_collection_shard_key_coordinator.h
@@ -47,9 +47,7 @@ public:
void checkIfOptionsConflict(const BSONObj& coorDoc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
private:
StringData serializePhase(const Phase& phase) const override {
diff --git a/src/mongo/db/s/rename_collection_coordinator.cpp b/src/mongo/db/s/rename_collection_coordinator.cpp
index 8032fbd1922..64680e96cc2 100644
--- a/src/mongo/db/s/rename_collection_coordinator.cpp
+++ b/src/mongo/db/s/rename_collection_coordinator.cpp
@@ -90,7 +90,7 @@ boost::optional<UUID> getCollectionUUID(OperationContext* opCtx,
RenameCollectionCoordinator::RenameCollectionCoordinator(ShardingDDLCoordinatorService* service,
const BSONObj& initialState)
- : RecoverableShardingDDLCoordinator(service, initialState),
+ : RecoverableShardingDDLCoordinator(service, "RenameCollectionCoordinator", initialState),
_request(_doc.getRenameCollectionRequest()) {}
void RenameCollectionCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
@@ -111,30 +111,8 @@ std::vector<StringData> RenameCollectionCoordinator::_acquireAdditionalLocks(
return {_request.getTo().ns()};
}
-boost::optional<BSONObj> RenameCollectionCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
-
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
- cmdBob.appendElements(_request.toBSON());
-
- const auto currPhase = [&]() {
- stdx::lock_guard l{_docMutex};
- return _doc.getPhase();
- }();
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "RenameCollectionCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
- return bob.obj();
+void RenameCollectionCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ cmdInfoBuilder->appendElements(_request.toBSON());
}
ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
diff --git a/src/mongo/db/s/rename_collection_coordinator.h b/src/mongo/db/s/rename_collection_coordinator.h
index 502bd0cead7..32621bb6ea4 100644
--- a/src/mongo/db/s/rename_collection_coordinator.h
+++ b/src/mongo/db/s/rename_collection_coordinator.h
@@ -47,9 +47,7 @@ public:
void checkIfOptionsConflict(const BSONObj& doc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
/**
* Waits for the rename to complete and returns the collection version.
diff --git a/src/mongo/db/s/reshard_collection_coordinator.cpp b/src/mongo/db/s/reshard_collection_coordinator.cpp
index ee9f7115e93..c0c9648e0a4 100644
--- a/src/mongo/db/s/reshard_collection_coordinator.cpp
+++ b/src/mongo/db/s/reshard_collection_coordinator.cpp
@@ -107,7 +107,7 @@ ReshardCollectionCoordinator::ReshardCollectionCoordinator(ShardingDDLCoordinato
ReshardCollectionCoordinator::ReshardCollectionCoordinator(ShardingDDLCoordinatorService* service,
const BSONObj& initialState,
bool persistCoordinatorDocument)
- : RecoverableShardingDDLCoordinator(service, initialState),
+ : RecoverableShardingDDLCoordinator(service, "ReshardCollectionCoordinator", initialState),
_request(_doc.getReshardCollectionRequest()),
_persistCoordinatorDocument(persistCoordinatorDocument) {}
@@ -122,23 +122,8 @@ void ReshardCollectionCoordinator::checkIfOptionsConflict(const BSONObj& doc) co
_request.toBSON() == otherDoc.getReshardCollectionRequest().toBSON()));
}
-boost::optional<BSONObj> ReshardCollectionCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
- cmdBob.appendElements(_request.toBSON());
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "ReshardCollectionCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("active", true);
- return bob.obj();
+void ReshardCollectionCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ cmdInfoBuilder->appendElements(_request.toBSON());
}
void ReshardCollectionCoordinator::_enterPhase(Phase newPhase) {
diff --git a/src/mongo/db/s/reshard_collection_coordinator.h b/src/mongo/db/s/reshard_collection_coordinator.h
index a17edf9f2e8..085c183dc55 100644
--- a/src/mongo/db/s/reshard_collection_coordinator.h
+++ b/src/mongo/db/s/reshard_collection_coordinator.h
@@ -46,9 +46,7 @@ public:
void checkIfOptionsConflict(const BSONObj& coorDoc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
protected:
ReshardCollectionCoordinator(ShardingDDLCoordinatorService* service,
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index 26ba0393ccc..fe15da0a5ca 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -1858,30 +1858,8 @@ ReshardingCoordinatorService::ReshardingCoordinator::_awaitAllParticipantShardsD
const auto cmdObj =
ShardsvrDropCollectionIfUUIDNotMatchingRequest(nss, notMatchingThisUUID)
.toBSON({});
-
- try {
- sharding_ddl_util::sendAuthenticatedCommandToShards(
- opCtx.get(), nss.db(), cmdObj, allShardIds, **executor);
- } catch (const DBException& ex) {
- if (ex.code() == ErrorCodes::CommandNotFound) {
- // TODO SERVER-60531 get rid of the catch logic
- // Cleanup failed because at least one shard could is using a binary
- // not supporting the ShardsvrDropCollectionIfUUIDNotMatching command.
- LOGV2_INFO(5423100,
- "Resharding coordinator couldn't guarantee older incarnations "
- "of the collection were dropped. A chunk migration to a shard "
- "with an older incarnation of the collection will fail",
- "namespace"_attr = nss.ns());
- } else if (opCtx->checkForInterruptNoAssert().isOK()) {
- LOGV2_INFO(
- 5423101,
- "Resharding coordinator failed while trying to drop possible older "
- "incarnations of the collection. A chunk migration to a shard with "
- "an older incarnation of the collection will fail",
- "namespace"_attr = nss.ns(),
- "error"_attr = redact(ex.toStatus()));
- }
- }
+ _reshardingCoordinatorExternalState->sendCommandToShards(
+ opCtx.get(), nss.db(), cmdObj, allShardIds, **executor);
}
reshardingPauseCoordinatorBeforeRemovingStateDoc.pauseWhileSetAndNotCanceled(
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
index 52c52654e89..1fc380093bf 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <boost/optional.hpp>
#include <functional>
@@ -59,7 +56,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -415,7 +411,7 @@ public:
_newShardKey.isShardKey(shardKey.toBSON()) ? _newChunkRanges : _oldChunkRanges;
// Create two chunks, one on each shard with the given namespace and epoch
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(uuid, chunkRanges[0], version, ShardId("shard0000"));
chunk1.setName(ids[0]);
version.incMinor();
diff --git a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
index d02c0babe27..f5f588ac948 100644
--- a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
@@ -27,12 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
-#include <memory>
-#include <vector>
-
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/persistent_task_store.h"
#include "mongo/db/query/collation/collator_factory_mock.h"
@@ -50,7 +44,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -78,7 +71,7 @@ public:
std::vector<ChunkType> chunks = {ChunkType{
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_myDonorId}};
auto rt = RoutingTableHistory::makeNew(_sourceNss,
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
index bb47fe20b83..e5bd8defdbd 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
@@ -111,8 +111,10 @@ protected:
const OID& epoch,
const ShardId& shardThatChunkExistsOn) {
auto range = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << MAXKEY));
- auto chunk = ChunkType(
- uuid, std::move(range), ChunkVersion(1, 0, epoch, timestamp), shardThatChunkExistsOn);
+ auto chunk = ChunkType(uuid,
+ std::move(range),
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ shardThatChunkExistsOn);
ChunkManager cm(kThisShard.getShardId(),
DatabaseVersion(uuid, timestamp),
makeStandaloneRoutingTableHistory(
diff --git a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
index a9a60cd9aa0..9c09f5ebcf0 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
@@ -27,12 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
-#include <memory>
-#include <vector>
-
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/catalog_raii.h"
@@ -289,16 +283,16 @@ private:
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY),
BSON(_currentShardKey << -std::numeric_limits<double>::infinity())},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_myDonorId},
ChunkType{_sourceUUID,
ChunkRange{BSON(_currentShardKey << -std::numeric_limits<double>::infinity()),
BSON(_currentShardKey << 0)},
- ChunkVersion(100, 1, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 1}),
_otherDonorId},
ChunkType{_sourceUUID,
ChunkRange{BSON(_currentShardKey << 0), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 2, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 2}),
_myDonorId}};
return makeChunkManager(
@@ -311,7 +305,7 @@ private:
std::vector<ChunkType> chunks = {
ChunkType{outputUuid,
ChunkRange{BSON(_newShardKey << MINKEY), BSON(_newShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_myDonorId}};
return makeChunkManager(
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index b72f0ad34e8..4e6a5489f71 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -83,7 +83,7 @@ public:
std::vector<ChunkType> chunks = {ChunkType{
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_someDonorId}};
auto rt = RoutingTableHistory::makeNew(_sourceNss,
diff --git a/src/mongo/db/s/set_allow_migrations_coordinator.cpp b/src/mongo/db/s/set_allow_migrations_coordinator.cpp
index 9362005a11f..d8cb15afb2e 100644
--- a/src/mongo/db/s/set_allow_migrations_coordinator.cpp
+++ b/src/mongo/db/s/set_allow_migrations_coordinator.cpp
@@ -64,23 +64,9 @@ void SetAllowMigrationsCoordinator::checkIfOptionsConflict(const BSONObj& doc) c
otherDoc.getSetAllowMigrationsRequest().toBSON()));
}
-boost::optional<BSONObj> SetAllowMigrationsCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
- cmdBob.appendElements(_doc.getSetAllowMigrationsRequest().toBSON());
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "SetAllowMigrationsCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("active", true);
- return bob.obj();
+void SetAllowMigrationsCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ stdx::lock_guard lk{_docMutex};
+ cmdInfoBuilder->appendElements(_doc.getSetAllowMigrationsRequest().toBSON());
}
ExecutorFuture<void> SetAllowMigrationsCoordinator::_runImpl(
diff --git a/src/mongo/db/s/set_allow_migrations_coordinator.h b/src/mongo/db/s/set_allow_migrations_coordinator.h
index 9f7915f888b..78d2e03696a 100644
--- a/src/mongo/db/s/set_allow_migrations_coordinator.h
+++ b/src/mongo/db/s/set_allow_migrations_coordinator.h
@@ -44,14 +44,12 @@ class SetAllowMigrationsCoordinator final
public:
SetAllowMigrationsCoordinator(ShardingDDLCoordinatorService* service,
const BSONObj& initialState)
- : ShardingDDLCoordinatorImpl(service, initialState),
+ : ShardingDDLCoordinatorImpl(service, "SetAllowMigrationsCoordinator", initialState),
_allowMigrations(_doc.getAllowMigrations()) {}
void checkIfOptionsConflict(const BSONObj& coorDoc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
bool canAlwaysStartWhenUserWritesAreDisabled() const override {
return true;
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index d7d8386d10b..e52a5e28d1a 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -136,12 +136,11 @@ StatusWith<RefreshState> getPersistedRefreshFlags(OperationContext* opCtx,
entry.getRefreshing() ? *entry.getRefreshing() : true,
entry.getLastRefreshedCollectionVersion()
? *entry.getLastRefreshedCollectionVersion()
- : ChunkVersion(0, 0, entry.getEpoch(), entry.getTimestamp())};
+ : ChunkVersion({entry.getEpoch(), entry.getTimestamp()}, {0, 0})};
}
StatusWith<ShardCollectionType> readShardCollectionsEntry(OperationContext* opCtx,
const NamespaceString& nss) {
-
try {
DBDirectClient client(opCtx);
FindCommandRequest findRequest{NamespaceString::kShardConfigCollectionsNamespace};
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index 6bad5d66ac1..af35cf373e8 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -27,14 +27,10 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/s/shard_metadata_util.h"
-
-#include "mongo/base/status.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/s/shard_metadata_util.h"
#include "mongo/db/s/shard_server_test_fixture.h"
#include "mongo/db/s/type_shard_collection.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -159,7 +155,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
}
}
- ChunkVersion maxCollVersion{0, 0, OID::gen(), Timestamp(1, 1)};
+ ChunkVersion maxCollVersion{{OID::gen(), Timestamp(1, 1)}, {0, 0}};
const KeyPattern keyPattern{BSON("a" << 1)};
const BSONObj defaultCollation{BSON("locale"
<< "fr_CA")};
@@ -216,7 +212,7 @@ TEST_F(ShardMetadataUtilTest, PersistedRefreshSignalStartAndFinish) {
ASSERT(state.generation.isSameCollection(maxCollVersion));
ASSERT_EQUALS(state.refreshing, true);
ASSERT_EQUALS(state.lastRefreshedCollectionVersion,
- ChunkVersion(0, 0, maxCollVersion.epoch(), maxCollVersion.getTimestamp()));
+ ChunkVersion({maxCollVersion.epoch(), maxCollVersion.getTimestamp()}, {0, 0}));
// Signal refresh finish
ASSERT_OK(unsetPersistedRefreshFlags(operationContext(), kNss, maxCollVersion));
@@ -235,7 +231,7 @@ TEST_F(ShardMetadataUtilTest, WriteAndReadChunks) {
// read all the chunks
QueryAndSort allChunkDiff = createShardChunkDiffQuery(
- ChunkVersion(0, 0, maxCollVersion.epoch(), maxCollVersion.getTimestamp()));
+ ChunkVersion({maxCollVersion.epoch(), maxCollVersion.getTimestamp()}, {0, 0}));
std::vector<ChunkType> readChunks = assertGet(readShardChunks(operationContext(),
kNss,
allChunkDiff.query,
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
index 9f2f1ddf8d0..a111b9bf592 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include <boost/optional/optional_io.hpp>
#include "mongo/db/s/shard_server_catalog_cache_loader.h"
@@ -203,7 +201,7 @@ CollectionType ShardServerCatalogCacheLoaderTest::makeCollectionType(
std::pair<CollectionType, vector<ChunkType>>
ShardServerCatalogCacheLoaderTest::setUpChunkLoaderWithFiveChunks() {
- ChunkVersion collectionVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ ChunkVersion collectionVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
@@ -371,7 +369,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindNewEpoch)
// Then refresh again and find that the collection has been dropped and recreated.
- ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), Timestamp(2, 0));
+ ChunkVersion collVersionWithNewEpoch({OID::gen(), Timestamp(2, 0)}, {1, 0});
CollectionType collectionTypeWithNewEpoch = makeCollectionType(collVersionWithNewEpoch);
vector<ChunkType> chunksWithNewEpoch = makeFiveChunks(collVersionWithNewEpoch);
_remoteLoaderMock->setCollectionRefreshReturnValue(collectionTypeWithNewEpoch);
@@ -398,7 +396,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindMixedChun
// Then refresh again and retrieve chunks from the config server that have mixed epoches, like
// as if the chunks read yielded around a drop and recreate of the collection.
- ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), Timestamp(2, 0));
+ ChunkVersion collVersionWithNewEpoch({OID::gen(), Timestamp(2, 0)}, {1, 0});
CollectionType collectionTypeWithNewEpoch = makeCollectionType(collVersionWithNewEpoch);
vector<ChunkType> chunksWithNewEpoch = makeFiveChunks(collVersionWithNewEpoch);
vector<ChunkType> mixedChunks;
@@ -441,7 +439,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindMixedChun
}
TEST_F(ShardServerCatalogCacheLoaderTest, TimeseriesFieldsAreProperlyPropagatedOnSSCCL) {
- ChunkVersion collectionVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ ChunkVersion collectionVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
@@ -483,7 +481,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, TimeseriesFieldsAreProperlyPropagatedO
}
void ShardServerCatalogCacheLoaderTest::refreshCollectionEpochOnRemoteLoader() {
- ChunkVersion collectionVersion(1, 2, OID::gen(), Timestamp(1, 1));
+ ChunkVersion collectionVersion({OID::gen(), Timestamp(1, 1)}, {1, 2});
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
_remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.h b/src/mongo/db/s/sharding_ddl_coordinator.h
index 11be5152036..51dcc023f60 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.h
+++ b/src/mongo/db/s/sharding_ddl_coordinator.h
@@ -174,11 +174,19 @@ private:
template <class StateDoc>
class ShardingDDLCoordinatorImpl : public ShardingDDLCoordinator {
+public:
+ boost::optional<BSONObj> reportForCurrentOp(
+ MongoProcessInterface::CurrentOpConnectionsMode connMode,
+ MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override {
+ return basicReportBuilder().obj();
+ }
protected:
ShardingDDLCoordinatorImpl(ShardingDDLCoordinatorService* service,
+ const std::string& name,
const BSONObj& initialStateDoc)
: ShardingDDLCoordinator(service, initialStateDoc),
+ _coordinatorName(name),
_initialState(initialStateDoc.getOwned()),
_doc(StateDoc::parse(IDLParserErrorContext("CoordinatorDocument"), _initialState)) {}
@@ -186,6 +194,34 @@ protected:
return _doc.getShardingDDLCoordinatorMetadata();
}
+
+ virtual void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {};
+
+ virtual BSONObjBuilder basicReportBuilder() const noexcept {
+ BSONObjBuilder bob;
+
+ // Append static info
+ bob.append("type", "op");
+ bob.append("ns", nss().toString());
+ bob.append("desc", _coordinatorName);
+ bob.append("op", "command");
+ bob.append("active", true);
+
+ // Create command description
+ BSONObjBuilder cmdInfoBuilder;
+ {
+ stdx::lock_guard lk{_docMutex};
+ if (const auto& optComment = getForwardableOpMetadata().getComment()) {
+ cmdInfoBuilder.append(optComment.get().firstElement());
+ }
+ }
+ appendCommandInfo(&cmdInfoBuilder);
+ bob.append("command", cmdInfoBuilder.obj());
+
+ return bob;
+ }
+
+ const std::string _coordinatorName;
const BSONObj _initialState;
mutable Mutex _docMutex = MONGO_MAKE_LATCH("ShardingDDLCoordinator::_docMutex");
StateDoc _doc;
@@ -193,14 +229,14 @@ protected:
template <class StateDoc, class Phase>
class RecoverableShardingDDLCoordinator : public ShardingDDLCoordinatorImpl<StateDoc> {
-
protected:
using ShardingDDLCoordinatorImpl<StateDoc>::_doc;
using ShardingDDLCoordinatorImpl<StateDoc>::_docMutex;
RecoverableShardingDDLCoordinator(ShardingDDLCoordinatorService* service,
+ const std::string& name,
const BSONObj& initialStateDoc)
- : ShardingDDLCoordinatorImpl<StateDoc>(service, initialStateDoc) {}
+ : ShardingDDLCoordinatorImpl<StateDoc>(service, name, initialStateDoc) {}
virtual StringData serializePhase(const Phase& phase) const = 0;
@@ -245,6 +281,18 @@ protected:
}
}
+ BSONObjBuilder basicReportBuilder() const noexcept override {
+ auto baseReportBuilder = ShardingDDLCoordinatorImpl<StateDoc>::basicReportBuilder();
+
+ const auto currPhase = [&]() {
+ stdx::lock_guard l{_docMutex};
+ return _doc.getPhase();
+ }();
+
+ baseReportBuilder.append("currentPhase", serializePhase(currPhase));
+ return baseReportBuilder;
+ }
+
void _insertStateDocument(OperationContext* opCtx, StateDoc&& newDoc) {
auto copyMetadata = newDoc.getShardingDDLCoordinatorMetadata();
copyMetadata.setRecoveredFromDisk(true);
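
The hunk above centralizes currentOp reporting: ShardingDDLCoordinatorImpl::reportForCurrentOp() now returns basicReportBuilder().obj(), which appends the static fields ("type", "ns", "desc", "op", "active") plus the forwardable comment under _docMutex and delegates the command payload to a virtual appendCommandInfo() hook, while RecoverableShardingDDLCoordinator layers "currentPhase" on top. A minimal sketch of what a concrete coordinator supplies under the new scheme, modeled on the SetAllowMigrationsCoordinator change earlier in this diff; the MyCoordinator, MyCoordinatorDocument, MyPhaseEnum names, the getMyRequest() accessor, and the MyPhaseEnum_serializer helper are illustrative only, not part of the patch:

// Hypothetical coordinator, shown only to illustrate the reporting hooks introduced above.
class MyCoordinator final
    : public RecoverableShardingDDLCoordinator<MyCoordinatorDocument, MyPhaseEnum> {
public:
    MyCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState)
        // The name passed here becomes the "desc" field of the currentOp report.
        : RecoverableShardingDDLCoordinator(service, "MyCoordinator", initialState) {}

private:
    // The base class assembles the rest of the report; the subclass only contributes the
    // command payload, reading its state document under _docMutex.
    void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override {
        stdx::lock_guard lk{_docMutex};
        cmdInfoBuilder->appendElements(_doc.getMyRequest().toBSON());
    }

    // Serialized into the "currentPhase" field by basicReportBuilder() above.
    StringData serializePhase(const MyPhaseEnum& phase) const override {
        return MyPhaseEnum_serializer(phase);
    }
};
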
diff --git a/src/mongo/db/s/sharding_ddl_util_test.cpp b/src/mongo/db/s/sharding_ddl_util_test.cpp
index fd4e3905980..2ff3925c53e 100644
--- a/src/mongo/db/s/sharding_ddl_util_test.cpp
+++ b/src/mongo/db/s/sharding_ddl_util_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/logical_session_cache_noop.h"
#include "mongo/db/namespace_string.h"
@@ -47,7 +44,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
@@ -119,7 +115,7 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
const int nChunks = 10;
std::vector<ChunkType> chunks;
for (int i = 0; i < nChunks; i++) {
- ChunkVersion chunkVersion(1, i, fromEpoch, collTimestamp);
+ ChunkVersion chunkVersion({fromEpoch, collTimestamp}, {1, uint32_t(i)});
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUUID);
@@ -138,7 +134,7 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
const auto toEpoch = OID::gen();
const auto toUUID = UUID::gen();
for (int i = 0; i < nChunks; i++) {
- ChunkVersion chunkVersion(1, i, toEpoch, Timestamp(2));
+ ChunkVersion chunkVersion({toEpoch, Timestamp(2)}, {1, uint32_t(i)});
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(toUUID);
@@ -215,7 +211,7 @@ TEST_F(ShardingDDLUtilTest, RenamePreconditionsAreMet) {
opCtx, false /* sourceIsSharded */, kToNss, false /* dropTarget */);
// Initialize a chunk
- ChunkVersion chunkVersion(1, 1, OID::gen(), Timestamp(2, 1));
+ ChunkVersion chunkVersion({OID::gen(), Timestamp(2, 1)}, {1, 1});
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
@@ -256,7 +252,7 @@ TEST_F(ShardingDDLUtilTest, RenamePreconditionsTargetCollectionExists) {
auto opCtx = operationContext();
// Initialize a chunk
- ChunkVersion chunkVersion(1, 1, OID::gen(), Timestamp(2, 1));
+ ChunkVersion chunkVersion({OID::gen(), Timestamp(2, 1)}, {1, 1});
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
diff --git a/src/mongo/db/s/sharding_server_status.cpp b/src/mongo/db/s/sharding_server_status.cpp
index 8d560454382..82de4cfc5c9 100644
--- a/src/mongo/db/s/sharding_server_status.cpp
+++ b/src/mongo/db/s/sharding_server_status.cpp
@@ -73,14 +73,20 @@ public:
result.append("configsvrConnectionString",
shardRegistry->getConfigServerConnectionString().toString());
+ const auto vcTime = VectorClock::get(opCtx)->getTime();
+
const auto configOpTime = [&]() {
- const auto vcTime = VectorClock::get(opCtx)->getTime();
const auto vcConfigTimeTs = vcTime.configTime().asTimestamp();
return mongo::repl::OpTime(vcConfigTimeTs, mongo::repl::OpTime::kUninitializedTerm);
}();
-
configOpTime.append(&result, "lastSeenConfigServerOpTime");
+ const auto topologyOpTime = [&]() {
+ const auto vcTopologyTimeTs = vcTime.topologyTime().asTimestamp();
+ return mongo::repl::OpTime(vcTopologyTimeTs, mongo::repl::OpTime::kUninitializedTerm);
+ }();
+ topologyOpTime.append(&result, "lastSeenTopologyOpTime");
+
const long long maxChunkSizeInBytes =
grid->getBalancerConfiguration()->getMaxChunkSizeBytes();
result.append("maxChunkSizeInBytes", maxChunkSizeInBytes);
diff --git a/src/mongo/db/s/sharding_write_router_bm.cpp b/src/mongo/db/s/sharding_write_router_bm.cpp
index 7a47c6eed21..6d20ad82215 100644
--- a/src/mongo/db/s/sharding_write_router_bm.cpp
+++ b/src/mongo/db/s/sharding_write_router_bm.cpp
@@ -103,7 +103,7 @@ std::pair<std::vector<mongo::ChunkType>, mongo::ChunkManager> createChunks(
for (uint32_t i = 0; i < nChunks; ++i) {
chunks.emplace_back(collIdentifier,
getRangeForChunk(i, nChunks),
- ChunkVersion{i + 1, 0, collEpoch, collTimestamp},
+ ChunkVersion({collEpoch, collTimestamp}, {i + 1, 0}),
pessimalShardSelector(i, nShards, nChunks));
}
diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp
index 41b758cffec..c317922c251 100644
--- a/src/mongo/db/s/transaction_coordinator_service.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service.cpp
@@ -379,6 +379,10 @@ TransactionCoordinatorService::getAllRemovalFuturesForCoordinatorsForInternalTra
std::shared_ptr<CatalogAndScheduler> cas = _getCatalogAndScheduler(opCtx);
auto& catalog = cas->catalog;
+ // On step up, we want to wait until the catalog has recovered all active transaction
+ // coordinators before getting the removal futures.
+ cas->recoveryTaskCompleted->get(opCtx);
+
auto predicate = [](const LogicalSessionId lsid,
const TxnNumberAndRetryCounter txnNumberAndRetryCounter,
const std::shared_ptr<TransactionCoordinator> transactionCoordinator) {
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
index f66746b71c0..957888cfa35 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
@@ -160,55 +160,6 @@ std::unique_ptr<DBClientCursor> MockDBClientConnection::find(
return nullptr;
}
-std::unique_ptr<mongo::DBClientCursor> MockDBClientConnection::query_DEPRECATED(
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- int limit,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize,
- boost::optional<BSONObj> readConcernObj) {
- checkConnection();
-
- try {
- mongo::BSONArray result(_remoteServer->query(_remoteServerInstanceID,
- nsOrUuid,
- filter,
- querySettings,
- limit,
- nToSkip,
- fieldsToReturn,
- queryOptions,
- batchSize,
- readConcernObj));
-
- BSONArray resultsInCursor;
-
- // A simple mock implementation of a resumable query, where we skip the first 'n' fields
- // where 'n' is given by the mock resume token.
- auto nToSkip = 0;
- BSONObj querySettingsAsBSON = querySettings.getFullSettingsDeprecated();
- if (querySettingsAsBSON.hasField("$_resumeAfter")) {
- nToSkip = nToSkipFromResumeAfter(querySettingsAsBSON.getField("$_resumeAfter").Obj());
- }
-
- bool provideResumeToken = false;
- if (querySettingsAsBSON.hasField("$_requestResumeToken")) {
- provideResumeToken = true;
- }
-
-
- return bsonArrayToCursor(std::move(result), nToSkip, provideResumeToken, batchSize);
- } catch (const mongo::DBException&) {
- _failed.store(true);
- throw;
- }
-
- return nullptr;
-}
-
mongo::ConnectionString::ConnectionType MockDBClientConnection::type() const {
return mongo::ConnectionString::ConnectionType::kCustom;
}
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.h b/src/mongo/dbtests/mock/mock_dbclient_connection.h
index 0baac0ebfba..4b60f2bec4a 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.h
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.h
@@ -104,7 +104,6 @@ public:
// DBClientBase methods
//
using DBClientBase::find;
- using DBClientBase::query_DEPRECATED;
bool connect(const char* hostName, StringData applicationName, std::string& errmsg);
@@ -125,17 +124,6 @@ public:
const ReadPreferenceSetting& /*unused*/,
ExhaustMode /*unused*/) override;
- std::unique_ptr<mongo::DBClientCursor> query_DEPRECATED(
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter = BSONObj{},
- const client_deprecated::Query& querySettings = client_deprecated::Query(),
- int limit = 0,
- int nToSkip = 0,
- const mongo::BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- int batchSize = 0,
- boost::optional<BSONObj> readConcernObj = boost::none) override;
-
uint64_t getSockCreationMicroSec() const override;
void insert(const std::string& ns,
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.cpp b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
index b25a4021beb..0b98308d1d2 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.cpp
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
@@ -228,20 +228,6 @@ mongo::BSONArray MockRemoteDBServer::find(MockRemoteDBServer::InstanceID id,
return findImpl(id, findRequest.getNamespaceOrUUID(), findRequest.getProjection());
}
-mongo::BSONArray MockRemoteDBServer::query(MockRemoteDBServer::InstanceID id,
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- int limit,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize,
- boost::optional<BSONObj> readConcernObj) {
- BSONObj projection = fieldsToReturn ? *fieldsToReturn : BSONObj{};
- return findImpl(id, nsOrUuid, std::move(projection));
-}
-
mongo::ConnectionString::ConnectionType MockRemoteDBServer::type() const {
return mongo::ConnectionString::ConnectionType::kCustom;
}
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.h b/src/mongo/dbtests/mock/mock_remote_db_server.h
index c20dc851580..034ad8e7ea4 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.h
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.h
@@ -32,7 +32,6 @@
#include <string>
#include <vector>
-#include "mongo/client/client_deprecated.h"
#include "mongo/client/connection_string.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/query/find_command_gen.h"
@@ -168,20 +167,6 @@ public:
*/
mongo::BSONArray find(InstanceID id, const FindCommandRequest& findRequest);
- /**
- * Legacy query API: New callers should use 'find()' rather than this method.
- */
- mongo::BSONArray query(InstanceID id,
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- int limit = 0,
- int nToSkip = 0,
- const mongo::BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- int batchSize = 0,
- boost::optional<BSONObj> readConcernObj = boost::none);
-
//
// Getters
//
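
With the legacy query()/query_DEPRECATED() mock paths deleted above, the surviving entry point in these fixtures is the find() overload that takes a FindCommandRequest (see MockDBClientConnection::find and MockRemoteDBServer::find earlier in this diff). A minimal sketch of the replacement call, assuming a primary-only read preference and no exhaust mode are acceptable for the mock:

// Sketch of the modern path that replaces the removed legacy query API in the mocks.
MockRemoteDBServer server("test");
MockDBClientConnection conn(&server);
server.insert("test.user", BSON("x" << 1));

FindCommandRequest findRequest{NamespaceString("test.user")};
findRequest.setFilter(BSON("x" << 1));

std::unique_ptr<DBClientCursor> cursor =
    conn.find(std::move(findRequest),
              ReadPreferenceSetting{ReadPreference::PrimaryOnly},
              ExhaustMode::kOff);
// The cursor is consumed with more()/next(), as in the retained QueryCount test.
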
diff --git a/src/mongo/dbtests/mock_dbclient_conn_test.cpp b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
index 91740b4358f..b9228513cf6 100644
--- a/src/mongo/dbtests/mock_dbclient_conn_test.cpp
+++ b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
@@ -77,45 +77,6 @@ TEST(MockDBClientConnTest, QueryCount) {
}
}
-// This test should be removed when the legacy query API is removed.
-TEST(MockDBClientConnTest, LegacyQueryApiBumpsQueryCount) {
- MockRemoteDBServer server("test");
- MockDBClientConnection conn(&server);
- ASSERT_EQUALS(0U, server.getQueryCount());
- conn.query_DEPRECATED(NamespaceString("foo.bar"));
- ASSERT_EQUALS(1U, server.getQueryCount());
-}
-
-// This test should be removed when the legacy query API is removed.
-TEST(MockDBClientConnTest, LegacyQueryApiReturnsInsertedDocuments) {
- MockRemoteDBServer server("test");
- const std::string ns("test.user");
-
- {
- MockDBClientConnection conn(&server);
- std::unique_ptr<mongo::DBClientCursor> cursor = conn.query_DEPRECATED(NamespaceString(ns));
- ASSERT(!cursor->more());
-
- server.insert(ns, BSON("x" << 1));
- server.insert(ns, BSON("y" << 2));
- }
-
- {
- MockDBClientConnection conn(&server);
- std::unique_ptr<mongo::DBClientCursor> cursor = conn.query_DEPRECATED(NamespaceString(ns));
-
- ASSERT(cursor->more());
- BSONObj firstDoc = cursor->next();
- ASSERT_EQUALS(1, firstDoc["x"].numberInt());
-
- ASSERT(cursor->more());
- BSONObj secondDoc = cursor->next();
- ASSERT_EQUALS(2, secondDoc["y"].numberInt());
-
- ASSERT(!cursor->more());
- }
-}
-
TEST(MockDBClientConnTest, SkipBasedOnResumeAfter) {
MockRemoteDBServer server{"test"};
const std::string ns{"test.user"};
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 002b6e5f804..80cef8dfc81 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -192,7 +192,6 @@ env.Library(
'request_types/balancer_collection_status.idl',
'request_types/cleanup_reshard_collection.idl',
'request_types/clone_catalog_data.idl',
- 'request_types/commit_chunk_migration_request_type.cpp',
'request_types/commit_reshard_collection.idl',
'request_types/configure_collection_balancing.idl',
'request_types/drop_collection_if_uuid_not_matching.idl',
@@ -529,6 +528,7 @@ env.Library(
'$BUILD_DIR/mongo/db/audit',
'$BUILD_DIR/mongo/db/auth/authmongos',
'$BUILD_DIR/mongo/db/change_stream_options_manager',
+ '$BUILD_DIR/mongo/db/change_streams_cluster_parameter',
'$BUILD_DIR/mongo/db/commands/rwc_defaults_commands',
'$BUILD_DIR/mongo/db/ftdc/ftdc_mongos',
'$BUILD_DIR/mongo/db/process_health/fault_manager',
@@ -640,7 +640,6 @@ env.CppUnitTest(
'request_types/add_shard_request_test.cpp',
'request_types/add_shard_to_zone_request_test.cpp',
'request_types/balance_chunk_request_test.cpp',
- 'request_types/commit_chunk_migration_request_test.cpp',
'request_types/merge_chunks_request_test.cpp',
'request_types/migration_secondary_throttle_options_test.cpp',
'request_types/move_chunk_request_test.cpp',
diff --git a/src/mongo/s/append_raw_responses_test.cpp b/src/mongo/s/append_raw_responses_test.cpp
index 528e8ba4876..1bcfb9c8bc8 100644
--- a/src/mongo/s/append_raw_responses_test.cpp
+++ b/src/mongo/s/append_raw_responses_test.cpp
@@ -27,10 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include "mongo/unittest/unittest.h"
-
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -39,6 +35,7 @@
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/cluster_commands_helpers.h"
#include "mongo/s/sharding_router_test_fixture.h"
+#include "mongo/unittest/unittest.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
@@ -200,7 +197,7 @@ protected:
Timestamp timestamp{1, 0};
return StaleConfigInfo(
NamespaceString("Foo.Bar"),
- ChunkVersion(1, 0, epoch, timestamp),
+ ChunkVersion({epoch, timestamp}, {1, 0}),
boost::none,
ShardId{"dummy"});
}(),
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 8ce3d377491..7ed4cc739cc 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -27,13 +27,8 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/s/catalog/type_chunk.h"
-#include <cstring>
-
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
@@ -46,7 +41,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
const NamespaceString ChunkType::ConfigNS("config.chunks");
@@ -296,7 +290,7 @@ StatusWith<ChunkType> ChunkType::parseFromConfigBSON(const BSONObj& source,
if (versionElem.type() == bsonTimestamp || versionElem.type() == Date) {
auto chunkLastmod = Timestamp(versionElem._numberLong());
chunk._version =
- ChunkVersion(chunkLastmod.getSecs(), chunkLastmod.getInc(), epoch, timestamp);
+ ChunkVersion({epoch, timestamp}, {chunkLastmod.getSecs(), chunkLastmod.getInc()});
} else {
return {ErrorCodes::BadValue,
str::stream() << "The field " << ChunkType::lastmod() << " cannot be parsed."};
@@ -381,7 +375,7 @@ StatusWith<ChunkType> ChunkType::parseFromShardBSON(const BSONObj& source,
if (lastmodElem.type() == bsonTimestamp || lastmodElem.type() == Date) {
auto chunkLastmod = Timestamp(lastmodElem._numberLong());
chunk._version =
- ChunkVersion(chunkLastmod.getSecs(), chunkLastmod.getInc(), epoch, timestamp);
+ ChunkVersion({epoch, timestamp}, {chunkLastmod.getSecs(), chunkLastmod.getInc()});
} else {
return {ErrorCodes::NoSuchKey,
str::stream() << "Expected field " << ChunkType::lastmod() << " not found."};
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index bc8d012f290..18c199b69ea 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -47,7 +47,7 @@ TEST(ChunkType, MissingConfigRequiredFields) {
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(1, 1);
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj objModNS =
BSON(ChunkType::name(OID::gen())
@@ -81,7 +81,7 @@ TEST(ChunkType, MissingConfigRequiredFields) {
TEST(ChunkType, MissingShardRequiredFields) {
const OID epoch = OID::gen();
const Timestamp timestamp(1, 1);
- ChunkVersion chunkVersion(1, 2, epoch, timestamp);
+ ChunkVersion chunkVersion({epoch, timestamp}, {1, 2});
const auto lastmod = Timestamp(chunkVersion.toLong());
BSONObj objModMin =
@@ -109,15 +109,16 @@ TEST(ChunkType, MissingShardRequiredFields) {
}
TEST(ChunkType, ToFromShardBSON) {
- const OID epoch = OID::gen();
- const Timestamp timestamp(1, 1);
- ChunkVersion chunkVersion(1, 2, epoch, timestamp);
+ const OID collEpoch = OID::gen();
+ const Timestamp collTimestamp(1, 1);
+
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
auto lastmod = Timestamp(chunkVersion.toLong());
BSONObj obj = BSON(ChunkType::minShardID(kMin)
<< ChunkType::max(kMax) << ChunkType::shard(kShard.toString()) << "lastmod"
<< lastmod);
- ChunkType shardChunk = assertGet(ChunkType::parseFromShardBSON(obj, epoch, timestamp));
+ ChunkType shardChunk = assertGet(ChunkType::parseFromShardBSON(obj, collEpoch, collTimestamp));
ASSERT_BSONOBJ_EQ(obj, shardChunk.toShardBSON());
@@ -132,7 +133,7 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(1);
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj obj = BSON(
ChunkType::name(OID::gen())
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 10 << "b" << 10))
@@ -149,7 +150,7 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(1);
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 10))
@@ -166,7 +167,7 @@ TEST(ChunkType, MinToMaxNotAscending) {
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(1);
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 20))
@@ -182,7 +183,7 @@ TEST(ChunkType, ToFromConfigBSON) {
const auto collTimestamp = Timestamp(1);
const auto chunkID = OID::gen();
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj obj = BSON(ChunkType::name(chunkID)
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001")
@@ -217,7 +218,7 @@ TEST(ChunkType, BothNsAndUUID) {
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(1);
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj objModNS =
BSON(ChunkType::name(OID::gen())
@@ -235,7 +236,7 @@ TEST(ChunkType, UUIDPresentAndNsMissing) {
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(1);
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj objModNS = BSON(
ChunkType::name(OID::gen())
@@ -249,7 +250,10 @@ TEST(ChunkType, UUIDPresentAndNsMissing) {
}
TEST(ChunkType, ParseFromNetworkRequest) {
- ChunkVersion chunkVersion(1, 2, OID::gen(), Timestamp(1, 0));
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = Timestamp(1, 0);
+
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
auto chunk = assertGet(ChunkType::parseFromNetworkRequest(
BSON(ChunkType::name(OID::gen())
diff --git a/src/mongo/s/catalog_cache_refresh_test.cpp b/src/mongo/s/catalog_cache_refresh_test.cpp
index 8df10d20a43..4b44f5693de 100644
--- a/src/mongo/s/catalog_cache_refresh_test.cpp
+++ b/src/mongo/s/catalog_cache_refresh_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/concurrency/locker_noop.h"
#include "mongo/db/pipeline/aggregation_request_helper.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -42,7 +39,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
-
namespace mongo {
namespace {
@@ -115,7 +111,7 @@ TEST_F(CatalogCacheRefreshTest, FullLoad) {
expectGetDatabase();
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(reshardingUUID,
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
@@ -328,7 +324,7 @@ TEST_F(CatalogCacheRefreshTest, ChunksBSONCorrupted) {
const auto chunk1 =
ChunkType(coll.getUuid(),
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
- ChunkVersion(1, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
{"0"});
return std::vector<BSONObj>{/* collection */
coll.toBSON(),
@@ -359,7 +355,7 @@ TEST_F(CatalogCacheRefreshTest, FullLoadMissingChunkWithLowestVersion) {
expectGetDatabase();
const auto incompleteChunks = [&]() {
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
// concurrently) and has the lowest version.
@@ -415,7 +411,7 @@ TEST_F(CatalogCacheRefreshTest, FullLoadMissingChunkWithHighestVersion) {
expectGetDatabase();
const auto incompleteChunks = [&]() {
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
// concurrently) and has the higest version.
@@ -473,7 +469,7 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadMissingChunkWithLowestVersion) {
auto future = scheduleRoutingInfoIncrementalRefresh(kNss);
const auto incompleteChunks = [&]() {
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
// concurrently) and has the lowest version.
@@ -531,7 +527,7 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadMissingChunkWithHighestVersion) {
auto future = scheduleRoutingInfoIncrementalRefresh(kNss);
const auto incompleteChunks = [&]() {
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
// concurrently) and has the higest version.
@@ -621,7 +617,7 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
// recreated collection.
ChunkType chunk3(coll.getUuid(),
{BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
- ChunkVersion(5, 2, newEpoch, newTimestamp),
+ ChunkVersion({newEpoch, newTimestamp}, {5, 2}),
{"1"});
chunk3.setName(OID::gen());
@@ -631,7 +627,7 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
});
// On the second retry attempt, return the correct set of chunks from the recreated collection
- ChunkVersion newVersion(5, 0, newEpoch, newTimestamp);
+ ChunkVersion newVersion({newEpoch, newTimestamp}, {5, 0});
onFindCommand([&](const RemoteCommandRequest& request) {
const auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto aggRequest = unittest::assertGet(
@@ -676,9 +672,9 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
ASSERT(cm.isSharded());
ASSERT_EQ(3, cm.numChunks());
ASSERT_EQ(newVersion, cm.getVersion());
- ASSERT_EQ(ChunkVersion(5, 1, newVersion.epoch(), newVersion.getTimestamp()),
+ ASSERT_EQ(ChunkVersion({newVersion.epoch(), newVersion.getTimestamp()}, {5, 1}),
cm.getVersion({"0"}));
- ASSERT_EQ(ChunkVersion(5, 2, newVersion.epoch(), newVersion.getTimestamp()),
+ ASSERT_EQ(ChunkVersion({newVersion.epoch(), newVersion.getTimestamp()}, {5, 2}),
cm.getVersion({"1"}));
}
@@ -693,7 +689,7 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterCollectionEpochChange) {
auto future = scheduleRoutingInfoIncrementalRefresh(kNss);
ChunkVersion oldVersion = initialRoutingInfo.getVersion();
- ChunkVersion newVersion(1, 0, OID::gen(), Timestamp(2));
+ ChunkVersion newVersion({OID::gen(), Timestamp(2)}, {1, 0});
const UUID uuid = initialRoutingInfo.getUUID();
// Return collection with a different epoch and a set of chunks, which represent a split
@@ -736,9 +732,9 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterCollectionEpochChange) {
ASSERT(cm.isSharded());
ASSERT_EQ(2, cm.numChunks());
ASSERT_EQ(newVersion, cm.getVersion());
- ASSERT_EQ(ChunkVersion(1, 0, newVersion.epoch(), newVersion.getTimestamp()),
+ ASSERT_EQ(ChunkVersion({newVersion.epoch(), newVersion.getTimestamp()}, {1, 0}),
cm.getVersion({"0"}));
- ASSERT_EQ(ChunkVersion(1, 1, newVersion.epoch(), newVersion.getTimestamp()),
+ ASSERT_EQ(ChunkVersion({newVersion.epoch(), newVersion.getTimestamp()}, {1, 1}),
cm.getVersion({"1"}));
}
@@ -798,7 +794,8 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterSplit) {
ASSERT_EQ(2, cm.numChunks());
ASSERT_EQ(version, cm.getVersion());
ASSERT_EQ(version, cm.getVersion({"0"}));
- ASSERT_EQ(ChunkVersion(0, 0, version.epoch(), version.getTimestamp()), cm.getVersion({"1"}));
+ ASSERT_EQ(ChunkVersion({version.epoch(), version.getTimestamp()}, {0, 0}),
+ cm.getVersion({"1"}));
}
TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterMoveWithReshardingFieldsAdded) {
@@ -877,7 +874,8 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterMoveLastChunkWithReshardingF
ASSERT(cm.isSharded());
ASSERT_EQ(1, cm.numChunks());
ASSERT_EQ(version, cm.getVersion());
- ASSERT_EQ(ChunkVersion(0, 0, version.epoch(), version.getTimestamp()), cm.getVersion({"0"}));
+ ASSERT_EQ(ChunkVersion({version.epoch(), version.getTimestamp()}, {0, 0}),
+ cm.getVersion({"0"}));
ASSERT_EQ(version, cm.getVersion({"1"}));
ASSERT(boost::none == cm.getReshardingFields());
}
diff --git a/src/mongo/s/catalog_cache_test.cpp b/src/mongo/s/catalog_cache_test.cpp
index bb22d6c0915..b41aafde12c 100644
--- a/src/mongo/s/catalog_cache_test.cpp
+++ b/src/mongo/s/catalog_cache_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <boost/optional/optional_io.hpp>
#include "mongo/s/catalog/type_database_gen.h"
@@ -264,7 +261,7 @@ TEST_F(CatalogCacheTest, OnStaleDatabaseVersionNoVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithSameVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
- const auto cachedCollVersion = ChunkVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ const auto cachedCollVersion = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
loadCollection(cachedCollVersion);
@@ -275,7 +272,7 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithSameVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithNoVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
- const auto cachedCollVersion = ChunkVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ const auto cachedCollVersion = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
loadCollection(cachedCollVersion);
@@ -288,9 +285,9 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithNoVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithGraterVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
- const auto cachedCollVersion = ChunkVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ const auto cachedCollVersion = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
const auto wantedCollVersion =
- ChunkVersion(2, 0, cachedCollVersion.epoch(), cachedCollVersion.getTimestamp());
+ ChunkVersion({cachedCollVersion.epoch(), cachedCollVersion.getTimestamp()}, {2, 0});
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
loadCollection(cachedCollVersion);
@@ -304,7 +301,7 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithGraterVersion) {
TEST_F(CatalogCacheTest, TimeseriesFieldsAreProperlyPropagatedOnCC) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
const auto epoch = OID::gen();
- const auto version = ChunkVersion(1, 0, epoch, Timestamp(42));
+ const auto version = ChunkVersion({epoch, Timestamp(42)}, {1, 0});
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
@@ -360,7 +357,7 @@ TEST_F(CatalogCacheTest, TimeseriesFieldsAreProperlyPropagatedOnCC) {
TEST_F(CatalogCacheTest, LookupCollectionWithInvalidOptions) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
const auto epoch = OID::gen();
- const auto version = ChunkVersion(1, 0, epoch, Timestamp(42));
+ const auto version = ChunkVersion({epoch, Timestamp(42)}, {1, 0});
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
diff --git a/src/mongo/s/catalog_cache_test_fixture.cpp b/src/mongo/s/catalog_cache_test_fixture.cpp
index 6e66a30d6b2..b83657c246e 100644
--- a/src/mongo/s/catalog_cache_test_fixture.cpp
+++ b/src/mongo/s/catalog_cache_test_fixture.cpp
@@ -27,14 +27,8 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/s/catalog_cache_test_fixture.h"
-#include <memory>
-#include <set>
-#include <vector>
-
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/client.h"
@@ -130,7 +124,7 @@ ChunkManager CatalogCacheTestFixture::makeChunkManager(
bool unique,
const std::vector<BSONObj>& splitPoints,
boost::optional<ReshardingFields> reshardingFields) {
- ChunkVersion version(1, 0, OID::gen(), Timestamp(42) /* timestamp */);
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {1, 0});
DatabaseType db(nss.db().toString(), {"0"}, DatabaseVersion(UUID::gen(), Timestamp()));
@@ -270,7 +264,7 @@ ChunkManager CatalogCacheTestFixture::loadRoutingTableWithTwoChunksAndTwoShardsI
CollectionType collType(
nss, epoch, timestamp, Date_t::now(), uuid, shardKeyPattern.toBSON());
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(
uuid, {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)}, version, {"0"});
diff --git a/src/mongo/s/chunk_manager_query_test.cpp b/src/mongo/s/chunk_manager_query_test.cpp
index c7a95b8020c..936175610f7 100644
--- a/src/mongo/s/chunk_manager_query_test.cpp
+++ b/src/mongo/s/chunk_manager_query_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <set>
#include "mongo/db/catalog/catalog_test_fixture.h"
@@ -42,7 +39,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
-
namespace mongo {
namespace {
@@ -506,7 +502,7 @@ TEST_F(ChunkManagerQueryTest, SimpleCollationNumbersMultiShard) {
TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
const auto uuid = UUID::gen();
const auto epoch = OID::gen();
- ChunkVersion version(1, 0, epoch, Timestamp(1, 1));
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunk0(uuid, {BSON("x" << MINKEY), BSON("x" << 0)}, version, ShardId("0"));
chunk0.setName(OID::gen());
diff --git a/src/mongo/s/chunk_manager_refresh_bm.cpp b/src/mongo/s/chunk_manager_refresh_bm.cpp
index 3c7f3adb6b3..12253be4ab2 100644
--- a/src/mongo/s/chunk_manager_refresh_bm.cpp
+++ b/src/mongo/s/chunk_manager_refresh_bm.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include <benchmark/benchmark.h>
#include "mongo/base/init.h"
@@ -77,7 +75,7 @@ CollectionMetadata makeChunkManagerWithShardSelector(int nShards,
for (uint32_t i = 0; i < nChunks; ++i) {
chunks.emplace_back(collUuid,
getRangeForChunk(i, nChunks),
- ChunkVersion{i + 1, 0, collEpoch, Timestamp(1, 0)},
+ ChunkVersion({collEpoch, Timestamp(1, 0)}, {i + 1, 0}),
selectShard(i, nShards, nChunks));
}
@@ -169,7 +167,7 @@ auto BM_FullBuildOfChunkManager(benchmark::State& state, ShardSelectorFn selectS
for (uint32_t i = 0; i < nChunks; ++i) {
chunks.emplace_back(collUuid,
getRangeForChunk(i, nChunks),
- ChunkVersion{i + 1, 0, collEpoch, Timestamp(1, 0)},
+ ChunkVersion({collEpoch, Timestamp(1, 0)}, {i + 1, 0}),
selectShard(i, nShards, nChunks));
}
diff --git a/src/mongo/s/chunk_map_test.cpp b/src/mongo/s/chunk_map_test.cpp
index 6514fc00745..88378ff53e1 100644
--- a/src/mongo/s/chunk_map_test.cpp
+++ b/src/mongo/s/chunk_map_test.cpp
@@ -27,13 +27,10 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/s/chunk_manager.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
-
namespace {
const NamespaceString kNss("TestDB", "TestColl");
@@ -58,7 +55,7 @@ private:
TEST_F(ChunkMapTest, TestAddChunk) {
const OID epoch = OID::gen();
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
auto chunk = std::make_shared<ChunkInfo>(
ChunkType{uuid(),
@@ -75,7 +72,7 @@ TEST_F(ChunkMapTest, TestAddChunk) {
TEST_F(ChunkMapTest, TestEnumerateAllChunks) {
const OID epoch = OID::gen();
ChunkMap chunkMap{epoch, Timestamp(1, 1)};
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
auto newChunkMap = chunkMap.createMerged(
{std::make_shared<ChunkInfo>(
@@ -110,7 +107,7 @@ TEST_F(ChunkMapTest, TestEnumerateAllChunks) {
TEST_F(ChunkMapTest, TestIntersectingChunk) {
const OID epoch = OID::gen();
ChunkMap chunkMap{epoch, Timestamp(1, 1)};
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
auto newChunkMap = chunkMap.createMerged(
{std::make_shared<ChunkInfo>(
@@ -140,7 +137,7 @@ TEST_F(ChunkMapTest, TestIntersectingChunk) {
TEST_F(ChunkMapTest, TestEnumerateOverlappingChunks) {
const OID epoch = OID::gen();
ChunkMap chunkMap{epoch, Timestamp(1, 1)};
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
auto newChunkMap = chunkMap.createMerged(
{std::make_shared<ChunkInfo>(
diff --git a/src/mongo/s/chunk_test.cpp b/src/mongo/s/chunk_test.cpp
index 2902c0e41dd..d1c595c05e8 100644
--- a/src/mongo/s/chunk_test.cpp
+++ b/src/mongo/s/chunk_test.cpp
@@ -27,14 +27,11 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/namespace_string.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/chunk.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/shard_id.h"
-
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -48,7 +45,7 @@ const KeyPattern kShardKeyPattern(BSON("a" << 1));
TEST(ChunkTest, HasMovedSincePinnedTimestamp) {
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunkType(uuid,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
@@ -65,7 +62,7 @@ TEST(ChunkTest, HasMovedSincePinnedTimestamp) {
TEST(ChunkTest, HasMovedAndReturnedSincePinnedTimestamp) {
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunkType(uuid,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
@@ -83,7 +80,7 @@ TEST(ChunkTest, HasMovedAndReturnedSincePinnedTimestamp) {
TEST(ChunkTest, HasNotMovedSincePinnedTimestamp) {
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunkType(uuid,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
@@ -101,7 +98,7 @@ TEST(ChunkTest, HasNotMovedSincePinnedTimestamp) {
TEST(ChunkTest, HasNoHistoryValidForPinnedTimestamp_OneEntry) {
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunkType(uuid,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
@@ -117,7 +114,7 @@ TEST(ChunkTest, HasNoHistoryValidForPinnedTimestamp_OneEntry) {
TEST(ChunkTest, HasNoHistoryValidForPinnedTimestamp_MoreThanOneEntry) {
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunkType(uuid,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
diff --git a/src/mongo/s/chunk_version.h b/src/mongo/s/chunk_version.h
index 68f33c6b018..2a7911bfefe 100644
--- a/src/mongo/s/chunk_version.h
+++ b/src/mongo/s/chunk_version.h
@@ -124,10 +124,6 @@ public:
ChunkVersion() : ChunkVersion({OID(), Timestamp()}, {0, 0}) {}
- // TODO: Do not add any new usages of this constructor. Use the one above instead.
- ChunkVersion(uint32_t major, uint32_t minor, OID epoch, Timestamp timestamp)
- : CollectionGeneration(epoch, timestamp), CollectionPlacement(major, minor) {}
-
/**
* Indicates that the collection is not sharded.
*/
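
The chunk_version.h hunk above deletes the last legacy constructor, so every call site in this diff migrates from the (major, minor, epoch, timestamp) argument order to the surviving overload that takes an {epoch, timestamp} generation pair followed by a {major, minor} placement pair. A minimal before/after sketch, with accessors as exercised in chunk_version_test.cpp below and the include assumed from the MongoDB source tree:

#include "mongo/s/chunk_version.h"

using namespace mongo;

void chunkVersionCtorMigration() {
    const OID epoch = OID::gen();
    const Timestamp timestamp(1, 1);

    // Removed overload (placement components first, generation second):
    //   ChunkVersion oldStyle(5, 2, epoch, timestamp);

    // Surviving overload: generation {epoch, timestamp} first, placement {major, minor} second.
    ChunkVersion version({epoch, timestamp}, {5, 2});

    // Accessors are unchanged, as the tests below continue to assert:
    // version.majorVersion() == 5, version.minorVersion() == 2,
    // version.epoch() == epoch, version.getTimestamp() == timestamp.
}
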
diff --git a/src/mongo/s/chunk_version_test.cpp b/src/mongo/s/chunk_version_test.cpp
index f735bfd7ef0..7693ff55c85 100644
--- a/src/mongo/s/chunk_version_test.cpp
+++ b/src/mongo/s/chunk_version_test.cpp
@@ -40,13 +40,15 @@ TEST(ChunkVersionTest, EqualityOperators) {
OID epoch = OID::gen();
Timestamp timestamp = Timestamp(1);
- ASSERT_EQ(ChunkVersion(3, 1, epoch, Timestamp(1, 1)),
- ChunkVersion(3, 1, epoch, Timestamp(1, 1)));
- ASSERT_EQ(ChunkVersion(3, 1, OID(), timestamp), ChunkVersion(3, 1, OID(), timestamp));
-
- ASSERT_NE(ChunkVersion(3, 1, epoch, timestamp), ChunkVersion(3, 1, OID(), Timestamp(1, 1)));
- ASSERT_NE(ChunkVersion(3, 1, OID(), Timestamp(1, 1)), ChunkVersion(3, 1, epoch, timestamp));
- ASSERT_NE(ChunkVersion(4, 2, epoch, timestamp), ChunkVersion(4, 1, epoch, timestamp));
+ ASSERT_EQ(ChunkVersion({epoch, Timestamp(1, 1)}, {3, 1}),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {3, 1}));
+ ASSERT_EQ(ChunkVersion({OID(), timestamp}, {3, 1}), ChunkVersion({OID(), timestamp}, {3, 1}));
+
+ ASSERT_NE(ChunkVersion({epoch, timestamp}, {3, 1}),
+ ChunkVersion({OID(), Timestamp(1, 1)}, {3, 1}));
+ ASSERT_NE(ChunkVersion({OID(), Timestamp(1, 1)}, {3, 1}),
+ ChunkVersion({epoch, timestamp}, {3, 1}));
+ ASSERT_NE(ChunkVersion({epoch, timestamp}, {4, 2}), ChunkVersion({epoch, timestamp}, {4, 1}));
}
TEST(ChunkVersionTest, OlderThan) {
@@ -54,19 +56,23 @@ TEST(ChunkVersionTest, OlderThan) {
Timestamp timestamp(1);
Timestamp newerTimestamp(2);
- ASSERT(ChunkVersion(3, 1, epoch, timestamp).isOlderThan(ChunkVersion(4, 1, epoch, timestamp)));
- ASSERT(!ChunkVersion(4, 1, epoch, timestamp).isOlderThan(ChunkVersion(3, 1, epoch, timestamp)));
+ ASSERT(ChunkVersion({epoch, timestamp}, {3, 1})
+ .isOlderThan(ChunkVersion({epoch, timestamp}, {4, 1})));
+ ASSERT(!ChunkVersion({epoch, timestamp}, {4, 1})
+ .isOlderThan(ChunkVersion({epoch, timestamp}, {3, 1})));
- ASSERT(ChunkVersion(3, 1, epoch, timestamp).isOlderThan(ChunkVersion(3, 2, epoch, timestamp)));
- ASSERT(!ChunkVersion(3, 2, epoch, timestamp).isOlderThan(ChunkVersion(3, 1, epoch, timestamp)));
+ ASSERT(ChunkVersion({epoch, timestamp}, {3, 1})
+ .isOlderThan(ChunkVersion({epoch, timestamp}, {3, 2})));
+ ASSERT(!ChunkVersion({epoch, timestamp}, {3, 2})
+ .isOlderThan(ChunkVersion({epoch, timestamp}, {3, 1})));
- ASSERT(ChunkVersion(3, 1, epoch, timestamp)
- .isOlderThan(ChunkVersion(3, 1, OID::gen(), newerTimestamp)));
- ASSERT(!ChunkVersion(3, 1, epoch, newerTimestamp)
- .isOlderThan(ChunkVersion(3, 1, OID::gen(), timestamp)));
+ ASSERT(ChunkVersion({epoch, timestamp}, {3, 1})
+ .isOlderThan(ChunkVersion({OID::gen(), newerTimestamp}, {3, 1})));
+ ASSERT(!ChunkVersion({epoch, newerTimestamp}, {3, 1})
+ .isOlderThan(ChunkVersion({OID::gen(), timestamp}, {3, 1})));
- ASSERT(!ChunkVersion::UNSHARDED().isOlderThan(ChunkVersion(3, 1, epoch, timestamp)));
- ASSERT(!ChunkVersion(3, 1, epoch, timestamp).isOlderThan(ChunkVersion::UNSHARDED()));
+ ASSERT(!ChunkVersion::UNSHARDED().isOlderThan(ChunkVersion({epoch, timestamp}, {3, 1})));
+ ASSERT(!ChunkVersion({epoch, timestamp}, {3, 1}).isOlderThan(ChunkVersion::UNSHARDED()));
}
TEST(ChunkVersionTest, CreateWithLargeValues) {
@@ -74,7 +80,7 @@ TEST(ChunkVersionTest, CreateWithLargeValues) {
const uint32_t minorVersion = std::numeric_limits<uint32_t>::max();
const auto epoch = OID::gen();
- ChunkVersion version(majorVersion, minorVersion, epoch, Timestamp(1, 1));
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {majorVersion, minorVersion});
ASSERT_EQ(majorVersion, version.majorVersion());
ASSERT_EQ(minorVersion, version.minorVersion());
ASSERT_EQ(epoch, version.epoch());
@@ -86,7 +92,7 @@ TEST(ChunkVersionTest, ThrowsErrorIfOverflowIsAttemptedForMajorVersion) {
const uint32_t minorVersion = 0;
const auto epoch = OID::gen();
- ChunkVersion version(majorVersion, minorVersion, epoch, Timestamp(1, 1));
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {majorVersion, minorVersion});
ASSERT_EQ(majorVersion, version.majorVersion());
ASSERT_EQ(minorVersion, version.minorVersion());
ASSERT_EQ(epoch, version.epoch());
@@ -99,7 +105,7 @@ TEST(ChunkVersionTest, ThrowsErrorIfOverflowIsAttemptedForMinorVersion) {
const uint32_t minorVersion = std::numeric_limits<uint32_t>::max();
const auto epoch = OID::gen();
- ChunkVersion version(majorVersion, minorVersion, epoch, Timestamp(1, 1));
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {majorVersion, minorVersion});
ASSERT_EQ(majorVersion, version.majorVersion());
ASSERT_EQ(minorVersion, version.minorVersion());
ASSERT_EQ(epoch, version.epoch());
diff --git a/src/mongo/s/commands/cluster_commands.idl b/src/mongo/s/commands/cluster_commands.idl
index 26d5ad8760c..bc5edc8c7a4 100644
--- a/src/mongo/s/commands/cluster_commands.idl
+++ b/src/mongo/s/commands/cluster_commands.idl
@@ -107,3 +107,52 @@ commands:
description: "The shard key value that is within a chunk's boundaries.
Cannot be used on collections with hashed shard keys."
optional: true
+
+ moveChunk:
+ description : "The public moveChunk command on mongos."
+ command_name : moveChunk
+ command_alias: movechunk
+ cpp_name: ClusterMoveChunkRequest
+ strict: false
+ namespace: type
+ api_version: ""
+ type: namespacestring
+ fields:
+ bounds:
+ type: array<object>
+ description: "The bounds of a specific chunk to move. The array must consist of two documents that specify the lower and upper shard key values of a chunk to move. Specify either the bounds field or the find field but not both."
+ optional: true
+ find:
+ type: object
+ description: "An equality match on the shard key that specifies the shard-key value of the chunk to move. Specify either the bounds field or the find field but not both."
+ optional: true
+ to:
+ type: string
+ description: "The name of the destination shard for the chunk."
+
+ forceJumbo:
+ type: bool
+ description: "Specifies whether or not forcing jumbo chunks move"
+ default: false
+
+ writeConcern:
+ type: object_owned
+ description: "A document that expresses the write concern that the _secondaryThrottle will use to wait for secondaries during the chunk migration."
+ default: BSONObj()
+
+ # Secondary throttle can be specified by passing one of the following 2 parameters
+ secondaryThrottle:
+ type: optionalBool
+ description: "Secondary throttle policy to adopt during the migration"
+ _secondaryThrottle:
+ type: optionalBool
+ description: "Secondary throttle policy to adopt during the migration"
+
+ # Wait for delete can be specified with one of the following 2 parameters
+ waitForDelete:
+ type: optionalBool
+ description: "Internal option for testing purposes. The default is false. If set to true, the delete phase of a moveChunk operation blocks."
+ _waitForDelete:
+ type: optionalBool
+ description: "Internal option for testing purposes. The default is false. If set to true, the delete phase of a moveChunk operation blocks."
+
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index b1149c67c5c..9ab1d5a45ae 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -42,6 +42,7 @@
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/cluster_commands_helpers.h"
+#include "mongo/s/commands/cluster_commands_gen.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
#include "mongo/s/request_types/move_range_request_gen.h"
@@ -53,20 +54,22 @@
namespace mongo {
namespace {
-class MoveChunkCmd : public ErrmsgCommandDeprecated {
+class MoveChunkCmd final : public TypedCommand<MoveChunkCmd> {
public:
- MoveChunkCmd() : ErrmsgCommandDeprecated("moveChunk", "movechunk") {}
+ MoveChunkCmd()
+ : TypedCommand(ClusterMoveChunkRequest::kCommandName,
+ ClusterMoveChunkRequest::kCommandAlias) {}
+
+ using Request = ClusterMoveChunkRequest;
AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
return AllowedOnSecondary::kAlways;
}
+
bool adminOnly() const override {
return true;
}
- bool supportsWriteConcern(const BSONObj& cmd) const override {
- return true;
- }
std::string help() const override {
return "Example: move chunk that contains the doc {num : 7} to shard001\n"
@@ -76,148 +79,150 @@ public:
" , to : 'shard001' }\n";
}
- Status checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) const override {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
- ActionType::moveChunk)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
-
- return Status::OK();
- }
+ class Invocation : public MinimalInvocationBase {
+ public:
+ using MinimalInvocationBase::MinimalInvocationBase;
- std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return CommandHelpers::parseNsFullyQualified(cmdObj);
- }
+ private:
+ bool supportsWriteConcern() const override {
+ return true;
+ }
- bool errmsgRun(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj,
- std::string& errmsg,
- BSONObjBuilder& result) override {
- Timer t;
-
- const NamespaceString nss(parseNs(dbname, cmdObj));
-
- const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
- nss));
-
- const auto toElt = cmdObj["to"];
- uassert(ErrorCodes::TypeMismatch,
- "'to' must be of type String",
- toElt.type() == BSONType::String);
- const std::string toString = toElt.str();
- if (!toString.size()) {
- errmsg = "you have to specify where you want to move the chunk";
- return false;
+ void doCheckAuthorization(OperationContext* opCtx) const override {
+ uassert(ErrorCodes::Unauthorized,
+ "Unauthorized",
+ AuthorizationSession::get(opCtx->getClient())
+ ->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns()),
+ ActionType::moveChunk));
}
- const auto toStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, toString);
- if (!toStatus.isOK()) {
- LOGV2_OPTIONS(22755,
- {logv2::UserAssertAfterLog(ErrorCodes::ShardNotFound)},
- "Could not move chunk in {namespace} to {toShardId} because that shard"
- " does not exist",
- "moveChunk destination shard does not exist",
- "toShardId"_attr = toString,
- "namespace"_attr = nss.ns());
+ NamespaceString ns() const override {
+ return request().getCommandParameter();
}
- const auto to = toStatus.getValue();
- const auto forceJumboElt = cmdObj["forceJumbo"];
- const auto forceJumbo = forceJumboElt && forceJumboElt.Bool();
+ void run(OperationContext* opCtx, rpc::ReplyBuilderInterface* result) {
- BSONObj find = cmdObj.getObjectField("find");
- BSONObj bounds = cmdObj.getObjectField("bounds");
+ Timer t;
+ const auto chunkManager = uassertStatusOK(
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
+ ns()));
- // check that only one of the two chunk specification methods is used
- if (find.isEmpty() == bounds.isEmpty()) {
- errmsg = "need to specify either a find query, or both lower and upper bounds.";
- return false;
- }
+            uassert(ErrorCodes::InvalidOptions,
+                    "bounds must contain exactly 2 elements",
+                    !request().getBounds() || request().getBounds()->size() == 2);
- boost::optional<Chunk> chunk;
+ uassert(ErrorCodes::InvalidOptions,
+ "cannot specify bounds and query at the same time",
+ !(request().getFind() && request().getBounds()));
- if (!find.isEmpty()) {
- // find
- BSONObj shardKey =
- uassertStatusOK(cm.getShardKeyPattern().extractShardKeyFromQuery(opCtx, nss, find));
- if (shardKey.isEmpty()) {
- errmsg = str::stream() << "no shard key found in chunk query " << find;
- return false;
- }
+            uassert(ErrorCodes::InvalidOptions,
+                    "must specify either a find query or bounds",
+                    request().getFind() || request().getBounds());
- chunk.emplace(cm.findIntersectingChunkWithSimpleCollation(shardKey));
- } else {
- // bounds
- if (!cm.getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
- !cm.getShardKeyPattern().isShardKey(bounds[1].Obj())) {
- errmsg = str::stream()
- << "shard key bounds "
- << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
- << " are not valid for shard key pattern " << cm.getShardKeyPattern().toBSON();
- return false;
+
+ std::string destination = request().getTo().toString();
+ const auto toStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, destination);
+
+ if (!toStatus.isOK()) {
+ LOGV2_OPTIONS(
+ 22755,
+ {logv2::UserAssertAfterLog(ErrorCodes::ShardNotFound)},
+ "Could not move chunk in {namespace} to {toShardId} because that shard"
+ " does not exist",
+ "moveChunk destination shard does not exist",
+ "toShardId"_attr = destination,
+ "namespace"_attr = ns());
}
- BSONObj minKey = cm.getShardKeyPattern().normalizeShardKey(bounds[0].Obj());
- BSONObj maxKey = cm.getShardKeyPattern().normalizeShardKey(bounds[1].Obj());
- chunk.emplace(cm.findIntersectingChunkWithSimpleCollation(minKey));
+ const auto to = toStatus.getValue();
+
+ auto find = request().getFind();
+ auto bounds = request().getBounds();
- if (chunk->getMin().woCompare(minKey) != 0 || chunk->getMax().woCompare(maxKey) != 0) {
- errmsg = str::stream() << "no chunk found with the shard key bounds "
- << ChunkRange(minKey, maxKey).toString();
- return false;
+
+ boost::optional<Chunk> chunk;
+
+ if (find) {
+ // find
+ BSONObj shardKey = uassertStatusOK(
+ chunkManager.getShardKeyPattern().extractShardKeyFromQuery(opCtx, ns(), *find));
+
+ uassert(656450,
+ str::stream() << "no shard key found in chunk query " << *find,
+ !shardKey.isEmpty());
+
+ chunk.emplace(chunkManager.findIntersectingChunkWithSimpleCollation(shardKey));
+ } else {
+
+ auto minBound = bounds->front();
+ auto maxBound = bounds->back();
+ uassert(656451,
+ str::stream() << "shard key bounds "
+ << "[" << minBound << "," << maxBound << ")"
+ << " are not valid for shard key pattern "
+ << chunkManager.getShardKeyPattern().toBSON(),
+ chunkManager.getShardKeyPattern().isShardKey(minBound) &&
+ chunkManager.getShardKeyPattern().isShardKey(maxBound));
+
+ BSONObj minKey = chunkManager.getShardKeyPattern().normalizeShardKey(minBound);
+ BSONObj maxKey = chunkManager.getShardKeyPattern().normalizeShardKey(maxBound);
+
+ chunk.emplace(chunkManager.findIntersectingChunkWithSimpleCollation(minKey));
+ uassert(656452,
+ str::stream() << "no chunk found with the shard key bounds "
+ << ChunkRange(minKey, maxKey).toString(),
+ chunk->getMin().woCompare(minKey) == 0 &&
+ chunk->getMax().woCompare(maxKey) == 0);
}
- }
- const auto secondaryThrottle =
- uassertStatusOK(MigrationSecondaryThrottleOptions::createFromCommand(cmdObj));
- const bool waitForDelete =
- cmdObj["_waitForDelete"].trueValue() || cmdObj["waitForDelete"].trueValue();
+ MoveRangeRequestBase moveRangeReq;
+ moveRangeReq.setToShard(to->getId());
+ moveRangeReq.setMin(chunk->getMin());
+ moveRangeReq.setMax(chunk->getMax());
+ moveRangeReq.setWaitForDelete(request().getWaitForDelete().value_or(false) ||
+ request().get_waitForDelete().value_or(false));
+
+
+ ConfigsvrMoveRange configsvrRequest(ns());
+ configsvrRequest.setDbName(NamespaceString::kAdminDb);
+ configsvrRequest.setMoveRangeRequestBase(moveRangeReq);
- MoveRangeRequestBase moveRangeReq;
- moveRangeReq.setToShard(to->getId());
- moveRangeReq.setMin(chunk->getMin());
- moveRangeReq.setMax(chunk->getMax());
- moveRangeReq.setWaitForDelete(waitForDelete);
+ const auto secondaryThrottle = uassertStatusOK(
+ MigrationSecondaryThrottleOptions::createFromCommand(request().toBSON({})));
- ConfigsvrMoveRange configsvrRequest(nss);
- configsvrRequest.setDbName(NamespaceString::kAdminDb);
- configsvrRequest.setMoveRangeRequestBase(moveRangeReq);
- configsvrRequest.setForceJumbo(forceJumbo ? ForceJumbo::kForceManual
- : ForceJumbo::kDoNotForce);
- if (secondaryThrottle.getSecondaryThrottle() == MigrationSecondaryThrottleOptions::kOn) {
configsvrRequest.setSecondaryThrottle(secondaryThrottle);
+
+ configsvrRequest.setForceJumbo(request().getForceJumbo() ? ForceJumbo::kForceManual
+ : ForceJumbo::kDoNotForce);
+
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto commandResponse = configShard->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ NamespaceString::kAdminDb.toString(),
+ CommandHelpers::appendMajorityWriteConcern(configsvrRequest.toBSON({})),
+ Shard::RetryPolicy::kIdempotent);
+ uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(std::move(commandResponse)));
+
+ Grid::get(opCtx)
+ ->catalogCache()
+ ->invalidateShardOrEntireCollectionEntryForShardedCollection(
+ ns(), boost::none, chunk->getShardId());
+ Grid::get(opCtx)
+ ->catalogCache()
+ ->invalidateShardOrEntireCollectionEntryForShardedCollection(
+ ns(), boost::none, to->getId());
+
+ BSONObjBuilder resultbson;
+ resultbson.append("millis", t.millis());
+ result->getBodyBuilder().appendElements(resultbson.obj());
}
+ };
- auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- auto commandResponse = configShard->runCommand(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- NamespaceString::kAdminDb.toString(),
- CommandHelpers::appendMajorityWriteConcern(configsvrRequest.toBSON({})),
- Shard::RetryPolicy::kIdempotent);
- uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(std::move(commandResponse)));
-
- Grid::get(opCtx)
- ->catalogCache()
- ->invalidateShardOrEntireCollectionEntryForShardedCollection(
- nss, boost::none, chunk->getShardId());
- Grid::get(opCtx)
- ->catalogCache()
- ->invalidateShardOrEntireCollectionEntryForShardedCollection(
- nss, boost::none, to->getId());
-
- result.append("millis", t.millis());
- return true;
- }
-} moveChunk;
+} clusterMoveChunk;
} // namespace
} // namespace mongo
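The rewritten command above replaces the errmsg-style early returns with uasserts over the typed request. Below is a minimal, self-contained sketch of the equivalent find/bounds validation; the types and names (MoveChunkArgs, validateMoveChunkArgs) are hypothetical stand-ins, not the server's classes.

#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-in for the parsed moveChunk arguments.
struct MoveChunkArgs {
    std::optional<std::string> find;                 // shard-key query selecting the chunk
    std::optional<std::vector<std::string>> bounds;  // [min, max] shard-key bounds
};

// Mirrors the three uasserts: bounds (if given) must have two entries, find and bounds
// are mutually exclusive, and at least one of them must be present.
void validateMoveChunkArgs(const MoveChunkArgs& args) {
    if (args.bounds && args.bounds->size() != 2)
        throw std::invalid_argument("bounds must contain exactly 2 elements");
    if (args.find && args.bounds)
        throw std::invalid_argument("cannot specify bounds and query at the same time");
    if (!args.find && !args.bounds)
        throw std::invalid_argument("must specify either a find query or bounds");
}

int main() {
    validateMoveChunkArgs({std::string{"{num: 7}"}, std::nullopt});                        // ok: find only
    validateMoveChunkArgs({std::nullopt, std::vector<std::string>{"{x: 0}", "{x: 10}"}});  // ok: bounds only
    return 0;
}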
diff --git a/src/mongo/s/comparable_chunk_version_test.cpp b/src/mongo/s/comparable_chunk_version_test.cpp
index a5d47981709..63f6ca4a59c 100644
--- a/src/mongo/s/comparable_chunk_version_test.cpp
+++ b/src/mongo/s/comparable_chunk_version_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/s/chunk_manager.h"
#include "mongo/unittest/unittest.h"
@@ -38,15 +36,15 @@ namespace {
TEST(ComparableChunkVersionTest, VersionsEqual) {
const auto epoch = OID::gen();
const Timestamp timestamp(1, 1);
- const ChunkVersion v1(1, 0, epoch, timestamp);
- const ChunkVersion v2(1, 0, epoch, timestamp);
+ const ChunkVersion v1({epoch, timestamp}, {1, 0});
+ const ChunkVersion v2({epoch, timestamp}, {1, 0});
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(v1);
const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(v2);
ASSERT(version1 == version2);
}
TEST(ComparableChunkVersionTest, VersionsEqualAfterCopy) {
- const ChunkVersion chunkVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
const auto version2 = version1;
ASSERT(version1 == version2);
@@ -54,8 +52,8 @@ TEST(ComparableChunkVersionTest, VersionsEqualAfterCopy) {
TEST(ComparableChunkVersionTest, CompareDifferentTimestamps) {
- const ChunkVersion v1(2, 0, OID::gen(), Timestamp(1));
- const ChunkVersion v2(1, 0, OID::gen(), Timestamp(2));
+ const ChunkVersion v1({OID::gen(), Timestamp(1)}, {2, 0});
+ const ChunkVersion v2({OID::gen(), Timestamp(2)}, {1, 0});
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(v1);
const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(v2);
ASSERT(version2 != version1);
@@ -65,9 +63,9 @@ TEST(ComparableChunkVersionTest, CompareDifferentTimestamps) {
TEST(ComparableChunkVersionTest, CompareDifferentVersionsTimestampsIgnoreSequenceNumber) {
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(
- ChunkVersion(2, 0, OID::gen(), Timestamp(2)));
+ ChunkVersion({OID::gen(), Timestamp(2)}, {2, 0}));
const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(
- ChunkVersion(2, 0, OID::gen(), Timestamp(1)));
+ ChunkVersion({OID::gen(), Timestamp(1)}, {2, 0}));
ASSERT(version1 != version2);
ASSERT(version1 > version2);
ASSERT_FALSE(version1 < version2);
@@ -76,9 +74,9 @@ TEST(ComparableChunkVersionTest, CompareDifferentVersionsTimestampsIgnoreSequenc
TEST(ComparableChunkVersionTest, VersionGreaterSameTimestamps) {
const auto epoch = OID::gen();
const Timestamp timestamp(1, 1);
- const ChunkVersion v1(1, 0, epoch, timestamp);
- const ChunkVersion v2(1, 2, epoch, timestamp);
- const ChunkVersion v3(2, 0, epoch, timestamp);
+ const ChunkVersion v1({epoch, timestamp}, {1, 0});
+ const ChunkVersion v2({epoch, timestamp}, {1, 2});
+ const ChunkVersion v3({epoch, timestamp}, {2, 0});
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(v1);
const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(v2);
const auto version3 = ComparableChunkVersion::makeComparableChunkVersion(v3);
@@ -93,9 +91,9 @@ TEST(ComparableChunkVersionTest, VersionGreaterSameTimestamps) {
TEST(ComparableChunkVersionTest, VersionLessSameTimestamps) {
const auto epoch = OID::gen();
const Timestamp timestamp(1, 1);
- const ChunkVersion v1(1, 0, epoch, timestamp);
- const ChunkVersion v2(1, 2, epoch, timestamp);
- const ChunkVersion v3(2, 0, epoch, timestamp);
+ const ChunkVersion v1({epoch, timestamp}, {1, 0});
+ const ChunkVersion v2({epoch, timestamp}, {1, 2});
+ const ChunkVersion v3({epoch, timestamp}, {2, 0});
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(v1);
const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(v2);
const auto version3 = ComparableChunkVersion::makeComparableChunkVersion(v3);
@@ -115,7 +113,7 @@ TEST(ComparableChunkVersionTest, DefaultConstructedVersionsAreEqual) {
}
TEST(ComparableChunkVersionTest, DefaultConstructedVersionIsAlwaysLessThanWithChunksVersion) {
- const ChunkVersion chunkVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
const ComparableChunkVersion defaultVersion{};
const auto withChunksVersion = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
ASSERT(defaultVersion != withChunksVersion);
@@ -124,7 +122,7 @@ TEST(ComparableChunkVersionTest, DefaultConstructedVersionIsAlwaysLessThanWithCh
}
TEST(ComparableChunkVersionTest, DefaultConstructedVersionIsAlwaysLessThanNoChunksVersion) {
- const ChunkVersion chunkVersion(0, 0, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {0, 0});
const ComparableChunkVersion defaultVersion{};
const auto noChunksVersion = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
ASSERT(defaultVersion != noChunksVersion);
@@ -143,8 +141,8 @@ TEST(ComparableChunkVersionTest, DefaultConstructedVersionIsAlwaysLessThanUnshar
TEST(ComparableChunkVersionTest, TwoNoChunksVersionsAreTheSame) {
const auto oid = OID::gen();
- const ChunkVersion v1(0, 0, oid, Timestamp(1, 1));
- const ChunkVersion v2(0, 0, oid, Timestamp(1, 1));
+ const ChunkVersion v1({oid, Timestamp(1, 1)}, {0, 0});
+ const ChunkVersion v2({oid, Timestamp(1, 1)}, {0, 0});
const auto noChunksVersion1 = ComparableChunkVersion::makeComparableChunkVersion(v1);
const auto noChunksVersion2 = ComparableChunkVersion::makeComparableChunkVersion(v2);
ASSERT(noChunksVersion1 == noChunksVersion2);
@@ -155,9 +153,9 @@ TEST(ComparableChunkVersionTest, TwoNoChunksVersionsAreTheSame) {
TEST(ComparableChunkVersionTest, NoChunksComparedBySequenceNum) {
const auto oid = OID::gen();
const Timestamp timestamp(1);
- const ChunkVersion v1(1, 0, oid, timestamp);
- const ChunkVersion v2(0, 0, oid, timestamp);
- const ChunkVersion v3(2, 0, oid, timestamp);
+ const ChunkVersion v1({oid, timestamp}, {1, 0});
+ const ChunkVersion v2({oid, timestamp}, {0, 0});
+ const ChunkVersion v3({oid, timestamp}, {2, 0});
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(v1);
const auto noChunksVersion2 = ComparableChunkVersion::makeComparableChunkVersion(v2);
const auto version3 = ComparableChunkVersion::makeComparableChunkVersion(v3);
@@ -168,7 +166,7 @@ TEST(ComparableChunkVersionTest, NoChunksComparedBySequenceNum) {
}
TEST(ComparableChunkVersionTest, NoChunksGreaterThanUnshardedBySequenceNum) {
- const ChunkVersion chunkVersion(0, 0, OID::gen(), Timestamp(1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1)}, {0, 0});
const auto unsharded =
ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion::UNSHARDED());
const auto noChunkSV = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
@@ -177,7 +175,7 @@ TEST(ComparableChunkVersionTest, NoChunksGreaterThanUnshardedBySequenceNum) {
}
TEST(ComparableChunkVersionTest, UnshardedGreaterThanNoChunksBySequenceNum) {
- const ChunkVersion chunkVersion(0, 0, OID::gen(), Timestamp(1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1)}, {0, 0});
const auto noChunkSV = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
const auto unsharded =
ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion::UNSHARDED());
@@ -186,7 +184,7 @@ TEST(ComparableChunkVersionTest, UnshardedGreaterThanNoChunksBySequenceNum) {
}
TEST(ComparableChunkVersionTest, NoChunksGreaterThanDefault) {
- const ChunkVersion chunkVersion(0, 0, OID::gen(), Timestamp(1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1)}, {0, 0});
const auto noChunkSV = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
const ComparableChunkVersion defaultVersion{};
ASSERT(noChunkSV != defaultVersion);
@@ -194,7 +192,7 @@ TEST(ComparableChunkVersionTest, NoChunksGreaterThanDefault) {
}
TEST(ComparableChunkVersionTest, CompareForcedRefreshVersionVersusValidChunkVersion) {
- const ChunkVersion chunkVersion(100, 0, OID::gen(), Timestamp(1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1)}, {100, 0});
const ComparableChunkVersion defaultVersionBeforeForce;
const auto versionBeforeForce =
ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
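Most of the remaining hunks in this section mechanically rewrite ChunkVersion(major, minor, epoch, timestamp) call sites into ChunkVersion({epoch, timestamp}, {major, minor}), grouping the collection generation and the chunk placement into two aggregates. A minimal sketch of that regrouping follows; the mirror types (Generation, Placement, VersionLike) are hypothetical illustrations, not the real ChunkVersion internals.

#include <cstdint>
#include <iostream>
#include <string>
#include <utility>

// Hypothetical mirror of the first aggregate: the collection generation.
struct Generation {
    std::string epoch;       // stands in for the collection epoch (OID)
    uint64_t timestamp = 0;  // stands in for the collection timestamp
};

// Hypothetical mirror of the second aggregate: the chunk placement.
struct Placement {
    uint32_t major = 0;
    uint32_t minor = 0;
};

struct VersionLike {
    VersionLike(Generation g, Placement p) : gen(std::move(g)), placement(p) {}
    Generation gen;
    Placement placement;
};

int main() {
    // Shape of the updated call sites: ChunkVersion({epoch, timestamp}, {2, 1}).
    VersionLike v({"epoch-oid", 1}, {2, 1});
    std::cout << v.placement.major << "." << v.placement.minor << "\n";
    return 0;
}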
diff --git a/src/mongo/s/query/sharded_agg_test_fixture.h b/src/mongo/s/query/sharded_agg_test_fixture.h
index d5c02d84b3c..f36ae36eabb 100644
--- a/src/mongo/s/query/sharded_agg_test_fixture.h
+++ b/src/mongo/s/query/sharded_agg_test_fixture.h
@@ -80,7 +80,7 @@ public:
const OID epoch,
const Timestamp timestamp,
std::vector<std::pair<ChunkRange, ShardId>> chunkInfos) {
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
std::vector<ChunkType> chunks;
for (auto&& pair : chunkInfos) {
chunks.emplace_back(uuid, pair.first, version, pair.second);
diff --git a/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp b/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp
deleted file mode 100644
index 38254acf008..00000000000
--- a/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/bson/bsonmisc.h"
-#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/s/request_types/commit_chunk_migration_request_type.h"
-#include "mongo/unittest/unittest.h"
-
-namespace mongo {
-
-using unittest::assertGet;
-
-namespace {
-
-const auto kNamespaceString = NamespaceString("TestDB", "TestColl");
-
-const auto kShardId0 = ShardId("shard0");
-const auto kShardId1 = ShardId("shard1");
-
-const auto kKey0 = BSON("Key" << -100);
-const auto kKey1 = BSON("Key" << 100);
-const auto kKey2 = BSON("Key" << -50);
-const auto kKey3 = BSON("Key" << 50);
-
-const char kConfigSvrCommitChunkMigration[] = "_configsvrCommitChunkMigration";
-
-TEST(CommitChunkMigrationRequest, WithoutControlChunk) {
- BSONObjBuilder builder;
-
- ChunkType migratedChunk;
- migratedChunk.setCollectionUUID(UUID::gen());
- migratedChunk.setMin(kKey0);
- migratedChunk.setMax(kKey1);
- migratedChunk.setVersion({12, 7, OID::gen(), Timestamp(1, 1)});
-
- ChunkVersion fromShardCollectionVersion(1, 2, OID::gen(), Timestamp(1, 1));
-
- Timestamp validAfter{1};
-
- CommitChunkMigrationRequest::appendAsCommand(&builder,
- kNamespaceString,
- kShardId0,
- kShardId1,
- migratedChunk,
- fromShardCollectionVersion,
- validAfter);
-
- BSONObj cmdObj = builder.obj();
-
- auto request = assertGet(CommitChunkMigrationRequest::createFromCommand(
- NamespaceString(cmdObj[kConfigSvrCommitChunkMigration].String()), cmdObj));
-
- ASSERT_EQ(kNamespaceString, request.getNss());
- ASSERT_EQ(kShardId0, request.getFromShard());
- ASSERT_EQ(kShardId1, request.getToShard());
- ASSERT_BSONOBJ_EQ(kKey0, request.getMigratedChunk().getMin());
- ASSERT_BSONOBJ_EQ(kKey1, request.getMigratedChunk().getMax());
- ASSERT_TRUE(request.getMigratedChunk().isVersionSet() &&
- request.getMigratedChunk().getVersion().isSet() &&
- request.getMigratedChunk().getVersion().epoch().isSet());
- ASSERT_EQ(fromShardCollectionVersion.epoch(), request.getCollectionEpoch());
-}
-
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/s/request_types/commit_chunk_migration_request_type.cpp b/src/mongo/s/request_types/commit_chunk_migration_request_type.cpp
deleted file mode 100644
index 15cb397a2af..00000000000
--- a/src/mongo/s/request_types/commit_chunk_migration_request_type.cpp
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/s/request_types/commit_chunk_migration_request_type.h"
-
-#include "mongo/bson/util/bson_extract.h"
-
-namespace mongo {
-namespace {
-
-const char kConfigSvrCommitChunkMigration[] = "_configsvrCommitChunkMigration";
-const char kFromShard[] = "fromShard";
-const char kToShard[] = "toShard";
-const char kMigratedChunk[] = "migratedChunk";
-const char kFromShardCollectionVersion[] = "fromShardCollectionVersion";
-const char kValidAfter[] = "validAfter";
-
-/**
- * Attempts to parse a (range-only!) ChunkType from "field" in "source".
- */
-StatusWith<ChunkType> extractChunk(const BSONObj& source, StringData field) {
- BSONElement fieldElement;
- auto status = bsonExtractTypedField(source, field, BSONType::Object, &fieldElement);
- if (!status.isOK())
- return status;
-
- const auto fieldObj = fieldElement.Obj();
-
- auto rangeWith = ChunkRange::fromBSON(fieldObj);
- if (!rangeWith.isOK())
- return rangeWith.getStatus();
-
- ChunkVersion version;
- try {
- version = ChunkVersion::parse(fieldObj[ChunkType::lastmod()]);
- uassert(644490, "Version must be set", version.isSet());
- } catch (const DBException& ex) {
- return ex.toStatus();
- }
-
- ChunkType chunk;
- chunk.setMin(rangeWith.getValue().getMin());
- chunk.setMax(rangeWith.getValue().getMax());
- chunk.setVersion(version);
- return chunk;
-}
-
-/**
- * Attempts to parse a ShardId from "field" in "source".
- */
-StatusWith<ShardId> extractShardId(const BSONObj& source, StringData field) {
- std::string stringResult;
-
- auto status = bsonExtractStringField(source, field, &stringResult);
- if (!status.isOK()) {
- return status;
- }
-
- if (stringResult.empty()) {
- return Status(ErrorCodes::UnsupportedFormat,
- "The field '" + field.toString() + "' cannot be empty");
- }
-
- return ShardId(stringResult);
-}
-
-} // namespace
-
-StatusWith<CommitChunkMigrationRequest> CommitChunkMigrationRequest::createFromCommand(
- const NamespaceString& nss, const BSONObj& obj) {
-
- auto migratedChunk = extractChunk(obj, kMigratedChunk);
- if (!migratedChunk.isOK()) {
- return migratedChunk.getStatus();
- }
-
- CommitChunkMigrationRequest request(nss, std::move(migratedChunk.getValue()));
-
- {
- auto fromShard = extractShardId(obj, kFromShard);
- if (!fromShard.isOK()) {
- return fromShard.getStatus();
- }
-
- request._fromShard = std::move(fromShard.getValue());
- }
-
- {
- auto toShard = extractShardId(obj, kToShard);
- if (!toShard.isOK()) {
- return toShard.getStatus();
- }
-
- request._toShard = std::move(toShard.getValue());
- }
-
- try {
- auto fromShardVersion = ChunkVersion::parse(obj[kFromShardCollectionVersion]);
- request._collectionEpoch = fromShardVersion.epoch();
- request._collectionTimestamp = fromShardVersion.getTimestamp();
- } catch (const DBException& ex) {
- return ex.toStatus();
- }
-
- {
- Timestamp validAfter;
- auto status = bsonExtractTimestampField(obj, kValidAfter, &validAfter);
- if (!status.isOK() && status != ErrorCodes::NoSuchKey) {
- return status;
- }
-
- if (status.isOK()) {
- request._validAfter = validAfter;
- } else {
- request._validAfter = boost::none;
- }
- }
-
- return request;
-}
-
-void CommitChunkMigrationRequest::appendAsCommand(BSONObjBuilder* builder,
- const NamespaceString& nss,
- const ShardId& fromShard,
- const ShardId& toShard,
- const ChunkType& migratedChunk,
- const ChunkVersion& fromShardCollectionVersion,
- const Timestamp& validAfter) {
- invariant(builder->asTempObj().isEmpty());
- invariant(nss.isValid());
-
- builder->append(kConfigSvrCommitChunkMigration, nss.ns());
- builder->append(kFromShard, fromShard.toString());
- builder->append(kToShard, toShard.toString());
- {
- BSONObjBuilder migrateChunk(builder->subobjStart(kMigratedChunk));
- migratedChunk.getRange().append(&migrateChunk);
- migratedChunk.getVersion().serializeToBSON(ChunkType::lastmod(), &migrateChunk);
- }
- fromShardCollectionVersion.serializeToBSON(kFromShardCollectionVersion, builder);
- builder->append(kValidAfter, validAfter);
-}
-
-} // namespace mongo
diff --git a/src/mongo/s/request_types/commit_chunk_migration_request_type.h b/src/mongo/s/request_types/commit_chunk_migration_request_type.h
deleted file mode 100644
index 16d5f0ef8ce..00000000000
--- a/src/mongo/s/request_types/commit_chunk_migration_request_type.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include <string>
-
-#include "mongo/db/namespace_string.h"
-#include "mongo/s/catalog/type_chunk.h"
-
-namespace mongo {
-
-/**
- * Creates and parses commit chunk migration command BSON objects.
- */
-class CommitChunkMigrationRequest {
-public:
- CommitChunkMigrationRequest(const NamespaceString& nss, const ChunkType& chunk)
- : _nss(nss), _migratedChunk(chunk) {}
-
- /**
- * Parses the input command and produces a request corresponding to its arguments.
- */
- static StatusWith<CommitChunkMigrationRequest> createFromCommand(const NamespaceString& nss,
- const BSONObj& obj);
-
- /**
- * Constructs a commitChunkMigration command with the specified parameters and writes it to
- * the builder, without closing the builder. The builder must be empty, but callers are free
- * to append more fields once the command has been constructed.
- */
- static void appendAsCommand(BSONObjBuilder* builder,
- const NamespaceString& nss,
- const ShardId& fromShard,
- const ShardId& toShard,
- const ChunkType& migratedChunkType,
- const ChunkVersion& fromShardChunkVersion,
- const Timestamp& validAfter);
-
- const NamespaceString& getNss() const {
- return _nss;
- }
- const ShardId& getFromShard() const {
- return _fromShard;
- }
- const ShardId& getToShard() const {
- return _toShard;
- }
- const ChunkType& getMigratedChunk() const {
- return _migratedChunk;
- }
- const OID& getCollectionEpoch() {
- return _collectionEpoch;
- }
- const Timestamp& getCollectionTimestamp() {
- return _collectionTimestamp;
- }
- const boost::optional<Timestamp>& getValidAfter() {
- return _validAfter;
- }
-
-private:
- // The collection for which this request applies.
- NamespaceString _nss;
-
- // The source shard name.
- ShardId _fromShard;
-
- // The recipient shard name.
- ShardId _toShard;
-
- // The chunk being moved.
- ChunkType _migratedChunk;
-
- // Epoch/Timestamp of the collection, matches the ones set in `_migratedChunk`.
- OID _collectionEpoch;
- Timestamp _collectionTimestamp;
-
- // The time of the move
- boost::optional<Timestamp> _validAfter;
-};
-
-} // namespace mongo
diff --git a/src/mongo/s/request_types/move_chunk_request_test.cpp b/src/mongo/s/request_types/move_chunk_request_test.cpp
index e8020086d35..688d117b5c2 100644
--- a/src/mongo/s/request_types/move_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/move_chunk_request_test.cpp
@@ -49,7 +49,7 @@ const int kMaxChunkSizeBytes = 1024;
const bool kWaitForDelete = true;
TEST(MoveChunkRequest, Roundtrip) {
- const ChunkVersion chunkVersion(3, 1, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {3, 1});
BSONObjBuilder builder;
MoveChunkRequest::appendAsCommand(
@@ -81,7 +81,7 @@ TEST(MoveChunkRequest, Roundtrip) {
}
TEST(MoveChunkRequest, EqualityOperatorSameValue) {
- const ChunkVersion chunkVersion(3, 1, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {3, 1});
BSONObjBuilder builder;
MoveChunkRequest::appendAsCommand(
@@ -106,7 +106,7 @@ TEST(MoveChunkRequest, EqualityOperatorSameValue) {
}
TEST(MoveChunkRequest, EqualityOperatorDifferentValues) {
- const ChunkVersion chunkVersion(3, 1, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {3, 1});
BSONObjBuilder builder1;
MoveChunkRequest::appendAsCommand(
diff --git a/src/mongo/s/routing_table_history_test.cpp b/src/mongo/s/routing_table_history_test.cpp
index 9651911ee64..7c8973a7237 100644
--- a/src/mongo/s/routing_table_history_test.cpp
+++ b/src/mongo/s/routing_table_history_test.cpp
@@ -154,7 +154,7 @@ public:
const UUID uuid = UUID::gen();
const OID epoch = OID::gen();
const Timestamp timestamp(1);
- ChunkVersion version{1, 0, epoch, timestamp};
+ ChunkVersion version({epoch, timestamp}, {1, 0});
auto initChunk =
ChunkType{uuid,
@@ -332,7 +332,7 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
const UUID uuid = UUID::gen();
const OID epoch = OID::gen();
const Timestamp timestamp(1);
- ChunkVersion version{1, 0, epoch, timestamp};
+ ChunkVersion version({epoch, timestamp}, {1, 0});
auto chunkAll =
ChunkType{uuid,
@@ -356,35 +356,35 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
std::vector<ChunkType> chunks1 = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard}};
auto rt1 =
rt.makeUpdated(boost::none /* timeseriesFields */, boost::none, boost::none, true, chunks1);
- auto v1 = ChunkVersion{2, 2, epoch, timestamp};
+ auto v1 = ChunkVersion({epoch, timestamp}, {2, 2});
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
std::vector<ChunkType> chunks2 = {
ChunkType{uuid,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << -1)},
- ChunkVersion{3, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << -1), BSON("a" << 0)},
- ChunkVersion{3, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 2}),
kThisShard}};
auto rt2 = rt1.makeUpdated(
boost::none /* timeseriesFields */, boost::none, boost::none, true, chunks2);
- auto v2 = ChunkVersion{3, 2, epoch, timestamp};
+ auto v2 = ChunkVersion({epoch, timestamp}, {3, 2});
ASSERT_EQ(v2, rt2.getVersion(kThisShard));
}
@@ -396,7 +396,7 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
std::vector<ChunkType> initialChunks = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()},
- ChunkVersion{1, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {1, 0}),
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -416,16 +416,16 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
std::vector<ChunkType> changedChunks = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard}};
auto rt1 = rt.makeUpdated(
boost::none /* timeseriesFields */, boost::none, boost::none, true, changedChunks);
- auto v1 = ChunkVersion{2, 2, epoch, timestamp};
+ auto v1 = ChunkVersion({epoch, timestamp}, {2, 2});
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -451,7 +451,7 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
std::vector<ChunkType> initialChunks = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()},
- ChunkVersion{1, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {1, 0}),
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -471,20 +471,20 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
std::vector<ChunkType> changedChunks = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()},
- ChunkVersion{1, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {1, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard}};
auto rt1 = rt.makeUpdated(
boost::none /* timeseriesFields */, boost::none, boost::none, true, changedChunks);
- auto v1 = ChunkVersion{2, 2, epoch, timestamp};
+ auto v1 = ChunkVersion({epoch, timestamp}, {2, 2});
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
}
@@ -497,11 +497,11 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
std::vector<ChunkType> initialChunks = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -521,21 +521,21 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
std::vector<ChunkType> changedChunks = {
ChunkType{uuid,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{3, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{3, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 1}),
kThisShard}};
auto rt1 = rt.makeUpdated(
boost::none /* timeseriesFields */, boost::none, boost::none, true, changedChunks);
- auto v1 = ChunkVersion{3, 1, epoch, timestamp};
+ auto v1 = ChunkVersion({epoch, timestamp}, {3, 1});
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
auto chunk1 = rt1.findIntersectingChunk(BSON("a" << 0));
- ASSERT_EQ(chunk1->getLastmod(), ChunkVersion(3, 0, epoch, timestamp));
+ ASSERT_EQ(chunk1->getLastmod(), ChunkVersion({epoch, timestamp}, {3, 0}));
ASSERT_EQ(chunk1->getMin().woCompare(BSON("a" << 0)), 0);
ASSERT_EQ(chunk1->getMax().woCompare(getShardKeyPattern().globalMax()), 0);
}
@@ -548,15 +548,15 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunks) {
std::vector<ChunkType> initialChunks = {
ChunkType{uuid,
ChunkRange{BSON("a" << 0), BSON("a" << 10)},
- ChunkVersion{2, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 10), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -572,21 +572,21 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunks) {
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 3);
- ASSERT_EQ(rt.getVersion(), ChunkVersion(2, 2, epoch, timestamp));
+ ASSERT_EQ(rt.getVersion(), ChunkVersion({epoch, timestamp}, {2, 2}));
std::vector<ChunkType> changedChunks = {
ChunkType{uuid,
ChunkRange{BSON("a" << 10), getShardKeyPattern().globalMax()},
- ChunkVersion{3, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 10)},
- ChunkVersion{3, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 1}),
kThisShard}};
auto rt1 = rt.makeUpdated(
boost::none /* timeseriesFields */, boost::none, boost::none, true, changedChunks);
- auto v1 = ChunkVersion{3, 1, epoch, timestamp};
+ auto v1 = ChunkVersion({epoch, timestamp}, {3, 1});
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
}
@@ -599,15 +599,15 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) {
std::vector<ChunkType> initialChunks = {
ChunkType{uuid,
ChunkRange{BSON("a" << -10), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << -500)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << -500), BSON("a" << -10)},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -623,26 +623,26 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) {
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 3);
- ASSERT_EQ(rt.getVersion(), ChunkVersion(2, 2, epoch, timestamp));
+ ASSERT_EQ(rt.getVersion(), ChunkVersion({epoch, timestamp}, {2, 2}));
std::vector<ChunkType> changedChunks = {
ChunkType{uuid,
ChunkRange{BSON("a" << -500), BSON("a" << -10)},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << -10)},
- ChunkVersion{3, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 1}),
kThisShard}};
auto rt1 = rt.makeUpdated(
boost::none /* timeseriesFields */, boost::none, boost::none, true, changedChunks);
- auto v1 = ChunkVersion{3, 1, epoch, timestamp};
+ auto v1 = ChunkVersion({epoch, timestamp}, {3, 1});
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
auto chunk1 = rt1.findIntersectingChunk(BSON("a" << -500));
- ASSERT_EQ(chunk1->getLastmod(), ChunkVersion(3, 1, epoch, timestamp));
+ ASSERT_EQ(chunk1->getLastmod(), ChunkVersion({epoch, timestamp}, {3, 1}));
ASSERT_EQ(chunk1->getMin().woCompare(getShardKeyPattern().globalMin()), 0);
ASSERT_EQ(chunk1->getMax().woCompare(BSON("a" << -10)), 0);
}
@@ -655,27 +655,27 @@ TEST_F(RoutingTableHistoryTest, TestFlatten) {
std::vector<ChunkType> initialChunks = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 10)},
- ChunkVersion{2, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 10), BSON("a" << 20)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 20), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()},
- ChunkVersion{3, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 10)},
- ChunkVersion{4, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {4, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 10), getShardKeyPattern().globalMax()},
- ChunkVersion{4, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {4, 1}),
kThisShard},
};
@@ -692,10 +692,10 @@ TEST_F(RoutingTableHistoryTest, TestFlatten) {
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 2);
- ASSERT_EQ(rt.getVersion(), ChunkVersion(4, 1, epoch, timestamp));
+ ASSERT_EQ(rt.getVersion(), ChunkVersion({epoch, timestamp}, {4, 1}));
auto chunk1 = rt.findIntersectingChunk(BSON("a" << 0));
- ASSERT_EQ(chunk1->getLastmod(), ChunkVersion(4, 0, epoch, timestamp));
+ ASSERT_EQ(chunk1->getLastmod(), ChunkVersion({epoch, timestamp}, {4, 0}));
ASSERT_EQ(chunk1->getMin().woCompare(getShardKeyPattern().globalMin()), 0);
ASSERT_EQ(chunk1->getMax().woCompare(BSON("a" << 10)), 0);
}
diff --git a/src/mongo/s/s_sharding_server_status.cpp b/src/mongo/s/s_sharding_server_status.cpp
index a1515a609f4..791d40aa0fb 100644
--- a/src/mongo/s/s_sharding_server_status.cpp
+++ b/src/mongo/s/s_sharding_server_status.cpp
@@ -60,14 +60,20 @@ public:
result.append("configsvrConnectionString",
shardRegistry->getConfigServerConnectionString().toString());
+ const auto vcTime = VectorClock::get(opCtx)->getTime();
+
const auto configOpTime = [&]() {
- const auto vcTime = VectorClock::get(opCtx)->getTime();
const auto vcConfigTimeTs = vcTime.configTime().asTimestamp();
return mongo::repl::OpTime(vcConfigTimeTs, mongo::repl::OpTime::kUninitializedTerm);
}();
-
configOpTime.append(&result, "lastSeenConfigServerOpTime");
+ const auto topologyOpTime = [&]() {
+ const auto vcTopologyTimeTs = vcTime.topologyTime().asTimestamp();
+ return mongo::repl::OpTime(vcTopologyTimeTs, mongo::repl::OpTime::kUninitializedTerm);
+ }();
+ topologyOpTime.append(&result, "lastSeenTopologyOpTime");
+
const long long maxChunkSizeInBytes =
grid->getBalancerConfiguration()->getMaxChunkSizeBytes();
result.append("maxChunkSizeInBytes", maxChunkSizeInBytes);
diff --git a/src/mongo/s/stale_shard_version_helpers_test.cpp b/src/mongo/s/stale_shard_version_helpers_test.cpp
index 89a7c0d9d11..0acedd12eae 100644
--- a/src/mongo/s/stale_shard_version_helpers_test.cpp
+++ b/src/mongo/s/stale_shard_version_helpers_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/logv2/log.h"
#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/s/stale_shard_version_helpers.h"
@@ -38,7 +35,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -98,8 +94,8 @@ TEST_F(AsyncShardVersionRetry, LimitedStaleErrorsShouldReturnCorrectValue) {
service(), nss(), catalogCache, desc(), getExecutor(), token, [&](OperationContext*) {
if (++tries < 5) {
uassert(StaleConfigInfo(nss(),
- ChunkVersion(5, 23, OID::gen(), {}),
- ChunkVersion(6, 99, OID::gen(), {}),
+ ChunkVersion({OID::gen(), Timestamp(1, 0)}, {5, 23}),
+ ChunkVersion({OID::gen(), Timestamp(1, 0)}, {6, 99}),
ShardId("sB")),
"testX",
false);
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index a0ec8867628..aba9c8367c2 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
@@ -94,8 +92,8 @@ BSONObj expectInsertsReturnStaleVersionErrorsBase(const NamespaceString& nss,
staleResponse.addToErrDetails(
write_ops::WriteError(i,
Status(StaleConfigInfo(nss,
- ChunkVersion(1, 0, epoch, timestamp),
- ChunkVersion(2, 0, epoch, timestamp),
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ ChunkVersion({epoch, timestamp}, {2, 0}),
ShardId(kShardName1)),
"Stale error")));
++i;
@@ -335,7 +333,7 @@ public:
MockNSTargeter singleShardNSTargeter{
nss,
{MockRange(ShardEndpoint(kShardName1,
- ChunkVersion(100, 200, OID::gen(), Timestamp(1, 1)),
+ ChunkVersion({OID::gen(), Timestamp(1, 1)}, {100, 200}),
boost::none),
BSON("x" << MINKEY),
BSON("x" << MAXKEY))}};
@@ -406,19 +404,19 @@ TEST_F(BatchWriteExecTest, SingleUpdateTargetsShardWithLet) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
- return std::vector{
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ return std::vector{ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -493,18 +491,20 @@ TEST_F(BatchWriteExecTest, SingleDeleteTargetsShardWithLet) {
std::vector<ShardEndpoint> targetDelete(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{ShardEndpoint(
- kShardName2, ChunkVersion(101, 200, epoch, Timestamp(1, 1)), boost::none)};
+ kShardName2, ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(
- kShardName1, ChunkVersion(100, 200, epoch, Timestamp(1, 1)), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 200}),
+ boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
- MockRange(ShardEndpoint(
- kShardName2, ChunkVersion(101, 200, epoch, Timestamp(1, 1)), boost::none),
+ MockRange(ShardEndpoint(kShardName2,
+ ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}),
+ boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -685,19 +685,21 @@ TEST_F(BatchWriteExecTest, StaleShardVersionReturnedFromBatchWithSingleMultiWrit
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -726,13 +728,13 @@ TEST_F(BatchWriteExecTest, StaleShardVersionReturnedFromBatchWithSingleMultiWrit
BatchedCommandResponse response;
response.setStatus(Status::OK());
response.setNModified(0);
- response.addToErrDetails(
- write_ops::WriteError(0,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 0,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
return response.toBSON();
});
@@ -783,19 +785,21 @@ TEST_F(BatchWriteExecTest,
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("sk" << MINKEY),
BSON("sk" << 10)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("sk" << 10),
BSON("sk" << MAXKEY))});
@@ -824,20 +828,20 @@ TEST_F(BatchWriteExecTest,
BatchedCommandResponse response;
response.setStatus(Status::OK());
response.setNModified(0);
- response.addToErrDetails(
- write_ops::WriteError(0,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
- response.addToErrDetails(
- write_ops::WriteError(1,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 0,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 1,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
return response.toBSON();
});
@@ -887,19 +891,21 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("sk" << MINKEY),
BSON("sk" << 10)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("sk" << 10),
BSON("sk" << MAXKEY))});
@@ -918,13 +924,13 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
BatchedCommandResponse response;
response.setStatus(Status::OK());
response.setNModified(0);
- response.addToErrDetails(
- write_ops::WriteError(1,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 1,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
return response.toBSON();
});
@@ -934,13 +940,13 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
BatchedCommandResponse response;
response.setStatus(Status::OK());
response.setNModified(0);
- response.addToErrDetails(
- write_ops::WriteError(0,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 0,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
return response.toBSON();
});
@@ -1001,19 +1007,21 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("sk" << MINKEY),
BSON("sk" << 10)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("sk" << 10),
BSON("sk" << MAXKEY))});
@@ -1032,13 +1040,13 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
BatchedCommandResponse response;
response.setStatus(Status::OK());
response.setNModified(0);
- response.addToErrDetails(
- write_ops::WriteError(1,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 1,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
return response.toBSON();
});
@@ -1048,13 +1056,13 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
BatchedCommandResponse response;
response.setStatus(Status::OK());
response.setNModified(0);
- response.addToErrDetails(
- write_ops::WriteError(1,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 1,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
return response.toBSON();
});
@@ -1112,12 +1120,12 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
if (targetAll) {
return std::vector{
ShardEndpoint(
- kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
ShardEndpoint(
- kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
} else {
return std::vector{ShardEndpoint(
- kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
}
@@ -1127,11 +1135,11 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("sk" << MINKEY),
BSON("sk" << 10)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("sk" << 10),
BSON("sk" << MAXKEY))});
@@ -1151,13 +1159,13 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
response.setStatus(Status::OK());
response.setNModified(0);
response.setN(0);
- response.addToErrDetails(
- write_ops::WriteError(0,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 0,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
// This simulates a migration of the last chunk on shard 1 to shard 2, which means that
// future rounds on the batchExecutor should only target shard 2
@@ -1874,19 +1882,21 @@ TEST_F(BatchWriteExecTargeterErrorTest, TargetedFailedAndErrorResponse) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -2010,19 +2020,21 @@ TEST_F(BatchWriteExecTransactionTargeterErrorTest, TargetedFailedAndErrorRespons
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -2154,19 +2166,21 @@ TEST_F(BatchWriteExecTransactionMultiShardTest, TargetedSucceededAndErrorRespons
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp
index baa0c786e2a..0ba795e44a5 100644
--- a/src/mongo/s/write_ops/batched_command_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_request_test.cpp
@@ -27,10 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include <memory>
-
#include "mongo/bson/json.h"
#include "mongo/db/ops/write_ops_parsers_test_helpers.h"
#include "mongo/s/write_ops/batched_command_request.h"
@@ -76,7 +72,7 @@ TEST(BatchedCommandRequest, InsertWithShardVersion) {
ASSERT_EQ("TestDB.test", insertRequest.getInsertRequest().getNamespace().ns());
ASSERT(insertRequest.hasShardVersion());
- ASSERT_EQ(ChunkVersion(1, 2, epoch, timestamp).toString(),
+ ASSERT_EQ(ChunkVersion({epoch, timestamp}, {1, 2}).toString(),
insertRequest.getShardVersion().toString());
}
}
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index f17637ade04..4d7acf32c22 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -67,45 +67,12 @@ TEST(BatchedCommandResponseTest, Basic) {
ASSERT_BSONOBJ_EQ(origResponseObj, genResponseObj);
}
-// TODO (SERVER-64449): Get rid of this entire test case
-TEST(BatchedCommandResponseTest, StaleErrorAsStaleShardVersionCompatibility) {
+TEST(BatchedCommandResponseTest, StaleConfigInfo) {
OID epoch = OID::gen();
StaleConfigInfo staleInfo(NamespaceString("TestDB.TestColl"),
- ChunkVersion(1, 0, epoch, Timestamp(100, 0)),
- ChunkVersion(2, 0, epoch, Timestamp(100, 0)),
- ShardId("TestShard"));
- BSONObjBuilder builder;
- staleInfo.serialize(&builder);
-
- BSONArray writeErrorsArray(
- BSON_ARRAY(BSON("index" << 0 << "code" << ErrorCodes::OBSOLETE_StaleShardVersion << "errmsg"
- << "OBSOLETE_StaleShardVersion error"
- << "errInfo" << builder.obj())
- << BSON("index" << 1 << "code" << ErrorCodes::InvalidNamespace << "errmsg"
- << "index 1 failed too")));
-
- BSONObj origResponseObj =
- BSON("n" << 0 << "opTime" << mongo::Timestamp(1ULL) << "writeErrors" << writeErrorsArray
- << "retriedStmtIds" << BSON_ARRAY(1 << 3) << "ok" << 1.0);
-
- std::string errMsg;
- BatchedCommandResponse response;
- ASSERT_TRUE(response.parseBSON(origResponseObj, &errMsg));
- ASSERT_EQ(0, response.getErrDetailsAt(0).getIndex());
- ASSERT_EQ(ErrorCodes::StaleConfig, response.getErrDetailsAt(0).getStatus().code());
- auto extraInfo = response.getErrDetailsAt(0).getStatus().extraInfo<StaleConfigInfo>();
- ASSERT_EQ(staleInfo.getVersionReceived(), extraInfo->getVersionReceived());
- ASSERT_EQ(*staleInfo.getVersionWanted(), *extraInfo->getVersionWanted());
- ASSERT_EQ(staleInfo.getShardId(), extraInfo->getShardId());
-}
-
-TEST(BatchedCommandResponseTest, StaleErrorAsStaleConfigCompatibility) {
- OID epoch = OID::gen();
-
- StaleConfigInfo staleInfo(NamespaceString("TestDB.TestColl"),
- ChunkVersion(1, 0, epoch, Timestamp(100, 0)),
- ChunkVersion(2, 0, epoch, Timestamp(100, 0)),
+ ChunkVersion({epoch, Timestamp(100, 0)}, {1, 0}),
+ ChunkVersion({epoch, Timestamp(100, 0)}, {2, 0}),
ShardId("TestShard"));
BSONObjBuilder builder(BSON("index" << 0 << "code" << ErrorCodes::StaleConfig << "errmsg"
<< "StaleConfig error"));
@@ -189,7 +156,7 @@ TEST(BatchedCommandResponseTest, TooManyBigErrors) {
}
TEST(BatchedCommandResponseTest, CompatibilityFromWriteErrorToBatchCommandResponse) {
- ChunkVersion versionReceived(1, 0, OID::gen(), Timestamp(2, 0));
+ ChunkVersion versionReceived({OID::gen(), Timestamp(2, 0)}, {1, 0});
write_ops::UpdateCommandReply reply;
reply.getWriteCommandReplyBase().setN(1);
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index 884ffc906c3..2d179b6593f 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -119,11 +119,11 @@ TEST_F(WriteOpTest, TargetSingle) {
// Multi-write targeting test where our query goes to one shard
TEST_F(WriteOpTest, TargetMultiOneShard) {
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion(10, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
ShardEndpoint endpointC(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -154,11 +154,11 @@ TEST_F(WriteOpTest, TargetMultiOneShard) {
// Multi-write targeting test where our write goes to more than one shard
TEST_F(WriteOpTest, TargetMultiAllShards) {
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion(10, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
ShardEndpoint endpointC(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -196,9 +196,9 @@ TEST_F(WriteOpTest, TargetMultiAllShards) {
TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion(10, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -228,8 +228,8 @@ TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
write_ops::WriteError retryableError(
0,
{StaleConfigInfo(kNss,
- ChunkVersion(10, 0, OID(), Timestamp(1, 1)),
- ChunkVersion(11, 0, OID(), Timestamp(1, 1)),
+ ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}),
+ ChunkVersion({OID(), Timestamp(1, 1)}, {11, 0}),
ShardId("shardA")),
"simulate ssv error for test"});
writeOp.noteWriteError(*targeted[0], retryableError);
@@ -346,11 +346,11 @@ private:
TEST_F(WriteOpTransactionTest, TargetMultiDoesNotTargetAllShards) {
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion(10, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
ShardEndpoint endpointC(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -386,9 +386,9 @@ TEST_F(WriteOpTransactionTest, TargetMultiDoesNotTargetAllShards) {
TEST_F(WriteOpTransactionTest, TargetMultiAllShardsAndErrorSingleChildOp) {
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion(10, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -422,8 +422,8 @@ TEST_F(WriteOpTransactionTest, TargetMultiAllShardsAndErrorSingleChildOp) {
write_ops::WriteError retryableError(
0,
{StaleConfigInfo(kNss,
- ChunkVersion(10, 0, OID(), Timestamp(1, 1)),
- ChunkVersion(11, 0, OID(), Timestamp(1, 1)),
+ ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}),
+ ChunkVersion({OID(), Timestamp(1, 1)}, {11, 0}),
ShardId("shardA")),
"simulate ssv error for test"});
writeOp.noteWriteError(*targeted[0], retryableError);
diff --git a/src/mongo/scripting/mozjs/mongo.cpp b/src/mongo/scripting/mozjs/mongo.cpp
index dffc1163e47..849d74aca36 100644
--- a/src/mongo/scripting/mozjs/mongo.cpp
+++ b/src/mongo/scripting/mozjs/mongo.cpp
@@ -34,6 +34,7 @@
#include "mongo/bson/simple_bsonelement_comparator.h"
#include "mongo/client/client_api_version_parameters_gen.h"
+#include "mongo/client/client_deprecated.h"
#include "mongo/client/dbclient_base.h"
#include "mongo/client/dbclient_rs.h"
#include "mongo/client/global_conn_pool.h"
diff --git a/src/mongo/shell/encrypted_dbclient_base.cpp b/src/mongo/shell/encrypted_dbclient_base.cpp
index 0aeeb911885..ba83ab07471 100644
--- a/src/mongo/shell/encrypted_dbclient_base.cpp
+++ b/src/mongo/shell/encrypted_dbclient_base.cpp
@@ -571,27 +571,6 @@ std::unique_ptr<DBClientCursor> EncryptedDBClientBase::find(FindCommandRequest f
return _conn->find(std::move(findRequest), readPref, exhaustMode);
}
-std::unique_ptr<DBClientCursor> EncryptedDBClientBase::query_DEPRECATED(
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- int limit,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize,
- boost::optional<BSONObj> readConcernObj) {
- return _conn->query_DEPRECATED(nsOrUuid,
- filter,
- querySettings,
- limit,
- nToSkip,
- fieldsToReturn,
- queryOptions,
- batchSize,
- readConcernObj);
-}
-
bool EncryptedDBClientBase::isFailed() const {
return _conn->isFailed();
}
diff --git a/src/mongo/shell/encrypted_dbclient_base.h b/src/mongo/shell/encrypted_dbclient_base.h
index 4af6eb03804..ddb0c18e235 100644
--- a/src/mongo/shell/encrypted_dbclient_base.h
+++ b/src/mongo/shell/encrypted_dbclient_base.h
@@ -87,7 +87,6 @@ class EncryptedDBClientBase : public DBClientBase,
public FLEKeyVault {
public:
using DBClientBase::find;
- using DBClientBase::query_DEPRECATED;
EncryptedDBClientBase(std::unique_ptr<DBClientBase> conn,
ClientSideFLEOptions encryptionOptions,
@@ -131,17 +130,6 @@ public:
const ReadPreferenceSetting& readPref,
ExhaustMode exhaustMode) final;
- std::unique_ptr<DBClientCursor> query_DEPRECATED(
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const client_deprecated::Query& querySettings,
- int limit,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize,
- boost::optional<BSONObj> readConcernObj = boost::none) final;
-
bool isFailed() const final;
bool isStillConnected() final;
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index dab1615f2f9..08ffea17f3c 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -725,8 +725,13 @@ var ShardingTest = function(params) {
var result;
for (var i = 0; i < 5; i++) {
var otherShard = this.getOther(this.getPrimaryShard(dbName)).name;
- result = this.s.adminCommand(
- {movechunk: c, find: move, to: otherShard, _waitForDelete: waitForDelete});
+ let cmd = {movechunk: c, find: move, to: otherShard};
+
+ if (waitForDelete != null) {
+ cmd._waitForDelete = waitForDelete;
+ }
+
+ result = this.s.adminCommand(cmd);
if (result.ok)
break;
diff --git a/src/mongo/util/assert_util.h b/src/mongo/util/assert_util.h
index e3ca9855b4c..71ed502aecc 100644
--- a/src/mongo/util/assert_util.h
+++ b/src/mongo/util/assert_util.h
@@ -481,12 +481,13 @@ inline void massertStatusOKWithLocation(const Status& status, const char* file,
}
}
-#define MONGO_BASE_ASSERT_VA_FAILED(fail_func, ...) \
- do { \
- [&]() MONGO_COMPILER_COLD_FUNCTION { \
- fail_func(::mongo::error_details::makeStatus(__VA_ARGS__), MONGO_SOURCE_LOCATION()); \
- }(); \
- MONGO_COMPILER_UNREACHABLE; \
+#define MONGO_BASE_ASSERT_VA_FAILED(fail_func, ...) \
+ do { \
+ auto mongoSourceLocation = MONGO_SOURCE_LOCATION(); \
+ [&]() MONGO_COMPILER_COLD_FUNCTION { \
+ fail_func(::mongo::error_details::makeStatus(__VA_ARGS__), mongoSourceLocation); \
+ }(); \
+ MONGO_COMPILER_UNREACHABLE; \
} while (false)
#define MONGO_BASE_ASSERT_VA_4(fail_func, code, msg, cond) \
@@ -709,3 +710,21 @@ Status exceptionToStatus() noexcept;
* Like `MONGO_UNREACHABLE`, but triggers a `tassert` instead of an `invariant`
*/
#define MONGO_UNREACHABLE_TASSERT(msgid) tasserted(msgid, "Hit a MONGO_UNREACHABLE_TASSERT!")
+
+/**
+ * Produces an invariant failure if executed. A more specific form of MONGO_UNREACHABLE, used
+ * to indicate that the program has reached a function that is not yet implemented and should
+ * be unreachable in production.
+ * Example:
+ *
+ * void myFuncToDo() {
+ * MONGO_UNIMPLEMENTED;
+ * }
+ */
+#define MONGO_UNIMPLEMENTED \
+ ::mongo::invariantFailed("Hit a MONGO_UNIMPLEMENTED!", __FILE__, __LINE__);
+
+/**
+ * Like `MONGO_UNIMPLEMENTED`, but triggers a `tassert` instead of an `invariant`
+ */
+#define MONGO_UNIMPLEMENTED_TASSERT(msgid) tasserted(msgid, "Hit a MONGO_UNIMPLEMENTED_TASSERT!")
diff --git a/src/mongo/util/assert_util_test.cpp b/src/mongo/util/assert_util_test.cpp
index 8d2688eaacc..ff2f0243aef 100644
--- a/src/mongo/util/assert_util_test.cpp
+++ b/src/mongo/util/assert_util_test.cpp
@@ -336,6 +336,16 @@ DEATH_TEST(TassertTerminationTest, mongoUnreachableNonFatal, "Hit a MONGO_UNREAC
}
}
+DEATH_TEST_REGEX(TassertTerminationTest,
+ mongoUnimplementedNonFatal,
+ "6634500.*Hit a MONGO_UNIMPLEMENTED_TASSERT!") {
+ try {
+ MONGO_UNIMPLEMENTED_TASSERT(6634500);
+ } catch (const DBException&) {
+ // Catch the DBException, to ensure that we eventually abort during clean exit.
+ }
+}
+
// fassert and its friends
DEATH_TEST(FassertionTerminationTest, fassert, "40206") {
fassert(40206, false);
@@ -392,6 +402,10 @@ DEATH_TEST(InvariantTerminationTest, invariantOverload, "Terminating with invari
invariant(Status(ErrorCodes::InternalError, "Terminating with invariant"));
}
+DEATH_TEST(InvariantTerminationTest, mongoUnimplementedFatal, "Hit a MONGO_UNIMPLEMENTED!") {
+ MONGO_UNIMPLEMENTED;
+}
+
DEATH_TEST(InvariantTerminationTest, invariantStatusWithOverload, "Terminating with invariant") {
invariant(StatusWith<std::string>(ErrorCodes::InternalError,
"Terminating with invariantStatusWithOverload"));
diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data
index d9d35bf20c1..e985432246c 100644
--- a/src/third_party/wiredtiger/import.data
+++ b/src/third_party/wiredtiger/import.data
@@ -2,5 +2,5 @@
"vendor": "wiredtiger",
"github": "wiredtiger/wiredtiger.git",
"branch": "mongodb-master",
- "commit": "2666d037c9ebfc84db2c739dfc10dab0e28b3e02"
+ "commit": "f8635d2114e04b0eb12b1884c1f6d76013a3e3fc"
}
diff --git a/src/third_party/wiredtiger/src/btree/bt_split.c b/src/third_party/wiredtiger/src/btree/bt_split.c
index 2c6f8449e3c..1fd13c47fe0 100644
--- a/src/third_party/wiredtiger/src/btree/bt_split.c
+++ b/src/third_party/wiredtiger/src/btree/bt_split.c
@@ -1271,8 +1271,15 @@ __split_parent_climb(WT_SESSION_IMPL *session, WT_PAGE *page)
* to a different part of the tree where it will be written; in other words, in one part of the
* tree we'll skip the newly created insert split chunk, but we'll write it upon finding it in a
* different part of the tree.
- */
- if (__wt_btree_syncing_by_other_session(session)) {
+ *
+ * Historically we allowed checkpoint itself to trigger an internal split here. That wasn't
+ * correct: if such a split climbs the tree above the immediate parent, the checkpoint walk can
+ * miss some internal pages, which is wrong because checkpoint needs to reconcile the entire
+ * internal tree structure. Non-checkpoint cursor traversal does not depend on the internal
+ * tree structure, since it only needs to find the next leaf page correctly, so it is OK to
+ * split concurrently with cursor operations.
+ */
+ if (WT_BTREE_SYNCING(S2BT(session))) {
__split_internal_unlock(session, page);
return (0);
}
diff --git a/src/third_party/wiredtiger/src/docs/explain-isolation.dox b/src/third_party/wiredtiger/src/docs/explain-isolation.dox
index ecb8175e51c..d07074f8e31 100644
--- a/src/third_party/wiredtiger/src/docs/explain-isolation.dox
+++ b/src/third_party/wiredtiger/src/docs/explain-isolation.dox
@@ -10,8 +10,8 @@ phantoms are possible.
Transactions cannot see changes made by other transactions before those
transactions are committed. Dirty reads are not possible;
non-repeatable reads and phantoms are possible. Committed changes from
-concurrent transactions become visible when no cursor is positioned in
-the read-committed transaction.
+concurrent transactions become visible periodically during the lifecycle
+of the transaction.
- <code>snapshot</code>:
Transactions read the versions of records committed before the transaction
diff --git a/src/third_party/wiredtiger/src/docs/timestamp-txn.dox b/src/third_party/wiredtiger/src/docs/timestamp-txn.dox
index 9bafa55f2f8..a900ed763fd 100644
--- a/src/third_party/wiredtiger/src/docs/timestamp-txn.dox
+++ b/src/third_party/wiredtiger/src/docs/timestamp-txn.dox
@@ -65,6 +65,66 @@ the library will log an error message and drop core at the failing check.
These are best-effort checks by WiredTiger, and there are cases where
application misbehavior will not be detected.
+@section timestamp_txn_api_commit_timestamp Setting the transaction's commit timestamp
+
+The \c commit time is the time at which other transactions with appropriately set
+read timestamps will see the transaction's updates.
+
+The commit timestamp can be set at any point in the transaction's lifecycle.
+For prepared transactions, however, it can only be set after the transaction
+has been successfully prepared.
+
+\warning Commit (and prepare) timestamps must not be set in the past
+of any read timestamp
+that has ever been used. This rule is enforced by assertions in diagnostic
+builds, but if applications violate this rule in non-diagnostic builds, data
+consistency can be violated.
+Similarly, because reading without a read timestamp reads the latest
+values for all keys, one must not commit into the past of such a
+transaction.
+
+Applications using timestamps usually specify a timestamp to the
+WT_SESSION::commit_transaction method to set the commit time for all updates in
+the transaction.
+
+For prepared transactions, the commit timestamp must not be before the prepare timestamp.
+For non-prepared transactions, the commit timestamp must be after the stable timestamp.
+
+@section timestamp_txn_api_commit_multi_timestamp Setting multiple commit timestamps
+
+Applications may set different commit timestamps for different updates in a
+single transaction by calling WT_SESSION::timestamp_transaction repeatedly to
+set a new commit timestamp between updates in the transaction. Each new commit
+timestamp is applied to any subsequent updates. This gives applications the
+ability to commit updates that take effect at different times;
+that is, it is possible to create chains of updates where each
+update appears at a different time to readers. For transactions that set
+multiple commit timestamps, the first commit timestamp set is also required to
+be the earliest: the second and subsequent commit timestamps may not be
+earlier than the first commit timestamp. This feature is not compatible with
+prepared transactions, which must use only a single commit timestamp.
+
+This functionality exists mainly as a mechanism to allow an optimized
+implementation of re-creating a timestamped view of a data set. For example, in
+a MongoDB replica set, content is generated on one node, where transactions are
+assigned a single commit timestamp; that content is then re-created on every
+other member of the replica set. When re-creating the content, multiple changes
+from the original node are batched together into a single WiredTiger transaction.
+
+@section timestamp_txn_api_read_timestamp Setting the transaction's read timestamp
+
+Setting the transaction's read timestamp causes a transaction to not see any
+commits with a newer timestamp. (Updates may still conflict with commits having
+a newer timestamp, of course.)
+
+The read timestamp may be set to any time equal to or after the system's
+\c oldest timestamp.
+
+This restriction is enforced and applications can rely on an error return to
+detect attempts to set the read timestamp older than the \c oldest timestamp.
+
+The read timestamp may only be set once in the lifetime of a transaction.
+
@section timestamp_txn_api_query Querying transaction timestamp information
The following table lists the timestamps that can be queried using
@@ -119,55 +179,4 @@ points in the transaction's lifetime, using WT_SESSION::timestamp_transaction:
| prepare_timestamp | > stable and >= any system read timestamp | the transaction's prepare timestamp, see @ref timestamp_prepare for details |
| read_timestamp | >= oldest | the transaction's read timestamp, see @ref timestamp_txn_api_read_timestamp for details |
-@section timestamp_txn_api_commit_timestamp Setting the transaction's commit timestamp
-
-The \c commit time is the time at which other transactions with appropriately set
-read timestamps will see the transaction's updates.
-
-The commit timestamp can be set at any point in the transaction's lifecycle.
-For prepared transactions, however, it can only be set after the transaction
-has been successfully prepared.
-
-\warning Commit (and prepare) timestamps must not be set in the past
-of any read timestamp
-that has ever been used. This rule is enforced by assertions in diagnostic
-builds, but if applications violate this rule in non-diagnostic builds, data
-consistency can be violated.
-Similarly, because reading without a read timestamp reads the latest
-values for all keys, one must not commit into the past of such a
-transaction.
-
-Applications using timestamps usually specify a timestamp to the
-WT_SESSION::commit_transaction method to set the commit time for all updates in
-the transaction.
-
-Applications may set different commit timestamps for different updates in a
-single transaction by calling WT_SESSION::timestamp_transaction repeatedly to
-set a new commit timestamp between updates in the transaction. Each new commit
-timestamp is applied to any subsequent updates. This gives applications the
-ability to commit updates that take effect at different times;
-that is, it is possible to create chains of updates where each
-update appears at a different time to readers. For transactions that set
-multiple commit timestamps, the first commit timestamp set is also required to
-be the earliest: the second and subsequent commit timestamps may not be
-earlier than the first commit timestamp. This feature is not compatible with
-prepared transactions, which must use only a single commit timestamp.
-
-For prepared transactions, the commit timestamp must not be before the prepare
-timestamp. Otherwise, the commit timestamp must be after the stable timestamp.
-
-@section timestamp_txn_api_read_timestamp Setting the transaction's read timestamp
-
-Setting the transaction's read timestamp causes a transaction to not see any
-commits with a newer timestamp. (Updates may still conflict with commits having
-a newer timestamp, of course.),
-
-The read timestamp may be set to any time equal to or after the system's
-\c oldest timestamp.
-
-This restriction is enforced and applications can rely on an error return to
-detect attempts to set the read timestamp older than the \c oldest timestamp.
-
-The read timestamp may only be set once in the lifetime of a transaction.
-
*/
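
The commit- and read-timestamp sections added above describe the API in prose only. Below is a
minimal C sketch, not part of the patch, of how those calls fit together; it assumes an already
open WT_SESSION and an open cursor on a table created with string key and value formats, and the
hexadecimal timestamp strings are illustrative values chosen for the example.

#include <wiredtiger.h>

/*
 * Commit two updates in one transaction at different commit timestamps, as described in the
 * "Setting multiple commit timestamps" section above. Assumes "session" is an open WT_SESSION
 * and "cursor" is an open cursor on a table with key_format=S,value_format=S.
 */
static int
timestamped_commit(WT_SESSION *session, WT_CURSOR *cursor)
{
    int ret;

    if ((ret = session->begin_transaction(session, NULL)) != 0)
        return (ret);

    /* The first commit timestamp applies to the updates that follow it. */
    if ((ret = session->timestamp_transaction(session, "commit_timestamp=2a")) != 0)
        goto err;
    cursor->set_key(cursor, "key1");
    cursor->set_value(cursor, "value1");
    if ((ret = cursor->insert(cursor)) != 0)
        goto err;

    /*
     * A second, later commit timestamp (it must not be earlier than the first) applies to the
     * subsequent update, creating a chain of updates that become visible at different times.
     */
    if ((ret = session->timestamp_transaction(session, "commit_timestamp=2b")) != 0)
        goto err;
    cursor->set_key(cursor, "key2");
    cursor->set_value(cursor, "value2");
    if ((ret = cursor->insert(cursor)) != 0)
        goto err;

    return (session->commit_transaction(session, NULL));

err:
    (void)session->rollback_transaction(session, NULL);
    return (ret);
}

/*
 * A reader with a read timestamp sees only commits that are not newer than that timestamp:
 * at read_timestamp=2a the update committed at 2a is visible, the one committed at 2b is not.
 */
static int
read_as_of(WT_SESSION *session, WT_CURSOR *cursor)
{
    int ret;

    if ((ret = session->begin_transaction(session, "read_timestamp=2a")) != 0)
        return (ret);
    cursor->set_key(cursor, "key2");
    ret = cursor->search(cursor); /* Expected to return WT_NOTFOUND: committed at 2b. */
    (void)session->rollback_transaction(session, NULL);
    return (ret);
}
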
diff --git a/src/third_party/wiredtiger/src/docs/transactions_api.dox b/src/third_party/wiredtiger/src/docs/transactions_api.dox
index 3ccf2776fea..641ba2e8ed2 100644
--- a/src/third_party/wiredtiger/src/docs/transactions_api.dox
+++ b/src/third_party/wiredtiger/src/docs/transactions_api.dox
@@ -25,6 +25,12 @@ effects may be discarded by calling WT_SESSION::rollback_transaction. If
WT_SESSION::commit_transaction returns any error, the transaction was rolled
back, not committed.
+Schema-changing operations are generally not transactional in WiredTiger: they
+cannot be grouped together within the scope of a transaction and atomically
+committed or aborted. Think of them as one-shot transactions where the operation
+either succeeds or fails as a unit. Examples of schema-changing operations are
+table create, drop and rename.
+
A data operation executed within a transaction can fail if it conflicts with an
operation in another concurrently running transaction. (A conflict occurs
between two operations when letting both of them continue would lead to a
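
A minimal sketch, not taken from the patch, of the schema-operation caveat added above: the
create and drop calls below are issued on their own rather than inside begin/commit_transaction,
and each one either succeeds or fails as a unit. The table name and formats are illustrative.

#include <wiredtiger.h>

/* Schema-changing operations are one-shot; they are not wrapped in a transaction. */
static int
one_shot_schema_ops(WT_SESSION *session)
{
    int ret;

    if ((ret = session->create(session, "table:example", "key_format=S,value_format=S")) != 0)
        return (ret);

    /* Drop the table again; like create, this is its own atomic operation. */
    return (session->drop(session, "table:example", NULL));
}
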
diff --git a/src/third_party/wiredtiger/test/cppsuite/configs/cache_resize_default.txt b/src/third_party/wiredtiger/test/cppsuite/configs/cache_resize_default.txt
index 9b4093238e5..ceadb982b2c 100644
--- a/src/third_party/wiredtiger/test/cppsuite/configs/cache_resize_default.txt
+++ b/src/third_party/wiredtiger/test/cppsuite/configs/cache_resize_default.txt
@@ -22,7 +22,7 @@ workload_manager=
insert_config=
(
key_size=1000000,
- op_rate=4s,
+ op_rate=3s,
ops_per_transaction=(min=2,max=2),
thread_count=5,
),
@@ -37,5 +37,5 @@ operation_tracker=
# Timestamp, transaction id,
tracking_key_format=QQ,
# Operation type, cache size
- tracking_value_format=iS
+ tracking_value_format=iQ
) \ No newline at end of file
diff --git a/src/third_party/wiredtiger/test/cppsuite/src/component/operation_tracker.cpp b/src/third_party/wiredtiger/test/cppsuite/src/component/operation_tracker.cpp
index aa997875dab..ade74bf7b1c 100644
--- a/src/third_party/wiredtiger/test/cppsuite/src/component/operation_tracker.cpp
+++ b/src/third_party/wiredtiger/test/cppsuite/src/component/operation_tracker.cpp
@@ -193,7 +193,7 @@ operation_tracker::save_schema_operation(
}
int
-operation_tracker::save_operation(const uint64_t txn_id, const tracking_operation &operation,
+operation_tracker::save_operation(WT_SESSION *session, const tracking_operation &operation,
const uint64_t &collection_id, const std::string &key, const std::string &value,
wt_timestamp_t ts, scoped_cursor &op_track_cursor)
{
@@ -210,15 +210,15 @@ operation_tracker::save_operation(const uint64_t txn_id, const tracking_operatio
"save_operation: invalid operation " + std::to_string(static_cast<int>(operation));
testutil_die(EINVAL, error_message.c_str());
} else {
- set_tracking_cursor(txn_id, operation, collection_id, key, value, ts, op_track_cursor);
+ set_tracking_cursor(session, operation, collection_id, key, value, ts, op_track_cursor);
ret = op_track_cursor->insert(op_track_cursor.get());
}
return (ret);
}
-/* Note that the transaction id is not used in the default implementation of the tracking table. */
+/* Note that the session is not used in the default implementation of the tracking table. */
void
-operation_tracker::set_tracking_cursor(const uint64_t txn_id, const tracking_operation &operation,
+operation_tracker::set_tracking_cursor(WT_SESSION *session, const tracking_operation &operation,
const uint64_t &collection_id, const std::string &key, const std::string &value,
wt_timestamp_t ts, scoped_cursor &op_track_cursor)
{
diff --git a/src/third_party/wiredtiger/test/cppsuite/src/component/operation_tracker.h b/src/third_party/wiredtiger/test/cppsuite/src/component/operation_tracker.h
index 07e1a711a03..c4331501fe1 100644
--- a/src/third_party/wiredtiger/test/cppsuite/src/component/operation_tracker.h
+++ b/src/third_party/wiredtiger/test/cppsuite/src/component/operation_tracker.h
@@ -75,11 +75,11 @@ class operation_tracker : public component {
void save_schema_operation(
const tracking_operation &operation, const uint64_t &collection_id, wt_timestamp_t ts);
- virtual void set_tracking_cursor(const uint64_t txn_id, const tracking_operation &operation,
+ virtual void set_tracking_cursor(WT_SESSION *session, const tracking_operation &operation,
const uint64_t &collection_id, const std::string &key, const std::string &value,
wt_timestamp_t ts, scoped_cursor &op_track_cursor);
- int save_operation(const uint64_t txn_id, const tracking_operation &operation,
+ int save_operation(WT_SESSION *session, const tracking_operation &operation,
const uint64_t &collection_id, const std::string &key, const std::string &value,
wt_timestamp_t ts, scoped_cursor &op_track_cursor);
diff --git a/src/third_party/wiredtiger/test/cppsuite/src/main/thread_worker.cpp b/src/third_party/wiredtiger/test/cppsuite/src/main/thread_worker.cpp
index cacaa0f509a..5cf57d73944 100644
--- a/src/third_party/wiredtiger/test/cppsuite/src/main/thread_worker.cpp
+++ b/src/third_party/wiredtiger/test/cppsuite/src/main/thread_worker.cpp
@@ -120,9 +120,8 @@ thread_worker::update(
testutil_die(ret, "unhandled error while trying to update a key");
}
- uint64_t txn_id = ((WT_SESSION_IMPL *)session.get())->txn->id;
ret = op_tracker->save_operation(
- txn_id, tracking_operation::INSERT, collection_id, key, value, ts, op_track_cursor);
+ session.get(), tracking_operation::INSERT, collection_id, key, value, ts, op_track_cursor);
if (ret == 0)
txn.add_op();
@@ -162,9 +161,8 @@ thread_worker::insert(
testutil_die(ret, "unhandled error while trying to insert a key");
}
- uint64_t txn_id = ((WT_SESSION_IMPL *)session.get())->txn->id;
ret = op_tracker->save_operation(
- txn_id, tracking_operation::INSERT, collection_id, key, value, ts, op_track_cursor);
+ session.get(), tracking_operation::INSERT, collection_id, key, value, ts, op_track_cursor);
if (ret == 0)
txn.add_op();
@@ -200,9 +198,8 @@ thread_worker::remove(scoped_cursor &cursor, uint64_t collection_id, const std::
testutil_die(ret, "unhandled error while trying to remove a key");
}
- uint64_t txn_id = ((WT_SESSION_IMPL *)session.get())->txn->id;
ret = op_tracker->save_operation(
- txn_id, tracking_operation::DELETE_KEY, collection_id, key, "", ts, op_track_cursor);
+ session.get(), tracking_operation::DELETE_KEY, collection_id, key, "", ts, op_track_cursor);
if (ret == 0)
txn.add_op();
diff --git a/src/third_party/wiredtiger/test/cppsuite/tests/cache_resize.cpp b/src/third_party/wiredtiger/test/cppsuite/tests/cache_resize.cpp
index f1254b1de24..68744f554f4 100644
--- a/src/third_party/wiredtiger/test/cppsuite/tests/cache_resize.cpp
+++ b/src/third_party/wiredtiger/test/cppsuite/tests/cache_resize.cpp
@@ -45,12 +45,21 @@ class operation_tracker_cache_resize : public operation_tracker {
}
void
- set_tracking_cursor(const uint64_t txn_id, const tracking_operation &operation,
- const uint64_t &, const std::string &, const std::string &value, wt_timestamp_t ts,
+ set_tracking_cursor(WT_SESSION *session, const tracking_operation &operation, const uint64_t &,
+ const std::string &, const std::string &value, wt_timestamp_t ts,
scoped_cursor &op_track_cursor) override final
{
+ uint64_t txn_id = ((WT_SESSION_IMPL *)session)->txn->id;
+ /*
+ * The cache_size may have changed between the time we make an insert into the DB and the
+ * time we write the details to the tracking table, so we can't take cache_size from
+ * the connection. Instead, write the cache size as part of the atomic insert into the DB
+ * and, when populating the tracking table, take it from there.
+ */
+ uint64_t cache_size = std::stoull(value);
+
op_track_cursor->set_key(op_track_cursor.get(), ts, txn_id);
- op_track_cursor->set_value(op_track_cursor.get(), operation, value.c_str());
+ op_track_cursor->set_value(op_track_cursor.get(), operation, cache_size);
}
};
@@ -103,12 +112,9 @@ class cache_resize : public test {
const std::string key;
const std::string value = std::to_string(new_cache_size);
- /* Retrieve the current transaction id. */
- uint64_t txn_id = ((WT_SESSION_IMPL *)tc->session.get())->txn->id;
-
/* Save the change of cache size in the tracking table. */
tc->txn.begin();
- int ret = tc->op_tracker->save_operation(txn_id, tracking_operation::CUSTOM,
+ int ret = tc->op_tracker->save_operation(tc->session.get(), tracking_operation::CUSTOM,
collection_id, key, value, tc->tsm->get_next_ts(), tc->op_track_cursor);
if (ret == 0)
@@ -188,7 +194,7 @@ class cache_resize : public test {
uint64_t tracked_ts, tracked_txn_id;
int tracked_op_type;
- const char *tracked_cache_size;
+ uint64_t tracked_cache_size;
testutil_check(cursor->get_key(cursor.get(), &tracked_ts, &tracked_txn_id));
testutil_check(cursor->get_value(cursor.get(), &tracked_op_type, &tracked_cache_size));
@@ -196,7 +202,7 @@ class cache_resize : public test {
logger::log_msg(LOG_TRACE,
"Timestamp: " + std::to_string(tracked_ts) +
", transaction id: " + std::to_string(tracked_txn_id) +
- ", cache size: " + std::to_string(std::stoull(tracked_cache_size)));
+ ", cache size: " + std::to_string(tracked_cache_size));
tracking_operation op_type = static_cast<tracking_operation>(tracked_op_type);
/* There are only two types of operation tracked. */
@@ -226,7 +232,7 @@ class cache_resize : public test {
/*
* FIXME-WT-9339 - Save the last cache size seen by the transaction.
*
- * cache_size = std::stoull(tracked_cache_size);
+ * cache_size = tracked_cache_size;
*/
++num_records;
}
diff --git a/src/third_party/wiredtiger/test/cppsuite/tests/test_template.cpp b/src/third_party/wiredtiger/test/cppsuite/tests/test_template.cpp
index 217e90c3aae..ee75735152d 100644
--- a/src/third_party/wiredtiger/test/cppsuite/tests/test_template.cpp
+++ b/src/third_party/wiredtiger/test/cppsuite/tests/test_template.cpp
@@ -42,13 +42,13 @@ class operation_tracker_template : public operation_tracker {
}
void
- set_tracking_cursor(const uint64_t txn_id, const tracking_operation &operation,
+ set_tracking_cursor(WT_SESSION *session, const tracking_operation &operation,
const uint64_t &collection_id, const std::string &key, const std::string &value,
wt_timestamp_t ts, scoped_cursor &op_track_cursor) override final
{
/* You can replace this call to define your own tracking table contents. */
operation_tracker::set_tracking_cursor(
- txn_id, operation, collection_id, key, value, ts, op_track_cursor);
+ session, operation, collection_id, key, value, ts, op_track_cursor);
}
};
diff --git a/src/third_party/wiredtiger/test/evergreen.yml b/src/third_party/wiredtiger/test/evergreen.yml
index ed086b341e0..bc2011a7604 100755
--- a/src/third_party/wiredtiger/test/evergreen.yml
+++ b/src/third_party/wiredtiger/test/evergreen.yml
@@ -2998,6 +2998,7 @@ tasks:
working_dir: "wiredtiger"
shell: bash
script: |
+ t=__wt.$$
set -o verbose
# Install Metrix++, ensuring it is outside the 'src' directory
@@ -3013,8 +3014,14 @@ tasks:
python "../metrixplusplus/metrix++.py" limit --max-limit=std.code.complexity:cyclomatic:20
# Fail if there are functions with cyclomatic complexity larger than 91
- set -o errexit
- python "../metrixplusplus/metrix++.py" limit --max-limit=std.code.complexity:cyclomatic:91
+ python "../metrixplusplus/metrix++.py" limit --max-limit=std.code.complexity:cyclomatic:91 > $t
+ if grep -q 'exceeds' $t; then
+ echo "[ERROR]:complexity:cyclomatic: Complexity limit exceeded."
+ cat $t
+ echo "[ERROR]:complexity:cyclomatic: Finished " && rm $t && exit 1
+ else
+ cat $t && rm $t
+ fi
#############################
# Performance Tests for lsm #