Diffstat (limited to 'src/mongo/db/s')
-rw-r--r--  src/mongo/db/s/README.md | 12
-rw-r--r--  src/mongo/db/s/SConscript | 12
-rw-r--r--  src/mongo/db/s/balancer/balance_stats_test.cpp | 4
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp | 74
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp | 10
-rw-r--r--  src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp | 10
-rw-r--r--  src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp | 27
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy_test.cpp | 5
-rw-r--r--  src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp | 3
-rw-r--r--  src/mongo/db/s/balancer/type_migration.cpp | 3
-rw-r--r--  src/mongo/db/s/balancer/type_migration_test.cpp | 17
-rw-r--r--  src/mongo/db/s/check_sharding_index_command.cpp | 8
-rw-r--r--  src/mongo/db/s/chunk_splitter.cpp | 1
-rw-r--r--  src/mongo/db/s/collection_metadata_filtering_test.cpp | 4
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp | 6
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime_test.cpp | 12
-rw-r--r--  src/mongo/db/s/collmod_coordinator.cpp | 79
-rw-r--r--  src/mongo/db/s/collmod_coordinator.h | 35
-rw-r--r--  src/mongo/db/s/collmod_coordinator_pre60_compatible.cpp | 264
-rw-r--r--  src/mongo/db/s/collmod_coordinator_pre60_compatible.h | 101
-rw-r--r--  src/mongo/db/s/commit_chunk_migration.idl | 85
-rw-r--r--  src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp | 93
-rw-r--r--  src/mongo/db/s/compact_structured_encryption_data_coordinator.h | 33
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.cpp | 25
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.h | 11
-rw-r--r--  src/mongo/db/s/config/configsvr_collmod_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp | 109
-rw-r--r--  src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp | 5
-rw-r--r--  src/mongo/db/s/config/configsvr_merge_chunks_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_move_chunk_command.cpp | 12
-rw-r--r--  src/mongo/db/s/config/configsvr_remove_chunks_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_remove_tags_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp | 12
-rw-r--r--  src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp | 6
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp | 47
-rw-r--r--  src/mongo/db/s/config/initial_split_policy_test.cpp | 6
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp | 19
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 67
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp | 10
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp | 76
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp | 120
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp | 31
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp | 16
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp | 3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp | 34
-rw-r--r--  src/mongo/db/s/create_collection_coordinator.cpp | 148
-rw-r--r--  src/mongo/db/s/create_collection_coordinator.h | 83
-rw-r--r--  src/mongo/db/s/create_collection_coordinator_test.cpp | 133
-rw-r--r--  src/mongo/db/s/database_sharding_state.cpp | 5
-rw-r--r--  src/mongo/db/s/drop_collection_coordinator.cpp | 71
-rw-r--r--  src/mongo/db/s/drop_collection_coordinator.h | 38
-rw-r--r--  src/mongo/db/s/drop_database_coordinator.cpp | 70
-rw-r--r--  src/mongo/db/s/drop_database_coordinator.h | 41
-rw-r--r--  src/mongo/db/s/flush_resharding_state_change_command.cpp | 2
-rw-r--r--  src/mongo/db/s/flush_routing_table_cache_updates_command.cpp | 3
-rw-r--r--  src/mongo/db/s/metadata_manager_test.cpp | 4
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 59
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.h | 18
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp | 7
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp | 10
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp | 17
-rw-r--r--  src/mongo/db/s/migration_destination_manager_legacy_commands.cpp | 8
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp | 27
-rw-r--r--  src/mongo/db/s/migration_util.cpp | 22
-rw-r--r--  src/mongo/db/s/migration_util_test.cpp | 10
-rw-r--r--  src/mongo/db/s/move_primary_coordinator.cpp | 28
-rw-r--r--  src/mongo/db/s/move_primary_coordinator.h | 17
-rw-r--r--  src/mongo/db/s/move_primary_source_manager.cpp | 8
-rw-r--r--  src/mongo/db/s/op_observer_sharding_test.cpp | 8
-rw-r--r--  src/mongo/db/s/operation_sharding_state_test.cpp | 10
-rw-r--r--  src/mongo/db/s/range_deletion_util_test.cpp | 4
-rw-r--r--  src/mongo/db/s/refine_collection_shard_key_coordinator.cpp | 48
-rw-r--r--  src/mongo/db/s/refine_collection_shard_key_coordinator.h | 34
-rw-r--r--  src/mongo/db/s/rename_collection_coordinator.cpp | 136
-rw-r--r--  src/mongo/db/s/rename_collection_coordinator.h | 38
-rw-r--r--  src/mongo/db/s/rename_collection_participant_service.cpp | 9
-rw-r--r--  src/mongo/db/s/reshard_collection_coordinator.cpp | 48
-rw-r--r--  src/mongo/db/s/reshard_collection_coordinator.h | 34
-rw-r--r--  src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp | 3
-rw-r--r--  src/mongo/db/s/resharding/resharding_agg_test.cpp | 34
-rw-r--r--  src/mongo/db/s/resharding/resharding_collection_cloner.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_collection_cloner.h | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp | 8
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.h | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_commit_monitor_test.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_observer.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_observer_test.cpp | 84
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service.cpp | 175
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service.h | 14
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp | 17
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_test.cpp | 35
-rw-r--r--  src/mongo/db/s/resharding/resharding_data_replication.cpp | 20
-rw-r--r--  src/mongo/db/s/resharding/resharding_data_replication.h | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_data_replication_test.cpp | 11
-rw-r--r--  src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp | 10
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp | 5
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_recipient_common.h | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp | 112
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_service.cpp | 46
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_service.h | 8
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_service_test.cpp | 20
-rw-r--r--  src/mongo/db/s/resharding/resharding_manual_cleanup.cpp | 5
-rw-r--r--  src/mongo/db/s/resharding/resharding_metrics.cpp (renamed from src/mongo/db/s/resharding/resharding_metrics_new.cpp) | 102
-rw-r--r--  src/mongo/db/s/resharding/resharding_metrics.h (renamed from src/mongo/db/s/resharding/resharding_metrics_new.h) | 46
-rw-r--r--  src/mongo/db/s/resharding/resharding_metrics_test.cpp (renamed from src/mongo/db/s/resharding/resharding_metrics_new_test.cpp) | 38
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_application.cpp | 9
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_application.h | 1
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier.h | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier_metrics.cpp | 18
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier_metrics.h | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier_metrics_test.cpp | 16
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp | 25
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp | 36
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp | 41
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp | 20
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_fetcher.h | 12
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp | 20
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp | 10
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service.cpp | 78
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service.h | 10
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp | 8
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service_external_state.h | 8
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp | 10
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service_test.cpp | 31
-rw-r--r--  src/mongo/db/s/resharding/resharding_txn_cloner.cpp | 1
-rw-r--r--  src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp | 20
-rw-r--r--  src/mongo/db/s/resharding/resharding_util.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_util.h | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_util_test.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding_test_commands.cpp | 6
-rw-r--r--  src/mongo/db/s/sessions_collection_config_server.cpp | 6
-rw-r--r--  src/mongo/db/s/set_allow_migrations_coordinator.cpp | 28
-rw-r--r--  src/mongo/db/s/set_allow_migrations_coordinator.h | 16
-rw-r--r--  src/mongo/db/s/set_shard_version_command.cpp | 340
-rw-r--r--  src/mongo/db/s/shard_key_index_util.cpp | 73
-rw-r--r--  src/mongo/db/s/shard_key_index_util.h | 9
-rw-r--r--  src/mongo/db/s/shard_key_util.cpp | 49
-rw-r--r--  src/mongo/db/s/shard_key_util.h | 12
-rw-r--r--  src/mongo/db/s/shard_metadata_util.cpp | 17
-rw-r--r--  src/mongo/db/s/shard_metadata_util.h | 47
-rw-r--r--  src/mongo/db/s/shard_metadata_util_test.cpp | 12
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.cpp | 64
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp | 12
-rw-r--r--  src/mongo/db/s/sharding_data_transform_cumulative_metrics.cpp | 12
-rw-r--r--  src/mongo/db/s/sharding_data_transform_cumulative_metrics.h | 4
-rw-r--r--  src/mongo/db/s/sharding_data_transform_cumulative_metrics_test.cpp | 8
-rw-r--r--  src/mongo/db/s/sharding_data_transform_instance_metrics.cpp | 5
-rw-r--r--  src/mongo/db/s/sharding_data_transform_instance_metrics.h | 4
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator.h | 271
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator.idl | 4
-rw-r--r--  src/mongo/db/s/sharding_ddl_coordinator_service.cpp | 9
-rw-r--r--  src/mongo/db/s/sharding_ddl_util.cpp | 21
-rw-r--r--  src/mongo/db/s/sharding_ddl_util_test.cpp | 12
-rw-r--r--  src/mongo/db/s/sharding_mongod_test_fixture.cpp | 1
-rw-r--r--  src/mongo/db/s/sharding_server_status.cpp | 10
-rw-r--r--  src/mongo/db/s/sharding_util.cpp | 46
-rw-r--r--  src/mongo/db/s/sharding_util.h | 9
-rw-r--r--  src/mongo/db/s/sharding_write_router_bm.cpp | 2
-rw-r--r--  src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp | 2
-rw-r--r--  src/mongo/db/s/shardsvr_collmod_command.cpp | 96
-rw-r--r--  src/mongo/db/s/shardsvr_collmod_participant_command.cpp | 4
-rw-r--r--  src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp | 2
-rw-r--r--  src/mongo/db/s/shardsvr_create_collection_command.cpp | 20
-rw-r--r--  src/mongo/db/s/shardsvr_create_collection_participant_command.cpp | 35
-rw-r--r--  src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp | 36
-rw-r--r--  src/mongo/db/s/shardsvr_merge_chunks_command.cpp | 3
-rw-r--r--  src/mongo/db/s/shardsvr_participant_block_command.cpp | 4
-rw-r--r--  src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp | 71
-rw-r--r--  src/mongo/db/s/shardsvr_resharding_operation_time_command.cpp | 7
-rw-r--r--  src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp | 4
-rw-r--r--  src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp | 4
-rw-r--r--  src/mongo/db/s/split_chunk.cpp | 3
-rw-r--r--  src/mongo/db/s/transaction_coordinator_service.cpp | 4
-rw-r--r--  src/mongo/db/s/txn_two_phase_commit_cmds.cpp | 20
-rw-r--r--  src/mongo/db/s/type_shard_collection.cpp | 20
-rw-r--r--  src/mongo/db/s/type_shard_collection.h | 5
-rw-r--r--  src/mongo/db/s/type_shard_collection.idl | 15
-rw-r--r--  src/mongo/db/s/type_shard_collection_test.cpp | 21
184 files changed, 1991 insertions, 3456 deletions
diff --git a/src/mongo/db/s/README.md b/src/mongo/db/s/README.md
index b7d8bdff562..f3e67bce8b8 100644
--- a/src/mongo/db/s/README.md
+++ b/src/mongo/db/s/README.md
@@ -752,10 +752,14 @@ operations. The metadata is reaped if the cluster does not receive a new operati
session for a reasonably long time (the default is 30 minutes).
A logical session is identified by its "logical session id," or `lsid`. An `lsid` is a combination
-of two pieces of information:
+of up to four pieces of information:
1. `id` - A globally unique id (UUID) generated by the mongo shell, driver, or the `startSession` server command
1. `uid` (user id) - The identification information for the logged-in user (if authentication is enabled)
+1. `txnNumber` - An optional parameter set only for internal transactions spawned from retryable writes. A strictly-increasing counter set by the transaction API to match the `txnNumber` of the corresponding retryable write.
+1. `txnUUID` - An optional parameter set only for internal transactions spawned inside client sessions. The txnUUID is a globally unique id generated by the transaction API.
+
+A logical session with a `txnNumber` and `txnUUID` is considered a child of the session with matching `id` and `uid` values. There may be multiple child sessions per parent session, and checking out a child or parent session also checks out its counterpart and updates the `lastUsedTime` of both. Killing a parent session also kills all of its child sessions.
The order of operations in the logical session that need to durably store metadata is defined by an
integer counter, called the `txnNumber`. When the cluster receives a retryable write or transaction
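To make the lsid layout above concrete, here is a minimal, self-contained C++ sketch of the four fields and the parent/child relationship they encode. The type and field names are illustrative stand-ins, not MongoDB's actual `LogicalSessionId` classes.

```cpp
#include <cstdint>
#include <optional>
#include <string>

// Illustrative stand-in for an lsid: `id` and `uid` are always present, while
// `txnNumber`/`txnUUID` appear only on internal (child) sessions.
struct Lsid {
    std::string id;                      // globally unique UUID
    std::string uid;                     // digest identifying the logged-in user
    std::optional<int64_t> txnNumber;    // internal txns spawned from retryable writes
    std::optional<std::string> txnUUID;  // internal txns spawned inside client sessions
};

// A child session shares `id` and `uid` with its parent and carries at least one
// of the optional fields; the parent carries neither.
bool isChildOf(const Lsid& child, const Lsid& parent) {
    return child.id == parent.id && child.uid == parent.uid &&
        (child.txnNumber || child.txnUUID) && !parent.txnNumber && !parent.txnUUID;
}

int main() {
    Lsid parent{"uuid-A", "sha256(user)", std::nullopt, std::nullopt};
    Lsid child{"uuid-A", "sha256(user)", 5, std::string{"uuid-B"}};
    return isChildOf(child, parent) ? 0 : 1;  // exits 0: child matches its parent
}
```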
@@ -848,8 +852,12 @@ and to check the session back in upon completion. When a session is checked out,
until it is checked back in, forcing other operations to wait for the ongoing operation to complete
or yield the session.
+Checking out an internal/child session additionally checks out its parent session (the session with the same `id` and `uid` values in the lsid, but without a `txnNumber` or `txnUUID` value), and vice versa.
+
The runtime state for a session consists of the last checkout time and operation, the number of operations
-waiting to check out the session, and the number of kills requested. The last checkout time is used by
+waiting to check out the session, and the number of kills requested. Retryable internal sessions are reaped from the logical session catalog [eagerly](https://github.com/mongodb/mongo/blob/67e37f8e806a6a5d402e20eee4b3097e2b11f820/src/mongo/db/session_catalog.cpp#L342), meaning that if a transaction session with a higher transaction number has successfully started, sessions with lower txnNumbers are removed from the session catalog and inserted into an in-memory buffer by the [InternalTransactionsReapService](https://github.com/mongodb/mongo/blob/67e37f8e806a6a5d402e20eee4b3097e2b11f820/src/mongo/db/internal_transactions_reap_service.h#L42) until a configurable threshold is met (1000 by default), after which they are deleted from the transactions table (`config.transactions`) and `config.image_collection` all at once. Eager reaping is best-effort, in that the in-memory buffer is cleared on stepdown or restart. Any missed sessions will be reaped once they expire or their `config.transactions` entries have not been written to for `TransactionRecordMinimumLifetimeMinutes` minutes.
+
+The last checkout time is used by
the [periodic job inside the logical session cache](#periodic-cleanup-of-the-session-catalog-and-transactions-table)
to determine when a session should be reaped from the session catalog, whereas the number of
operations waiting to check out a session is used to block reaping of sessions that are still in
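The eager reaping described in this hunk buffers reapable child sessions in memory and deletes them from `config.transactions` in a single batch once a threshold (1000 by default) is reached, with best-effort semantics: the buffer is simply dropped on stepdown or restart. Below is an assumed simplification of that buffering pattern in standalone C++, not the actual `InternalTransactionsReapService` code.

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <mutex>
#include <string>
#include <vector>

// Buffers session ids and invokes a batch-delete callback once `threshold`
// ids have accumulated; anything still buffered is lost if clear() is called.
class ReapBuffer {
public:
    ReapBuffer(std::size_t threshold,
               std::function<void(const std::vector<std::string>&)> batchDelete)
        : _threshold(threshold), _batchDelete(std::move(batchDelete)) {}

    void add(std::string lsid) {
        std::vector<std::string> toDelete;
        {
            std::lock_guard<std::mutex> lk(_mutex);
            _buffer.push_back(std::move(lsid));
            if (_buffer.size() < _threshold)
                return;
            toDelete.swap(_buffer);  // drain the buffer under the lock
        }
        _batchDelete(toDelete);  // delete all at once, outside the lock
    }

    // Best-effort: on stepdown or restart the buffer is simply discarded.
    void clear() {
        std::lock_guard<std::mutex> lk(_mutex);
        _buffer.clear();
    }

private:
    const std::size_t _threshold;
    std::function<void(const std::vector<std::string>&)> _batchDelete;
    std::mutex _mutex;
    std::vector<std::string> _buffer;
};

int main() {
    ReapBuffer buf(3, [](const std::vector<std::string>& batch) {
        std::cout << "deleting " << batch.size() << " sessions at once\n";
    });
    for (int i = 0; i < 7; ++i)
        buf.add("lsid-" + std::to_string(i));  // flushes after the 3rd and 6th add
}
```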
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 415bd49e852..96f4e84813a 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -55,6 +55,7 @@ env.Library(
'collection_critical_section_document.idl',
'collection_sharding_runtime.cpp',
'collection_sharding_state_factory_shard.cpp',
+ 'commit_chunk_migration.idl',
'config_server_op_observer.cpp',
'global_index_metrics.cpp',
'metadata_manager.cpp',
@@ -96,7 +97,7 @@ env.Library(
'resharding/resharding_future_util.cpp',
'resharding/resharding_manual_cleanup.cpp',
'resharding/resharding_metrics_helpers.cpp',
- 'resharding/resharding_metrics_new.cpp',
+ 'resharding/resharding_metrics.cpp',
'resharding/resharding_op_observer.cpp',
'resharding/resharding_oplog_applier.cpp',
'resharding/resharding_oplog_applier_metrics.cpp',
@@ -137,6 +138,7 @@ env.Library(
'type_shard_collection.idl',
],
LIBDEPS=[
+ '$BUILD_DIR/mongo/client/remote_command_targeter',
'$BUILD_DIR/mongo/db/catalog/multi_index_block',
'$BUILD_DIR/mongo/db/client_metadata_propagation_egress_hook',
'$BUILD_DIR/mongo/db/commands/mongod_fcv',
@@ -173,6 +175,7 @@ env.Library(
'$BUILD_DIR/mongo/db/repl/image_collection_entry',
'$BUILD_DIR/mongo/db/rs_local_client',
'$BUILD_DIR/mongo/db/session_catalog',
+ '$BUILD_DIR/mongo/db/timeseries/bucket_catalog',
'$BUILD_DIR/mongo/idl/server_parameter',
'$BUILD_DIR/mongo/util/future_util',
],
@@ -334,7 +337,6 @@ env.Library(
'cluster_pipeline_cmd_d.cpp',
'cluster_write_cmd_d.cpp',
'collmod_coordinator_document.idl',
- 'collmod_coordinator_pre60_compatible.cpp',
'collmod_coordinator.cpp',
'compact_structured_encryption_data_coordinator.cpp',
'compact_structured_encryption_data_coordinator.idl',
@@ -402,7 +404,6 @@ env.Library(
'resharding_test_commands.idl',
'set_allow_migrations_coordinator_document.idl',
'set_allow_migrations_coordinator.cpp',
- 'set_shard_version_command.cpp',
'sharded_collmod.idl',
'sharded_index_consistency_server_status.cpp',
'sharded_rename_collection.idl',
@@ -550,7 +551,6 @@ env.CppUnitTest(
'collection_metadata_filtering_test.cpp',
'collection_metadata_test.cpp',
'collection_sharding_runtime_test.cpp',
- 'create_collection_coordinator_test.cpp',
'database_sharding_state_test.cpp',
'dist_lock_catalog_mock.cpp',
'dist_lock_catalog_replset_test.cpp',
@@ -574,7 +574,7 @@ env.CppUnitTest(
'resharding/resharding_donor_oplog_iterator_test.cpp',
'resharding/resharding_donor_recipient_common_test.cpp',
'resharding/resharding_donor_service_test.cpp',
- 'resharding/resharding_metrics_new_test.cpp',
+ 'resharding/resharding_metrics_test.cpp',
'resharding/resharding_oplog_applier_test.cpp',
'resharding/resharding_oplog_applier_metrics_test.cpp',
'resharding/resharding_oplog_batch_applier_test.cpp',
@@ -618,6 +618,8 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/exec/document_value/document_value_test_util',
'$BUILD_DIR/mongo/db/keys_collection_client_direct',
'$BUILD_DIR/mongo/db/logical_session_cache_impl',
+ '$BUILD_DIR/mongo/db/op_observer',
+ '$BUILD_DIR/mongo/db/op_observer_util',
'$BUILD_DIR/mongo/db/ops/write_ops_exec',
'$BUILD_DIR/mongo/db/pipeline/document_source_mock',
'$BUILD_DIR/mongo/db/pipeline/expression_context',
diff --git a/src/mongo/db/s/balancer/balance_stats_test.cpp b/src/mongo/db/s/balancer/balance_stats_test.cpp
index 9381e0a2da6..aa7b056ae34 100644
--- a/src/mongo/db/s/balancer/balance_stats_test.cpp
+++ b/src/mongo/db/s/balancer/balance_stats_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/oid.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/balancer/balance_stats.h"
@@ -79,7 +77,7 @@ private:
const Timestamp _timestamp{Timestamp(1, 1)};
const ShardId _shardPrimary{"dummyShardPrimary"};
const DatabaseVersion _dbVersion{UUID::gen(), _timestamp};
- ChunkVersion _nextVersion{1, 0, _epoch, _timestamp};
+ ChunkVersion _nextVersion{{_epoch, _timestamp}, {1, 0}};
};
TEST_F(BalanceStatsTest, SingleChunkNoZones) {
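A mechanical change that recurs throughout the test diffs below is the new `ChunkVersion` constructor shape: the collection generation `{epoch, timestamp}` now comes first, followed by the `{major, minor}` pair, replacing the old four-argument positional form `ChunkVersion(major, minor, epoch, timestamp)`. A toy sketch of the grouped-argument pattern, with stand-in types rather than the real classes:

```cpp
#include <cstdint>
#include <utility>

// Toy stand-ins illustrating the grouped constructor this patch adopts.
struct CollectionGeneration {
    uint64_t epoch;      // the real field is an OID
    uint64_t timestamp;  // the real field is a Timestamp
};

struct ChunkVersion {
    CollectionGeneration gen;
    uint32_t major, minor;
    ChunkVersion(CollectionGeneration g, std::pair<uint32_t, uint32_t> v)
        : gen(g), major(v.first), minor(v.second) {}
};

int main() {
    // Old: ChunkVersion(1, 0, epoch, timestamp). New: generation first, version second.
    ChunkVersion v({/*epoch*/ 42, /*timestamp*/ 1}, {1, 0});
    return static_cast<int>(v.minor);
}
```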
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 2ec66bc8ffd..fc2c42a59c1 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -80,13 +80,13 @@ namespace {
MONGO_FAIL_POINT_DEFINE(overrideBalanceRoundInterval);
-const Seconds kBalanceRoundDefaultInterval(10);
+const Milliseconds kBalanceRoundDefaultInterval(10 * 1000);
// Sleep between balancer rounds in the case where the last round found some chunks which needed to
// be balanced. This value should be set sufficiently low so that imbalanced clusters will quickly
// reach balanced state, but setting it too low may cause CRUD operations to start failing due to
// not being able to establish a stable shard version.
-const Seconds kShortBalanceRoundInterval(1);
+const Milliseconds kBalancerMigrationsThrottling(1 * 1000);
/**
* Balancer status response
@@ -293,11 +293,11 @@ void Balancer::initiateBalancer(OperationContext* opCtx) {
void Balancer::interruptBalancer() {
stdx::lock_guard<Latch> scopedLock(_mutex);
- if (_state != kRunning)
+ if (_state != kRunning) {
return;
+ }
_state = kStopping;
- _thread.detach();
// Interrupt the balancer thread if it has been started. We are guaranteed that the operation
// context of that thread is still alive, because we hold the balancer mutex.
@@ -312,8 +312,10 @@ void Balancer::interruptBalancer() {
void Balancer::waitForBalancerToStop() {
stdx::unique_lock<Latch> scopedLock(_mutex);
-
_joinCond.wait(scopedLock, [this] { return _state == kStopped; });
+ if (_thread.joinable()) {
+ _thread.join();
+ }
}
void Balancer::joinCurrentRound(OperationContext* opCtx) {
@@ -612,12 +614,12 @@ void Balancer::_consumeActionStreamLoop() {
void Balancer::_mainThread() {
ON_BLOCK_EXIT([this] {
- stdx::lock_guard<Latch> scopedLock(_mutex);
-
- _state = kStopped;
+ {
+ stdx::lock_guard<Latch> scopedLock(_mutex);
+ _state = kStopped;
+ LOGV2_DEBUG(21855, 1, "Balancer thread terminated");
+ }
_joinCond.notify_all();
-
- LOGV2_DEBUG(21855, 1, "Balancer thread terminated");
});
Client::initThread("Balancer");
@@ -664,6 +666,7 @@ void Balancer::_mainThread() {
LOGV2(6036606, "Balancer worker thread initialised. Entering main loop.");
// Main balancer loop
+ auto lastMigrationTime = Date_t::fromMillisSinceEpoch(0);
while (!_stopRequested()) {
BalanceRoundDetails roundDetails;
@@ -691,6 +694,14 @@ void Balancer::_mainThread() {
continue;
}
+ boost::optional<Milliseconds> forcedBalancerRoundInterval(boost::none);
+ overrideBalanceRoundInterval.execute([&](const BSONObj& data) {
+ forcedBalancerRoundInterval = Milliseconds(data["intervalMs"].numberInt());
+ LOGV2(21864,
+ "overrideBalanceRoundInterval: using customized balancing interval",
+ "balancerInterval"_attr = *forcedBalancerRoundInterval);
+ });
+
// The current configuration is allowing the balancer to perform operations.
// Unblock the secondary thread if needed.
_defragmentationCondVar.notify_all();
@@ -739,9 +750,20 @@ void Balancer::_mainThread() {
if (chunksToRebalance.empty() && chunksToDefragment.empty()) {
LOGV2_DEBUG(21862, 1, "No need to move any chunk");
_balancedLastTime = 0;
+ LOGV2_DEBUG(21863, 1, "End balancing round");
+ _endRound(opCtx.get(),
+ forcedBalancerRoundInterval ? *forcedBalancerRoundInterval
+ : kBalanceRoundDefaultInterval);
} else {
+ auto timeSinceLastMigration = Date_t::now() - lastMigrationTime;
+ _sleepFor(opCtx.get(),
+ forcedBalancerRoundInterval
+ ? *forcedBalancerRoundInterval - timeSinceLastMigration
+ : kBalancerMigrationsThrottling - timeSinceLastMigration);
+
_balancedLastTime =
_moveChunks(opCtx.get(), chunksToRebalance, chunksToDefragment);
+ lastMigrationTime = Date_t::now();
roundDetails.setSucceeded(
static_cast<int>(chunksToRebalance.size() + chunksToDefragment.size()),
@@ -750,24 +772,13 @@ void Balancer::_mainThread() {
ShardingLogging::get(opCtx.get())
->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON())
.ignore();
- }
- LOGV2_DEBUG(21863, 1, "End balancing round");
+ LOGV2_DEBUG(6679500, 1, "End balancing round");
+ // Migration throttling of kBalancerMigrationsThrottling will be applied before
+ // the next call to _moveChunks, so don't sleep here.
+ _endRound(opCtx.get(), Milliseconds(0));
+ }
}
-
- Milliseconds balancerInterval =
- _balancedLastTime ? kShortBalanceRoundInterval : kBalanceRoundDefaultInterval;
-
- overrideBalanceRoundInterval.execute([&](const BSONObj& data) {
- balancerInterval = Milliseconds(data["intervalMs"].numberInt());
- LOGV2(21864,
- "overrideBalanceRoundInterval: using shorter balancing interval: "
- "{balancerInterval}",
- "overrideBalanceRoundInterval: using shorter balancing interval",
- "balancerInterval"_attr = balancerInterval);
- });
-
- _endRound(opCtx.get(), balancerInterval);
} catch (const DBException& e) {
LOGV2(21865,
"caught exception while doing balance: {error}",
@@ -976,15 +987,6 @@ int Balancer::_moveChunks(OperationContext* opCtx,
return coll.getMaxChunkSizeBytes().value_or(balancerConfig->getMaxChunkSizeBytes());
}();
- if (serverGlobalParams.featureCompatibility.isLessThan(
- multiversion::FeatureCompatibilityVersion::kVersion_6_0)) {
- // TODO SERVER-65322 only use `moveRange` once v6.0 branches out
- MoveChunkSettings settings(maxChunkSizeBytes,
- balancerConfig->getSecondaryThrottle(),
- balancerConfig->waitForDelete());
- return _commandScheduler->requestMoveChunk(opCtx, migrateInfo, settings);
- }
-
MoveRangeRequestBase requestBase(migrateInfo.to);
requestBase.setWaitForDelete(balancerConfig->waitForDelete());
requestBase.setMin(migrateInfo.minKey);
@@ -1086,7 +1088,7 @@ SharedSemiFuture<void> Balancer::applyLegacyChunkSizeConstraintsOnClusterData(
NamespaceString::kLogicalSessionsNamespace,
0,
boost::none /*defragmentCollection*/,
- boost::none /*enableAutoSplitter*/);
+ false /*enableAutoSplitter*/);
} catch (const ExceptionFor<ErrorCodes::NamespaceNotSharded>&) {
// config.system.collections does not appear in config.collections; continue.
}
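The balancer.cpp hunks above replace the fixed one-second `kShortBalanceRoundInterval` with per-migration throttling: before issuing the next batch of migrations, the thread sleeps only for whatever remains of `kBalancerMigrationsThrottling` since the last migration completed. A standalone std::chrono sketch of that pattern, as an assumed simplification of the server code:

```cpp
#include <chrono>
#include <thread>

int main() {
    using namespace std::chrono;
    const milliseconds kBalancerMigrationsThrottling(1000);
    auto lastMigrationTime = steady_clock::time_point{};  // epoch, so round 0 never waits

    for (int round = 0; round < 3; ++round) {
        auto timeSinceLastMigration =
            duration_cast<milliseconds>(steady_clock::now() - lastMigrationTime);
        auto sleepFor = kBalancerMigrationsThrottling - timeSinceLastMigration;
        if (sleepFor > milliseconds(0))
            std::this_thread::sleep_for(sleepFor);  // only the remainder of the window
        // ... schedule this round's migrations here ...
        lastMigrationTime = steady_clock::now();
    }
}
```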
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
index bf22d67619e..8b50d3d002f 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/commands.h"
#include "mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h"
#include "mongo/db/s/balancer/cluster_statistics_impl.h"
@@ -133,7 +131,7 @@ TEST_F(BalancerChunkSelectionTest, TagRangesOverlap) {
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
- ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
setUpDatabase(kDbName, kShardId0);
setUpCollection(kNamespace, collUUID, version);
@@ -192,7 +190,7 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
- ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
setUpDatabase(kDbName, kShardId0);
setUpCollection(kNamespace, collUUID, version);
@@ -251,7 +249,7 @@ TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeAutoSplitted
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
- ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
setUpDatabase(kDbName, kShardId0);
TypeCollectionTimeseriesFields tsFields;
@@ -302,7 +300,7 @@ TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeBalanced) {
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
- ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
setUpDatabase(kDbName, kShardId0);
TypeCollectionTimeseriesFields tsFields;
diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp b/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp
index e78ae862393..7ebe9dac42c 100644
--- a/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp
@@ -155,7 +155,7 @@ std::vector<RequestData> rebuildRequestsFromRecoveryInfo(
DBDirectClient dbClient(opCtx);
try {
FindCommandRequest findRequest{MigrationType::ConfigNS};
- dbClient.find(std::move(findRequest), ReadPreferenceSetting{}, documentProcessor);
+ dbClient.find(std::move(findRequest), documentProcessor);
} catch (const DBException& e) {
LOGV2_ERROR(5847215, "Failed to fetch requests to recover", "error"_attr = redact(e));
}
diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
index 678e5f63f9f..72e86413aa9 100644
--- a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/s/balancer/balancer_commands_scheduler.h"
#include "mongo/db/s/balancer/balancer_commands_scheduler_impl.h"
@@ -65,7 +63,7 @@ public:
chunk.setMax(BSON("x" << min + 10));
chunk.setJumbo(false);
chunk.setShard(shardId);
- chunk.setVersion(ChunkVersion(1, 1, OID::gen(), Timestamp(10)));
+ chunk.setVersion(ChunkVersion({OID::gen(), Timestamp(10)}, {1, 1}));
return chunk;
}
@@ -76,7 +74,7 @@ public:
kUuid,
BSON("x" << min),
BSON("x" << min + 10),
- ChunkVersion(1, 1, OID::gen(), Timestamp(10)),
+ ChunkVersion({OID::gen(), Timestamp(10)}, {1, 1}),
MoveChunkRequest::ForceJumbo::kDoNotForce);
}
@@ -234,7 +232,7 @@ TEST_F(BalancerCommandsSchedulerTest, SuccessfulMergeChunkCommand) {
_scheduler.start(operationContext(), getMigrationRecoveryDefaultValues());
ChunkRange range(BSON("x" << 0), BSON("x" << 20));
- ChunkVersion version(1, 1, OID::gen(), Timestamp(10));
+ ChunkVersion version({OID::gen(), Timestamp(10)}, {1, 1});
auto futureResponse =
_scheduler.requestMergeChunks(operationContext(), kNss, kShardId0, range, version);
ASSERT_OK(futureResponse.getNoThrow());
@@ -246,7 +244,7 @@ TEST_F(BalancerCommandsSchedulerTest, MergeChunkNonexistentShard) {
auto remoteResponsesFuture = setRemoteResponses();
_scheduler.start(operationContext(), getMigrationRecoveryDefaultValues());
ChunkRange range(BSON("x" << 0), BSON("x" << 20));
- ChunkVersion version(1, 1, OID::gen(), Timestamp(10));
+ ChunkVersion version({OID::gen(), Timestamp(10)}, {1, 1});
auto futureResponse = _scheduler.requestMergeChunks(
operationContext(), kNss, ShardId("nonexistent"), range, version);
auto shardNotFoundError = Status{ErrorCodes::ShardNotFound, "Shard nonexistent not found"};
diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
index c42f7e86cd7..d1f431b4082 100644
--- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp
@@ -47,7 +47,7 @@ protected:
const ShardId kShardId1 = ShardId("shard1");
const ShardId kShardId2 = ShardId("shard2");
const ShardId kShardId3 = ShardId("shard3");
- const ChunkVersion kCollectionVersion = ChunkVersion(1, 1, OID::gen(), Timestamp(10));
+ const ChunkVersion kCollectionVersion = ChunkVersion({OID::gen(), Timestamp(10)}, {1, 1});
const KeyPattern kShardKeyPattern = KeyPattern(BSON("x" << 1));
const BSONObj kKeyAtMin = BSONObjBuilder().appendMinKey("x").obj();
const BSONObj kKeyAtZero = BSON("x" << 0);
@@ -494,7 +494,8 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAllConsecutive) {
ChunkType chunk(
kUuid,
ChunkRange(minKey, maxKey),
- ChunkVersion(1, i, kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()),
+ ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()},
+ {1, uint32_t(i)}),
kShardId0);
chunkList.push_back(chunk);
}
@@ -504,7 +505,8 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseOneAllConsecutive) {
ChunkType chunk(
kUuid,
ChunkRange(minKey, maxKey),
- ChunkVersion(1, i, kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()),
+ ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()},
+ {1, uint32_t(i)}),
kShardId1);
chunkList.push_back(chunk);
}
@@ -543,7 +545,8 @@ TEST_F(BalancerDefragmentationPolicyTest, PhaseOneNotConsecutive) {
ChunkType chunk(
kUuid,
ChunkRange(minKey, maxKey),
- ChunkVersion(1, i, kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()),
+ ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()},
+ {1, uint32_t(i)}),
chosenShard);
chunkList.push_back(chunk);
}
@@ -620,13 +623,13 @@ TEST_F(BalancerDefragmentationPolicyTest, TestPhaseTwoChunkCanBeMovedAndMergedWi
ChunkType biggestChunk(
kUuid,
ChunkRange(kKeyAtMin, kKeyAtZero),
- ChunkVersion(1, 0, kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()),
+ ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 0}),
kShardId0);
biggestChunk.setEstimatedSizeBytes(2048);
ChunkType smallestChunk(
kUuid,
ChunkRange(kKeyAtZero, kKeyAtMax),
- ChunkVersion(1, 1, kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()),
+ ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 1}),
kShardId1);
smallestChunk.setEstimatedSizeBytes(1024);
@@ -682,42 +685,42 @@ TEST_F(BalancerDefragmentationPolicyTest,
ChunkType firstChunkOnShard0(
kUuid,
ChunkRange(kKeyAtMin, kKeyAtZero),
- ChunkVersion(1, 0, kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()),
+ ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 0}),
kShardId0);
firstChunkOnShard0.setEstimatedSizeBytes(1);
ChunkType firstChunkOnShard1(
kUuid,
ChunkRange(kKeyAtZero, kKeyAtTen),
- ChunkVersion(1, 1, kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()),
+ ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 1}),
kShardId1);
firstChunkOnShard1.setEstimatedSizeBytes(1);
ChunkType chunkOnShard2(
kUuid,
ChunkRange(kKeyAtTen, kKeyAtTwenty),
- ChunkVersion(1, 2, kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()),
+ ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 2}),
kShardId2);
chunkOnShard2.setEstimatedSizeBytes(1);
ChunkType chunkOnShard3(
kUuid,
ChunkRange(kKeyAtTwenty, kKeyAtThirty),
- ChunkVersion(1, 3, kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()),
+ ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 3}),
kShardId3);
chunkOnShard3.setEstimatedSizeBytes(1);
ChunkType secondChunkOnShard0(
kUuid,
ChunkRange(kKeyAtThirty, kKeyAtForty),
- ChunkVersion(1, 4, kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()),
+ ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 4}),
kShardId0);
secondChunkOnShard0.setEstimatedSizeBytes(1);
ChunkType secondChunkOnShard1(
kUuid,
ChunkRange(kKeyAtForty, kKeyAtMax),
- ChunkVersion(1, 5, kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()),
+ ChunkVersion({kCollectionVersion.epoch(), kCollectionVersion.getTimestamp()}, {1, 5}),
kShardId1);
secondChunkOnShard1.setEstimatedSizeBytes(1);
diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp
index fb98d610b00..be3532fee56 100644
--- a/src/mongo/db/s/balancer/balancer_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/keypattern.h"
#include "mongo/db/s/balancer/balancer_policy.h"
#include "mongo/platform/random.h"
@@ -79,7 +76,7 @@ std::pair<ShardStatisticsVector, ShardToChunksMap> generateCluster(
int64_t currentChunk = 0;
- ChunkVersion chunkVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
const UUID uuid = UUID::gen();
const KeyPattern shardKeyPattern(BSON("x" << 1));
diff --git a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
index 607e57dab44..94b6e874cbf 100644
--- a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
+++ b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
@@ -30,6 +30,7 @@
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/s/balancer/cluster_chunks_resize_policy_impl.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
+
namespace mongo {
namespace {
@@ -37,7 +38,7 @@ class ClusterChunksResizePolicyTest : public ConfigServerTestFixture {
protected:
const NamespaceString kNss{"testDb.testColl"};
const UUID kUuid = UUID::gen();
- const ChunkVersion kCollectionVersion = ChunkVersion(1, 1, OID::gen(), Timestamp(10));
+ const ChunkVersion kCollectionVersion = ChunkVersion({OID::gen(), Timestamp(10)}, {1, 1});
const ShardId kShardId0 = ShardId("shard0");
const ShardId kShardId1 = ShardId("shard1");
diff --git a/src/mongo/db/s/balancer/type_migration.cpp b/src/mongo/db/s/balancer/type_migration.cpp
index 1aac063b940..a47fdff6197 100644
--- a/src/mongo/db/s/balancer/type_migration.cpp
+++ b/src/mongo/db/s/balancer/type_migration.cpp
@@ -113,8 +113,7 @@ StatusWith<MigrationType> MigrationType::fromBSON(const BSONObj& source) {
}
try {
- auto chunkVersionStatus =
- ChunkVersion::fromBSONPositionalOrNewerFormat(source[chunkVersion.name()]);
+ auto chunkVersionStatus = ChunkVersion::parse(source[chunkVersion.name()]);
migrationType._chunkVersion = chunkVersionStatus;
} catch (const DBException& ex) {
return ex.toStatus();
diff --git a/src/mongo/db/s/balancer/type_migration_test.cpp b/src/mongo/db/s/balancer/type_migration_test.cpp
index f605983fe2c..610e150c963 100644
--- a/src/mongo/db/s/balancer/type_migration_test.cpp
+++ b/src/mongo/db/s/balancer/type_migration_test.cpp
@@ -27,12 +27,9 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/jsobj.h"
#include "mongo/db/s/balancer/type_migration.h"
#include "mongo/s/catalog/type_chunk.h"
-
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -48,7 +45,7 @@ const ShardId kToShard("shard0001");
const bool kWaitForDelete{true};
TEST(MigrationTypeTest, FromAndToBSONWithoutOptionalFields) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -68,7 +65,7 @@ TEST(MigrationTypeTest, FromAndToBSONWithoutOptionalFields) {
}
TEST(MigrationTypeTest, FromAndToBSONWitOptionalFields) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
const auto secondaryThrottle =
MigrationSecondaryThrottleOptions::createWithWriteConcern(WriteConcernOptions(
"majority", WriteConcernOptions::SyncMode::JOURNAL, Milliseconds(60000)));
@@ -94,7 +91,7 @@ TEST(MigrationTypeTest, FromAndToBSONWitOptionalFields) {
}
TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::min(), kMin);
@@ -111,7 +108,7 @@ TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
}
TEST(MigrationTypeTest, MissingRequiredMinField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -128,7 +125,7 @@ TEST(MigrationTypeTest, MissingRequiredMinField) {
}
TEST(MigrationTypeTest, MissingRequiredMaxField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -145,7 +142,7 @@ TEST(MigrationTypeTest, MissingRequiredMaxField) {
}
TEST(MigrationTypeTest, MissingRequiredFromShardField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -162,7 +159,7 @@ TEST(MigrationTypeTest, MissingRequiredFromShardField) {
}
TEST(MigrationTypeTest, MissingRequiredToShardField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index 1422dc7c4c8..004c23b2d31 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -27,7 +27,6 @@
* it in the license file.
*/
-
#include "mongo/platform/basic.h"
#include "mongo/db/auth/action_type.h"
@@ -40,7 +39,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
@@ -96,13 +94,15 @@ public:
return false;
}
+ std::string tmpErrMsg = "couldn't find valid index for shard key";
auto shardKeyIdx = findShardKeyPrefixedIndex(opCtx,
*collection,
collection->getIndexCatalog(),
keyPattern,
- /*requireSingleKey=*/true);
+ /*requireSingleKey=*/true,
+ &tmpErrMsg);
if (!shardKeyIdx) {
- errmsg = "couldn't find valid index for shard key";
+ errmsg = tmpErrMsg;
return false;
}
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index e0fb5839a09..043b0139b20 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -31,7 +31,6 @@
#include "mongo/db/s/chunk_splitter.h"
#include "mongo/client/dbclient_cursor.h"
-#include "mongo/client/query.h"
#include "mongo/db/client.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index e2e3081b436..74dc6a9e655 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/catalog_raii.h"
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/operation_sharding_state.h"
@@ -79,7 +77,7 @@ protected:
boost::none,
true,
[&] {
- ChunkVersion version(1, 0, epoch, Timestamp(1, 1));
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunk1(uuid,
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 8f789549796..4084fe8e9e2 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/base/status.h"
#include "mongo/db/range_arithmetic.h"
#include "mongo/db/s/collection_metadata.h"
@@ -62,7 +60,7 @@ CollectionMetadata makeCollectionMetadataImpl(
std::vector<ChunkType> allChunks;
auto nextMinKey = shardKeyPattern.globalMin();
- ChunkVersion version{1, 0, epoch, timestamp};
+ ChunkVersion version({epoch, timestamp}, {1, 0});
for (const auto& myNextChunk : thisShardsChunks) {
if (SimpleBSONObjComparator::kInstance.evaluate(nextMinKey < myNextChunk.first)) {
// Need to add a chunk to the other shard from nextMinKey to myNextChunk.first.
@@ -125,7 +123,7 @@ protected:
reshardingFields.setRecipientFields(std::move(recipientFields));
} else if (state == CoordinatorStateEnum::kBlockingWrites) {
TypeCollectionDonorFields donorFields{
- constructTemporaryReshardingNss(kNss.db(), existingUuid),
+ resharding::constructTemporaryReshardingNss(kNss.db(), existingUuid),
KeyPattern{BSON("newKey" << 1)},
{kThisShard, kOtherShard}};
reshardingFields.setDonorFields(std::move(donorFields));
diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp
index dcee5b73ac0..c6985aa5742 100644
--- a/src/mongo/db/s/collection_sharding_runtime_test.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "boost/optional/optional_io.hpp"
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/catalog_raii.h"
@@ -63,7 +61,7 @@ protected:
const Timestamp timestamp(1, 1);
auto range = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY));
auto chunk = ChunkType(
- uuid, std::move(range), ChunkVersion(1, 0, epoch, timestamp), ShardId("other"));
+ uuid, std::move(range), ChunkVersion({epoch, timestamp}, {1, 0}), ShardId("other"));
ChunkManager cm(ShardId("0"),
DatabaseVersion(UUID::gen(), timestamp),
makeStandaloneRoutingTableHistory(
@@ -218,8 +216,8 @@ TEST_F(CollectionShardingRuntimeTest, ReturnUnshardedMetadataInServerlessMode) {
ScopedSetShardRole scopedSetShardRole2{
opCtx,
NamespaceString::kLogicalSessionsNamespace,
- ChunkVersion(1, 0, OID::gen(), Timestamp(1, 1)), /* shardVersion */
- boost::none /* databaseVersion */
+ ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0}), /* shardVersion */
+ boost::none /* databaseVersion */
};
CollectionShardingRuntime csrLogicalSession(
@@ -324,11 +322,11 @@ public:
const Timestamp& timestamp) {
auto range1 = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << 5));
ChunkType chunk1(
- uuid, range1, ChunkVersion(1, 0, epoch, timestamp), kShardList[0].getName());
+ uuid, range1, ChunkVersion({epoch, timestamp}, {1, 0}), kShardList[0].getName());
auto range2 = ChunkRange(BSON(kShardKey << 5), BSON(kShardKey << MAXKEY));
ChunkType chunk2(
- uuid, range2, ChunkVersion(1, 1, epoch, timestamp), kShardList[0].getName());
+ uuid, range2, ChunkVersion({epoch, timestamp}, {1, 1}), kShardList[0].getName());
return {chunk1, chunk2};
}
diff --git a/src/mongo/db/s/collmod_coordinator.cpp b/src/mongo/db/s/collmod_coordinator.cpp
index ebf179c21c8..50e92b41571 100644
--- a/src/mongo/db/s/collmod_coordinator.cpp
+++ b/src/mongo/db/s/collmod_coordinator.cpp
@@ -77,10 +77,7 @@ bool hasTimeSeriesGranularityUpdate(const CollModRequest& request) {
CollModCoordinator::CollModCoordinator(ShardingDDLCoordinatorService* service,
const BSONObj& initialState)
- : ShardingDDLCoordinator(service, initialState),
- _initialState{initialState.getOwned()},
- _doc{CollModCoordinatorDocument::parse(IDLParserErrorContext("CollModCoordinatorDocument"),
- _initialState)},
+ : RecoverableShardingDDLCoordinator(service, "CollModCoordinator", initialState),
_request{_doc.getCollModRequest()} {}
void CollModCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
@@ -96,54 +93,9 @@ void CollModCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
SimpleBSONObjComparator::kInstance.evaluate(selfReq == otherReq));
}
-boost::optional<BSONObj> CollModCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
-
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
-
- const auto currPhase = [&]() {
- stdx::lock_guard l{_docMutex};
- return _doc.getPhase();
- }();
-
- cmdBob.appendElements(_request.toBSON());
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "CollModCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
- return bob.obj();
-}
-
-void CollModCoordinator::_enterPhase(Phase newPhase) {
- StateDoc newDoc(_doc);
- newDoc.setPhase(newPhase);
-
- LOGV2_DEBUG(6069401,
- 2,
- "CollMod coordinator phase transition",
- "namespace"_attr = nss(),
- "newPhase"_attr = CollModCoordinatorPhase_serializer(newDoc.getPhase()),
- "oldPhase"_attr = CollModCoordinatorPhase_serializer(_doc.getPhase()));
-
- if (_doc.getPhase() == Phase::kUnset) {
- newDoc = _insertStateDocument(std::move(newDoc));
- } else {
- newDoc = _updateStateDocument(cc().makeOperationContext().get(), std::move(newDoc));
- }
-
- {
- stdx::unique_lock ul{_docMutex};
- _doc = std::move(newDoc);
- }
-}
+void CollModCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ cmdInfoBuilder->appendElements(_request.toBSON());
+};
void CollModCoordinator::_performNoopRetryableWriteOnParticipants(
OperationContext* opCtx, const std::shared_ptr<executor::TaskExecutor>& executor) {
@@ -154,9 +106,9 @@ void CollModCoordinator::_performNoopRetryableWriteOnParticipants(
return participants;
}();
- _doc = _updateSession(opCtx, _doc);
+ _updateSession(opCtx);
sharding_ddl_util::performNoopRetryableWriteOnShards(
- opCtx, shardsAndConfigsvr, getCurrentSession(_doc), executor);
+ opCtx, shardsAndConfigsvr, getCurrentSession(), executor);
}
void CollModCoordinator::_saveCollectionInfoOnCoordinatorIfNecessary(OperationContext* opCtx) {
@@ -229,14 +181,15 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
auto* opCtx = opCtxHolder.get();
getForwardableOpMetadata().setOn(opCtx);
- _doc = _updateSession(opCtx, _doc);
+ _updateSession(opCtx);
_saveCollectionInfoOnCoordinatorIfNecessary(opCtx);
if (_collInfo->isSharded) {
- _doc.setCollUUID(
- sharding_ddl_util::getCollectionUUID(opCtx, nss(), true /* allowViews */));
- sharding_ddl_util::stopMigrations(opCtx, nss(), _doc.getCollUUID());
+ _doc.setCollUUID(sharding_ddl_util::getCollectionUUID(
+ opCtx, _collInfo->nsForTargeting, true /* allowViews */));
+ sharding_ddl_util::stopMigrations(
+ opCtx, _collInfo->nsForTargeting, _doc.getCollUUID());
}
_saveShardingInfoOnCoordinatorIfNecessary(opCtx);
@@ -258,7 +211,7 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
auto* opCtx = opCtxHolder.get();
getForwardableOpMetadata().setOn(opCtx);
- _doc = _updateSession(opCtx, _doc);
+ _updateSession(opCtx);
_saveCollectionInfoOnCoordinatorIfNecessary(opCtx);
_saveShardingInfoOnCoordinatorIfNecessary(opCtx);
@@ -285,7 +238,7 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
auto* opCtx = opCtxHolder.get();
getForwardableOpMetadata().setOn(opCtx);
- _doc = _updateSession(opCtx, _doc);
+ _updateSession(opCtx);
_saveCollectionInfoOnCoordinatorIfNecessary(opCtx);
_saveShardingInfoOnCoordinatorIfNecessary(opCtx);
@@ -335,7 +288,8 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
CommandHelpers::appendSimpleCommandStatus(builder, ok, errmsg);
}
_result = builder.obj();
- sharding_ddl_util::resumeMigrations(opCtx, nss(), _doc.getCollUUID());
+ sharding_ddl_util::resumeMigrations(
+ opCtx, _collInfo->nsForTargeting, _doc.getCollUUID());
} else {
CollMod cmd(nss());
cmd.setCollModRequest(_request);
@@ -370,7 +324,8 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
auto* opCtx = opCtxHolder.get();
getForwardableOpMetadata().setOn(opCtx);
- sharding_ddl_util::resumeMigrations(opCtx, nss(), _doc.getCollUUID());
+ sharding_ddl_util::resumeMigrations(
+ opCtx, _collInfo->nsForTargeting, _doc.getCollUUID());
}
}
return status;
diff --git a/src/mongo/db/s/collmod_coordinator.h b/src/mongo/db/s/collmod_coordinator.h
index b85b6b16d5a..4b65502f78d 100644
--- a/src/mongo/db/s/collmod_coordinator.h
+++ b/src/mongo/db/s/collmod_coordinator.h
@@ -35,7 +35,9 @@
namespace mongo {
-class CollModCoordinator final : public ShardingDDLCoordinator {
+class CollModCoordinator final
+ : public RecoverableShardingDDLCoordinator<CollModCoordinatorDocument,
+ CollModCoordinatorPhaseEnum> {
public:
using StateDoc = CollModCoordinatorDocument;
using Phase = CollModCoordinatorPhaseEnum;
@@ -44,9 +46,7 @@ public:
void checkIfOptionsConflict(const BSONObj& doc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
/**
* Waits for the termination of the parent DDLCoordinator (so all the resources are liberated)
@@ -74,32 +74,13 @@ private:
std::vector<ShardId> shardsOwningChunks;
};
- ShardingDDLCoordinatorMetadata const& metadata() const override {
- return _doc.getShardingDDLCoordinatorMetadata();
+ StringData serializePhase(const Phase& phase) const override {
+ return CollModCoordinatorPhase_serializer(phase);
}
ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept override;
- template <typename Func>
- auto _executePhase(const Phase& newPhase, Func&& func) {
- return [=] {
- const auto& currPhase = _doc.getPhase();
-
- if (currPhase > newPhase) {
- // Do not execute this phase if we already reached a subsequent one.
- return;
- }
- if (currPhase < newPhase) {
- // Persist the new phase if this is the first time we are executing it.
- _enterPhase(newPhase);
- }
- return func();
- };
- }
-
- void _enterPhase(Phase newPhase);
-
void _performNoopRetryableWriteOnParticipants(
OperationContext* opCtx, const std::shared_ptr<executor::TaskExecutor>& executor);
@@ -107,10 +88,6 @@ private:
void _saveShardingInfoOnCoordinatorIfNecessary(OperationContext* opCtx);
- BSONObj _initialState;
- mutable Mutex _docMutex = MONGO_MAKE_LATCH("CollModCoordinator::_docMutex");
- CollModCoordinatorDocument _doc;
-
const mongo::CollModRequest _request;
boost::optional<BSONObj> _result;
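The `_executePhase` helper deleted from this header did not disappear: per the class change above, it moved into the shared `RecoverableShardingDDLCoordinator` base. Its idiom, skipping any phase the persisted state document has already moved past and persisting the transition on first entry so a crash resumes at the right step, is sketched below with toy types rather than the server's actual classes:

```cpp
#include <functional>
#include <iostream>

enum class Phase { kUnset, kBlockShards, kUpdateConfig, kDone };

struct Coordinator {
    Phase persisted = Phase::kUnset;  // stands in for the durable state document

    void executePhase(Phase newPhase, const std::function<void()>& body) {
        if (persisted > newPhase)
            return;                // already past this phase: skip it on recovery
        if (persisted < newPhase)
            persisted = newPhase;  // first entry: persist the transition
        body();
    }
};

int main() {
    Coordinator c;
    c.executePhase(Phase::kBlockShards, [] { std::cout << "runs once\n"; });
    c.persisted = Phase::kUpdateConfig;  // simulate recovery past kBlockShards
    c.executePhase(Phase::kBlockShards, [] { std::cout << "never runs\n"; });
}
```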
diff --git a/src/mongo/db/s/collmod_coordinator_pre60_compatible.cpp b/src/mongo/db/s/collmod_coordinator_pre60_compatible.cpp
deleted file mode 100644
index 37005996f3a..00000000000
--- a/src/mongo/db/s/collmod_coordinator_pre60_compatible.cpp
+++ /dev/null
@@ -1,264 +0,0 @@
-/**
- * Copyright (C) 2021-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-
-#include "mongo/db/s/collmod_coordinator_pre60_compatible.h"
-
-#include "mongo/db/catalog/collection_catalog.h"
-#include "mongo/db/catalog/database_holder.h"
-#include "mongo/db/coll_mod_gen.h"
-#include "mongo/db/db_raii.h"
-#include "mongo/db/ops/insert.h"
-#include "mongo/db/s/sharded_collmod_gen.h"
-#include "mongo/db/s/sharding_ddl_util.h"
-#include "mongo/db/s/sharding_state.h"
-#include "mongo/db/timeseries/catalog_helper.h"
-#include "mongo/db/timeseries/timeseries_collmod.h"
-#include "mongo/idl/idl_parser.h"
-#include "mongo/logv2/log.h"
-#include "mongo/s/async_requests_sender.h"
-#include "mongo/s/cluster_commands_helpers.h"
-#include "mongo/s/grid.h"
-
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
-
-namespace mongo {
-
-namespace {
-
-bool isShardedColl(OperationContext* opCtx, const NamespaceString& nss) {
- try {
- auto coll = Grid::get(opCtx)->catalogClient()->getCollection(opCtx, nss);
- return true;
- } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
- // The collection is not sharded or doesn't exist.
- return false;
- }
-}
-
-bool hasTimeSeriesGranularityUpdate(const CollModRequest& request) {
- return request.getTimeseries() && request.getTimeseries()->getGranularity();
-}
-
-} // namespace
-
-CollModCoordinatorPre60Compatible::CollModCoordinatorPre60Compatible(
- ShardingDDLCoordinatorService* service, const BSONObj& initialState)
- : ShardingDDLCoordinator(service, initialState) {
- _initialState = initialState.getOwned();
- _doc = CollModCoordinatorDocument::parse(IDLParserErrorContext("CollModCoordinatorDocument"),
- _initialState);
-}
-
-void CollModCoordinatorPre60Compatible::checkIfOptionsConflict(const BSONObj& doc) const {
- const auto otherDoc =
- CollModCoordinatorDocument::parse(IDLParserErrorContext("CollModCoordinatorDocument"), doc);
-
- const auto& selfReq = _doc.getCollModRequest().toBSON();
- const auto& otherReq = otherDoc.getCollModRequest().toBSON();
-
- uassert(ErrorCodes::ConflictingOperationInProgress,
- str::stream() << "Another collMod for namespace " << nss()
- << " is being executed with different parameters: " << selfReq,
- SimpleBSONObjComparator::kInstance.evaluate(selfReq == otherReq));
-}
-
-boost::optional<BSONObj> CollModCoordinatorPre60Compatible::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
-
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
-
- const auto currPhase = [&]() {
- stdx::lock_guard l{_docMutex};
- return _doc.getPhase();
- }();
-
- cmdBob.appendElements(_doc.getCollModRequest().toBSON());
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "CollModCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
- return bob.obj();
-}
-
-void CollModCoordinatorPre60Compatible::_enterPhase(Phase newPhase) {
- StateDoc newDoc(_doc);
- newDoc.setPhase(newPhase);
-
- LOGV2_DEBUG(6482601,
- 2,
- "CollMod coordinator phase transition",
- "namespace"_attr = nss(),
- "newPhase"_attr = CollModCoordinatorPhase_serializer(newDoc.getPhase()),
- "oldPhase"_attr = CollModCoordinatorPhase_serializer(_doc.getPhase()));
-
- if (_doc.getPhase() == Phase::kUnset) {
- newDoc = _insertStateDocument(std::move(newDoc));
- } else {
- newDoc = _updateStateDocument(cc().makeOperationContext().get(), std::move(newDoc));
- }
-
- {
- stdx::unique_lock ul{_docMutex};
- _doc = std::move(newDoc);
- }
-}
-
-void CollModCoordinatorPre60Compatible::_performNoopRetryableWriteOnParticipants(
- OperationContext* opCtx, const std::shared_ptr<executor::TaskExecutor>& executor) {
- auto shardsAndConfigsvr = [&] {
- const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
- auto participants = shardRegistry->getAllShardIds(opCtx);
- participants.emplace_back(shardRegistry->getConfigShard()->getId());
- return participants;
- }();
-
- _doc = _updateSession(opCtx, _doc);
- sharding_ddl_util::performNoopRetryableWriteOnShards(
- opCtx, shardsAndConfigsvr, getCurrentSession(_doc), executor);
-}
-
-ExecutorFuture<void> CollModCoordinatorPre60Compatible::_runImpl(
- std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancellationToken& token) noexcept {
- return ExecutorFuture<void>(**executor)
- .then(_executePhase(
- Phase::kUpdateShards,
- [this, executor = executor, anchor = shared_from_this()] {
- auto opCtxHolder = cc().makeOperationContext();
- auto* opCtx = opCtxHolder.get();
- getForwardableOpMetadata().setOn(opCtx);
-
- const auto isTimeSeries = timeseries::getTimeseriesOptions(
- opCtx, nss(), !nss().isTimeseriesBucketsCollection());
- const auto collNss = isTimeSeries && !nss().isTimeseriesBucketsCollection()
- ? nss().makeTimeseriesBucketsNamespace()
- : nss();
- const auto isSharded = isShardedColl(opCtx, collNss);
-
- if (isSharded) {
- // Updating granularity on sharded time-series collections is not allowed.
- if (isTimeSeries) {
- uassert(
- ErrorCodes::NotImplemented,
- str::stream()
- << "Cannot update granularity of a sharded time-series collection.",
- !hasTimeSeriesGranularityUpdate(_doc.getCollModRequest()));
- }
- _doc.setCollUUID(
- sharding_ddl_util::getCollectionUUID(opCtx, nss(), true /* allowViews */));
-
- sharding_ddl_util::stopMigrations(opCtx, nss(), _doc.getCollUUID());
-
- if (!_firstExecution) {
- _performNoopRetryableWriteOnParticipants(opCtx, **executor);
- }
-
- _doc = _updateSession(opCtx, _doc);
- const OperationSessionInfo osi = getCurrentSession(_doc);
-
- const auto chunkManager = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(
- opCtx, collNss));
- std::unique_ptr<CollatorInterface> collator;
- const auto expCtx =
- make_intrusive<ExpressionContext>(opCtx, std::move(collator), collNss);
- std::set<ShardId> participants;
- chunkManager.getShardIdsForQuery(
- expCtx, {} /* query */, {} /* collation */, &participants);
-
- ShardsvrCollModParticipant request(nss(), _doc.getCollModRequest());
- const auto cmdObj =
- CommandHelpers::appendMajorityWriteConcern(request.toBSON({}));
- const auto& responses = sharding_ddl_util::sendAuthenticatedCommandToShards(
- opCtx,
- nss().db(),
- cmdObj.addFields(osi.toBSON()),
- {std::make_move_iterator(participants.begin()),
- std::make_move_iterator(participants.end())},
- **executor);
- BSONObjBuilder builder;
- std::string errmsg;
- auto ok = appendRawResponses(opCtx, &errmsg, &builder, responses).responseOK;
- if (!errmsg.empty()) {
- CommandHelpers::appendSimpleCommandStatus(builder, ok, errmsg);
- }
- _result = builder.obj();
- sharding_ddl_util::resumeMigrations(opCtx, nss(), _doc.getCollUUID());
- } else {
- CollMod cmd(nss());
- cmd.setCollModRequest(_doc.getCollModRequest());
- BSONObjBuilder collModResBuilder;
- uassertStatusOK(timeseries::processCollModCommandWithTimeSeriesTranslation(
- opCtx, nss(), cmd, true, &collModResBuilder));
- auto collModRes = collModResBuilder.obj();
-
- const auto dbInfo = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nss().db()));
- const auto shard = uassertStatusOK(
- Grid::get(opCtx)->shardRegistry()->getShard(opCtx, dbInfo->getPrimary()));
- BSONObjBuilder builder;
- builder.appendElements(collModRes);
- BSONObjBuilder subBuilder(builder.subobjStart("raw"));
- subBuilder.append(shard->getConnString().toString(), collModRes);
- subBuilder.doneFast();
- _result = builder.obj();
- }
- }))
- .onError([this, anchor = shared_from_this()](const Status& status) {
- if (!status.isA<ErrorCategory::NotPrimaryError>() &&
- !status.isA<ErrorCategory::ShutdownError>()) {
- LOGV2_ERROR(6482602,
- "Error running collMod",
- "namespace"_attr = nss(),
- "error"_attr = redact(status));
- // If we have the collection UUID set, this error happened in a sharded collection,
- // we should restore the migrations.
- if (_doc.getCollUUID()) {
- auto opCtxHolder = cc().makeOperationContext();
- auto* opCtx = opCtxHolder.get();
- getForwardableOpMetadata().setOn(opCtx);
-
- sharding_ddl_util::resumeMigrations(opCtx, nss(), _doc.getCollUUID());
- }
- }
- return status;
- });
-}
-
-} // namespace mongo
diff --git a/src/mongo/db/s/collmod_coordinator_pre60_compatible.h b/src/mongo/db/s/collmod_coordinator_pre60_compatible.h
deleted file mode 100644
index a8de0c67f53..00000000000
--- a/src/mongo/db/s/collmod_coordinator_pre60_compatible.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Copyright (C) 2021-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include "mongo/db/s/collmod_coordinator_document_gen.h"
-#include "mongo/db/s/sharding_ddl_coordinator.h"
-#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
-#include "mongo/stdx/mutex.h"
-
-namespace mongo {
-
-class CollModCoordinatorPre60Compatible final : public ShardingDDLCoordinator {
-public:
- using StateDoc = CollModCoordinatorDocument;
- using Phase = CollModCoordinatorPhaseEnum;
-
- CollModCoordinatorPre60Compatible(ShardingDDLCoordinatorService* service,
- const BSONObj& initialState);
-
- void checkIfOptionsConflict(const BSONObj& doc) const override;
-
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
-
- /**
- * Waits for the termination of the parent DDLCoordinator (so all the resources are liberated)
- * and then return the result.
- */
- BSONObj getResult(OperationContext* opCtx) {
- getCompletionFuture().get(opCtx);
- invariant(_result.is_initialized());
- return *_result;
- }
-
-private:
- ShardingDDLCoordinatorMetadata const& metadata() const override {
- stdx::lock_guard l{_docMutex};
- return _doc.getShardingDDLCoordinatorMetadata();
- }
-
- ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
- const CancellationToken& token) noexcept override;
-
- template <typename Func>
- auto _executePhase(const Phase& newPhase, Func&& func) {
- return [=] {
- const auto& currPhase = _doc.getPhase();
-
- if (currPhase > newPhase) {
- // Do not execute this phase if we already reached a subsequent one.
- return;
- }
- if (currPhase < newPhase) {
- // Persist the new phase if this is the first time we are executing it.
- _enterPhase(newPhase);
- }
- return func();
- };
- }
-
- void _enterPhase(Phase newPhase);
-
- void _performNoopRetryableWriteOnParticipants(
- OperationContext* opCtx, const std::shared_ptr<executor::TaskExecutor>& executor);
-
- BSONObj _initialState;
- mutable Mutex _docMutex = MONGO_MAKE_LATCH("CollModCoordinatorPre60Compatible::_docMutex");
- CollModCoordinatorDocument _doc;
-
- boost::optional<BSONObj> _result;
-};
-
-} // namespace mongo
diff --git a/src/mongo/db/s/commit_chunk_migration.idl b/src/mongo/db/s/commit_chunk_migration.idl
new file mode 100644
index 00000000000..6484623cd5c
--- /dev/null
+++ b/src/mongo/db/s/commit_chunk_migration.idl
@@ -0,0 +1,85 @@
+
+# Copyright (C) 2019-present MongoDB, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the Server Side Public License, version 1,
+# as published by MongoDB, Inc.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Server Side Public License for more details.
+#
+# You should have received a copy of the Server Side Public License
+# along with this program. If not, see
+# <http://www.mongodb.com/licensing/server-side-public-license>.
+#
+# As a special exception, the copyright holders give permission to link the
+# code of portions of this program with the OpenSSL library under certain
+# conditions as described in each individual source file and distribute
+# linked combinations including the program with the OpenSSL library. You
+# must comply with the Server Side Public License in all respects for
+# all of the code used other than as permitted herein. If you modify file(s)
+# with this exception, you may extend this exception to your version of the
+# file(s), but you are not obligated to do so. If you do not wish to do so,
+# delete this exception statement from your version. If you delete this
+# exception statement from all source files in the program, then also delete
+# it in the license file.
+#
+
+
+global:
+ cpp_namespace: "mongo"
+
+imports:
+ - "mongo/idl/basic_types.idl"
+ - "mongo/s/sharding_types.idl"
+ - "mongo/s/chunk_version.idl"
+
+structs:
+ ConfigSvrCommitChunkMigrationResponse:
+ description: "Response of the _configsvrCommitChunkMigration command."
+ strict: false
+ fields:
+ shardVersion:
+ type: ChunkVersion
+ description: "Collection version at the end of the migration."
+
+ MigratedChunkType:
+ description: "ChunkType describing a migrated chunk"
+ strict: false
+ fields:
+ lastmod: ChunkVersion
+ min: object
+ max: object
+
+commands:
+ _configsvrCommitChunkMigration:
+ command_name: _configsvrCommitChunkMigration
+ cpp_name: CommitChunkMigrationRequest
+ description: "internal _configsvrCommitChunkMigration command for config server"
+ namespace: type
+ api_version: ""
+ type: namespacestring
+ strict: false
+ reply_type: ConfigSvrCommitChunkMigrationResponse
+ fields:
+ fromShard:
+ type: shard_id
+ description: "from shard name"
+
+ toShard:
+ type: shard_id
+ description: "to shard name"
+
+ migratedChunk:
+ type: MigratedChunkType
+ description: "ChunkType describing a migrated chunk"
+
+ fromShardCollectionVersion:
+ type: ChunkVersion
+ description: "{ shardVersionField: <version> }"
+
+ validAfter:
+ type: timestamp
+ description: "The time after which this chunk is at the new shard" \ No newline at end of file
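The IDL spec above replaces the handwritten request parser removed from the command file below; the IDL compiler emits typed C++ structs whose getters mirror the field names. A small sketch of consuming them, modeled on the command's own toChunkType() helper (the conversion function here is an illustration, not generated code):

    #include "mongo/db/s/commit_chunk_migration_gen.h"  // IDL-generated header
    #include "mongo/s/catalog/type_chunk.h"

    namespace mongo {
    // Mirrors toChunkType() in the command below: the IDL struct carries only
    // the fields the config server needs to rewrite chunk metadata.
    ChunkType asChunkType(const MigratedChunkType& migrated) {
        ChunkType chunk;
        chunk.setMin(migrated.getMin());          // "min: object" in the IDL
        chunk.setMax(migrated.getMax());          // "max: object"
        chunk.setVersion(migrated.getLastmod());  // "lastmod: ChunkVersion"
        return chunk;
    }
    }  // namespace mongo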
diff --git a/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp b/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp
index 04048f7946b..69c67d89dcb 100644
--- a/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp
+++ b/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp
@@ -187,94 +187,35 @@ void doDropOperation(const CompactStructuredEncryptionDataState& state) {
boost::optional<BSONObj> CompactStructuredEncryptionDataCoordinator::reportForCurrentOp(
MongoProcessInterface::CurrentOpConnectionsMode connMode,
MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder bob;
-
- CompactStructuredEncryptionDataPhaseEnum currPhase;
- std::string nss;
- std::string escNss;
- std::string eccNss;
- std::string ecoNss;
- std::string ecocNss;
- std::string ecocRenameUuid;
- std::string ecocUiid;
- std::string ecocRenameNss;
- {
- stdx::lock_guard l{_docMutex};
- currPhase = _doc.getPhase();
- nss = _doc.getId().getNss().ns();
- escNss = _doc.getEscNss().ns();
- eccNss = _doc.getEccNss().ns();
- ecoNss = _doc.getEcocNss().ns();
- ecocNss = _doc.getEcocNss().ns();
- ecocRenameUuid =
- _doc.getEcocRenameUuid() ? _doc.getEcocRenameUuid().value().toString() : "none";
- ecocUiid = _doc.getEcocUuid() ? _doc.getEcocUuid().value().toString() : "none";
- ecocRenameNss = _doc.getEcocRenameNss().ns();
- }
-
- bob.append("type", "op");
- bob.append("desc", "CompactStructuredEncryptionDataCoordinator");
- bob.append("op", "command");
- bob.append("nss", nss);
- bob.append("escNss", escNss);
- bob.append("eccNss", eccNss);
- bob.append("ecocNss", ecocNss);
- bob.append("ecocUuid", ecocUiid);
- bob.append("ecocRenameNss", ecocRenameNss);
- bob.append("ecocRenameUuid", ecocRenameUuid);
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
+ auto bob = basicReportBuilder();
+
+ stdx::lock_guard lg{_docMutex};
+ bob.append("escNss", _doc.getEscNss().ns());
+ bob.append("eccNss", _doc.getEccNss().ns());
+ bob.append("ecocNss", _doc.getEcocNss().ns());
+ bob.append("ecocUuid", _doc.getEcocUuid() ? _doc.getEcocUuid().value().toString() : "none");
+ bob.append("ecocRenameNss", _doc.getEcocRenameNss().ns());
+ bob.append("ecocRenameUuid",
+ _doc.getEcocRenameUuid() ? _doc.getEcocRenameUuid().value().toString() : "none");
return bob.obj();
}
-void CompactStructuredEncryptionDataCoordinator::_enterPhase(Phase newPhase) {
- StateDoc doc(_doc);
- doc.setPhase(newPhase);
-
- LOGV2_DEBUG(6350490,
- 2,
- "Transitioning phase for CompactStructuredEncryptionDataCoordinator",
- "nss"_attr = _doc.getId().getNss().ns(),
- "escNss"_attr = _doc.getEscNss().ns(),
- "eccNss"_attr = _doc.getEccNss().ns(),
- "ecocNss"_attr = _doc.getEcocNss().ns(),
- "ecocUuid"_attr = _doc.getEcocUuid(),
- "ecocRenameNss"_attr = _doc.getEcocRenameNss().ns(),
- "ecocRenameUuid"_attr = _doc.getEcocRenameUuid(),
- "skipCompact"_attr = _doc.getSkipCompact(),
- "compactionTokens"_attr = _doc.getCompactionTokens(),
- "oldPhase"_attr = CompactStructuredEncryptionDataPhase_serializer(_doc.getPhase()),
- "newPhase"_attr = CompactStructuredEncryptionDataPhase_serializer(newPhase));
-
- if (_doc.getPhase() == Phase::kUnset) {
- doc = _insertStateDocument(std::move(doc));
- } else {
- auto opCtx = cc().makeOperationContext();
- doc = _updateStateDocument(opCtx.get(), std::move(doc));
- }
-
- {
- stdx::unique_lock ul{_docMutex};
- _doc = std::move(doc);
- }
-}
-
ExecutorFuture<void> CompactStructuredEncryptionDataCoordinator::_runImpl(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept {
return ExecutorFuture<void>(**executor)
.then(_executePhase(Phase::kRenameEcocForCompact,
- [this, anchor = shared_from_this()](const auto& state) {
- doRenameOperation(state, &_skipCompact, &_ecocRenameUuid);
+ [this, anchor = shared_from_this()]() {
+ doRenameOperation(_doc, &_skipCompact, &_ecocRenameUuid);
stdx::unique_lock ul{_docMutex};
_doc.setSkipCompact(_skipCompact);
_doc.setEcocRenameUuid(_ecocRenameUuid);
}))
- .then(_executePhase(Phase::kCompactStructuredEncryptionData,
- [this, anchor = shared_from_this()](const auto& state) {
- _response = doCompactOperation(state);
- }))
- .then(_executePhase(Phase::kDropTempCollection, doDropOperation));
+ .then(_executePhase(
+ Phase::kCompactStructuredEncryptionData,
+ [this, anchor = shared_from_this()]() { _response = doCompactOperation(_doc); }))
+ .then(_executePhase(Phase::kDropTempCollection,
+ [this, anchor = shared_from_this()] { doDropOperation(_doc); }));
}
} // namespace mongo
diff --git a/src/mongo/db/s/compact_structured_encryption_data_coordinator.h b/src/mongo/db/s/compact_structured_encryption_data_coordinator.h
index 4b8ffd33441..b030e19910a 100644
--- a/src/mongo/db/s/compact_structured_encryption_data_coordinator.h
+++ b/src/mongo/db/s/compact_structured_encryption_data_coordinator.h
@@ -40,7 +40,9 @@
namespace mongo {
-class CompactStructuredEncryptionDataCoordinator final : public ShardingDDLCoordinator {
+class CompactStructuredEncryptionDataCoordinator final
+ : public RecoverableShardingDDLCoordinator<CompactStructuredEncryptionDataState,
+ CompactStructuredEncryptionDataPhaseEnum> {
public:
static constexpr auto kStateContext = "CompactStructuredEncryptionDataState"_sd;
using StateDoc = CompactStructuredEncryptionDataState;
@@ -48,7 +50,8 @@ public:
CompactStructuredEncryptionDataCoordinator(ShardingDDLCoordinatorService* service,
const BSONObj& doc)
- : ShardingDDLCoordinator(service, doc), _doc(StateDoc::parse({kStateContext}, doc)) {}
+ : RecoverableShardingDDLCoordinator(
+ service, "CompactStructuredEncryptionDataCoordinator", doc) {}
boost::optional<BSONObj> reportForCurrentOp(
MongoProcessInterface::CurrentOpConnectionsMode connMode,
@@ -63,36 +66,14 @@ public:
void checkIfOptionsConflict(const BSONObj& doc) const final {}
private:
- void _enterPhase(Phase newPhase);
-
- template <typename Func>
- auto _executePhase(const Phase& newPhase, Func&& func) {
- return [=] {
- const auto& currPhase = _doc.getPhase();
- if (currPhase > newPhase) {
- return;
- }
- if (currPhase < newPhase) {
- _enterPhase(newPhase);
- }
-
- return func(_doc);
- };
- }
-
-private:
- ShardingDDLCoordinatorMetadata const& metadata() const final {
- return _doc.getShardingDDLCoordinatorMetadata();
+ StringData serializePhase(const Phase& phase) const override {
+ return CompactStructuredEncryptionDataPhase_serializer(phase);
}
ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept final;
private:
- mutable Mutex _docMutex =
- MONGO_MAKE_LATCH("CompactStructuredEncryptionDataCoordinator::_docMutex");
- StateDoc _doc;
-
boost::optional<CompactStructuredEncryptionDataCommandReply> _response;
bool _skipCompact{false};
boost::optional<UUID> _ecocRenameUuid;
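Both coordinators in this change move their state-document plumbing (_doc, _docMutex, _enterPhase, _executePhase) into the RecoverableShardingDDLCoordinator base, templated on the state document and phase enum; subclasses keep only the phase serializer, _runImpl, and any extra currentOp fields on top of basicReportBuilder(). A simplified sketch of the remaining subclass shape (names illustrative, service plumbing elided):

    class MyCoordinator final
        : public RecoverableShardingDDLCoordinator<MyStateDoc, MyPhaseEnum> {
    public:
        MyCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& doc)
            : RecoverableShardingDDLCoordinator(service, "MyCoordinator", doc) {}

    private:
        StringData serializePhase(const Phase& phase) const override {
            return MyPhase_serializer(phase);  // IDL-generated phase serializer
        }
        ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
                                      const CancellationToken& token) noexcept override;
        // _doc, _docMutex, _enterPhase and _executePhase are inherited; currentOp
        // reporting extends the base's basicReportBuilder() as shown above.
    };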
diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp
index 198371cf17a..d697528a86d 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.cpp
+++ b/src/mongo/db/s/config/config_server_test_fixture.cpp
@@ -70,7 +70,6 @@
#include "mongo/s/config_server_catalog_cache_loader.h"
#include "mongo/s/database_version.h"
#include "mongo/s/query/cluster_cursor_manager.h"
-#include "mongo/s/request_types/set_shard_version_request.h"
#include "mongo/s/shard_id.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/util/clock_source_mock.h"
@@ -452,30 +451,6 @@ std::vector<KeysCollectionDocument> ConfigServerTestFixture::getKeys(OperationCo
return keys;
}
-void ConfigServerTestFixture::expectSetShardVersion(
- const HostAndPort& expectedHost,
- const ShardType& expectedShard,
- const NamespaceString& expectedNs,
- boost::optional<ChunkVersion> expectedChunkVersion) {
- onCommand([&](const RemoteCommandRequest& request) {
- ASSERT_EQ(expectedHost, request.target);
- ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(),
- rpc::TrackingMetadata::removeTrackingData(request.metadata));
-
- SetShardVersionRequest ssv =
- assertGet(SetShardVersionRequest::parseFromBSON(request.cmdObj));
-
- ASSERT(ssv.isAuthoritative());
- ASSERT_EQ(expectedNs.toString(), ssv.getNS().ns());
-
- if (expectedChunkVersion) {
- ASSERT_EQ(*expectedChunkVersion, ssv.getNSVersion());
- }
-
- return BSON("ok" << true);
- });
-}
-
void ConfigServerTestFixture::setupOpObservers() {
auto opObserverRegistry =
checked_cast<OpObserverRegistry*>(getServiceContext()->getOpObserver());
diff --git a/src/mongo/db/s/config/config_server_test_fixture.h b/src/mongo/db/s/config/config_server_test_fixture.h
index 05ed2b55a67..bd2a41b41a0 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.h
+++ b/src/mongo/db/s/config/config_server_test_fixture.h
@@ -166,17 +166,6 @@ protected:
StatusWith<std::vector<BSONObj>> getIndexes(OperationContext* opCtx, const NamespaceString& ns);
/**
- * Expects a setShardVersion command to be executed on the specified shard.
- *
- * The expectedChunkVersion is optional, because in some cases it may not be possible to know
- * the OID of a ChunkVersion generated by some internal code. (See SERVER-29451).
- */
- void expectSetShardVersion(const HostAndPort& expectedHost,
- const ShardType& expectedShard,
- const NamespaceString& expectedNs,
- boost::optional<ChunkVersion> expectedChunkVersion);
-
- /**
* Returns the stored raw pointer to the addShard TaskExecutor's NetworkInterface.
*/
executor::NetworkInterfaceMock* networkForAddShard() const;
diff --git a/src/mongo/db/s/config/configsvr_collmod_command.cpp b/src/mongo/db/s/config/configsvr_collmod_command.cpp
index e4bda1b9995..6d224756002 100644
--- a/src/mongo/db/s/config/configsvr_collmod_command.cpp
+++ b/src/mongo/db/s/config/configsvr_collmod_command.cpp
@@ -66,6 +66,10 @@ public:
return Command::AllowedOnSecondary::kNever;
}
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
+
class Invocation final : public InvocationBase {
public:
using InvocationBase::InvocationBase;
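Several internal config-server commands in this change (here, and the removeChunks, removeTags, and renameCollectionMetadata commands below) begin advertising retryable-write support so that DDL coordinators can resend them under a session without re-executing their effects. The caller-side idiom, as it appears in the collMod coordinator earlier in this diff:

    // Attach the coordinator's session/txnNumber so the participant command is
    // retryable, then send it with majority write concern.
    _doc = _updateSession(opCtx, _doc);
    const OperationSessionInfo osi = getCurrentSession(_doc);
    const auto cmdObj = CommandHelpers::appendMajorityWriteConcern(request.toBSON({}));
    sharding_ddl_util::sendAuthenticatedCommandToShards(
        opCtx, nss().db(), cmdObj.addFields(osi.toBSON()), participants, **executor);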
diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
index 9dcff9c96d0..a50f499662f 100644
--- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
@@ -31,12 +31,14 @@
#include "mongo/platform/basic.h"
#include "mongo/base/status_with.h"
+#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/s/chunk_move_write_concern_options.h"
+#include "mongo/db/s/commit_chunk_migration_gen.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -44,7 +46,6 @@
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
-#include "mongo/s/request_types/commit_chunk_migration_request_type.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
@@ -79,9 +80,23 @@ namespace {
* }
*
*/
-class ConfigSvrCommitChunkMigrationCommand : public BasicCommand {
+
+
+ChunkType toChunkType(const MigratedChunkType& migratedChunk) {
+
+ ChunkType chunk;
+ chunk.setMin(migratedChunk.getMin());
+ chunk.setMax(migratedChunk.getMax());
+ chunk.setVersion(migratedChunk.getLastmod());
+ return chunk;
+}
+
+
+class ConfigSvrCommitChunkMigrationCommand
+ : public TypedCommand<ConfigSvrCommitChunkMigrationCommand> {
public:
- ConfigSvrCommitChunkMigrationCommand() : BasicCommand("_configsvrCommitChunkMigration") {}
+ using Request = CommitChunkMigrationRequest;
+ using Response = ConfigSvrCommitChunkMigrationResponse;
bool skipApiVersionCheck() const override {
// Internal command (server to server).
@@ -100,51 +115,57 @@ public:
return true;
}
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
- return true;
- }
+ class Invocation : public InvocationBase {
+ public:
+ using InvocationBase::InvocationBase;
+
+ ConfigSvrCommitChunkMigrationResponse typedRun(OperationContext* opCtx) {
+
+ uassert(ErrorCodes::IllegalOperation,
+ "_configsvrClearJumboFlag can only be run on config servers",
+ serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+
+ // Set the operation context read concern level to local for reads into the config
+ // database.
+ repl::ReadConcernArgs::get(opCtx) =
+ repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
+
+ const NamespaceString nss = ns();
+ auto migratedChunk = toChunkType(request().getMigratedChunk());
- Status checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) const override {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::internal)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ StatusWith<BSONObj> chunkVersionResponse =
+ ShardingCatalogManager::get(opCtx)->commitChunkMigration(
+ opCtx,
+ nss,
+ migratedChunk,
+ request().getFromShardCollectionVersion().epoch(),
+ request().getFromShardCollectionVersion().getTimestamp(),
+ request().getFromShard(),
+ request().getToShard(),
+ request().getValidAfter());
+
+ auto chunkVersionObj = uassertStatusOK(chunkVersionResponse);
+
+ return Response{ChunkVersion::parse(chunkVersionObj[ChunkVersion::kShardVersionField])};
}
- return Status::OK();
- }
- std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return CommandHelpers::parseNsFullyQualified(cmdObj);
- }
+ private:
+ bool supportsWriteConcern() const override {
+ return true;
+ }
- bool run(OperationContext* opCtx,
- const std::string& dbName,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) override {
-
- // Set the operation context read concern level to local for reads into the config database.
- repl::ReadConcernArgs::get(opCtx) =
- repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
-
- const NamespaceString nss = NamespaceString(parseNs(dbName, cmdObj));
-
- auto commitRequest =
- uassertStatusOK(CommitChunkMigrationRequest::createFromCommand(nss, cmdObj));
-
- StatusWith<BSONObj> response = ShardingCatalogManager::get(opCtx)->commitChunkMigration(
- opCtx,
- nss,
- commitRequest.getMigratedChunk(),
- commitRequest.getCollectionEpoch(),
- commitRequest.getCollectionTimestamp(),
- commitRequest.getFromShard(),
- commitRequest.getToShard(),
- commitRequest.getValidAfter());
- uassertStatusOK(response.getStatus());
- result.appendElements(response.getValue());
- return true;
- }
+ NamespaceString ns() const override {
+ return request().getCommandParameter();
+ }
+
+ void doCheckAuthorization(OperationContext* opCtx) const override {
+ uassert(ErrorCodes::Unauthorized,
+ "Unauthorized",
+ AuthorizationSession::get(opCtx->getClient())
+ ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::internal));
+ }
+ };
} configsvrCommitChunkMigrationCommand;
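The command above is converted from BasicCommand to TypedCommand over the new IDL request: hand-written parsing (parseNs, createFromCommand), the auth check, and BSON reply assembly move into generated code plus small Invocation overrides. A minimal skeleton of the pattern, with placeholder request/response names:

    class MyInternalCommand final : public TypedCommand<MyInternalCommand> {
    public:
        using Request = MyRequest;    // IDL-generated request type
        using Response = MyResponse;  // IDL-generated reply type

        class Invocation final : public InvocationBase {
        public:
            using InvocationBase::InvocationBase;
            Response typedRun(OperationContext* opCtx);  // the command body

        private:
            bool supportsWriteConcern() const override { return true; }
            NamespaceString ns() const override { return request().getCommandParameter(); }
            void doCheckAuthorization(OperationContext* opCtx) const override;
        };
    };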
diff --git a/src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp b/src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp
index 136af191f6d..8769cbe9b53 100644
--- a/src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp
+++ b/src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp
@@ -66,11 +66,6 @@ public:
str::stream() << Request::kCommandName << " can only be run on config servers",
serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
- uassert(8423309,
- str::stream() << Request::kCommandName << " command not supported",
- mongo::feature_flags::gPerCollBalancingSettings.isEnabled(
- serverGlobalParams.featureCompatibility));
-
const NamespaceString& nss = ns();
uassert(ErrorCodes::InvalidNamespace,
diff --git a/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp b/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp
index ea2823dcdf0..db155fa6bea 100644
--- a/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp
+++ b/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp
@@ -96,8 +96,8 @@ public:
request().getChunkRange(),
request().getShard(),
request().getValidAfter()));
- return ConfigSvrMergeResponse{ChunkVersion::fromBSONPositionalOrNewerFormat(
- shardAndCollVers[ChunkVersion::kShardVersionField])};
+ return ConfigSvrMergeResponse{
+ ChunkVersion::parse(shardAndCollVers[ChunkVersion::kShardVersionField])};
}
private:
diff --git a/src/mongo/db/s/config/configsvr_move_chunk_command.cpp b/src/mongo/db/s/config/configsvr_move_chunk_command.cpp
index 41d1679b4c9..cfa02c94711 100644
--- a/src/mongo/db/s/config/configsvr_move_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_chunk_command.cpp
@@ -96,20 +96,10 @@ public:
repl::ReadConcernArgs::get(opCtx) =
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
- auto request = uassertStatusOK(
- BalanceChunkRequest::parseFromConfigCommand(cmdObj, false /* requireUUID */));
+ auto request = uassertStatusOK(BalanceChunkRequest::parseFromConfigCommand(cmdObj));
const auto& nss = request.getNss();
- // In case of mixed binaries including v5.0, the collection UUID field may not be attached
- // to the chunk.
- if (!request.getChunk().hasCollectionUUID_UNSAFE()) {
- // TODO (SERVER-60792): Remove the following logic after v6.0 branches out.
- const auto& collection = Grid::get(opCtx)->catalogClient()->getCollection(
- opCtx, nss, repl::ReadConcernLevel::kLocalReadConcern);
- request.setCollectionUUID(collection.getUuid()); // Set collection UUID on chunk member
- }
-
if (request.hasToShardId()) {
uassertStatusOK(Balancer::get(opCtx)->moveSingleChunk(opCtx,
nss,
diff --git a/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp b/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp
index c6ceb8a4ca2..da6ec5ed2b9 100644
--- a/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp
@@ -149,6 +149,10 @@ public:
AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
return AllowedOnSecondary::kNever;
}
+
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
} configsvrRemoveChunksCmd;
} // namespace
diff --git a/src/mongo/db/s/config/configsvr_remove_tags_command.cpp b/src/mongo/db/s/config/configsvr_remove_tags_command.cpp
index f880d9be4bf..7333b0036dc 100644
--- a/src/mongo/db/s/config/configsvr_remove_tags_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_tags_command.cpp
@@ -144,6 +144,10 @@ public:
AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
return AllowedOnSecondary::kNever;
}
+
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
} configsvrRemoveTagsCmd;
} // namespace
diff --git a/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp b/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp
index 438a7d3227a..df59c5135ea 100644
--- a/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp
+++ b/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp
@@ -68,6 +68,10 @@ public:
return AllowedOnSecondary::kNever;
}
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
+
class Invocation final : public InvocationBase {
public:
using InvocationBase::InvocationBase;
diff --git a/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp b/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
index 7f284e2c642..1a094c7db5f 100644
--- a/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
+++ b/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp
@@ -63,8 +63,9 @@ getExistingInstanceToJoin(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& newShardKey) {
auto instances =
- getReshardingStateMachines<ReshardingCoordinatorService,
- ReshardingCoordinatorService::ReshardingCoordinator>(opCtx, nss);
+ resharding::getReshardingStateMachines<ReshardingCoordinatorService,
+ ReshardingCoordinatorService::ReshardingCoordinator>(
+ opCtx, nss);
for (const auto& instance : instances) {
if (SimpleBSONObjComparator::kInstance.evaluate(
instance->getMetadata().getReshardingKey().toBSON() == newShardKey)) {
@@ -139,7 +140,7 @@ public:
"Must specify only one of _presetReshardedChunks or numInitialChunks",
!(bool(request().getNumInitialChunks())));
- validateReshardedChunks(
+ resharding::validateReshardedChunks(
*presetChunks, opCtx, ShardKeyPattern(request().getKey()).getKeyPattern());
}
@@ -183,11 +184,12 @@ public:
return boost::none;
}
- auto tempReshardingNss = constructTemporaryReshardingNss(nss.db(), cm.getUUID());
+ auto tempReshardingNss =
+ resharding::constructTemporaryReshardingNss(nss.db(), cm.getUUID());
if (auto zones = request().getZones()) {
- checkForOverlappingZones(*zones);
+ resharding::checkForOverlappingZones(*zones);
}
auto coordinatorDoc =
diff --git a/src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp b/src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp
index 31a20120586..3b2a6c883df 100644
--- a/src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp
+++ b/src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp
@@ -62,12 +62,6 @@ public:
serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
const auto coordinatorCompletionFuture = [&]() -> SharedSemiFuture<void> {
- FixedFCVRegion fcvRegion(opCtx);
- uassert(ErrorCodes::IllegalOperation,
- "featureFlagClusterWideConfig not enabled",
- gFeatureFlagClusterWideConfig.isEnabled(
- serverGlobalParams.featureCompatibility));
-
// Validate parameter before creating coordinator.
{
BSONObj cmdParamObj = request().getCommandParameter();
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 1be2dd486fb..0b2ab1b0474 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/config/initial_split_policy.h"
#include "mongo/client/read_preference.h"
@@ -50,7 +47,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
@@ -73,17 +69,11 @@ void appendChunk(const SplitPolicyParams& params,
const BSONObj& min,
const BSONObj& max,
ChunkVersion* version,
- const Timestamp& creationTimestamp,
const ShardId& shardId,
std::vector<ChunkType>* chunks) {
- chunks->emplace_back(
- params.collectionUUID,
- ChunkRange(min, max),
- ChunkVersion(
- version->majorVersion(), version->minorVersion(), version->epoch(), creationTimestamp),
- shardId);
+ chunks->emplace_back(params.collectionUUID, ChunkRange(min, max), *version, shardId);
auto& chunk = chunks->back();
- chunk.setHistory({ChunkHistory(creationTimestamp, shardId)});
+ chunk.setHistory({ChunkHistory(version->getTimestamp(), shardId)});
version->incMinor();
}
@@ -238,7 +228,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::generateShardColle
finalSplitPoints.push_back(splitPoint);
}
- ChunkVersion version(1, 0, OID::gen(), validAfter);
+ ChunkVersion version({OID::gen(), validAfter}, {1, 0});
const auto& keyPattern(shardKeyPattern.getKeyPattern());
std::vector<ChunkType> chunks;
@@ -254,7 +244,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::generateShardColle
? params.primaryShardId
: allShardIds[(i / numContiguousChunksPerShard) % allShardIds.size()];
- appendChunk(params, min, max, &version, validAfter, shardId, &chunks);
+ appendChunk(params, min, max, &version, shardId, &chunks);
}
return {std::move(chunks)};
@@ -327,14 +317,13 @@ InitialSplitPolicy::ShardCollectionConfig SingleChunkOnPrimarySplitPolicy::creat
const auto currentTime = VectorClock::get(opCtx)->getTime();
const auto validAfter = currentTime.clusterTime().asTimestamp();
- ChunkVersion version(1, 0, OID::gen(), validAfter);
+ ChunkVersion version({OID::gen(), validAfter}, {1, 0});
const auto& keyPattern = shardKeyPattern.getKeyPattern();
std::vector<ChunkType> chunks;
appendChunk(params,
keyPattern.globalMin(),
keyPattern.globalMax(),
&version,
- validAfter,
params.primaryShardId,
&chunks);
@@ -421,19 +410,14 @@ InitialSplitPolicy::ShardCollectionConfig AbstractTagsBasedSplitPolicy::createFi
return shardIds[indx++ % shardIds.size()];
};
- ChunkVersion version(1, 0, OID::gen(), validAfter);
+ ChunkVersion version({OID::gen(), validAfter}, {1, 0});
auto lastChunkMax = keyPattern.globalMin();
std::vector<ChunkType> chunks;
for (const auto& tag : _tags) {
// Create a chunk for the hole [lastChunkMax, tag.getMinKey)
if (tag.getMinKey().woCompare(lastChunkMax) > 0) {
- appendChunk(params,
- lastChunkMax,
- tag.getMinKey(),
- &version,
- validAfter,
- nextShardIdForHole(),
- &chunks);
+ appendChunk(
+ params, lastChunkMax, tag.getMinKey(), &version, nextShardIdForHole(), &chunks);
}
// Create chunk for the actual tag - [tag.getMinKey, tag.getMaxKey)
const auto it = tagToShards.find(tag.getTag());
@@ -470,7 +454,7 @@ InitialSplitPolicy::ShardCollectionConfig AbstractTagsBasedSplitPolicy::createFi
const BSONObj max = (splitPointIdx == splitInfo.splitPoints.size())
? tag.getMaxKey()
: splitInfo.splitPoints[splitPointIdx];
- appendChunk(params, min, max, &version, validAfter, targetShard, &chunks);
+ appendChunk(params, min, max, &version, targetShard, &chunks);
}
}
lastChunkMax = tag.getMaxKey();
@@ -478,13 +462,8 @@ InitialSplitPolicy::ShardCollectionConfig AbstractTagsBasedSplitPolicy::createFi
// Create a chunk for the hole [lastChunkMax, MaxKey]
if (lastChunkMax.woCompare(keyPattern.globalMax()) < 0) {
- appendChunk(params,
- lastChunkMax,
- keyPattern.globalMax(),
- &version,
- validAfter,
- nextShardIdForHole(),
- &chunks);
+ appendChunk(
+ params, lastChunkMax, keyPattern.globalMax(), &version, nextShardIdForHole(), &chunks);
}
return {std::move(chunks)};
@@ -765,13 +744,13 @@ InitialSplitPolicy::ShardCollectionConfig ReshardingSplitPolicy::createFirstChun
const auto currentTime = VectorClock::get(opCtx)->getTime();
const auto validAfter = currentTime.clusterTime().asTimestamp();
- ChunkVersion version(1, 0, OID::gen(), validAfter);
+ ChunkVersion version({OID::gen(), validAfter}, {1, 0});
splitPoints.insert(keyPattern.globalMax());
for (const auto& splitPoint : splitPoints) {
auto bestShard = selectBestShard(
chunkDistribution, zoneInfo, zoneToShardMap, {lastChunkMax, splitPoint});
- appendChunk(params, lastChunkMax, splitPoint, &version, validAfter, bestShard, &chunks);
+ appendChunk(params, lastChunkMax, splitPoint, &version, bestShard, &chunks);
lastChunkMax = splitPoint;
chunkDistribution[bestShard]++;
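Every ChunkVersion construction site in this diff switches from the positional (major, minor, epoch, timestamp) constructor to a form that groups the collection generation {epoch, timestamp} separately from the {major, minor} placement; appendChunk() correspondingly drops its creationTimestamp parameter and reads the timestamp back off the version. Side by side, assuming an OID epoch and a Timestamp in scope:

    // Old positional form, removed throughout this diff:
    //   ChunkVersion v(1, 0, epoch, timestamp);
    // New form: {collection generation}, {placement}:
    ChunkVersion v({epoch, timestamp}, {1, 0});
    // appendChunk() now derives the history entry from the version itself:
    chunk.setHistory({ChunkHistory(v.getTimestamp(), shardId)});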
diff --git a/src/mongo/db/s/config/initial_split_policy_test.cpp b/src/mongo/db/s/config/initial_split_policy_test.cpp
index 2eea0b6905f..9fc9a5576d0 100644
--- a/src/mongo/db/s/config/initial_split_policy_test.cpp
+++ b/src/mongo/db/s/config/initial_split_policy_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/json.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/s/config/initial_split_policy.h"
@@ -40,7 +37,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -208,7 +204,7 @@ public:
std::vector<ChunkType> chunks;
for (unsigned long i = 0; i < chunkRanges.size(); ++i) {
- ChunkVersion version(1, 0, OID::gen(), Timestamp(1, 1));
+ ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 0});
ChunkType chunk(_uuid, chunkRanges[i], version, shardIds[i]);
chunk.setHistory({ChunkHistory(timeStamp, shardIds[i])});
chunks.push_back(chunk);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index 0a45a9d3a6d..bfef69bcb9f 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -175,8 +175,6 @@ protected:
}
void expectClusterParametersRequest(const HostAndPort& target) {
- if (!gFeatureFlagClusterWideConfig.isEnabled(serverGlobalParams.featureCompatibility))
- return;
auto clusterParameterDocs = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
operationContext(),
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp
index a4abd0ff45b..fbb502f933b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/logical_session_cache_noop.h"
@@ -43,7 +41,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -121,17 +118,17 @@ TEST_F(ShardingCatalogManagerBumpCollectionVersionAndChangeMetadataTest,
const auto collUUID = UUID::gen();
const auto shard0Chunk0 = generateChunkType(collUUID,
- ChunkVersion(10, 1, collEpoch, collTimestamp),
+ ChunkVersion({collEpoch, collTimestamp}, {10, 1}),
kShard0.getName(),
BSON("a" << 1),
BSON("a" << 10));
const auto shard0Chunk1 = generateChunkType(collUUID,
- ChunkVersion(11, 2, collEpoch, collTimestamp),
+ ChunkVersion({collEpoch, collTimestamp}, {11, 2}),
kShard0.getName(),
BSON("a" << 11),
BSON("a" << 20));
const auto shard1Chunk0 = generateChunkType(collUUID,
- ChunkVersion(8, 1, collEpoch, collTimestamp),
+ ChunkVersion({collEpoch, collTimestamp}, {8, 1}),
kShard1.getName(),
BSON("a" << 21),
BSON("a" << 100));
@@ -157,7 +154,7 @@ TEST_F(ShardingCatalogManagerBumpCollectionVersionAndChangeMetadataTest, NoChunk
const auto collUUID = UUID::gen();
const auto shard0Chunk0 = generateChunkType(collUUID,
- ChunkVersion(10, 1, collEpoch, collTimestamp),
+ ChunkVersion({collEpoch, collTimestamp}, {10, 1}),
kShard0.getName(),
BSON("a" << 1),
BSON("a" << 10));
@@ -182,12 +179,12 @@ TEST_F(ShardingCatalogManagerBumpCollectionVersionAndChangeMetadataTest,
const auto collUUID = UUID::gen();
const auto shard0Chunk0 = generateChunkType(collUUID,
- ChunkVersion(10, 1, collEpoch, collTimestamp),
+ ChunkVersion({collEpoch, collTimestamp}, {10, 1}),
kShard0.getName(),
BSON("a" << 1),
BSON("a" << 10));
const auto shard1Chunk0 = generateChunkType(collUUID,
- ChunkVersion(11, 2, collEpoch, collTimestamp),
+ ChunkVersion({collEpoch, collTimestamp}, {11, 2}),
kShard1.getName(),
BSON("a" << 11),
BSON("a" << 20));
@@ -244,12 +241,12 @@ TEST_F(ShardingCatalogManagerBumpCollectionVersionAndChangeMetadataTest,
const auto collUUID = UUID::gen();
const auto shard0Chunk0 = generateChunkType(collUUID,
- ChunkVersion(10, 1, collEpoch, collTimestamp),
+ ChunkVersion({collEpoch, collTimestamp}, {10, 1}),
kShard0.getName(),
BSON("a" << 1),
BSON("a" << 10));
const auto shard1Chunk0 = generateChunkType(collUUID,
- ChunkVersion(11, 2, collEpoch, collTimestamp),
+ ChunkVersion({collEpoch, collTimestamp}, {11, 2}),
kShard1.getName(),
BSON("a" << 11),
BSON("a" << 20));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 0cf64cc5288..f461f1ae0a5 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/base/status_with.h"
@@ -67,7 +64,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
@@ -87,7 +83,7 @@ void appendShortVersion(BufBuilder* out, const ChunkType& chunk) {
bb.append(ChunkType::min(), chunk.getMin());
bb.append(ChunkType::max(), chunk.getMax());
if (chunk.isVersionSet()) {
- chunk.getVersion().appendLegacyWithField(&bb, ChunkType::lastmod());
+ chunk.getVersion().serializeToBSON(ChunkType::lastmod(), &bb);
}
bb.done();
}
@@ -268,7 +264,8 @@ ChunkVersion getShardVersion(OperationContext* opCtx,
if (swDonorShardVersion.getStatus().code() == 50577) {
// The query to find 'nss' chunks belonging to the donor shard didn't return any chunks,
// meaning the last chunk for fromShard was donated. Gracefully handle the error.
- return ChunkVersion(0, 0, collectionVersion.epoch(), collectionVersion.getTimestamp());
+ return ChunkVersion({collectionVersion.epoch(), collectionVersion.getTimestamp()},
+ {0, 0});
} else {
// Bubble up any other error
uassertStatusOK(swDonorShardVersion);
@@ -391,10 +388,9 @@ void ShardingCatalogManager::bumpMajorVersionOneChunkPerShard(
TxnNumber txnNumber,
const std::vector<ShardId>& shardIds) {
auto curCollectionVersion = uassertStatusOK(getCollectionVersion(opCtx, nss));
- ChunkVersion targetChunkVersion(curCollectionVersion.majorVersion() + 1,
- 0,
- curCollectionVersion.epoch(),
- curCollectionVersion.getTimestamp());
+ ChunkVersion targetChunkVersion(
+ {curCollectionVersion.epoch(), curCollectionVersion.getTimestamp()},
+ {curCollectionVersion.majorVersion() + 1, 0});
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto findCollResponse = uassertStatusOK(
@@ -684,7 +680,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
BSONObjBuilder b(logDetail.subobjStart("before"));
b.append(ChunkType::min(), range.getMin());
b.append(ChunkType::max(), range.getMax());
- collVersion.appendLegacyWithField(&b, ChunkType::lastmod());
+ collVersion.serializeToBSON(ChunkType::lastmod(), &b);
}
if (splitChunkResult.newChunks->size() == 2) {
@@ -960,8 +956,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
b.append(chunkToMerge.toConfigBSON());
}
}
- initialVersion.appendLegacyWithField(&logDetail, "prevShardVersion");
- mergeVersion.appendLegacyWithField(&logDetail, "mergedVersion");
+ initialVersion.serializeToBSON("prevShardVersion", &logDetail);
+ mergeVersion.serializeToBSON("mergedVersion", &logDetail);
logDetail.append("owningShard", shardId);
ShardingLogging::get(opCtx)->logChange(
@@ -1127,10 +1123,9 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
newMigratedChunk->setMin(migratedChunk.getMin());
newMigratedChunk->setMax(migratedChunk.getMax());
newMigratedChunk->setShard(toShard);
- newMigratedChunk->setVersion(ChunkVersion(currentCollectionVersion.majorVersion() + 1,
- minVersionIncrement++,
- currentCollectionVersion.epoch(),
- currentCollectionVersion.getTimestamp()));
+ newMigratedChunk->setVersion(
+ ChunkVersion({currentCollectionVersion.epoch(), currentCollectionVersion.getTimestamp()},
+ {currentCollectionVersion.majorVersion() + 1, minVersionIncrement++}));
// Copy the complete history.
auto newHistory = currentChunk.getHistory();
@@ -1186,10 +1181,9 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
ChunkType leftSplitChunk = currentChunk;
leftSplitChunk.setName(OID::gen());
leftSplitChunk.setMax(movedChunkMin);
- leftSplitChunk.setVersion(ChunkVersion(movedChunkVersion.majorVersion(),
- minVersionIncrement++,
- movedChunkVersion.epoch(),
- movedChunkVersion.getTimestamp()));
+ leftSplitChunk.setVersion(
+ ChunkVersion({movedChunkVersion.epoch(), movedChunkVersion.getTimestamp()},
+ {movedChunkVersion.majorVersion(), minVersionIncrement++}));
newSplitChunks->emplace_back(std::move(leftSplitChunk));
}
@@ -1199,10 +1193,9 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
ChunkType rightSplitChunk = currentChunk;
rightSplitChunk.setName(OID::gen());
rightSplitChunk.setMin(movedChunkMax);
- rightSplitChunk.setVersion(ChunkVersion(movedChunkVersion.majorVersion(),
- minVersionIncrement++,
- movedChunkVersion.epoch(),
- movedChunkVersion.getTimestamp()));
+ rightSplitChunk.setVersion(
+ ChunkVersion({movedChunkVersion.epoch(), movedChunkVersion.getTimestamp()},
+ {movedChunkVersion.majorVersion(), minVersionIncrement++}));
newSplitChunks->emplace_back(std::move(rightSplitChunk));
}
}
@@ -1218,10 +1211,9 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
newControlChunk = std::make_shared<ChunkType>(origControlChunk);
// Setting control chunk's minor version to 1 on the donor shard.
- newControlChunk->setVersion(ChunkVersion(currentCollectionVersion.majorVersion() + 1,
- minVersionIncrement++,
- currentCollectionVersion.epoch(),
- currentCollectionVersion.getTimestamp()));
+ newControlChunk->setVersion(ChunkVersion(
+ {currentCollectionVersion.epoch(), currentCollectionVersion.getTimestamp()},
+ {currentCollectionVersion.majorVersion() + 1, minVersionIncrement++}));
}
_commitChunkMigrationInTransaction(
@@ -1232,7 +1224,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
// We migrated the last chunk from the donor shard.
newMigratedChunk->getVersion().serializeToBSON(kCollectionVersionField, &response);
const ChunkVersion donorShardVersion(
- 0, 0, currentCollectionVersion.epoch(), currentCollectionVersion.getTimestamp());
+ {currentCollectionVersion.epoch(), currentCollectionVersion.getTimestamp()}, {0, 0});
donorShardVersion.serializeToBSON(ChunkVersion::kShardVersionField, &response);
} else {
newControlChunk->getVersion().serializeToBSON(kCollectionVersionField, &response);
@@ -1349,8 +1341,8 @@ void ShardingCatalogManager::upgradeChunksHistory(OperationContext* opCtx,
}();
// Bump the major version in order to be guaranteed to trigger refresh on every shard
- ChunkVersion newCollectionVersion(
- collVersion.majorVersion() + 1, 0, collVersion.epoch(), collVersion.getTimestamp());
+ ChunkVersion newCollectionVersion({collVersion.epoch(), collVersion.getTimestamp()},
+ {collVersion.majorVersion() + 1, 0});
std::set<ShardId> changedShardIds;
for (const auto& chunk : allChunksVector) {
auto upgradeChunk = uassertStatusOK(
@@ -1491,10 +1483,9 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
<< chunk.toString() << ").",
currentCollectionVersion.epoch() == collectionEpoch);
- ChunkVersion newVersion(currentCollectionVersion.majorVersion() + 1,
- 0,
- currentCollectionVersion.epoch(),
- currentCollectionVersion.getTimestamp());
+ ChunkVersion newVersion(
+ {currentCollectionVersion.epoch(), currentCollectionVersion.getTimestamp()},
+ {currentCollectionVersion.majorVersion() + 1, 0});
BSONObj chunkQuery(BSON(ChunkType::min(chunk.getMin())
<< ChunkType::max(chunk.getMax()) << ChunkType::collectionUUID
@@ -1653,8 +1644,8 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o
// Generate a new version for the chunk by incrementing the collectionVersion's major
// version.
auto newChunk = matchingChunk;
- newChunk.setVersion(ChunkVersion(
- highestChunk.getVersion().majorVersion() + 1, 0, coll.getEpoch(), coll.getTimestamp()));
+ newChunk.setVersion(ChunkVersion({coll.getEpoch(), coll.getTimestamp()},
+ {highestChunk.getVersion().majorVersion() + 1, 0}));
// Update the chunk, if it still exists, to have the bumped version.
earlyReturnBeforeDoingWriteGuard.dismiss();
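The rewritten call sites above all follow one bump pattern: keep the current collection generation, raise the major version by one, and hand out increasing minor versions (minVersionIncrement) to the migrated, split, and control chunks produced by a single commit. Distilled:

    // Same collection generation, next major version, monotonically
    // increasing minor component per chunk rewritten by this commit.
    uint32_t minVersionIncrement = 0;
    const ChunkVersion next(
        {currentCollectionVersion.epoch(), currentCollectionVersion.getTimestamp()},
        {currentCollectionVersion.majorVersion() + 1, minVersionIncrement++});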
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
index 762961eaac3..9f883997a3d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/client/read_preference.h"
@@ -72,7 +70,7 @@ protected:
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- chunk.setVersion({12, 7, epoch, timestamp});
+ chunk.setVersion(ChunkVersion({epoch, timestamp}, {12, 7}));
chunk.setShard(_shardName);
chunk.setMin(jumboChunk().getMin());
chunk.setMax(jumboChunk().getMax());
@@ -81,7 +79,7 @@ protected:
ChunkType otherChunk;
otherChunk.setName(OID::gen());
otherChunk.setCollectionUUID(collUuid);
- otherChunk.setVersion({14, 7, epoch, timestamp});
+ otherChunk.setVersion(ChunkVersion({epoch, timestamp}, {14, 7}));
otherChunk.setShard(_shardName);
otherChunk.setMin(nonJumboChunk().getMin());
otherChunk.setMax(nonJumboChunk().getMax());
@@ -107,7 +105,7 @@ TEST_F(ClearJumboFlagTest, ClearJumboShouldBumpVersion) {
operationContext(), collUuid, jumboChunk().getMin(), collEpoch, collTimestamp));
ASSERT_FALSE(chunkDoc.getJumbo());
auto chunkVersion = chunkDoc.getVersion();
- ASSERT_EQ(ChunkVersion(15, 0, collEpoch, collTimestamp), chunkVersion);
+ ASSERT_EQ(ChunkVersion({collEpoch, collTimestamp}, {15, 0}), chunkVersion);
};
test(_nss2, Timestamp(42));
@@ -125,7 +123,7 @@ TEST_F(ClearJumboFlagTest, ClearJumboShouldNotBumpVersionIfChunkNotJumbo) {
auto chunkDoc = uassertStatusOK(getChunkDoc(
operationContext(), collUuid, nonJumboChunk().getMin(), collEpoch, collTimestamp));
ASSERT_FALSE(chunkDoc.getJumbo());
- ASSERT_EQ(ChunkVersion(14, 7, collEpoch, collTimestamp), chunkDoc.getVersion());
+ ASSERT_EQ(ChunkVersion({collEpoch, collTimestamp}, {14, 7}), chunkDoc.getVersion());
};
test(_nss2, Timestamp(42));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
index 235954c5d5d..fc8a55a9635 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/client/read_preference.h"
@@ -49,7 +47,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -104,7 +101,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
ChunkType migratedChunk, controlChunk;
{
- ChunkVersion origVersion(12, 7, collEpoch, collTimestamp);
+ ChunkVersion origVersion({collEpoch, collTimestamp}, {12, 7});
migratedChunk.setName(OID::gen());
migratedChunk.setCollectionUUID(collUUID);
@@ -140,15 +137,14 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
validAfter));
// Verify the versions returned match the expected values.
- auto mver = ChunkVersion::fromBSONPositionalOrNewerFormat(versions["shardVersion"]);
- ASSERT_EQ(ChunkVersion(migratedChunk.getVersion().majorVersion() + 1,
- 1,
- migratedChunk.getVersion().epoch(),
- migratedChunk.getVersion().getTimestamp()),
+ auto mver = ChunkVersion::parse(versions["shardVersion"]);
+ ASSERT_EQ(ChunkVersion(
+ {migratedChunk.getVersion().epoch(), migratedChunk.getVersion().getTimestamp()},
+ {migratedChunk.getVersion().majorVersion() + 1, 1}),
mver);
// Verify that a collection version is returned
- auto cver = ChunkVersion::fromBSONPositionalOrNewerFormat(versions["collectionVersion"]);
+ auto cver = ChunkVersion::parse(versions["collectionVersion"]);
ASSERT_TRUE(mver.isOlderOrEqualThan(cver));
// Verify the chunks ended up in the right shards.
@@ -188,8 +184,8 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
setupShards({shard0, shard1});
- int origMajorVersion = 15;
- auto const origVersion = ChunkVersion(origMajorVersion, 4, collEpoch, collTimestamp);
+ uint32_t origMajorVersion = 15;
+ auto const origVersion = ChunkVersion({collEpoch, collTimestamp}, {origMajorVersion, 4});
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -222,12 +218,12 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
// Verify the version returned matches the expected value.
BSONObj versions = resultBSON.getValue();
- auto mver = ChunkVersion::fromBSONPositionalOrNewerFormat(versions["shardVersion"]);
- ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver);
+ auto mver = ChunkVersion::parse(versions["shardVersion"]);
+ ASSERT_EQ(ChunkVersion({origVersion.epoch(), origVersion.getTimestamp()}, {0, 0}), mver);
// Verify that a collection version is returned
- auto cver = ChunkVersion::fromBSONPositionalOrNewerFormat(versions["collectionVersion"]);
- ASSERT_EQ(ChunkVersion(origMajorVersion + 1, 0, collEpoch, collTimestamp), cver);
+ auto cver = ChunkVersion::parse(versions["collectionVersion"]);
+ ASSERT_EQ(ChunkVersion({collEpoch, collTimestamp}, {origMajorVersion + 1, 0}), cver);
// Verify the chunk ended up in the right shard.
auto chunkDoc0 =
@@ -253,8 +249,8 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
setupShards({shard0, shard1});
- int origMajorVersion = 15;
- auto const origVersion = ChunkVersion(origMajorVersion, 4, collEpoch, collTimestamp);
+ uint32_t origMajorVersion = 15;
+ auto const origVersion = ChunkVersion({collEpoch, collTimestamp}, {origMajorVersion, 4});
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -288,8 +284,8 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
// Verify the version returned matches the expected value.
BSONObj versions = resultBSON.getValue();
- auto mver = ChunkVersion::fromBSONPositionalOrNewerFormat(versions["shardVersion"]);
- ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver);
+ auto mver = ChunkVersion::parse(versions["shardVersion"]);
+ ASSERT_EQ(ChunkVersion({origVersion.epoch(), origVersion.getTimestamp()}, {0, 0}), mver);
// Verify the chunk ended up in the right shard.
auto chunkDoc0 =
@@ -314,9 +310,8 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
setupShards({shard0, shard1});
- int origMajorVersion = 15;
- auto const origVersion =
- ChunkVersion(origMajorVersion, 4, OID::gen(), Timestamp(42) /* timestamp */);
+ uint32_t origMajorVersion = 15;
+ auto const origVersion = ChunkVersion({OID::gen(), Timestamp(42)}, {origMajorVersion, 4});
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -362,9 +357,8 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
setupShards({shard0, shard1});
- int origMajorVersion = 12;
- auto const origVersion =
- ChunkVersion(origMajorVersion, 7, OID::gen(), Timestamp(42) /* timestamp */);
+ uint32_t origMajorVersion = 12;
+ auto const origVersion = ChunkVersion({OID::gen(), Timestamp(42)}, {origMajorVersion, 7});
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -418,11 +412,9 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
setupShards({shard0, shard1});
- int origMajorVersion = 12;
- auto const origVersion =
- ChunkVersion(origMajorVersion, 7, OID::gen(), Timestamp(42) /* timestamp */);
- auto const otherVersion =
- ChunkVersion(origMajorVersion, 7, OID::gen(), Timestamp(42) /* timestamp */);
+ uint32_t origMajorVersion = 12;
+ auto const origVersion = ChunkVersion({OID::gen(), Timestamp(42)}, {origMajorVersion, 7});
+ auto const otherVersion = ChunkVersion({OID::gen(), Timestamp(42)}, {origMajorVersion, 7});
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -479,8 +471,8 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
setupShards({shard0, shard1});
- int origMajorVersion = 12;
- auto const origVersion = ChunkVersion(origMajorVersion, 7, collEpoch, collTimestamp);
+ uint32_t origMajorVersion = 12;
+ auto const origVersion = ChunkVersion({collEpoch, collTimestamp}, {origMajorVersion, 7});
ChunkType chunk0;
chunk0.setName(OID::gen());
@@ -525,8 +517,8 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
// Verify the versions returned match expected values.
BSONObj versions = resultBSON.getValue();
- auto mver = ChunkVersion::fromBSONPositionalOrNewerFormat(versions["shardVersion"]);
- ASSERT_EQ(ChunkVersion(0, 0, origVersion.epoch(), origVersion.getTimestamp()), mver);
+ auto mver = ChunkVersion::parse(versions["shardVersion"]);
+ ASSERT_EQ(ChunkVersion({origVersion.epoch(), origVersion.getTimestamp()}, {0, 0}), mver);
// Verify the chunks ended up in the right shards.
auto chunkDoc0 =
@@ -560,7 +552,7 @@ TEST_F(CommitChunkMigrate, RejectMissingChunkVersion) {
setupShards({shard0, shard1});
- ChunkVersion origVersion(12, 7, OID::gen(), Timestamp(42) /* timestamp */);
+ ChunkVersion origVersion({OID::gen(), Timestamp(42)}, {12, 7});
// Create the chunk to be migrated with no chunk version set.
ChunkType migratedChunk;
@@ -610,7 +602,7 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) {
setupShards({shard0, shard1});
auto epoch = OID::gen();
- ChunkVersion origVersion(12, 7, epoch, Timestamp(42) /* timestamp */);
+ ChunkVersion origVersion({epoch, Timestamp(42)}, {12, 7});
ChunkType migratedChunk;
migratedChunk.setName(OID::gen());
@@ -621,7 +613,7 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) {
migratedChunk.setMin(BSON("a" << 1));
migratedChunk.setMax(BSON("a" << 10));
- ChunkVersion currentChunkVersion(14, 7, epoch, Timestamp(42) /* timestamp */);
+ ChunkVersion currentChunkVersion({epoch, Timestamp(42)}, {14, 7});
ChunkType currentChunk;
currentChunk.setName(OID::gen());
@@ -662,7 +654,7 @@ TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) {
setupShards({shard0, shard1});
- ChunkVersion origVersion(12, 7, OID::gen(), Timestamp(42) /* timestamp */);
+ ChunkVersion origVersion({OID::gen(), Timestamp(42)}, {12, 7});
ChunkType migratedChunk;
migratedChunk.setName(OID::gen());
@@ -673,7 +665,7 @@ TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) {
migratedChunk.setMin(BSON("a" << 1));
migratedChunk.setMax(BSON("a" << 10));
- ChunkVersion currentChunkVersion(12, 7, OID::gen(), Timestamp(42) /* timestamp */);
+ ChunkVersion currentChunkVersion({OID::gen(), Timestamp(42)}, {12, 7});
ChunkType currentChunk;
currentChunk.setName(OID::gen());
@@ -730,7 +722,7 @@ public:
void setupCollectionWithNChunks(int numberOfChunks) {
invariant(numberOfChunks > 0);
- int currentMajorVersion = 1;
+ uint32_t currentMajorVersion = 1;
int historyTimestampSecond = 100;
std::vector<ChunkHistory> history;
@@ -745,7 +737,7 @@ public:
const auto max = chunksMin.at(i + 1); // Max key of the chunk being created
const auto shardId = _shardIds.at(i % 2); // Shard owning the chunk
ChunkVersion version =
- ChunkVersion(currentMajorVersion++, 0, _collEpoch, _collTimestamp);
+ ChunkVersion({_collEpoch, _collTimestamp}, {currentMajorVersion++, 0});
history.insert(history.begin(),
{ChunkHistory(Timestamp(historyTimestampSecond++, 0), shardId)});
ChunkType chunk = createChunk(_collUUID, min, max, version, shardId, history);
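
The assertions in these migration-commit tests pin down the donor's post-commit versioning rule. A self-contained sketch of that rule using bare {major, minor} pairs (a deliberate simplification; epoch and timestamp are not changed by a commit):

```cpp
#include <cassert>
#include <cstdint>
#include <utility>

using MajorMinor = std::pair<uint32_t, uint32_t>;

// Donor's shard version after a successful commit, as asserted above: with a
// remaining ("control") chunk it is bumped to {origMajor + 1, 1}; a donor left
// with no chunks reports the special {0, 0} version instead.
MajorMinor donorShardVersionAfterCommit(MajorMinor orig, bool donorKeepsChunks) {
    return donorKeepsChunks ? MajorMinor{orig.first + 1, 1} : MajorMinor{0, 0};
}

int main() {
    // ChunksUpdatedCorrectly: orig {12, 7} with a control chunk -> {13, 1}.
    assert((donorShardVersionAfterCommit({12, 7}, true) == MajorMinor{13, 1}));
    // ChunksUpdatedCorrectlyWithoutControlChunk: orig {15, 4}, no chunks left -> {0, 0}.
    assert((donorShardVersionAfterCommit({15, 4}, false) == MajorMinor{0, 0}));
    return 0;
}
```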
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
index 20e8b2ecc6a..8921d0c2e8b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
@@ -95,7 +93,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoCollectionFoundReturnsSuccess) {
const auto requestedChunkType =
generateChunkType(_nss,
_collUuid,
- ChunkVersion(10, 2, OID::gen(), Timestamp(1, 1)),
+ ChunkVersion({OID::gen(), Timestamp(1, 1)}, {10, 2}),
ShardId(_shardName),
BSON("a" << 1),
BSON("a" << 10));
@@ -112,12 +110,13 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundRetu
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
// Min key is different.
@@ -140,12 +139,13 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundRetu
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
// Max key is different.
@@ -169,20 +169,22 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
const auto existingChunkType = requestedChunkType;
- const auto highestChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(20, 3, collEpoch, collTimestamp),
- ShardId("shard0001"),
- BSON("a" << 11),
- BSON("a" << 20));
+ const auto highestChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {20, 3}),
+ ShardId("shard0001"),
+ BSON("a" << 11),
+ BSON("a" << 20));
setupCollection(_nss, _keyPattern, {existingChunkType, highestChunkType});
ShardingCatalogManager::get(operationContext())
@@ -195,8 +197,8 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
assertChunkVersionWasBumpedTo(
existingChunkType,
getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp),
- ChunkVersion(
- highestChunkType.getVersion().majorVersion() + 1, 0, collEpoch, collTimestamp));
+ ChunkVersion({collEpoch, collTimestamp},
+ {highestChunkType.getVersion().majorVersion() + 1, 0}));
}
TEST_F(EnsureChunkVersionIsGreaterThanTest,
@@ -204,20 +206,22 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
const auto existingChunkType = requestedChunkType;
- const auto highestChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(20, 3, collEpoch, collTimestamp),
- ShardId("shard0001"),
- BSON("a" << 11),
- BSON("a" << 20));
+ const auto highestChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {20, 3}),
+ ShardId("shard0001"),
+ BSON("a" << 11),
+ BSON("a" << 20));
setupCollection(_nss, _keyPattern, {existingChunkType, highestChunkType});
ShardingCatalogManager::get(operationContext())
@@ -230,8 +234,8 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
assertChunkVersionWasBumpedTo(
existingChunkType,
getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp),
- ChunkVersion(
- highestChunkType.getVersion().majorVersion() + 1, 0, collEpoch, collTimestamp));
+ ChunkVersion({collEpoch, collTimestamp},
+ {highestChunkType.getVersion().majorVersion() + 1, 0}));
}
TEST_F(
@@ -240,15 +244,16 @@ TEST_F(
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
- existingChunkType.setVersion(ChunkVersion(11, 1, collEpoch, collTimestamp));
+ existingChunkType.setVersion(ChunkVersion({collEpoch, collTimestamp}, {11, 1}));
setupCollection(_nss, _keyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
@@ -269,15 +274,16 @@ TEST_F(
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
- existingChunkType.setVersion(ChunkVersion(11, 1, collEpoch, collTimestamp));
+ existingChunkType.setVersion(ChunkVersion({collEpoch, collTimestamp}, {11, 1}));
setupCollection(_nss, _keyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
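
Every ensureChunkVersionIsGreaterThan test above asserts the same bump rule for the matched chunk. A one-function sketch, again reduced to {major, minor} pairs:

```cpp
#include <cassert>
#include <cstdint>
#include <utility>

using MajorMinor = std::pair<uint32_t, uint32_t>;

// The requested chunk is re-versioned to one major version above the highest
// version currently in the collection, with the minor component reset to 0.
MajorMinor bumpedVersion(MajorMinor highestInCollection) {
    return {highestInCollection.first + 1, 0};
}

int main() {
    // Matches the assertions above: highest {20, 3} -> bumped {21, 0}.
    assert((bumpedVersion({20, 3}) == MajorMinor{21, 0}));
    return 0;
}
```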
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 3b5951cd82e..9d7e68c9a93 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/client/read_preference.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/logical_session_cache_noop.h"
@@ -85,7 +83,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(_shardId);
@@ -119,17 +117,16 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
_shardId,
validAfter));
- auto collVersion = ChunkVersion::fromBSONPositionalOrNewerFormat(versions["collectionVersion"]);
- auto shardVersion = ChunkVersion::fromBSONPositionalOrNewerFormat(versions["shardVersion"]);
+ auto collVersion = ChunkVersion::parse(versions["collectionVersion"]);
+ auto shardVersion = ChunkVersion::parse(versions["shardVersion"]);
ASSERT_TRUE(origVersion.isOlderThan(shardVersion));
ASSERT_EQ(collVersion, shardVersion);
// Check for increment on mergedChunk's minor version
- auto expectedShardVersion = ChunkVersion(origVersion.majorVersion(),
- origVersion.minorVersion() + 1,
- origVersion.epoch(),
- origVersion.getTimestamp());
+ auto expectedShardVersion =
+ ChunkVersion({origVersion.epoch(), origVersion.getTimestamp()},
+ {origVersion.majorVersion(), origVersion.minorVersion() + 1});
ASSERT_EQ(expectedShardVersion, shardVersion);
@@ -170,7 +167,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(_shardId);
@@ -251,7 +248,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
otherChunk.setName(OID::gen());
otherChunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 2});
chunk.setVersion(origVersion);
chunk.setShard(_shardId);
@@ -273,7 +270,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
ChunkRange rangeToBeMerged(chunk.getMin(), chunk2.getMax());
// Set up other chunk with competing version
- auto competingVersion = ChunkVersion(2, 1, collEpoch, collTimestamp);
+ auto competingVersion = ChunkVersion({collEpoch, collTimestamp}, {2, 1});
otherChunk.setVersion(competingVersion);
otherChunk.setShard(_shardId);
otherChunk.setMin(BSON("a" << 10));
@@ -334,7 +331,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 2});
chunk.setVersion(origVersion);
chunk.setShard(shardId);
@@ -415,7 +412,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
// Construct chunk to be merged
@@ -457,7 +454,7 @@ TEST_F(MergeChunkTest, NonMatchingUUIDsOfChunkAndRequestErrors) {
ChunkType chunk;
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(_shardId);
@@ -503,7 +500,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
ChunkRange rangeToBeMerged(chunkMin, chunkMax);
// Store a chunk that matches the range that will be requested
- auto mergedVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto mergedVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
mergedVersion.incMinor();
ChunkType mergedChunk;
mergedChunk.setVersion(mergedVersion);
@@ -559,7 +556,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
chunk1.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk1.setVersion(origVersion);
chunk1.setShard(_shardId);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
index 32544cacc7b..b54338947b1 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <string>
#include <vector>
@@ -58,7 +55,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
@@ -227,15 +223,15 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) {
const auto timestamp = Timestamp(1);
ChunkType chunk1(uuid,
ChunkRange(BSON("_id" << 0), BSON("_id" << 20)),
- ChunkVersion(1, 1, epoch, timestamp),
+ ChunkVersion({epoch, timestamp}, {1, 1}),
shard1.getName());
ChunkType chunk2(uuid,
ChunkRange(BSON("_id" << 21), BSON("_id" << 50)),
- ChunkVersion(1, 2, epoch, timestamp),
+ ChunkVersion({epoch, timestamp}, {1, 2}),
shard1.getName());
ChunkType chunk3(uuid,
ChunkRange(BSON("_id" << 51), BSON("_id" << 1000)),
- ChunkVersion(1, 3, epoch, timestamp),
+ ChunkVersion({epoch, timestamp}, {1, 3}),
shard1.getName());
chunk3.setJumbo(true);
@@ -314,15 +310,15 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
Timestamp timestamp = Timestamp(1);
ChunkType chunk1(uuid,
ChunkRange(BSON("_id" << 0), BSON("_id" << 20)),
- ChunkVersion(1, 1, epoch, timestamp),
+ ChunkVersion({epoch, timestamp}, {1, 1}),
shard1.getName());
ChunkType chunk2(uuid,
ChunkRange(BSON("_id" << 21), BSON("_id" << 50)),
- ChunkVersion(1, 2, epoch, timestamp),
+ ChunkVersion({epoch, timestamp}, {1, 2}),
shard1.getName());
ChunkType chunk3(uuid,
ChunkRange(BSON("_id" << 51), BSON("_id" << 1000)),
- ChunkVersion(1, 3, epoch, timestamp),
+ ChunkVersion({epoch, timestamp}, {1, 3}),
shard1.getName());
std::vector<ChunkType> chunks{chunk1, chunk2, chunk3};
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 7de1d4c3efe..947ec9fb3c2 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -1201,9 +1201,6 @@ void ShardingCatalogManager::_pushClusterParametersToNewShard(
void ShardingCatalogManager::_standardizeClusterParameters(OperationContext* opCtx,
RemoteCommandTargeter* targeter) {
- if (!gFeatureFlagClusterWideConfig.isEnabled(serverGlobalParams.featureCompatibility))
- return;
-
auto clusterParameterDocs =
uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
opCtx,
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index 1cc5f1c677d..9b9e48cfe0b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -80,7 +80,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -105,16 +105,16 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
splitPoints,
"shard0000",
false /* fromChunkSplitter */));
- auto collVersion =
- ChunkVersion::fromBSONPositionalOrNewerFormat(versions["collectionVersion"]);
- auto shardVersion = ChunkVersion::fromBSONPositionalOrNewerFormat(versions["shardVersion"]);
+ auto collVersion = ChunkVersion::parse(versions["collectionVersion"]);
+ auto shardVersion = ChunkVersion::parse(versions["shardVersion"]);
ASSERT_TRUE(origVersion.isOlderThan(shardVersion));
ASSERT_EQ(collVersion, shardVersion);
// Check for increment on the split chunk's minor version
- auto expectedShardVersion = ChunkVersion(
- origVersion.majorVersion(), origVersion.minorVersion() + 2, collEpoch, collTimestamp);
+ auto expectedShardVersion =
+ ChunkVersion({collEpoch, collTimestamp},
+ {origVersion.majorVersion(), origVersion.minorVersion() + 2});
ASSERT_EQ(expectedShardVersion, shardVersion);
ASSERT_EQ(shardVersion, collVersion);
@@ -164,7 +164,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -256,7 +256,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
chunk2.setCollectionUUID(collUuid);
// set up first chunk
- auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 2});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -270,7 +270,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
splitPoints.push_back(chunkSplitPoint);
// set up second chunk (chunk2)
- auto competingVersion = ChunkVersion(2, 1, collEpoch, collTimestamp);
+ auto competingVersion = ChunkVersion({collEpoch, collTimestamp}, {2, 1});
chunk2.setVersion(competingVersion);
chunk2.setShard(ShardId(_shardName));
chunk2.setMin(BSON("a" << 10));
@@ -324,7 +324,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -362,7 +362,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -398,7 +398,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -434,7 +434,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -470,7 +470,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -507,7 +507,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -543,7 +543,7 @@ TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -588,7 +588,7 @@ TEST_F(SplitChunkTest, CantCommitSplitFromChunkSplitterDuringDefragmentation) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto version = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto version = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(version);
chunk.setShard(ShardId(_shardName));
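
The merge and split tests assert the minor-version arithmetic directly. A self-contained sketch of both rules; reading the split's minor + 2 as one split point yielding two chunks is an assumption, since the split points themselves sit outside the visible hunks:

```cpp
#include <cassert>
#include <cstdint>
#include <utility>

using MajorMinor = std::pair<uint32_t, uint32_t>;

// A merge collapses the range into one chunk and bumps the shard version's
// minor component once.
MajorMinor afterMerge(MajorMinor orig) {
    return {orig.first, orig.second + 1};
}

// A split assigns each resulting chunk the next minor version, so the shard
// version ends at minor + numResultingChunks.
MajorMinor afterSplit(MajorMinor orig, uint32_t numResultingChunks) {
    return {orig.first, orig.second + numResultingChunks};
}

int main() {
    // MergeExistingChunksCorrectlyShouldSucceed: {1, 0} -> {1, 1}.
    assert((afterMerge({1, 0}) == MajorMinor{1, 1}));
    // SplitExistingChunkCorrectlyShouldSucceed: {1, 0} -> {1, 2}.
    assert((afterSplit({1, 0}, 2) == MajorMinor{1, 2}));
    return 0;
}
```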
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index a5b499cfe81..ccbad667d35 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -359,39 +359,8 @@ void broadcastDropCollection(OperationContext* opCtx,
} // namespace
-CreateCollectionCoordinator::CreateCollectionCoordinator(ShardingDDLCoordinatorService* service,
- const BSONObj& initialState)
- : ShardingDDLCoordinator(service, initialState),
- _doc(CreateCollectionCoordinatorDocument::parse(
- IDLParserErrorContext("CreateCollectionCoordinatorDocument"), initialState)),
- _request(_doc.getCreateCollectionRequest()),
- _critSecReason(BSON("command"
- << "createCollection"
- << "ns" << nss().toString())) {}
-
-boost::optional<BSONObj> CreateCollectionCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
- cmdBob.appendElements(_request.toBSON());
-
- const auto currPhase = [&]() {
- stdx::lock_guard l{_docMutex};
- return _doc.getPhase();
- }();
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "CreateCollectionCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
- return bob.obj();
+void CreateCollectionCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ cmdInfoBuilder->appendElements(_request.toBSON());
}
void CreateCollectionCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
@@ -435,9 +404,9 @@ ExecutorFuture<void> CreateCollectionCoordinator::_runImpl(
// Additionally we want to perform a majority write on the CSRS to ensure that
// all the subsequent reads will see all the writes performed from a previous
// execution of this coordinator.
- _doc = _updateSession(opCtx, _doc);
+ _updateSession(opCtx);
_performNoopRetryableWriteOnAllShardsAndConfigsvr(
- opCtx, getCurrentSession(_doc), **executor);
+ opCtx, getCurrentSession(), **executor);
}
// Log the start of the event only if we're not recovering.
@@ -461,7 +430,7 @@ ExecutorFuture<void> CreateCollectionCoordinator::_runImpl(
->releaseRecoverableCriticalSection(
opCtx,
nss(),
- _getCriticalSectionReason(),
+ _critSecReason,
ShardingCatalogClient::kMajorityWriteConcern);
_result = createCollectionResponseOpt;
@@ -474,10 +443,7 @@ ExecutorFuture<void> CreateCollectionCoordinator::_runImpl(
// presence of a stepdown.
RecoverableCriticalSectionService::get(opCtx)
->acquireRecoverableCriticalSectionBlockWrites(
- opCtx,
- nss(),
- _getCriticalSectionReason(),
- ShardingCatalogClient::kMajorityWriteConcern);
+ opCtx, nss(), _critSecReason, ShardingCatalogClient::kMajorityWriteConcern);
if (!_firstExecution) {
auto uuid = sharding_ddl_util::getCollectionUUID(opCtx, nss());
@@ -489,12 +455,11 @@ ExecutorFuture<void> CreateCollectionCoordinator::_runImpl(
"Removing partial changes from previous run",
"namespace"_attr = nss());
- _doc = _updateSession(opCtx, _doc);
- cleanupPartialChunksFromPreviousAttempt(
- opCtx, *uuid, getCurrentSession(_doc));
+ _updateSession(opCtx);
+ cleanupPartialChunksFromPreviousAttempt(opCtx, *uuid, getCurrentSession());
- _doc = _updateSession(opCtx, _doc);
- broadcastDropCollection(opCtx, nss(), **executor, getCurrentSession(_doc));
+ _updateSession(opCtx);
+ broadcastDropCollection(opCtx, nss(), **executor, getCurrentSession());
}
}
@@ -517,28 +482,18 @@ ExecutorFuture<void> CreateCollectionCoordinator::_runImpl(
->promoteRecoverableCriticalSectionToBlockAlsoReads(
opCtx,
nss(),
- _getCriticalSectionReason(),
+ _critSecReason,
ShardingCatalogClient::kMajorityWriteConcern);
- _doc = _updateSession(opCtx, _doc);
- try {
- _createCollectionOnNonPrimaryShards(opCtx, getCurrentSession(_doc));
- } catch (const ExceptionFor<ErrorCodes::NotARetryableWriteCommand>&) {
- // Older 5.0 binaries don't support running the
- // _shardsvrCreateCollectionParticipant command as a retryable write yet. In
- // that case, retry without attaching session info.
- _createCollectionOnNonPrimaryShards(opCtx, boost::none);
- }
+ _updateSession(opCtx);
+ _createCollectionOnNonPrimaryShards(opCtx, getCurrentSession());
_commit(opCtx);
}
// End of the critical section, from now on, read and writes are permitted.
RecoverableCriticalSectionService::get(opCtx)->releaseRecoverableCriticalSection(
- opCtx,
- nss(),
- _getCriticalSectionReason(),
- ShardingCatalogClient::kMajorityWriteConcern);
+ opCtx, nss(), _critSecReason, ShardingCatalogClient::kMajorityWriteConcern);
// Slow path. Create chunks (which might incur an index scan) and commit must be
// done outside of the critical section to prevent writes from stalling in unsharded
@@ -566,10 +521,7 @@ ExecutorFuture<void> CreateCollectionCoordinator::_runImpl(
auto* opCtx = opCtxHolder.get();
RecoverableCriticalSectionService::get(opCtx)->releaseRecoverableCriticalSection(
- opCtx,
- nss(),
- _getCriticalSectionReason(),
- ShardingCatalogClient::kMajorityWriteConcern);
+ opCtx, nss(), _critSecReason, ShardingCatalogClient::kMajorityWriteConcern);
}
return status;
});
@@ -751,7 +703,7 @@ void CreateCollectionCoordinator::_createChunks(OperationContext* opCtx) {
}
void CreateCollectionCoordinator::_createCollectionOnNonPrimaryShards(
- OperationContext* opCtx, const boost::optional<OperationSessionInfo>& osi) {
+ OperationContext* opCtx, const OperationSessionInfo& osi) {
LOGV2_DEBUG(5277905,
2,
"Create collection _createCollectionOnNonPrimaryShards",
@@ -778,10 +730,9 @@ void CreateCollectionCoordinator::_createCollectionOnNonPrimaryShards(
createCollectionParticipantRequest.setIdIndex(idIndex);
createCollectionParticipantRequest.setIndexes(indexes);
- requests.emplace_back(
- chunkShardId,
- CommandHelpers::appendMajorityWriteConcern(
- createCollectionParticipantRequest.toBSON(osi ? osi->toBSON() : BSONObj())));
+ requests.emplace_back(chunkShardId,
+ CommandHelpers::appendMajorityWriteConcern(
+ createCollectionParticipantRequest.toBSON(osi.toBSON())));
initializedShards.emplace(chunkShardId);
}
@@ -817,8 +768,8 @@ void CreateCollectionCoordinator::_commit(OperationContext* opCtx) {
LOGV2_DEBUG(5277906, 2, "Create collection _commit", "namespace"_attr = nss());
// Upsert Chunks.
- _doc = _updateSession(opCtx, _doc);
- insertChunks(opCtx, _initialChunks->chunks, getCurrentSession(_doc));
+ _updateSession(opCtx);
+ insertChunks(opCtx, _initialChunks->chunks, getCurrentSession());
CollectionType coll(nss(),
_initialChunks->collVersion().epoch(),
@@ -841,9 +792,9 @@ void CreateCollectionCoordinator::_commit(OperationContext* opCtx) {
coll.setUnique(*_request.getUnique());
}
- _doc = _updateSession(opCtx, _doc);
+ _updateSession(opCtx);
try {
- insertCollectionEntry(opCtx, nss(), coll, getCurrentSession(_doc));
+ insertCollectionEntry(opCtx, nss(), coll, getCurrentSession());
notifyChangeStreamsOnShardCollection(opCtx, nss(), *_collectionUUID, _request.toBSON());
@@ -927,57 +878,4 @@ void CreateCollectionCoordinator::_logEndCreateCollection(OperationContext* opCt
opCtx, "shardCollection.end", nss().ns(), collectionDetail.obj());
}
-// Phase change API.
-
-void CreateCollectionCoordinator::_enterPhase(Phase newPhase) {
- CoordDoc newDoc(_doc);
- newDoc.setPhase(newPhase);
-
- LOGV2_DEBUG(5565600,
- 2,
- "Create collection coordinator phase transition",
- "namespace"_attr = nss(),
- "newPhase"_attr = CreateCollectionCoordinatorPhase_serializer(newDoc.getPhase()),
- "oldPhase"_attr = CreateCollectionCoordinatorPhase_serializer(_doc.getPhase()));
-
- if (_doc.getPhase() == Phase::kUnset) {
- newDoc = _insertStateDocument(std::move(newDoc));
- } else {
- newDoc = _updateStateDocument(cc().makeOperationContext().get(), std::move(newDoc));
- }
-
- {
- stdx::unique_lock ul{_docMutex};
- _doc = std::move(newDoc);
- }
-}
-
-const BSONObj CreateCollectionCoordinatorDocumentPre60Compatible::kPre60IncompatibleFields =
- BSON(CreateCollectionRequest::kCollectionUUIDFieldName
- << 1 << CreateCollectionRequest::kImplicitlyCreateIndexFieldName << 1
- << CreateCollectionRequest::kEnforceUniquenessCheckFieldName << 1);
-
-void CreateCollectionCoordinatorDocumentPre60Compatible::serialize(BSONObjBuilder* builder) const {
- BSONObjBuilder internalBuilder;
- CreateCollectionCoordinatorDocument::serialize(&internalBuilder);
- internalBuilder.asTempObj().filterFieldsUndotted(builder, kPre60IncompatibleFields, false);
-}
-
-BSONObj CreateCollectionCoordinatorDocumentPre60Compatible::toBSON() const {
- BSONObjBuilder builder;
- serialize(&builder);
- return builder.obj();
-}
-
-CreateCollectionCoordinatorPre60Compatible::CreateCollectionCoordinatorPre60Compatible(
- ShardingDDLCoordinatorService* service, const BSONObj& initialState)
- : CreateCollectionCoordinator(service, initialState),
- _critSecReason(
- BSON("command"
- << "createCollection"
- << "ns" << nss().toString() << "request"
- << _request.toBSON().filterFieldsUndotted(
- CreateCollectionCoordinatorDocumentPre60Compatible::kPre60IncompatibleFields,
- false))) {}
-
} // namespace mongo
diff --git a/src/mongo/db/s/create_collection_coordinator.h b/src/mongo/db/s/create_collection_coordinator.h
index 565972afcb1..a1f8bbea4e8 100644
--- a/src/mongo/db/s/create_collection_coordinator.h
+++ b/src/mongo/db/s/create_collection_coordinator.h
@@ -39,21 +39,26 @@
namespace mongo {
-class CreateCollectionCoordinator : public ShardingDDLCoordinator {
+class CreateCollectionCoordinator
+ : public RecoverableShardingDDLCoordinator<CreateCollectionCoordinatorDocument,
+ CreateCollectionCoordinatorPhaseEnum> {
public:
using CoordDoc = CreateCollectionCoordinatorDocument;
using Phase = CreateCollectionCoordinatorPhaseEnum;
- CreateCollectionCoordinator(ShardingDDLCoordinatorService* service,
- const BSONObj& initialState);
+ CreateCollectionCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState)
+ : RecoverableShardingDDLCoordinator(service, "CreateCollectionCoordinator", initialState),
+ _request(_doc.getCreateCollectionRequest()),
+ _critSecReason(BSON("command"
+ << "createCollection"
+ << "ns" << nss().toString())) {}
+
~CreateCollectionCoordinator() = default;
void checkIfOptionsConflict(const BSONObj& coorDoc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
/**
* Waits for the termination of the parent DDLCoordinator (so all the resources are released)
@@ -66,38 +71,16 @@ public:
}
protected:
- mutable Mutex _docMutex = MONGO_MAKE_LATCH("CreateCollectionCoordinator::_docMutex");
- CoordDoc _doc;
-
const mongo::CreateCollectionRequest _request;
private:
- ShardingDDLCoordinatorMetadata const& metadata() const override {
- return _doc.getShardingDDLCoordinatorMetadata();
+ StringData serializePhase(const Phase& phase) const override {
+ return CreateCollectionCoordinatorPhase_serializer(phase);
}
ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept override;
- template <typename Func>
- auto _executePhase(const Phase& newPhase, Func&& func) {
- return [=] {
- const auto& currPhase = _doc.getPhase();
-
- if (currPhase > newPhase) {
- // Do not execute this phase if we already reached a subsequent one.
- return;
- }
- if (currPhase < newPhase) {
- // Persist the new phase if this is the first time we are executing it.
- _enterPhase(newPhase);
- }
- return func();
- };
- };
-
- void _enterPhase(Phase newState);
-
/**
* Performs all required checks before acquiring the critical sections.
*/
@@ -128,7 +111,7 @@ private:
* participant shards.
*/
void _createCollectionOnNonPrimaryShards(OperationContext* opCtx,
- const boost::optional<OperationSessionInfo>& osi);
+ const OperationSessionInfo& osi);
/**
* Does the following writes:
@@ -147,16 +130,6 @@ private:
*/
void _logEndCreateCollection(OperationContext* opCtx);
- /**
- * Returns the BSONObj used as critical section reason
- *
- * TODO SERVER-64720 remove this function, directly access _critSecReason
- *
- */
- virtual const BSONObj& _getCriticalSectionReason() const {
- return _critSecReason;
- };
-
const BSONObj _critSecReason;
// The shard key of the collection, static for the duration of the coordinator and reflects the
@@ -177,32 +150,4 @@ private:
boost::optional<bool> _collectionEmpty;
};
-class CreateCollectionCoordinatorDocumentPre60Compatible final
- : public CreateCollectionCoordinatorDocument {
- // TODO SERVER-64720 remove once 6.0 becomes last LTS
-public:
- using CreateCollectionCoordinatorDocument::CreateCollectionCoordinatorDocument;
-
- static const BSONObj kPre60IncompatibleFields;
- void serialize(BSONObjBuilder* builder) const;
- BSONObj toBSON() const;
-};
-
-class CreateCollectionCoordinatorPre60Compatible final : public CreateCollectionCoordinator {
- // TODO SERVER-64720 remove once 6.0 becomes last LTS
-public:
- using CreateCollectionCoordinator::CreateCollectionCoordinator;
- using CoordDoc = CreateCollectionCoordinatorDocumentPre60Compatible;
-
- CreateCollectionCoordinatorPre60Compatible(ShardingDDLCoordinatorService* service,
- const BSONObj& initialState);
-
- virtual const BSONObj& _getCriticalSectionReason() const override {
- return _critSecReason;
- };
-
-private:
- const BSONObj _critSecReason;
-};
-
} // namespace mongo
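
Taken together with the .cpp deletions above, this header shows the shape of the refactor: reportForCurrentOp, _enterPhase, _executePhase, the _doc mutex, and the session bookkeeping all move into the templated RecoverableShardingDDLCoordinator base, leaving subclasses to supply serializePhase and appendCommandInfo. A self-contained sketch of the pattern; the names are simplified, persistence is elided, and the real base class (whose definition is not part of this diff) also owns the state-document storage and the retryable-session plumbing behind _updateSession()/getCurrentSession():

```cpp
#include <iostream>
#include <string>
#include <utility>

// Minimal model of a recoverable, phase-based DDL coordinator. Persistence of
// the state document is elided; the real class writes it to disk so that a
// restarted node can resume from the last completed phase.
template <typename StateDoc, typename PhaseEnum>
class RecoverableCoordinator {
public:
    explicit RecoverableCoordinator(std::string name) : _name(std::move(name)) {}
    virtual ~RecoverableCoordinator() = default;

protected:
    // Runs func only if this phase has not already completed; records the
    // transition the first time the phase is entered.
    template <typename Func>
    void _executePhase(PhaseEnum newPhase, Func&& func) {
        if (_doc.phase > newPhase)
            return;  // already past this phase (e.g., resuming after a crash)
        if (_doc.phase < newPhase)
            _enterPhase(newPhase);
        func();
    }

    // The only hook a subclass must still provide for phase logging.
    virtual std::string serializePhase(const PhaseEnum& phase) const = 0;

    StateDoc _doc{};

private:
    void _enterPhase(PhaseEnum newPhase) {
        std::cout << _name << " phase transition -> " << serializePhase(newPhase) << "\n";
        _doc.phase = newPhase;  // the real coordinator persists _doc here
    }

    std::string _name;
};

enum class Phase { kUnset, kCheck, kCommit };
struct Doc {
    Phase phase = Phase::kUnset;
};

class DemoCoordinator : public RecoverableCoordinator<Doc, Phase> {
public:
    DemoCoordinator() : RecoverableCoordinator("DemoCoordinator") {}
    void run() {
        _executePhase(Phase::kCheck, [] { std::cout << "checking\n"; });
        _executePhase(Phase::kCommit, [] { std::cout << "committing\n"; });
    }

private:
    std::string serializePhase(const Phase& phase) const override {
        switch (phase) {
            case Phase::kUnset:
                return "unset";
            case Phase::kCheck:
                return "check";
            case Phase::kCommit:
                return "commit";
        }
        return "unknown";
    }
};

int main() {
    DemoCoordinator{}.run();
    return 0;
}
```

The same consolidation explains the repeated mechanical edits in the coordinator .cpp files: `_doc = _updateSession(opCtx, _doc)` becomes `_updateSession(opCtx)` and `getCurrentSession(_doc)` becomes `getCurrentSession()`, because the document now lives in the base.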
diff --git a/src/mongo/db/s/create_collection_coordinator_test.cpp b/src/mongo/db/s/create_collection_coordinator_test.cpp
deleted file mode 100644
index 772ac8933a1..00000000000
--- a/src/mongo/db/s/create_collection_coordinator_test.cpp
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Copyright (C) 2022-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/unittest/unittest.h"
-
-#include "mongo/db/s/create_collection_coordinator.h"
-
-namespace mongo {
-namespace {
-
-static const auto kShardKey = BSON("x" << 1);
-static const NamespaceString kNs{"db.test"};
-
-TEST(CreateCollectionCoordinator, pre60CompatibleGetters) {
- const auto kUUID = UUID::gen();
-
- auto req = [&] {
- CreateCollectionRequest creq;
- creq.setShardKey(kShardKey.getOwned());
- creq.setCollectionUUID(kUUID);
- creq.setImplicitlyCreateIndex(false);
- creq.setEnforceUniquenessCheck(false);
- return creq;
- };
-
- auto pre60CompatDoc = [&] {
- auto doc = CreateCollectionCoordinatorDocumentPre60Compatible();
- doc.setShardingDDLCoordinatorMetadata(
- {{kNs, DDLCoordinatorTypeEnum::kCreateCollectionPre60Compatible}});
- doc.setCreateCollectionRequest(req());
- return doc;
- }();
-
- auto latestDoc = [&] {
- auto doc = CreateCollectionCoordinatorDocument();
- doc.setShardingDDLCoordinatorMetadata({{kNs, DDLCoordinatorTypeEnum::kCreateCollection}});
- doc.setCreateCollectionRequest(req());
- return doc;
- }();
-
- ASSERT(pre60CompatDoc.getShardKey());
- ASSERT(latestDoc.getShardKey());
- ASSERT_BSONOBJ_EQ(*pre60CompatDoc.getShardKey(), *latestDoc.getShardKey());
- ASSERT(pre60CompatDoc.getCollectionUUID());
- ASSERT(latestDoc.getCollectionUUID());
- ASSERT_EQ(*pre60CompatDoc.getCollectionUUID(), *latestDoc.getCollectionUUID());
- ASSERT_EQ(pre60CompatDoc.getImplicitlyCreateIndex(), latestDoc.getImplicitlyCreateIndex());
- ASSERT_EQ(pre60CompatDoc.getEnforceUniquenessCheck(), latestDoc.getEnforceUniquenessCheck());
-}
-
-TEST(CreateCollectionCoordinator, pre60CompatibleSerialization) {
- auto req = [&] {
- CreateCollectionRequest creq;
- creq.setShardKey(kShardKey.getOwned());
- creq.setCollectionUUID(UUID::gen());
- creq.setImplicitlyCreateIndex(false);
- creq.setEnforceUniquenessCheck(false);
- return creq;
- };
-
- auto pre60CompatDoc = [&] {
- auto doc = CreateCollectionCoordinatorDocumentPre60Compatible();
- doc.setShardingDDLCoordinatorMetadata(
- {{kNs, DDLCoordinatorTypeEnum::kCreateCollectionPre60Compatible}});
- doc.setCreateCollectionRequest(req());
- return doc;
- }();
-
- BSONObjBuilder builder;
- pre60CompatDoc.serialize(&builder);
- auto serialized = builder.asTempObj();
-
- ASSERT_BSONOBJ_EQ(
- BSONObj{},
- serialized.extractFieldsUndotted(
- CreateCollectionCoordinatorDocumentPre60Compatible::kPre60IncompatibleFields));
-}
-
-TEST(CreateCollectionCoordinator, pre60CompatibleToBSON) {
-
- auto req = [&] {
- CreateCollectionRequest creq;
- creq.setShardKey(kShardKey.getOwned());
- creq.setCollectionUUID(UUID::gen());
- creq.setImplicitlyCreateIndex(false);
- creq.setEnforceUniquenessCheck(false);
- return creq;
- };
-
- auto pre60CompatDoc = [&] {
- auto doc = CreateCollectionCoordinatorDocumentPre60Compatible();
- doc.setShardingDDLCoordinatorMetadata(
- {{kNs, DDLCoordinatorTypeEnum::kCreateCollectionPre60Compatible}});
- doc.setCreateCollectionRequest(req());
- return doc;
- }();
-
- auto serialized = pre60CompatDoc.toBSON();
-
- ASSERT_BSONOBJ_EQ(
- BSONObj{},
- serialized.extractFieldsUndotted(
- CreateCollectionCoordinatorDocumentPre60Compatible::kPre60IncompatibleFields));
-}
-
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/db/s/database_sharding_state.cpp b/src/mongo/db/s/database_sharding_state.cpp
index ad18b8b9526..776b23857d0 100644
--- a/src/mongo/db/s/database_sharding_state.cpp
+++ b/src/mongo/db/s/database_sharding_state.cpp
@@ -185,9 +185,12 @@ void DatabaseShardingState::checkDbVersion(OperationContext* opCtx, DSSLock&) co
auto criticalSectionSignal = _critSec.getSignal(
opCtx->lockState()->isWriteLocked() ? ShardingMigrationCriticalSection::kWrite
: ShardingMigrationCriticalSection::kRead);
+ const std::string reason =
+ _critSec.getReason() ? _critSec.getReason()->toString() : "unknown";
uassert(
StaleDbRoutingVersion(_dbName, *clientDbVersion, boost::none, criticalSectionSignal),
- str::stream() << "movePrimary commit in progress for " << _dbName,
+ str::stream() << "The critical section for " << _dbName
+ << " is acquired with reason: " << reason,
!criticalSectionSignal);
}
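
The reworked uassert no longer hard-codes movePrimary as the cause: it reports whatever reason was recorded when the critical section was taken, falling back to "unknown". A small sketch of the message construction (std::optional standing in for the boost::optional used in the tree):

```cpp
#include <iostream>
#include <optional>
#include <sstream>
#include <string>

// The reason recorded at acquisition time, when present, names the operation
// that holds the critical section; "unknown" is the fallback.
std::string staleDbVersionMessage(const std::string& dbName,
                                  const std::optional<std::string>& reason) {
    std::ostringstream ss;
    ss << "The critical section for " << dbName
       << " is acquired with reason: " << reason.value_or("unknown");
    return ss.str();
}

int main() {
    std::cout << staleDbVersionMessage("test", std::nullopt) << "\n";
    std::cout << staleDbVersionMessage("test", std::string{"dropDatabase"}) << "\n";
    return 0;
}
```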
diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp
index 7dabcae0f16..fa1e2f4b84e 100644
--- a/src/mongo/db/s/drop_collection_coordinator.cpp
+++ b/src/mongo/db/s/drop_collection_coordinator.cpp
@@ -47,37 +47,6 @@
namespace mongo {
-DropCollectionCoordinator::DropCollectionCoordinator(ShardingDDLCoordinatorService* service,
- const BSONObj& initialState)
- : ShardingDDLCoordinator(service, initialState),
- _doc(DropCollectionCoordinatorDocument::parse(
- IDLParserErrorContext("DropCollectionCoordinatorDocument"), initialState)) {}
-
-boost::optional<BSONObj> DropCollectionCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
-
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
-
- const auto currPhase = [&]() {
- stdx::lock_guard l{_docMutex};
- return _doc.getPhase();
- }();
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "DropCollectionCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
- return bob.obj();
-}
-
DropReply DropCollectionCoordinator::dropCollectionLocally(OperationContext* opCtx,
const NamespaceString& nss) {
{
@@ -101,29 +70,6 @@ DropReply DropCollectionCoordinator::dropCollectionLocally(OperationContext* opC
return result;
}
-void DropCollectionCoordinator::_enterPhase(Phase newPhase) {
- StateDoc newDoc(_doc);
- newDoc.setPhase(newPhase);
-
- LOGV2_DEBUG(5390501,
- 2,
- "Drop collection coordinator phase transition",
- "namespace"_attr = nss(),
- "newPhase"_attr = DropCollectionCoordinatorPhase_serializer(newDoc.getPhase()),
- "oldPhase"_attr = DropCollectionCoordinatorPhase_serializer(_doc.getPhase()));
-
- if (_doc.getPhase() == Phase::kUnset) {
- newDoc = _insertStateDocument(std::move(newDoc));
- } else {
- newDoc = _updateStateDocument(cc().makeOperationContext().get(), std::move(newDoc));
- }
-
- {
- stdx::unique_lock ul{_docMutex};
- _doc = std::move(newDoc);
- }
-}
-
ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept {
@@ -161,7 +107,7 @@ ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
// Persist the collection info before sticking to using its uuid. This ensures this
// node is still the RS primary, so it was also the primary at the moment we read
// the collection metadata.
- _doc = _updateStateDocument(opCtx, StateDoc(_doc));
+ _updateStateDocument(opCtx, StateDoc(_doc));
if (_doc.getCollInfo()) {
sharding_ddl_util::stopMigrations(opCtx, nss(), _doc.getCollInfo()->getUuid());
@@ -178,9 +124,9 @@ ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
// Perform a noop write on the participants in order to advance the txnNumber
// for this coordinator's lsid so that requests with older txnNumbers can no
// longer execute.
- _doc = _updateSession(opCtx, _doc);
+ _updateSession(opCtx);
_performNoopRetryableWriteOnAllShardsAndConfigsvr(
- opCtx, getCurrentSession(_doc), **executor);
+ opCtx, getCurrentSession(), **executor);
}
const auto collIsSharded = bool(_doc.getCollInfo());
@@ -199,12 +145,11 @@ ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
}
// Remove tags even if the collection is not sharded or didn't exist
- _doc = _updateSession(opCtx, _doc);
- sharding_ddl_util::removeTagsMetadataFromConfig(
- opCtx, nss(), getCurrentSession(_doc));
+ _updateSession(opCtx);
+ sharding_ddl_util::removeTagsMetadataFromConfig(opCtx, nss(), getCurrentSession());
// Get an lsid and an incremented txnNumber. This ensures we are still the primary.
- _doc = _updateSession(opCtx, _doc);
+ _updateSession(opCtx);
const auto primaryShardId = ShardingState::get(opCtx)->shardId();
@@ -217,13 +162,13 @@ ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
participants.end());
sharding_ddl_util::sendDropCollectionParticipantCommandToShards(
- opCtx, nss(), participants, **executor, getCurrentSession(_doc));
+ opCtx, nss(), participants, **executor, getCurrentSession());
// The sharded collection must be dropped on the primary shard after it has been
// dropped on all of the other shards to ensure it can only be re-created as
// unsharded with a higher optime than all of the drops.
sharding_ddl_util::sendDropCollectionParticipantCommandToShards(
- opCtx, nss(), {primaryShardId}, **executor, getCurrentSession(_doc));
+ opCtx, nss(), {primaryShardId}, **executor, getCurrentSession());
ShardingLogging::get(opCtx)->logChange(opCtx, "dropCollection", nss().ns());
LOGV2(5390503, "Collection dropped", "namespace"_attr = nss());
diff --git a/src/mongo/db/s/drop_collection_coordinator.h b/src/mongo/db/s/drop_collection_coordinator.h
index 140013e41e1..46b37d2a415 100644
--- a/src/mongo/db/s/drop_collection_coordinator.h
+++ b/src/mongo/db/s/drop_collection_coordinator.h
@@ -35,20 +35,20 @@
#include "mongo/db/s/sharding_ddl_coordinator.h"
namespace mongo {
-class DropCollectionCoordinator final : public ShardingDDLCoordinator {
+class DropCollectionCoordinator final
+ : public RecoverableShardingDDLCoordinator<DropCollectionCoordinatorDocument,
+ DropCollectionCoordinatorPhaseEnum> {
public:
using StateDoc = DropCollectionCoordinatorDocument;
using Phase = DropCollectionCoordinatorPhaseEnum;
- DropCollectionCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState);
+ DropCollectionCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState)
+ : RecoverableShardingDDLCoordinator(service, "DropCollectionCoordinator", initialState) {}
+
~DropCollectionCoordinator() = default;
void checkIfOptionsConflict(const BSONObj& doc) const override {}
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
-
/**
* Locally drops a collection, cleans its CollectionShardingRuntime metadata and refreshes the
* catalog cache.
@@ -56,34 +56,12 @@ public:
static DropReply dropCollectionLocally(OperationContext* opCtx, const NamespaceString& nss);
private:
- ShardingDDLCoordinatorMetadata const& metadata() const override {
- return _doc.getShardingDDLCoordinatorMetadata();
+ StringData serializePhase(const Phase& phase) const override {
+ return DropCollectionCoordinatorPhase_serializer(phase);
}
ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept override;
-
- template <typename Func>
- auto _executePhase(const Phase& newPhase, Func&& func) {
- return [=] {
- const auto& currPhase = _doc.getPhase();
-
- if (currPhase > newPhase) {
- // Do not execute this phase if we already reached a subsequent one.
- return;
- }
- if (currPhase < newPhase) {
- // Persist the new phase if this is the first time we are executing it.
- _enterPhase(newPhase);
- }
- return func();
- };
- }
-
- void _enterPhase(Phase newPhase);
-
- mutable Mutex _docMutex = MONGO_MAKE_LATCH("DropCollectionCoordinator::_docMutex");
- DropCollectionCoordinatorDocument _doc;
};
} // namespace mongo
diff --git a/src/mongo/db/s/drop_database_coordinator.cpp b/src/mongo/db/s/drop_database_coordinator.cpp
index b49e36d302c..54b8ef1108e 100644
--- a/src/mongo/db/s/drop_database_coordinator.cpp
+++ b/src/mongo/db/s/drop_database_coordinator.cpp
@@ -122,11 +122,11 @@ void DropDatabaseCoordinator::_dropShardedCollection(
sharding_ddl_util::removeCollAndChunksMetadataFromConfig(
opCtx, coll, ShardingCatalogClient::kMajorityWriteConcern);
- _doc = _updateSession(opCtx, _doc);
- sharding_ddl_util::removeTagsMetadataFromConfig(opCtx, nss, getCurrentSession(_doc));
+ _updateSession(opCtx);
+ sharding_ddl_util::removeTagsMetadataFromConfig(opCtx, nss, getCurrentSession());
const auto primaryShardId = ShardingState::get(opCtx)->shardId();
- _doc = _updateSession(opCtx, _doc);
+ _updateSession(opCtx);
// We need to send the drop to all the shards because both movePrimary and
// moveChunk leave garbage behind for sharded collections.
@@ -135,67 +135,13 @@ void DropDatabaseCoordinator::_dropShardedCollection(
participants.erase(std::remove(participants.begin(), participants.end(), primaryShardId),
participants.end());
sharding_ddl_util::sendDropCollectionParticipantCommandToShards(
- opCtx, nss, participants, **executor, getCurrentSession(_doc));
+ opCtx, nss, participants, **executor, getCurrentSession());
// The sharded collection must be dropped on the primary shard after it has been dropped on all
// of the other shards to ensure it can only be re-created as unsharded with a higher optime
// than all of the drops.
sharding_ddl_util::sendDropCollectionParticipantCommandToShards(
- opCtx, nss, {primaryShardId}, **executor, getCurrentSession(_doc));
-}
-
-DropDatabaseCoordinator::DropDatabaseCoordinator(ShardingDDLCoordinatorService* service,
- const BSONObj& initialState)
- : ShardingDDLCoordinator(service, initialState),
- _doc(DropDatabaseCoordinatorDocument::parse(
- IDLParserErrorContext("DropDatabaseCoordinatorDocument"), initialState)),
- _dbName(nss().db()) {}
-
-boost::optional<BSONObj> DropDatabaseCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
-
- const auto currPhase = [&]() {
- stdx::lock_guard l{_docMutex};
- return _doc.getPhase();
- }();
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "DropDatabaseCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
- return bob.obj();
-}
-
-void DropDatabaseCoordinator::_enterPhase(Phase newPhase) {
- StateDoc newDoc(_doc);
- newDoc.setPhase(newPhase);
-
- LOGV2_DEBUG(5494501,
- 2,
- "Drop database coordinator phase transition",
- "db"_attr = _dbName,
- "newPhase"_attr = DropDatabaseCoordinatorPhase_serializer(newDoc.getPhase()),
- "oldPhase"_attr = DropDatabaseCoordinatorPhase_serializer(_doc.getPhase()));
-
- if (_doc.getPhase() == Phase::kUnset) {
- newDoc = _insertStateDocument(std::move(newDoc));
- } else {
- newDoc = _updateStateDocument(cc().makeOperationContext().get(), std::move(newDoc));
- }
-
- {
- stdx::unique_lock ul{_docMutex};
- _doc = std::move(newDoc);
- }
+ opCtx, nss, {primaryShardId}, **executor, getCurrentSession());
}
void DropDatabaseCoordinator::_clearDatabaseInfoOnPrimary(OperationContext* opCtx) {
@@ -238,9 +184,9 @@ ExecutorFuture<void> DropDatabaseCoordinator::_runImpl(
// Perform a noop write on the participants in order to advance the txnNumber
// for this coordinator's lsid so that requests with older txnNumbers can no
// longer execute.
- _doc = _updateSession(opCtx, _doc);
+ _updateSession(opCtx);
_performNoopRetryableWriteOnAllShardsAndConfigsvr(
- opCtx, getCurrentSession(_doc), **executor);
+ opCtx, getCurrentSession(), **executor);
}
ShardingLogging::get(opCtx)->logChange(opCtx, "dropDatabase.start", _dbName);
@@ -284,7 +230,7 @@ ExecutorFuture<void> DropDatabaseCoordinator::_runImpl(
auto newStateDoc = _doc;
newStateDoc.setCollInfo(coll);
- _doc = _updateStateDocument(opCtx, std::move(newStateDoc));
+ _updateStateDocument(opCtx, std::move(newStateDoc));
_dropShardedCollection(opCtx, coll, executor);
}
diff --git a/src/mongo/db/s/drop_database_coordinator.h b/src/mongo/db/s/drop_database_coordinator.h
index 47d63310a19..f70ea2981cb 100644
--- a/src/mongo/db/s/drop_database_coordinator.h
+++ b/src/mongo/db/s/drop_database_coordinator.h
@@ -34,48 +34,29 @@
namespace mongo {
-class DropDatabaseCoordinator final : public ShardingDDLCoordinator {
+class DropDatabaseCoordinator final
+ : public RecoverableShardingDDLCoordinator<DropDatabaseCoordinatorDocument,
+ DropDatabaseCoordinatorPhaseEnum> {
+
public:
using StateDoc = DropDatabaseCoordinatorDocument;
using Phase = DropDatabaseCoordinatorPhaseEnum;
- DropDatabaseCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState);
+ DropDatabaseCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState)
+ : RecoverableShardingDDLCoordinator(service, "DropDatabaseCoordinator", initialState),
+ _dbName(nss().db()) {}
~DropDatabaseCoordinator() = default;
void checkIfOptionsConflict(const BSONObj& doc) const override {}
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
-
private:
- ShardingDDLCoordinatorMetadata const& metadata() const override {
- stdx::lock_guard l{_docMutex};
- return _doc.getShardingDDLCoordinatorMetadata();
+ StringData serializePhase(const Phase& phase) const override {
+ return DropDatabaseCoordinatorPhase_serializer(phase);
}
ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept override;
- template <typename Func>
- auto _executePhase(const Phase& newPhase, Func&& func) {
- return [=] {
- const auto& currPhase = _doc.getPhase();
-
- if (currPhase > newPhase) {
- // Do not execute this phase if we already reached a subsequent one.
- return;
- }
- if (currPhase < newPhase) {
- // Persist the new phase if this is the first time we are executing it.
- _enterPhase(newPhase);
- }
- return func();
- };
- }
-
- void _enterPhase(Phase newPhase);
-
void _dropShardedCollection(OperationContext* opCtx,
const CollectionType& coll,
std::shared_ptr<executor::ScopedTaskExecutor> executor);
@@ -84,10 +65,6 @@ private:
void _clearDatabaseInfoOnSecondaries(OperationContext* opCtx);
- mutable Mutex _docMutex = MONGO_MAKE_LATCH("DropDatabaseCoordinator::_docMutex");
- DropDatabaseCoordinatorDocument _doc;
-
-
StringData _dbName;
};
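The _updateSession/getCurrentSession pairs above replace the old pattern of threading the state document through every call. Each retryable step advances the txnNumber under the coordinator's logical session, so requests carrying an older number can no longer execute. A toy model of that bookkeeping, with simplified stand-in types:

    #include <cstdint>
    #include <iostream>
    #include <string>

    struct OperationSessionInfo {
        std::string lsid;  // logical session id, simplified to a string
        std::int64_t txnNumber = 0;
    };

    struct SessionHolder {
        OperationSessionInfo _session{"ddl-coordinator-lsid", 0};

        // Each phase that issues a retryable write advances the txnNumber, so
        // any request still in flight with an older number becomes stale.
        void updateSession() { ++_session.txnNumber; }

        const OperationSessionInfo& getCurrentSession() const { return _session; }
    };

    int main() {
        SessionHolder h;
        h.updateSession();  // phase 1
        std::cout << h.getCurrentSession().txnNumber << '\n';  // 1
        h.updateSession();  // phase 2: a retry of phase 1 is now rejected
        std::cout << h.getCurrentSession().txnNumber << '\n';  // 2
    }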
diff --git a/src/mongo/db/s/flush_resharding_state_change_command.cpp b/src/mongo/db/s/flush_resharding_state_change_command.cpp
index 85f0c42cff0..95439564643 100644
--- a/src/mongo/db/s/flush_resharding_state_change_command.cpp
+++ b/src/mongo/db/s/flush_resharding_state_change_command.cpp
@@ -131,7 +131,7 @@ public:
.getAsync([](auto) {});
// Ensure the command isn't run on a stale primary.
- doNoopWrite(opCtx, "_flushReshardingStateChange no-op", ns());
+ resharding::doNoopWrite(opCtx, "_flushReshardingStateChange no-op", ns());
}
};
} _flushReshardingStateChange;
diff --git a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
index 9ea3f94af97..f85f73c0ef4 100644
--- a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
+++ b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
@@ -117,7 +117,8 @@ public:
boost::optional<SharedSemiFuture<void>> criticalSectionSignal;
{
- AutoGetCollection autoColl(opCtx, ns(), MODE_IS);
+ AutoGetCollection autoColl(
+ opCtx, ns(), MODE_IS, AutoGetCollectionViewMode::kViewsPermitted);
// If the primary is in the critical section, secondaries must wait for the commit
// to finish on the primary in case a secondary's caller has an afterClusterTime
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 2f39ef09147..a842e4cfe03 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include <boost/optional.hpp>
#include "mongo/bson/bsonobjbuilder.h"
@@ -93,7 +91,7 @@ protected:
boost::none,
boost::none /* chunkSizeBytes */,
true,
- {ChunkType{uuid, range, ChunkVersion(1, 0, epoch, Timestamp(1, 1)), kOtherShard}});
+ {ChunkType{uuid, range, ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}), kOtherShard}});
return CollectionMetadata(ChunkManager(kThisShard,
DatabaseVersion(UUID::gen(), Timestamp(1, 1)),
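The ChunkVersion construction changes repeated throughout this diff regroup the arguments: the {epoch, timestamp} pair identifying the collection generation comes first, the {major, minor} counters second. A compilable sketch of the new call shape; the types below are stand-ins, not the real mongo classes:

    #include <cstdint>
    #include <iostream>
    #include <utility>

    struct OID { std::uint64_t bits = 0; };           // stand-in for mongo::OID
    struct Timestamp { unsigned secs = 0, inc = 0; };

    struct CollectionGeneration {                     // the {epoch, timestamp} pair
        OID epoch;
        Timestamp ts;
    };

    struct ChunkVersion {
        CollectionGeneration gen;
        std::uint32_t major = 0, minor = 0;
        ChunkVersion(CollectionGeneration g, std::pair<std::uint32_t, std::uint32_t> v)
            : gen(g), major(v.first), minor(v.second) {}
    };

    int main() {
        // Old call shape: ChunkVersion(1, 0, epoch, Timestamp(1, 1))
        // New call shape, mirroring the diff:
        ChunkVersion cv({OID{}, Timestamp{1, 1}}, {1, 0});
        std::cout << cv.major << '.' << cv.minor << '\n';  // 1.0
    }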
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 462baee5069..1a76fd9a5a5 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -300,12 +300,12 @@ Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* opCtx,
opCtx->recoveryUnit()->setPrepareConflictBehavior(
PrepareConflictBehavior::kIgnoreConflicts);
- auto storeCurrentLocsStatus = _storeCurrentLocs(opCtx);
- if (storeCurrentLocsStatus == ErrorCodes::ChunkTooBig && _forceJumbo) {
+ auto storeCurrentRecordIdStatus = _storeCurrentRecordId(opCtx);
+ if (storeCurrentRecordIdStatus == ErrorCodes::ChunkTooBig && _forceJumbo) {
stdx::lock_guard<Latch> sl(_mutex);
_jumboChunkCloneState.emplace();
- } else if (!storeCurrentLocsStatus.isOK()) {
- return storeCurrentLocsStatus;
+ } else if (!storeCurrentRecordIdStatus.isOK()) {
+ return storeCurrentRecordIdStatus;
}
}
@@ -381,7 +381,7 @@ StatusWith<BSONObj> MigrationChunkClonerSourceLegacy::commitClone(OperationConte
}
} else {
invariant(PlanExecutor::IS_EOF == _jumboChunkCloneState->clonerState);
- invariant(_cloneLocs.empty());
+ invariant(_cloneRecordIds.empty());
}
}
@@ -680,17 +680,16 @@ void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromIndexScan(OperationCon
_jumboChunkCloneState->clonerExec->detachFromOperationContext();
}
-void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromCloneLocs(OperationContext* opCtx,
- const CollectionPtr& collection,
- BSONArrayBuilder* arrBuilder) {
+void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromCloneRecordIds(
+ OperationContext* opCtx, const CollectionPtr& collection, BSONArrayBuilder* arrBuilder) {
ElapsedTracker tracker(opCtx->getServiceContext()->getFastClockSource(),
internalQueryExecYieldIterations.load(),
Milliseconds(internalQueryExecYieldPeriodMS.load()));
stdx::unique_lock<Latch> lk(_mutex);
- auto iter = _cloneLocs.begin();
+ auto iter = _cloneRecordIds.begin();
- for (; iter != _cloneLocs.end(); ++iter) {
+ for (; iter != _cloneRecordIds.end(); ++iter) {
        // We must always make progress in this method by at least one document because an
        // empty return indicates there is no more initial clone data.
if (arrBuilder->arrSize() && tracker.intervalHasElapsed()) {
@@ -718,7 +717,7 @@ void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromCloneLocs(OperationCon
lk.lock();
}
- _cloneLocs.erase(_cloneLocs.begin(), iter);
+ _cloneRecordIds.erase(_cloneRecordIds.begin(), iter);
}
uint64_t MigrationChunkClonerSourceLegacy::getCloneBatchBufferAllocationSize() {
@@ -727,7 +726,7 @@ uint64_t MigrationChunkClonerSourceLegacy::getCloneBatchBufferAllocationSize() {
return static_cast<uint64_t>(BSONObjMaxUserSize);
return std::min(static_cast<uint64_t>(BSONObjMaxUserSize),
- _averageObjectSizeForCloneLocs * _cloneLocs.size());
+ _averageObjectSizeForCloneRecordIds * _cloneRecordIds.size());
}
Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* opCtx,
@@ -735,8 +734,8 @@ Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* opCtx,
BSONArrayBuilder* arrBuilder) {
dassert(opCtx->lockState()->isCollectionLockedForMode(nss(), MODE_IS));
- // If this chunk is too large to store records in _cloneLocs and the command args specify to
- // attempt to move it, scan the collection directly.
+ // If this chunk is too large to store records in _cloneRecordIds and the command args specify
+ // to attempt to move it, scan the collection directly.
if (_jumboChunkCloneState && _forceJumbo) {
try {
_nextCloneBatchFromIndexScan(opCtx, collection, arrBuilder);
@@ -746,12 +745,11 @@ Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* opCtx,
}
}
- _nextCloneBatchFromCloneLocs(opCtx, collection, arrBuilder);
+ _nextCloneBatchFromCloneRecordIds(opCtx, collection, arrBuilder);
return Status::OK();
}
Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
- Database* db,
BSONObjBuilder* builder) {
dassert(opCtx->lockState()->isCollectionLockedForMode(nss(), MODE_IS));
@@ -761,7 +759,7 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
{
// All clone data must have been drained before starting to fetch the incremental changes.
stdx::unique_lock<Latch> lk(_mutex);
- invariant(_cloneLocs.empty());
+ invariant(_cloneRecordIds.empty());
// The "snapshot" for delete and update list must be taken under a single lock. This is to
// ensure that we will preserve the causal order of writes. Always consume the delete
@@ -784,8 +782,8 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
if (deleteList.empty()) {
BSONArrayBuilder arrUpd(builder->subarrayStart("reload"));
- auto findByIdWrapper = [opCtx, db, ns](BSONObj idDoc, BSONObj* fullDoc) {
- return Helpers::findById(opCtx, db, ns, idDoc, *fullDoc);
+ auto findByIdWrapper = [opCtx, ns](BSONObj idDoc, BSONObj* fullDoc) {
+ return Helpers::findById(opCtx, ns, idDoc, *fullDoc);
};
totalDocSize = xferMods(&arrUpd, &updateList, totalDocSize, findByIdWrapper);
arrUpd.done();
@@ -874,7 +872,7 @@ MigrationChunkClonerSourceLegacy::_getIndexScanExecutor(
if (!shardKeyIdx) {
return {ErrorCodes::IndexNotFound,
str::stream() << "can't find index with prefix " << _shardKeyPattern.toBSON()
- << " in storeCurrentLocs for " << nss().ns()};
+ << " in storeCurrentRecordId for " << nss().ns()};
}
// Assume both min and max non-empty, append MinKey's to make them fit chosen index
@@ -896,7 +894,7 @@ MigrationChunkClonerSourceLegacy::_getIndexScanExecutor(
scanOption);
}
-Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opCtx) {
+Status MigrationChunkClonerSourceLegacy::_storeCurrentRecordId(OperationContext* opCtx) {
AutoGetCollection collection(opCtx, nss(), MODE_IS);
if (!collection) {
return {ErrorCodes::NamespaceNotFound,
@@ -948,14 +946,14 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
if (!isLargeChunk) {
stdx::lock_guard<Latch> lk(_mutex);
- _cloneLocs.insert(recordId);
+ _cloneRecordIds.insert(recordId);
}
if (++recCount > maxRecsWhenFull) {
isLargeChunk = true;
if (_forceJumbo) {
- _cloneLocs.clear();
+ _cloneRecordIds.clear();
break;
}
}
@@ -975,7 +973,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
const auto idIdx = collection->getIndexCatalog()->findIdIndex(opCtx)->getEntry();
if (!idIdx) {
return {ErrorCodes::IndexNotFound,
- str::stream() << "can't find index '_id' in storeCurrentLocs for "
+ str::stream() << "can't find index '_id' in storeCurrentRecordId for "
<< nss().ns()};
}
averageObjectIdSize = idIdx->accessMethod()->getSpaceUsedBytes(opCtx) / totalRecs;
@@ -992,7 +990,7 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
}
stdx::lock_guard<Latch> lk(_mutex);
- _averageObjectSizeForCloneLocs = collectionAverageObjectSize + defaultObjectIdSize;
+ _averageObjectSizeForCloneRecordIds = collectionAverageObjectSize + defaultObjectIdSize;
_averageObjectIdSize = std::max(averageObjectIdSize, defaultObjectIdSize);
return Status::OK();
}
@@ -1057,9 +1055,9 @@ Status MigrationChunkClonerSourceLegacy::_checkRecipientCloningStatus(OperationC
stdx::lock_guard<Latch> sl(_mutex);
- const std::size_t cloneLocsRemaining = _cloneLocs.size();
+ const std::size_t cloneRecordIdsRemaining = _cloneRecordIds.size();
int64_t untransferredModsSizeBytes = _untransferredDeletesCounter * _averageObjectIdSize +
- _untransferredUpsertsCounter * _averageObjectSizeForCloneLocs;
+ _untransferredUpsertsCounter * _averageObjectSizeForCloneRecordIds;
if (_forceJumbo && _jumboChunkCloneState) {
LOGV2(21992,
@@ -1079,13 +1077,13 @@ Status MigrationChunkClonerSourceLegacy::_checkRecipientCloningStatus(OperationC
"moveChunk data transfer progress",
"response"_attr = redact(res),
"memoryUsedBytes"_attr = _memoryUsed,
- "docsRemainingToClone"_attr = cloneLocsRemaining,
+ "docsRemainingToClone"_attr = cloneRecordIdsRemaining,
"untransferredModsSizeBytes"_attr = untransferredModsSizeBytes);
}
if (res["state"].String() == "steady" && sessionCatalogSourceInCatchupPhase &&
estimateUntransferredSessionsSize == 0) {
- if (cloneLocsRemaining != 0 ||
+ if (cloneRecordIdsRemaining != 0 ||
(_jumboChunkCloneState && _forceJumbo &&
PlanExecutor::IS_EOF != _jumboChunkCloneState->clonerState)) {
return {ErrorCodes::OperationIncomplete,
@@ -1124,7 +1122,8 @@ Status MigrationChunkClonerSourceLegacy::_checkRecipientCloningStatus(OperationC
"moveChunk data transfer within threshold to allow write blocking",
"_untransferredUpsertsCounter"_attr = _untransferredUpsertsCounter,
"_untransferredDeletesCounter"_attr = _untransferredDeletesCounter,
- "_averageObjectSizeForCloneLocs"_attr = _averageObjectSizeForCloneLocs,
+ "_averageObjectSizeForCloneRecordIds"_attr =
+ _averageObjectSizeForCloneRecordIds,
"_averageObjectIdSize"_attr = _averageObjectIdSize,
"untransferredModsSizeBytes"_attr = untransferredModsSizeBytes,
"untransferredSessionDataInBytes"_attr = estimateUntransferredSessionsSize,
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index 8c15fa7a0cb..1912c947ad9 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -174,7 +174,7 @@ public:
*
* NOTE: Must be called with the collection lock held in at least IS mode.
*/
- Status nextModsBatch(OperationContext* opCtx, Database* db, BSONObjBuilder* builder);
+ Status nextModsBatch(OperationContext* opCtx, BSONObjBuilder* builder);
/**
* Appends to 'arrBuilder' oplog entries which wrote to the currently migrated chunk and contain
@@ -248,17 +248,17 @@ private:
const CollectionPtr& collection,
BSONArrayBuilder* arrBuilder);
- void _nextCloneBatchFromCloneLocs(OperationContext* opCtx,
- const CollectionPtr& collection,
- BSONArrayBuilder* arrBuilder);
+ void _nextCloneBatchFromCloneRecordIds(OperationContext* opCtx,
+ const CollectionPtr& collection,
+ BSONArrayBuilder* arrBuilder);
/**
- * Get the disklocs that belong to the chunk migrated and sort them in _cloneLocs (to avoid
- * seeking disk later).
+     * Get the recordIds that belong to the migrated chunk and sort them in _cloneRecordIds
+     * (to avoid disk seeks later).
*
* Returns OK or any error status otherwise.
*/
- Status _storeCurrentLocs(OperationContext* opCtx);
+ Status _storeCurrentRecordId(OperationContext* opCtx);
/**
* Adds the OpTime to the list of OpTimes for oplog entries that we should consider migrating as
@@ -349,11 +349,11 @@ private:
State _state{kNew};
    // List of record ids that need to be transferred (initial clone)
- std::set<RecordId> _cloneLocs;
+ std::set<RecordId> _cloneRecordIds;
// The estimated average object size during the clone phase. Used for buffer size
// pre-allocation (initial clone).
- uint64_t _averageObjectSizeForCloneLocs{0};
+ uint64_t _averageObjectSizeForCloneRecordIds{0};
// The estimated average object _id size during the clone phase.
uint64_t _averageObjectIdSize{0};
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index d4c7593370c..8be0acd90df 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -103,11 +103,6 @@ public:
_autoColl = boost::none;
}
- Database* getDb() const {
- invariant(_autoColl);
- return _autoColl->getDb();
- }
-
const CollectionPtr& getColl() const {
invariant(_autoColl);
return _autoColl->getCollection();
@@ -235,7 +230,7 @@ public:
AutoGetActiveCloner autoCloner(opCtx, migrationSessionId, true);
- uassertStatusOK(autoCloner.getCloner()->nextModsBatch(opCtx, autoCloner.getDb(), &result));
+ uassertStatusOK(autoCloner.getCloner()->nextModsBatch(opCtx, &result));
return true;
}
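The nextModsBatch signature change above, together with the Helpers::findById call sites earlier in this diff, drops the Database* parameter from the whole call chain; the lookup lambda handed to xferMods now closes over only the namespace (and, in the real code, the opCtx). A self-contained sketch of that callback shape, with a toy findById standing in for the real helper:

    #include <iostream>
    #include <string>

    using BSONObj = std::string;  // stand-in for mongo::BSONObj

    bool findById(const std::string& ns, const BSONObj& idDoc, BSONObj* fullDoc) {
        *fullDoc = "{_id: " + idDoc + ", ...}";  // pretend lookup by _id in 'ns'
        return true;
    }

    int main() {
        std::string ns = "test.coll";
        // Same shape as the wrapper passed to xferMods in the diff, minus opCtx:
        auto findByIdWrapper = [ns](BSONObj idDoc, BSONObj* fullDoc) {
            return findById(ns, idDoc, fullDoc);
        };

        BSONObj doc;
        if (findByIdWrapper("42", &doc))
            std::cout << doc << '\n';
    }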
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
index 91e1b4a21bc..dc1eb4579e5 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
@@ -179,7 +179,7 @@ protected:
true,
{ChunkType{uuid,
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
- ChunkVersion(1, 0, epoch, timestamp),
+ ChunkVersion({epoch, timestamp}, {1, 0}),
ShardId("dummyShardId")}});
AutoGetDb autoDb(operationContext(), kNss.db(), MODE_IX);
@@ -355,7 +355,7 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, CorrectDocumentsFetched) {
{
BSONObjBuilder modsBuilder;
- ASSERT_OK(cloner.nextModsBatch(operationContext(), autoColl.getDb(), &modsBuilder));
+ ASSERT_OK(cloner.nextModsBatch(operationContext(), &modsBuilder));
const auto modsObj = modsBuilder.obj();
ASSERT_EQ(2U, modsObj["reload"].Array().size());
@@ -455,7 +455,7 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, RemoveDuplicateDocuments) {
AutoGetCollection autoColl(operationContext(), kNss, MODE_IS);
{
BSONObjBuilder modsBuilder;
- ASSERT_OK(cloner.nextModsBatch(operationContext(), autoColl.getDb(), &modsBuilder));
+ ASSERT_OK(cloner.nextModsBatch(operationContext(), &modsBuilder));
const auto modsObj = modsBuilder.obj();
ASSERT_EQ(1U, modsObj["reload"].Array().size());
@@ -522,7 +522,7 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, OneLargeDocumentTransferMods) {
AutoGetCollection autoColl(operationContext(), kNss, MODE_IS);
{
BSONObjBuilder modsBuilder;
- ASSERT_OK(cloner.nextModsBatch(operationContext(), autoColl.getDb(), &modsBuilder));
+ ASSERT_OK(cloner.nextModsBatch(operationContext(), &modsBuilder));
const auto modsObj = modsBuilder.obj();
ASSERT_EQ(1, modsObj["reload"].Array().size());
@@ -600,7 +600,7 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, ManySmallDocumentsTransferMods) {
AutoGetCollection autoColl(operationContext(), kNss, MODE_IS);
{
BSONObjBuilder modsBuilder;
- ASSERT_OK(cloner.nextModsBatch(operationContext(), autoColl.getDb(), &modsBuilder));
+ ASSERT_OK(cloner.nextModsBatch(operationContext(), &modsBuilder));
const auto modsObj = modsBuilder.obj();
ASSERT_EQ(modsObj["reload"].Array().size(), numDocuments);
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 09a39686779..fa303126008 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -201,11 +201,10 @@ bool willOverrideLocalId(OperationContext* opCtx,
BSONObj min,
BSONObj max,
BSONObj shardKeyPattern,
- Database* db,
BSONObj remoteDoc,
BSONObj* localDoc) {
*localDoc = BSONObj();
- if (Helpers::findById(opCtx, db, nss.ns(), remoteDoc, *localDoc)) {
+ if (Helpers::findById(opCtx, nss.ns(), remoteDoc, *localDoc)) {
return !isInRange(*localDoc, min, max, shardKeyPattern);
}
@@ -819,7 +818,7 @@ MigrationDestinationManager::IndexesAndIdIndex MigrationDestinationManager::getC
Milliseconds(-1)));
for (auto&& spec : indexes.docs) {
- if (spec["clustered"]) {
+ if (spec[IndexDescriptor::kClusteredFieldName]) {
// The 'clustered' index is implicitly created upon clustered collection creation.
} else {
donorIndexSpecs.push_back(spec);
@@ -1774,7 +1773,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const
// Do not apply delete if doc does not belong to the chunk being migrated
BSONObj fullObj;
- if (Helpers::findById(opCtx, autoColl.getDb(), _nss.ns(), id, fullObj)) {
+ if (Helpers::findById(opCtx, _nss.ns(), id, fullObj)) {
if (!isInRange(fullObj, _min, _max, _shardKeyPattern)) {
if (MONGO_unlikely(failMigrationReceivedOutOfRangeOperation.shouldFail())) {
MONGO_UNREACHABLE;
@@ -1823,14 +1822,8 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const
}
BSONObj localDoc;
- if (willOverrideLocalId(opCtx,
- _nss,
- _min,
- _max,
- _shardKeyPattern,
- autoColl.getDb(),
- updatedDoc,
- &localDoc)) {
+ if (willOverrideLocalId(
+ opCtx, _nss, _min, _max, _shardKeyPattern, updatedDoc, &localDoc)) {
// Exception will abort migration cleanly
LOGV2_ERROR_OPTIONS(
16977,
diff --git a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
index 4b0d94e98bc..7ee89b9f26c 100644
--- a/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_destination_manager_legacy_commands.cpp
@@ -91,6 +91,14 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
+
+ bool shouldCheckoutSession() const final {
+ return false;
+ }
+
bool errmsgRun(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
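The two overrides added above appear to let the migration recipient commands accept an lsid/txnNumber without having the service check out the session for them. A minimal model of those command-level knobs, with a stand-in Command type rather than the real mongo::Command interface:

    #include <iostream>

    struct Command {
        virtual bool supportsRetryableWrite() const { return false; }
        virtual bool shouldCheckoutSession() const { return true; }
        virtual ~Command() = default;
    };

    struct RecvChunkCommand : Command {
        // Accept session info so txnNumber-based staleness checks can apply...
        bool supportsRetryableWrite() const final { return true; }
        // ...but skip session checkout: the migration machinery manages its
        // own execution and must not block on the session.
        bool shouldCheckoutSession() const final { return false; }
    };

    int main() {
        RecvChunkCommand cmd;
        std::cout << cmd.supportsRetryableWrite() << ' '
                  << cmd.shouldCheckoutSession() << '\n';  // 1 0
    }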
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 1a6909e629f..a0fc3e650ee 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/read_concern.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/auto_split_vector.h"
+#include "mongo/db/s/commit_chunk_migration_gen.h"
#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
#include "mongo/db/s/migration_coordinator.h"
#include "mongo/db/s/migration_util.h"
@@ -59,8 +60,6 @@
#include "mongo/s/catalog_cache_loader.h"
#include "mongo/s/grid.h"
#include "mongo/s/pm2423_feature_flags_gen.h"
-#include "mongo/s/request_types/commit_chunk_migration_request_type.h"
-#include "mongo/s/request_types/set_shard_version_request.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/util/duration.h"
#include "mongo/util/elapsed_tracker.h"
@@ -93,12 +92,10 @@ void refreshRecipientRoutingTable(OperationContext* opCtx,
const NamespaceString& nss,
const HostAndPort& toShardHost,
const ChunkVersion& newCollVersion) {
- SetShardVersionRequest ssv(nss, newCollVersion, false);
-
const executor::RemoteCommandRequest request(
toShardHost,
NamespaceString::kAdminDb.toString(),
- ssv.toBSON(),
+ BSON("_flushRoutingTableCacheUpdates" << nss.ns()),
ReadPreferenceSetting{ReadPreference::PrimaryOnly}.toContainingBSON(),
opCtx,
executor::RemoteCommandRequest::kNoTimeout);
@@ -560,20 +557,18 @@ void MigrationSourceManager::commitChunkMetadataOnConfig() {
{
const auto metadata = _getCurrentMetadataAndCheckEpoch();
- ChunkType migratedChunkType;
- migratedChunkType.setMin(*_args.getMin());
- migratedChunkType.setMax(*_args.getMax());
- migratedChunkType.setVersion(*_chunkVersion);
+ auto migratedChunk = MigratedChunkType(*_chunkVersion, *_args.getMin(), *_args.getMax());
const auto currentTime = VectorClock::get(_opCtx)->getTime();
- CommitChunkMigrationRequest::appendAsCommand(&builder,
- nss(),
- _args.getFromShard(),
- _args.getToShard(),
- migratedChunkType,
- metadata.getCollVersion(),
- currentTime.clusterTime().asTimestamp());
+ CommitChunkMigrationRequest request(nss(),
+ _args.getFromShard(),
+ _args.getToShard(),
+ migratedChunk,
+ metadata.getCollVersion(),
+ currentTime.clusterTime().asTimestamp());
+
+ request.serialize({}, &builder);
builder.append(kWriteConcernField, kMajorityWriteConcern.toBSON());
}
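In refreshRecipientRoutingTable above, the donor now asks the recipient to refresh via a plain {_flushRoutingTableCacheUpdates: <ns>} admin command instead of a SetShardVersionRequest. A toy sketch of the command object construction, using a string map as a stand-in for BSON:

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
        std::string nss = "db.migratedColl";  // hypothetical namespace

        // Equivalent of BSON("_flushRoutingTableCacheUpdates" << nss.ns()):
        std::map<std::string, std::string> cmd{{"_flushRoutingTableCacheUpdates", nss}};

        for (const auto& [k, v] : cmd)
            std::cout << "{ " << k << ": \"" << v << "\" }\n";
    }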
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index bd8600c1518..b877424f86a 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -37,7 +37,6 @@
#include "mongo/base/error_codes.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/client/query.h"
#include "mongo/db/catalog/collection_catalog_helper.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/commands.h"
@@ -520,14 +519,27 @@ void resubmitRangeDeletionsOnStepUp(ServiceContext* serviceContext) {
FindCommandRequest findCommand(NamespaceString::kRangeDeletionNamespace);
findCommand.setFilter(BSON(RangeDeletionTask::kProcessingFieldName << true));
auto cursor = client.find(std::move(findCommand));
- if (cursor->more()) {
- return migrationutil::submitRangeDeletionTask(
+
+ auto retFuture = ExecutorFuture<void>(getMigrationUtilExecutor(serviceContext));
+
+ int rangeDeletionsMarkedAsProcessing = 0;
+ while (cursor->more()) {
+ retFuture = migrationutil::submitRangeDeletionTask(
opCtx.get(),
RangeDeletionTask::parse(IDLParserErrorContext("rangeDeletionRecovery"),
cursor->next()));
- } else {
- return ExecutorFuture<void>(getMigrationUtilExecutor(serviceContext));
+ rangeDeletionsMarkedAsProcessing++;
}
+
+ if (rangeDeletionsMarkedAsProcessing > 1) {
+ LOGV2_WARNING(
+ 6695800,
+ "Rescheduling several range deletions marked as processing. Orphans count "
+ "may be off while they are not drained",
+ "numRangeDeletionsMarkedAsProcessing"_attr = rangeDeletionsMarkedAsProcessing);
+ }
+
+ return retFuture;
})
.then([serviceContext] {
ThreadClient tc("ResubmitRangeDeletions", serviceContext);
diff --git a/src/mongo/db/s/migration_util_test.cpp b/src/mongo/db/s/migration_util_test.cpp
index 8e6f02043da..90a1e9016a1 100644
--- a/src/mongo/db/s/migration_util_test.cpp
+++ b/src/mongo/db/s/migration_util_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/catalog_raii.h"
@@ -591,7 +589,7 @@ TEST_F(SubmitRangeDeletionTaskTest, SucceedsIfFilteringMetadataUUIDMatchesTaskUU
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(coll);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(1, 0, kEpoch, kDefaultTimestamp)));
+ makeChangedChunks(ChunkVersion({kEpoch, kDefaultTimestamp}, {1, 0})));
_mockCatalogClient->setCollections({coll});
forceShardFilteringMetadataRefresh(opCtx, kTestNss);
@@ -619,7 +617,7 @@ TEST_F(
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(coll);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(1, 0, kEpoch, kDefaultTimestamp)));
+ makeChangedChunks(ChunkVersion({kEpoch, kDefaultTimestamp}, {1, 0})));
_mockCatalogClient->setCollections({coll});
auto metadata = makeShardedMetadata(opCtx, collectionUUID);
@@ -654,7 +652,7 @@ TEST_F(SubmitRangeDeletionTaskTest,
auto matchingColl = makeCollectionType(collectionUUID, kEpoch, kDefaultTimestamp);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(matchingColl);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(10, 0, kEpoch, kDefaultTimestamp)));
+ makeChangedChunks(ChunkVersion({kEpoch, kDefaultTimestamp}, {10, 0})));
_mockCatalogClient->setCollections({matchingColl});
auto metadata = makeShardedMetadata(opCtx, collectionUUID);
@@ -684,7 +682,7 @@ TEST_F(SubmitRangeDeletionTaskTest,
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType);
_mockCatalogCacheLoader->setCollectionRefreshReturnValue(otherColl);
_mockCatalogCacheLoader->setChunkRefreshReturnValue(
- makeChangedChunks(ChunkVersion(1, 0, otherEpoch, otherTimestamp)));
+ makeChangedChunks(ChunkVersion({otherEpoch, otherTimestamp}, {1, 0})));
_mockCatalogClient->setCollections({otherColl});
// The task should not have been submitted, and the task's entry should have been removed from
diff --git a/src/mongo/db/s/move_primary_coordinator.cpp b/src/mongo/db/s/move_primary_coordinator.cpp
index c7fb0d9e44d..863a4c17b9d 100644
--- a/src/mongo/db/s/move_primary_coordinator.cpp
+++ b/src/mongo/db/s/move_primary_coordinator.cpp
@@ -46,30 +46,10 @@
namespace mongo {
-MovePrimaryCoordinator::MovePrimaryCoordinator(ShardingDDLCoordinatorService* service,
- const BSONObj& initialState)
- : ShardingDDLCoordinator(service, initialState),
- _doc(MovePrimaryCoordinatorDocument::parse(
- IDLParserErrorContext("MovePrimaryCoordinatorDocument"), initialState)) {}
-
-boost::optional<BSONObj> MovePrimaryCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
- cmdBob.append("request", BSON(_doc.kToShardIdFieldName << _doc.getToShardId()));
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "MovePrimaryCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("active", true);
- return bob.obj();
-}
+void MovePrimaryCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ stdx::lock_guard lk{_docMutex};
+ cmdInfoBuilder->append("request", BSON(_doc.kToShardIdFieldName << _doc.getToShardId()));
+}
void MovePrimaryCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
    // If two movePrimary requests target the same namespace, then the arguments must be the same.
diff --git a/src/mongo/db/s/move_primary_coordinator.h b/src/mongo/db/s/move_primary_coordinator.h
index 2e501419255..80a1586e0a4 100644
--- a/src/mongo/db/s/move_primary_coordinator.h
+++ b/src/mongo/db/s/move_primary_coordinator.h
@@ -35,30 +35,25 @@
namespace mongo {
-class MovePrimaryCoordinator final : public ShardingDDLCoordinator {
+class MovePrimaryCoordinator final
+ : public ShardingDDLCoordinatorImpl<MovePrimaryCoordinatorDocument> {
public:
- MovePrimaryCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState);
+ MovePrimaryCoordinator(ShardingDDLCoordinatorService* service, const BSONObj& initialState)
+ : ShardingDDLCoordinatorImpl(service, "MovePrimaryCoordinator", initialState) {}
+
~MovePrimaryCoordinator() = default;
void checkIfOptionsConflict(const BSONObj& coorDoc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
bool canAlwaysStartWhenUserWritesAreDisabled() const override {
return true;
}
private:
- ShardingDDLCoordinatorMetadata const& metadata() const override {
- return _doc.getShardingDDLCoordinatorMetadata();
- }
-
ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept override;
-
- MovePrimaryCoordinatorDocument _doc;
};
} // namespace mongo
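Across this diff the per-coordinator reportForCurrentOp bodies, like the one removed above, collapse into a base-class method that emits the common type/desc/op/ns/active fields and delegates only the request-specific part to an appendCommandInfo hook. A plain C++ sketch of that template-method shape, with a string map standing in for BSONObjBuilder:

    #include <iostream>
    #include <map>
    #include <string>

    using BSONObjBuilder = std::map<std::string, std::string>;  // stand-in

    struct Coordinator {
        virtual ~Coordinator() = default;
        virtual std::string desc() const = 0;
        virtual void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {}

        BSONObjBuilder reportForCurrentOp() const {
            // Common fields, built once in the base class:
            BSONObjBuilder bob{{"type", "op"}, {"desc", desc()}, {"active", "true"}};
            BSONObjBuilder cmd;
            appendCommandInfo(&cmd);  // coordinator-specific request fields
            for (const auto& [k, v] : cmd)
                bob["command." + k] = v;
            return bob;
        }
    };

    struct MovePrimaryLike : Coordinator {
        std::string desc() const override { return "MovePrimaryCoordinator"; }
        void appendCommandInfo(BSONObjBuilder* b) const override {
            (*b)["toShardId"] = "shard0001";  // mirrors the request field above
        }
    };

    int main() {
        MovePrimaryLike c;
        for (const auto& [k, v] : c.reportForCurrentOp())
            std::cout << k << ": " << v << '\n';
    }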
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index b4382f21e0f..aaca2f82bbb 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/move_primary_source_manager.h"
#include "mongo/client/connpool.h"
@@ -50,14 +47,11 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
MONGO_FAIL_POINT_DEFINE(hangInCloneStage);
MONGO_FAIL_POINT_DEFINE(hangInCleanStaleDataStage);
-using namespace shardmetadatautil;
-
MovePrimarySourceManager::MovePrimarySourceManager(OperationContext* opCtx,
ShardMovePrimary requestArgs,
StringData dbname,
@@ -192,7 +186,7 @@ Status MovePrimarySourceManager::enterCriticalSection(OperationContext* opCtx) {
// time inclusive of the move primary config commit update from accessing secondary data.
// Note: this write must occur after the critSec flag is set, to ensure the secondary refresh
// will stall behind the flag.
- Status signalStatus = updateShardDatabasesEntry(
+ Status signalStatus = shardmetadatautil::updateShardDatabasesEntry(
opCtx,
BSON(ShardDatabaseType::kNameFieldName << getNss().toString()),
BSONObj(),
diff --git a/src/mongo/db/s/op_observer_sharding_test.cpp b/src/mongo/db/s/op_observer_sharding_test.cpp
index 1ef41426f26..2c4859ba782 100644
--- a/src/mongo/db/s/op_observer_sharding_test.cpp
+++ b/src/mongo/db/s/op_observer_sharding_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/catalog_raii.h"
#include "mongo/db/op_observer_util.h"
#include "mongo/db/s/collection_sharding_runtime.h"
@@ -59,8 +57,10 @@ protected:
const UUID uuid = UUID::gen();
const OID epoch = OID::gen();
auto range = ChunkRange(BSON("key" << MINKEY), BSON("key" << MAXKEY));
- auto chunk = ChunkType(
- uuid, std::move(range), ChunkVersion(1, 0, epoch, Timestamp(1, 1)), ShardId("other"));
+ auto chunk = ChunkType(uuid,
+ std::move(range),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
+ ShardId("other"));
auto rt = RoutingTableHistory::makeNew(kTestNss,
uuid,
KeyPattern(keyPattern),
diff --git a/src/mongo/db/s/operation_sharding_state_test.cpp b/src/mongo/db/s/operation_sharding_state_test.cpp
index 0c4732b51ab..9c275398f85 100644
--- a/src/mongo/db/s/operation_sharding_state_test.cpp
+++ b/src/mongo/db/s/operation_sharding_state_test.cpp
@@ -47,7 +47,7 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleDbVersion) {
}
TEST_F(OperationShardingStateTest, ScopedSetShardRoleShardVersion) {
- ChunkVersion shardVersion(1, 0, OID::gen(), Timestamp(1, 0));
+ ChunkVersion shardVersion({OID::gen(), Timestamp(1, 0)}, {1, 0});
ScopedSetShardRole scopedSetShardRole(operationContext(), kNss, shardVersion, boost::none);
auto& oss = OperationShardingState::get(operationContext());
@@ -58,13 +58,13 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleChangeShardVersionSameNames
auto& oss = OperationShardingState::get(operationContext());
{
- ChunkVersion shardVersion1(1, 0, OID::gen(), Timestamp(10, 0));
+ ChunkVersion shardVersion1({OID::gen(), Timestamp(10, 0)}, {1, 0});
ScopedSetShardRole scopedSetShardRole1(
operationContext(), kNss, shardVersion1, boost::none);
ASSERT_EQ(shardVersion1, *oss.getShardVersion(kNss));
}
{
- ChunkVersion shardVersion2(1, 0, OID::gen(), Timestamp(20, 0));
+ ChunkVersion shardVersion2({OID::gen(), Timestamp(20, 0)}, {1, 0});
ScopedSetShardRole scopedSetShardRole2(
operationContext(), kNss, shardVersion2, boost::none);
ASSERT_EQ(shardVersion2, *oss.getShardVersion(kNss));
@@ -72,8 +72,8 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleChangeShardVersionSameNames
}
TEST_F(OperationShardingStateTest, ScopedSetShardRoleRecursiveShardVersionDifferentNamespaces) {
- ChunkVersion shardVersion1(1, 0, OID::gen(), Timestamp(10, 0));
- ChunkVersion shardVersion2(1, 0, OID::gen(), Timestamp(20, 0));
+ ChunkVersion shardVersion1({OID::gen(), Timestamp(10, 0)}, {1, 0});
+ ChunkVersion shardVersion2({OID::gen(), Timestamp(20, 0)}, {1, 0});
ScopedSetShardRole scopedSetShardRole1(operationContext(), kNss, shardVersion1, boost::none);
ScopedSetShardRole scopedSetShardRole2(
diff --git a/src/mongo/db/s/range_deletion_util_test.cpp b/src/mongo/db/s/range_deletion_util_test.cpp
index 6efd33ce9d6..567d50748df 100644
--- a/src/mongo/db/s/range_deletion_util_test.cpp
+++ b/src/mongo/db/s/range_deletion_util_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
@@ -113,7 +111,7 @@ public:
true,
{ChunkType{uuid,
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
- ChunkVersion(1, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
ShardId("dummyShardId")}});
ChunkManager cm(ShardId("dummyShardId"),
DatabaseVersion(UUID::gen(), Timestamp(1, 1)),
diff --git a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
index a5a01e7f309..b8d981bb4ce 100644
--- a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
+++ b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp
@@ -86,9 +86,8 @@ void notifyChangeStreamsOnRefineCollectionShardKeyComplete(OperationContext* opC
RefineCollectionShardKeyCoordinator::RefineCollectionShardKeyCoordinator(
ShardingDDLCoordinatorService* service, const BSONObj& initialState)
- : ShardingDDLCoordinator(service, initialState),
- _doc(RefineCollectionShardKeyCoordinatorDocument::parse(
- IDLParserErrorContext("RefineCollectionShardKeyCoordinatorDocument"), initialState)),
+ : RecoverableShardingDDLCoordinator(
+ service, "RefineCollectionShardKeyCoordinator", initialState),
_request(_doc.getRefineCollectionShardKeyRequest()),
_newShardKey(_doc.getNewShardKey()) {}
@@ -104,47 +103,8 @@ void RefineCollectionShardKeyCoordinator::checkIfOptionsConflict(const BSONObj&
_request.toBSON() == otherDoc.getRefineCollectionShardKeyRequest().toBSON()));
}
-boost::optional<BSONObj> RefineCollectionShardKeyCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
- cmdBob.appendElements(_request.toBSON());
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "RefineCollectionShardKeyCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("active", true);
- return bob.obj();
-}
-
-void RefineCollectionShardKeyCoordinator::_enterPhase(Phase newPhase) {
- StateDoc newDoc(_doc);
- newDoc.setPhase(newPhase);
-
- LOGV2_DEBUG(
- 6233200,
- 2,
- "Refine collection shard key coordinator phase transition",
- "namespace"_attr = nss(),
- "newPhase"_attr = RefineCollectionShardKeyCoordinatorPhase_serializer(newDoc.getPhase()),
- "oldPhase"_attr = RefineCollectionShardKeyCoordinatorPhase_serializer(_doc.getPhase()));
-
- if (_doc.getPhase() == Phase::kUnset) {
- newDoc = _insertStateDocument(std::move(newDoc));
- } else {
- newDoc = _updateStateDocument(cc().makeOperationContext().get(), std::move(newDoc));
- }
-
- {
- stdx::unique_lock ul{_docMutex};
- _doc = std::move(newDoc);
- }
+void RefineCollectionShardKeyCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ cmdInfoBuilder->appendElements(_request.toBSON());
}
ExecutorFuture<void> RefineCollectionShardKeyCoordinator::_runImpl(
diff --git a/src/mongo/db/s/refine_collection_shard_key_coordinator.h b/src/mongo/db/s/refine_collection_shard_key_coordinator.h
index c2e70a0d067..c461383e876 100644
--- a/src/mongo/db/s/refine_collection_shard_key_coordinator.h
+++ b/src/mongo/db/s/refine_collection_shard_key_coordinator.h
@@ -35,7 +35,9 @@
namespace mongo {
-class RefineCollectionShardKeyCoordinator : public ShardingDDLCoordinator {
+class RefineCollectionShardKeyCoordinator
+ : public RecoverableShardingDDLCoordinator<RefineCollectionShardKeyCoordinatorDocument,
+ RefineCollectionShardKeyCoordinatorPhaseEnum> {
public:
using StateDoc = RefineCollectionShardKeyCoordinatorDocument;
using Phase = RefineCollectionShardKeyCoordinatorPhaseEnum;
@@ -45,40 +47,16 @@ public:
void checkIfOptionsConflict(const BSONObj& coorDoc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
private:
- ShardingDDLCoordinatorMetadata const& metadata() const override {
- return _doc.getShardingDDLCoordinatorMetadata();
+ StringData serializePhase(const Phase& phase) const override {
+ return RefineCollectionShardKeyCoordinatorPhase_serializer(phase);
}
ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept override;
- template <typename Func>
- auto _executePhase(const Phase& newPhase, Func&& func) {
- return [=] {
- const auto& currPhase = _doc.getPhase();
-
- if (currPhase > newPhase) {
- // Do not execute this phase if we already reached a subsequent one.
- return;
- }
- if (currPhase < newPhase) {
- // Persist the new phase if this is the first time we are executing it.
- _enterPhase(newPhase);
- }
- return func();
- };
- }
-
- void _enterPhase(Phase newPhase);
-
- mutable Mutex _docMutex = MONGO_MAKE_LATCH("RefineCollectionShardKeyCoordinator::_docMutex");
- RefineCollectionShardKeyCoordinatorDocument _doc;
-
const mongo::RefineCollectionShardKeyRequest _request;
const KeyPattern _newShardKey;
diff --git a/src/mongo/db/s/rename_collection_coordinator.cpp b/src/mongo/db/s/rename_collection_coordinator.cpp
index 789f8ade994..64680e96cc2 100644
--- a/src/mongo/db/s/rename_collection_coordinator.cpp
+++ b/src/mongo/db/s/rename_collection_coordinator.cpp
@@ -90,9 +90,7 @@ boost::optional<UUID> getCollectionUUID(OperationContext* opCtx,
RenameCollectionCoordinator::RenameCollectionCoordinator(ShardingDDLCoordinatorService* service,
const BSONObj& initialState)
- : ShardingDDLCoordinator(service, initialState),
- _doc(RenameCollectionCoordinatorDocument::parse(
- IDLParserErrorContext("RenameCollectionCoordinatorDocument"), initialState)),
+ : RecoverableShardingDDLCoordinator(service, "RenameCollectionCoordinator", initialState),
_request(_doc.getRenameCollectionRequest()) {}
void RenameCollectionCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
@@ -113,54 +111,8 @@ std::vector<StringData> RenameCollectionCoordinator::_acquireAdditionalLocks(
return {_request.getTo().ns()};
}
-boost::optional<BSONObj> RenameCollectionCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
-
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
- cmdBob.appendElements(_request.toBSON());
-
- const auto currPhase = [&]() {
- stdx::lock_guard l{_docMutex};
- return _doc.getPhase();
- }();
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "RenameCollectionCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("currentPhase", currPhase);
- bob.append("active", true);
- return bob.obj();
-}
-
-void RenameCollectionCoordinator::_enterPhase(Phase newPhase) {
- StateDoc newDoc(_doc);
- newDoc.setPhase(newPhase);
-
- LOGV2_DEBUG(5460501,
- 2,
- "Rename collection coordinator phase transition",
- "fromNs"_attr = nss(),
- "toNs"_attr = _request.getTo(),
- "newPhase"_attr = RenameCollectionCoordinatorPhase_serializer(newDoc.getPhase()),
- "oldPhase"_attr = RenameCollectionCoordinatorPhase_serializer(_doc.getPhase()));
-
- if (_doc.getPhase() == Phase::kUnset) {
- newDoc = _insertStateDocument(std::move(newDoc));
- } else {
- newDoc = _updateStateDocument(cc().makeOperationContext().get(), std::move(newDoc));
- }
-
- {
- stdx::unique_lock ul{_docMutex};
- _doc = std::move(newDoc);
- }
+void RenameCollectionCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ cmdInfoBuilder->appendElements(_request.toBSON());
}
ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
@@ -275,15 +227,15 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
getForwardableOpMetadata().setOn(opCtx);
if (!_firstExecution) {
- _doc = _updateSession(opCtx, _doc);
+ _updateSession(opCtx);
_performNoopRetryableWriteOnAllShardsAndConfigsvr(
- opCtx, getCurrentSession(_doc), **executor);
+ opCtx, getCurrentSession(), **executor);
}
const auto& fromNss = nss();
- _doc = _updateSession(opCtx, _doc);
- const OperationSessionInfo osi = getCurrentSession(_doc);
+ _updateSession(opCtx);
+ const OperationSessionInfo osi = getCurrentSession();
// On participant shards:
// - Block CRUD on source and target collection in case at least one
@@ -303,20 +255,8 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
const auto cmdObj = CommandHelpers::appendMajorityWriteConcern(
renameCollParticipantRequest.toBSON({}));
- try {
- sharding_ddl_util::sendAuthenticatedCommandToShards(
- opCtx,
- fromNss.db(),
- cmdObj.addFields(osi.toBSON()),
- participants,
- **executor);
-
- } catch (const ExceptionFor<ErrorCodes::NotARetryableWriteCommand>&) {
- // Older 5.0 binaries don't support running the command as a
- // retryable write yet. In that case, retry without attaching session info.
- sharding_ddl_util::sendAuthenticatedCommandToShards(
- opCtx, fromNss.db(), cmdObj, participants, **executor);
- }
+ sharding_ddl_util::sendAuthenticatedCommandToShards(
+ opCtx, fromNss.db(), cmdObj.addFields(osi.toBSON()), participants, **executor);
}))
.then(_executePhase(
Phase::kRenameMetadata,
@@ -325,10 +265,13 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
auto* opCtx = opCtxHolder.get();
getForwardableOpMetadata().setOn(opCtx);
+                    // For an unsharded collection the CSRS cannot verify the targetUUID.
+                    // Use the session ID + txnNumber to ensure no stale requests get through.
+ _updateSession(opCtx);
+
if (!_firstExecution) {
- _doc = _updateSession(opCtx, _doc);
_performNoopRetryableWriteOnAllShardsAndConfigsvr(
- opCtx, getCurrentSession(_doc), **executor);
+ opCtx, getCurrentSession(), **executor);
}
ConfigsvrRenameCollectionMetadata req(nss(), _request.getTo());
@@ -336,28 +279,12 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
const auto cmdObj = CommandHelpers::appendMajorityWriteConcern(req.toBSON({}));
const auto& configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- // For an unsharded collection the CSRS server can not verify the targetUUID.
- // Use the session ID + txnNumber to ensure no stale requests get through.
- _doc = _updateSession(opCtx, _doc);
- const OperationSessionInfo osi = getCurrentSession(_doc);
-
- try {
- uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(
- configShard->runCommand(opCtx,
- ReadPreferenceSetting(ReadPreference::PrimaryOnly),
- "admin",
- cmdObj.addFields(osi.toBSON()),
- Shard::RetryPolicy::kIdempotent)));
- } catch (const ExceptionFor<ErrorCodes::NotARetryableWriteCommand>&) {
- // Older 5.0 binaries don't support running the command as a
- // retryable write yet. In that case, retry without attaching session info.
- uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(
- configShard->runCommand(opCtx,
- ReadPreferenceSetting(ReadPreference::PrimaryOnly),
- "admin",
- cmdObj,
- Shard::RetryPolicy::kIdempotent)));
- }
+ uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(
+ configShard->runCommand(opCtx,
+ ReadPreferenceSetting(ReadPreference::PrimaryOnly),
+ "admin",
+ cmdObj.addFields(getCurrentSession().toBSON()),
+ Shard::RetryPolicy::kIdempotent)));
}))
.then(_executePhase(
Phase::kUnblockCRUD,
@@ -367,9 +294,9 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
getForwardableOpMetadata().setOn(opCtx);
if (!_firstExecution) {
- _doc = _updateSession(opCtx, _doc);
+ _updateSession(opCtx);
_performNoopRetryableWriteOnAllShardsAndConfigsvr(
- opCtx, getCurrentSession(_doc), **executor);
+ opCtx, getCurrentSession(), **executor);
}
const auto& fromNss = nss();
@@ -383,22 +310,11 @@ ExecutorFuture<void> RenameCollectionCoordinator::_runImpl(
unblockParticipantRequest.toBSON({}));
auto participants = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx);
- _doc = _updateSession(opCtx, _doc);
- const OperationSessionInfo osi = getCurrentSession(_doc);
+ _updateSession(opCtx);
+ const OperationSessionInfo osi = getCurrentSession();
- try {
- sharding_ddl_util::sendAuthenticatedCommandToShards(
- opCtx,
- fromNss.db(),
- cmdObj.addFields(osi.toBSON()),
- participants,
- **executor);
- } catch (const ExceptionFor<ErrorCodes::NotARetryableWriteCommand>&) {
- // Older 5.0 binaries don't support running the command as a
- // retryable write yet. In that case, retry without attaching session info.
- sharding_ddl_util::sendAuthenticatedCommandToShards(
- opCtx, fromNss.db(), cmdObj, participants, **executor);
- }
+ sharding_ddl_util::sendAuthenticatedCommandToShards(
+ opCtx, fromNss.db(), cmdObj.addFields(osi.toBSON()), participants, **executor);
}))
.then(_executePhase(Phase::kSetResponse,
[this, anchor = shared_from_this()] {
diff --git a/src/mongo/db/s/rename_collection_coordinator.h b/src/mongo/db/s/rename_collection_coordinator.h
index af395745001..32621bb6ea4 100644
--- a/src/mongo/db/s/rename_collection_coordinator.h
+++ b/src/mongo/db/s/rename_collection_coordinator.h
@@ -35,7 +35,9 @@
namespace mongo {
-class RenameCollectionCoordinator final : public ShardingDDLCoordinator {
+class RenameCollectionCoordinator final
+ : public RecoverableShardingDDLCoordinator<RenameCollectionCoordinatorDocument,
+ RenameCollectionCoordinatorPhaseEnum> {
public:
using StateDoc = RenameCollectionCoordinatorDocument;
using Phase = RenameCollectionCoordinatorPhaseEnum;
@@ -45,9 +47,7 @@ public:
void checkIfOptionsConflict(const BSONObj& doc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
/**
* Waits for the rename to complete and returns the collection version.
@@ -59,41 +59,19 @@ public:
}
private:
+ StringData serializePhase(const Phase& phase) const override {
+ return RenameCollectionCoordinatorPhase_serializer(phase);
+ }
+
bool _mustAlwaysMakeProgress() override {
return _doc.getPhase() >= Phase::kFreezeMigrations;
};
- ShardingDDLCoordinatorMetadata const& metadata() const override {
- return _doc.getShardingDDLCoordinatorMetadata();
- }
-
ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept override;
std::vector<StringData> _acquireAdditionalLocks(OperationContext* opCtx) override;
- template <typename Func>
- auto _executePhase(const Phase& newPhase, Func&& func) {
- return [=] {
- const auto& currPhase = _doc.getPhase();
-
- if (currPhase > newPhase) {
- // Do not execute this phase if we already reached a subsequent one.
- return;
- }
- if (currPhase < newPhase) {
- // Persist the new phase if this is the first time we are executing it.
- _enterPhase(newPhase);
- }
- return func();
- };
- }
-
- void _enterPhase(Phase newPhase);
-
- mutable Mutex _docMutex = MONGO_MAKE_LATCH("RenameCollectionCoordinator::_docMutex");
- RenameCollectionCoordinatorDocument _doc;
-
boost::optional<RenameCollectionResponse> _response;
const RenameCollectionRequest _request;
};
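With the 5.0-binary fallback paths removed in rename_collection_coordinator.cpp above, the coordinator always merges the current session info into the command object before sending it. A toy sketch of that addFields merge, with BSON modeled as a string map and illustrative field values:

    #include <iostream>
    #include <map>
    #include <string>

    using BSONObj = std::map<std::string, std::string>;

    BSONObj addFields(BSONObj base, const BSONObj& extra) {
        base.insert(extra.begin(), extra.end());  // like BSONObj::addFields
        return base;
    }

    int main() {
        BSONObj cmdObj{{"_shardsvrRenameCollectionParticipant", "db.from"},
                       {"to", "db.to"}};
        BSONObj osi{{"lsid", "<session-uuid>"}, {"txnNumber", "3"}};

        // Previously wrapped in a try/catch for NotARetryableWriteCommand;
        // now sent unconditionally with the session fields attached:
        for (const auto& [k, v] : addFields(cmdObj, osi))
            std::cout << k << " = " << v << '\n';
    }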
diff --git a/src/mongo/db/s/rename_collection_participant_service.cpp b/src/mongo/db/s/rename_collection_participant_service.cpp
index bf48f41b581..64419c6c5e4 100644
--- a/src/mongo/db/s/rename_collection_participant_service.cpp
+++ b/src/mongo/db/s/rename_collection_participant_service.cpp
@@ -27,8 +27,7 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
+#include "mongo/db/s/rename_collection_participant_service.h"
#include "mongo/base/checked_cast.h"
#include "mongo/db/catalog/collection_catalog.h"
@@ -40,8 +39,6 @@
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/range_deletion_util.h"
#include "mongo/db/s/recoverable_critical_section_service.h"
-#include "mongo/db/s/rename_collection_participant_service.h"
-#include "mongo/db/s/shard_metadata_util.h"
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/grid.h"
@@ -49,9 +46,7 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
-
namespace {
const Backoff kExponentialBackoff(Seconds(1), Milliseconds::max());
@@ -76,7 +71,6 @@ void dropCollectionLocally(OperationContext* opCtx, const NamespaceString& nss)
"collectionExisted"_attr = knownNss);
}
-/* Clear the CollectionShardingRuntime entry for the specified namespace */
void clearFilteringMetadata(OperationContext* opCtx, const NamespaceString& nss) {
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
Lock::DBLock dbLock(opCtx, nss.db(), MODE_IX);
@@ -135,6 +129,7 @@ void renameOrDropTarget(OperationContext* opCtx,
deleteRangeDeletionTasksForRename(opCtx, fromNss, toNss);
}
}
+
} // namespace
RenameCollectionParticipantService* RenameCollectionParticipantService::getService(
diff --git a/src/mongo/db/s/reshard_collection_coordinator.cpp b/src/mongo/db/s/reshard_collection_coordinator.cpp
index 30ff299e538..c0c9648e0a4 100644
--- a/src/mongo/db/s/reshard_collection_coordinator.cpp
+++ b/src/mongo/db/s/reshard_collection_coordinator.cpp
@@ -107,10 +107,7 @@ ReshardCollectionCoordinator::ReshardCollectionCoordinator(ShardingDDLCoordinato
ReshardCollectionCoordinator::ReshardCollectionCoordinator(ShardingDDLCoordinatorService* service,
const BSONObj& initialState,
bool persistCoordinatorDocument)
- : ShardingDDLCoordinator(service, initialState),
- _initialState(initialState.getOwned()),
- _doc(ReshardCollectionCoordinatorDocument::parse(
- IDLParserErrorContext("ReshardCollectionCoordinatorDocument"), _initialState)),
+ : RecoverableShardingDDLCoordinator(service, "ReshardCollectionCoordinator", initialState),
_request(_doc.getReshardCollectionRequest()),
_persistCoordinatorDocument(persistCoordinatorDocument) {}
@@ -125,50 +122,15 @@ void ReshardCollectionCoordinator::checkIfOptionsConflict(const BSONObj& doc) co
_request.toBSON() == otherDoc.getReshardCollectionRequest().toBSON()));
}
-boost::optional<BSONObj> ReshardCollectionCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
- cmdBob.appendElements(_request.toBSON());
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "ReshardCollectionCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("active", true);
- return bob.obj();
+void ReshardCollectionCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ cmdInfoBuilder->appendElements(_request.toBSON());
}
void ReshardCollectionCoordinator::_enterPhase(Phase newPhase) {
if (!_persistCoordinatorDocument) {
return;
}
-
- StateDoc newDoc(_doc);
- newDoc.setPhase(newPhase);
-
- LOGV2_DEBUG(6206400,
- 2,
- "Reshard collection coordinator phase transition",
- "namespace"_attr = nss(),
- "newPhase"_attr = ReshardCollectionCoordinatorPhase_serializer(newDoc.getPhase()),
- "oldPhase"_attr = ReshardCollectionCoordinatorPhase_serializer(_doc.getPhase()));
-
- if (_doc.getPhase() == Phase::kUnset) {
- newDoc = _insertStateDocument(std::move(newDoc));
- } else {
- newDoc = _updateStateDocument(cc().makeOperationContext().get(), std::move(newDoc));
- }
-
- {
- stdx::unique_lock ul{_docMutex};
- _doc = std::move(newDoc);
- }
+ RecoverableShardingDDLCoordinator::_enterPhase(newPhase);
}
ExecutorFuture<void> ReshardCollectionCoordinator::_runImpl(
@@ -196,7 +158,7 @@ ExecutorFuture<void> ReshardCollectionCoordinator::_runImpl(
StateDoc newDoc(_doc);
newDoc.setOldShardKey(cmOld.getShardKeyPattern().getKeyPattern().toBSON());
newDoc.setOldCollectionUUID(cmOld.getUUID());
- _doc = _updateStateDocument(opCtx, std::move(newDoc));
+ _updateStateDocument(opCtx, std::move(newDoc));
} else {
_doc.setOldShardKey(cmOld.getShardKeyPattern().getKeyPattern().toBSON());
_doc.setOldCollectionUUID(cmOld.getUUID());
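
With reportForCurrentOp deleted here, the currentOp document is presumably assembled once in the RecoverableShardingDDLCoordinator base, which calls the new appendCommandInfo hook for the command payload (and, as the removed lines suggest, prepends the forwardable comment when one exists). A sketch of that division of labor; the free-function shape and names are illustrative, not the base class's actual code:

    // Sketch: the base class owns the boilerplate currentOp fields and delegates
    // the command payload to the subclass hook, matching the fields removed above.
    boost::optional<BSONObj> reportForCurrentOpSketch(
        StringData coordinatorName,
        const NamespaceString& nss,
        const std::function<void(BSONObjBuilder*)>& appendCommandInfo) {
        BSONObjBuilder cmdBob;
        appendCommandInfo(&cmdBob);  // e.g. the ReshardCollectionRequest fields
        BSONObjBuilder bob;
        bob.append("type", "op");
        bob.append("desc", coordinatorName);
        bob.append("op", "command");
        bob.append("ns", nss.toString());
        bob.append("command", cmdBob.obj());
        bob.append("active", true);
        return bob.obj();
    }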
diff --git a/src/mongo/db/s/reshard_collection_coordinator.h b/src/mongo/db/s/reshard_collection_coordinator.h
index 54d98ee03d1..085c183dc55 100644
--- a/src/mongo/db/s/reshard_collection_coordinator.h
+++ b/src/mongo/db/s/reshard_collection_coordinator.h
@@ -34,7 +34,9 @@
#include "mongo/util/future.h"
namespace mongo {
-class ReshardCollectionCoordinator : public ShardingDDLCoordinator {
+class ReshardCollectionCoordinator
+ : public RecoverableShardingDDLCoordinator<ReshardCollectionCoordinatorDocument,
+ ReshardCollectionCoordinatorPhaseEnum> {
public:
using StateDoc = ReshardCollectionCoordinatorDocument;
using Phase = ReshardCollectionCoordinatorPhaseEnum;
@@ -44,9 +46,7 @@ public:
void checkIfOptionsConflict(const BSONObj& coorDoc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
protected:
ReshardCollectionCoordinator(ShardingDDLCoordinatorService* service,
@@ -54,37 +54,15 @@ protected:
bool persistCoordinatorDocument);
private:
- ShardingDDLCoordinatorMetadata const& metadata() const override {
- stdx::lock_guard l{_docMutex};
- return _doc.getShardingDDLCoordinatorMetadata();
+ StringData serializePhase(const Phase& phase) const override {
+ return ReshardCollectionCoordinatorPhase_serializer(phase);
}
ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept override;
- template <typename Func>
- auto _executePhase(const Phase& newPhase, Func&& func) {
- return [=] {
- const auto& currPhase = _doc.getPhase();
-
- if (currPhase > newPhase) {
- // Do not execute this phase if we already reached a subsequent one.
- return;
- }
- if (currPhase < newPhase) {
- // Persist the new phase if this is the first time we are executing it.
- _enterPhase(newPhase);
- }
- return func();
- };
- }
-
void _enterPhase(Phase newPhase);
- const BSONObj _initialState;
- mutable Mutex _docMutex = MONGO_MAKE_LATCH("ReshardCollectionCoordinator::_docMutex");
- ReshardCollectionCoordinatorDocument _doc;
-
const mongo::ReshardCollectionRequest _request;
const bool _persistCoordinatorDocument; // TODO: SERVER-62338 remove this when 6.0 branches out
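
This header and the rename coordinator header earlier in the diff delete the same _executePhase/_docMutex/_doc boilerplate; it now lives once in the RecoverableShardingDDLCoordinator template, parameterized by state document and phase enum. A condensed reconstruction of the shared phase gate from the removed lines (member names and signatures are assumptions about the template, not quotations from it):

    // Condensed from the deleted per-coordinator code: each phase runs at most
    // once, is skipped if recovery already advanced past it, and is persisted
    // before its first execution.
    template <typename StateDocument, typename Phase>
    class RecoverableCoordinatorSketch {
    protected:
        template <typename Func>
        auto _executePhase(const Phase& newPhase, Func&& func) {
            return [=] {
                const auto& currPhase = _doc.getPhase();
                if (currPhase > newPhase) {
                    // Already reached a subsequent phase (e.g. on stepup): skip.
                    return;
                }
                if (currPhase < newPhase) {
                    // First time executing this phase: persist it first.
                    _enterPhase(newPhase);
                }
                return func();
            };
        }

        void _enterPhase(Phase newPhase);  // inserts or updates the state document
        virtual StringData serializePhase(const Phase& phase) const = 0;

        mutable Mutex _docMutex = MONGO_MAKE_LATCH("RecoverableCoordinatorSketch::_docMutex");
        StateDocument _doc;
    };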
diff --git a/src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp b/src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp
index aaeb6180654..dc3176cf3e8 100644
--- a/src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp
+++ b/src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp
@@ -117,7 +117,8 @@ DocumentSource::GetModPathsReturn DocumentSourceReshardingOwnershipMatch::getMod
DocumentSource::GetNextResult DocumentSourceReshardingOwnershipMatch::doGetNext() {
if (!_tempReshardingChunkMgr) {
// TODO: Actually propagate the temporary resharding namespace from the recipient.
- auto tempReshardingNss = constructTemporaryReshardingNss(pExpCtx->ns.db(), *pExpCtx->uuid);
+ auto tempReshardingNss =
+ resharding::constructTemporaryReshardingNss(pExpCtx->ns.db(), *pExpCtx->uuid);
auto* catalogCache = Grid::get(pExpCtx->opCtx)->catalogCache();
_tempReshardingChunkMgr =
diff --git a/src/mongo/db/s/resharding/resharding_agg_test.cpp b/src/mongo/db/s/resharding/resharding_agg_test.cpp
index ce8d110e5ab..c49467f79f9 100644
--- a/src/mongo/db/s/resharding/resharding_agg_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_agg_test.cpp
@@ -362,7 +362,7 @@ protected:
expCtx->ns = kRemoteOplogNss;
expCtx->mongoProcessInterface = std::make_shared<MockMongoInterface>(pipelineSource);
- auto pipeline = createOplogFetchingPipelineForResharding(
+ auto pipeline = resharding::createOplogFetchingPipelineForResharding(
expCtx,
ReshardingDonorOplogId(Timestamp::min(), Timestamp::min()),
_reshardingCollUUID,
@@ -524,13 +524,14 @@ TEST_F(ReshardingAggTest, VerifyPipelineOutputHasOplogSchema) {
expCtx->ns = kRemoteOplogNss;
expCtx->mongoProcessInterface = std::make_shared<MockMongoInterface>(pipelineSource);
- std::unique_ptr<Pipeline, PipelineDeleter> pipeline = createOplogFetchingPipelineForResharding(
- expCtx,
- // Use the test to also exercise the stages for resuming. The timestamp passed in is
- // excluded from the results.
- ReshardingDonorOplogId(insertOplog.getTimestamp(), insertOplog.getTimestamp()),
- _reshardingCollUUID,
- {_destinedRecipient});
+ std::unique_ptr<Pipeline, PipelineDeleter> pipeline =
+ resharding::createOplogFetchingPipelineForResharding(
+ expCtx,
+ // Use the test to also exercise the stages for resuming. The timestamp passed in is
+ // excluded from the results.
+ ReshardingDonorOplogId(insertOplog.getTimestamp(), insertOplog.getTimestamp()),
+ _reshardingCollUUID,
+ {_destinedRecipient});
auto bsonPipeline = pipeline->serializeToBson();
if (debug) {
std::cout << "Pipeline stages:" << std::endl;
@@ -624,11 +625,12 @@ TEST_F(ReshardingAggTest, VerifyPipelinePreparedTxn) {
expCtx->ns = kRemoteOplogNss;
expCtx->mongoProcessInterface = std::make_shared<MockMongoInterface>(pipelineSource);
- std::unique_ptr<Pipeline, PipelineDeleter> pipeline = createOplogFetchingPipelineForResharding(
- expCtx,
- ReshardingDonorOplogId(Timestamp::min(), Timestamp::min()),
- _reshardingCollUUID,
- {_destinedRecipient});
+ std::unique_ptr<Pipeline, PipelineDeleter> pipeline =
+ resharding::createOplogFetchingPipelineForResharding(
+ expCtx,
+ ReshardingDonorOplogId(Timestamp::min(), Timestamp::min()),
+ _reshardingCollUUID,
+ {_destinedRecipient});
if (debug) {
std::cout << "Pipeline stages:" << std::endl;
// This can be changed to process a prefix of the pipeline for debugging.
@@ -1476,7 +1478,7 @@ TEST_F(ReshardingAggWithStorageTest, RetryableFindAndModifyWithImageLookup) {
expCtx->mongoProcessInterface = std::move(mockMongoInterface);
}
- auto pipeline = createOplogFetchingPipelineForResharding(
+ auto pipeline = resharding::createOplogFetchingPipelineForResharding(
expCtx, ReshardingDonorOplogId(Timestamp::min(), Timestamp::min()), kCrudUUID, kMyShardId);
pipeline->addInitialSource(DocumentSourceMock::createForTest(pipelineSource, expCtx));
@@ -1578,8 +1580,8 @@ TEST_F(ReshardingAggWithStorageTest,
expCtx->mongoProcessInterface = std::move(mockMongoInterface);
}
- auto pipeline =
- createOplogFetchingPipelineForResharding(expCtx, startAt, kCrudUUID, kMyShardId);
+ auto pipeline = resharding::createOplogFetchingPipelineForResharding(
+ expCtx, startAt, kCrudUUID, kMyShardId);
pipeline->addInitialSource(DocumentSourceMock::createForTest(pipelineSource, expCtx));
return pipeline;
};
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
index a80bfbb88ec..8bd04ebfe37 100644
--- a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp
@@ -50,7 +50,7 @@
#include "mongo/db/s/resharding/document_source_resharding_ownership_match.h"
#include "mongo/db/s/resharding/resharding_data_copy_util.h"
#include "mongo/db/s/resharding/resharding_future_util.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/s/resharding/resharding_server_parameters_gen.h"
#include "mongo/db/s/resharding/resharding_util.h"
#include "mongo/db/service_context.h"
@@ -80,7 +80,7 @@ bool collectionHasSimpleCollation(OperationContext* opCtx, const NamespaceString
} // namespace
-ReshardingCollectionCloner::ReshardingCollectionCloner(ReshardingMetricsNew* metrics,
+ReshardingCollectionCloner::ReshardingCollectionCloner(ReshardingMetrics* metrics,
ShardKeyPattern newShardKeyPattern,
NamespaceString sourceNss,
const UUID& sourceUUID,
@@ -109,7 +109,7 @@ std::unique_ptr<Pipeline, PipelineDeleter> ReshardingCollectionCloner::makePipel
resolvedNamespaces[_sourceNss.coll()] = {_sourceNss, std::vector<BSONObj>{}};
// Assume that the config.cache.chunks collection isn't a view either.
- auto tempNss = constructTemporaryReshardingNss(_sourceNss.db(), _sourceUUID);
+ auto tempNss = resharding::constructTemporaryReshardingNss(_sourceNss.db(), _sourceUUID);
auto tempCacheChunksNss =
NamespaceString(NamespaceString::kConfigDb, "cache.chunks." + tempNss.ns());
resolvedNamespaces[tempCacheChunksNss.coll()] = {tempCacheChunksNss, std::vector<BSONObj>{}};
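
constructTemporaryReshardingNss, newly namespace-qualified here and in document_source_resharding_ownership_match.cpp above, derives the temporary collection's name from the source collection's UUID. A sketch of the convention the callers rely on, assuming the usual "<db>.system.resharding.<sourceUUID>" format holds on this branch:

    // Sketch of the assumed naming convention; check resharding_util.cpp on this
    // branch before depending on the exact prefix.
    NamespaceString constructTemporaryReshardingNssSketch(StringData db, const UUID& sourceUuid) {
        return NamespaceString(db, fmt::format("system.resharding.{}", sourceUuid.toString()));
    }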
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner.h b/src/mongo/db/s/resharding/resharding_collection_cloner.h
index e24b03c76b6..97e28a4fce5 100644
--- a/src/mongo/db/s/resharding/resharding_collection_cloner.h
+++ b/src/mongo/db/s/resharding/resharding_collection_cloner.h
@@ -52,7 +52,7 @@ class TaskExecutor;
class OperationContext;
class MongoProcessInterface;
-class ReshardingMetricsNew;
+class ReshardingMetrics;
class ServiceContext;
/**
@@ -61,7 +61,7 @@ class ServiceContext;
*/
class ReshardingCollectionCloner {
public:
- ReshardingCollectionCloner(ReshardingMetricsNew* metrics,
+ ReshardingCollectionCloner(ReshardingMetrics* metrics,
ShardKeyPattern newShardKeyPattern,
NamespaceString sourceNss,
const UUID& sourceUUID,
@@ -99,7 +99,7 @@ private:
std::unique_ptr<Pipeline, PipelineDeleter> _restartPipeline(OperationContext* opCtx);
- ReshardingMetricsNew* _metrics;
+ ReshardingMetrics* _metrics;
const ShardKeyPattern _newShardKeyPattern;
const NamespaceString _sourceNss;
const UUID _sourceUUID;
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp
index ae6b61fb314..61eb1a620c4 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp
@@ -88,13 +88,13 @@ boost::optional<Milliseconds> extractOperationRemainingTime(const BSONObj& obj)
} // namespace
CoordinatorCommitMonitor::CoordinatorCommitMonitor(
- std::shared_ptr<ReshardingMetricsNew> metricsNew,
+ std::shared_ptr<ReshardingMetrics> metrics,
NamespaceString ns,
std::vector<ShardId> recipientShards,
CoordinatorCommitMonitor::TaskExecutorPtr executor,
CancellationToken cancelToken,
Milliseconds maxDelayBetweenQueries)
- : _metricsNew{std::move(metricsNew)},
+ : _metrics{std::move(metrics)},
_ns(std::move(ns)),
_recipientShards(std::move(recipientShards)),
_executor(std::move(executor)),
@@ -209,8 +209,8 @@ ExecutorFuture<void> CoordinatorCommitMonitor::_makeFuture() const {
return RemainingOperationTimes{Milliseconds(0), Milliseconds::max()};
})
.then([this, anchor = shared_from_this()](RemainingOperationTimes remainingTimes) {
- _metricsNew->setCoordinatorHighEstimateRemainingTimeMillis(remainingTimes.max);
- _metricsNew->setCoordinatorLowEstimateRemainingTimeMillis(remainingTimes.min);
+ _metrics->setCoordinatorHighEstimateRemainingTimeMillis(remainingTimes.max);
+ _metrics->setCoordinatorLowEstimateRemainingTimeMillis(remainingTimes.min);
// Check if all recipient shards are within the commit threshold.
if (remainingTimes.max <= _threshold)
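
Beyond the metrics rename, the hunk shows the monitor's core loop: query the recipients' remaining-time estimates, publish the min/max through ReshardingMetrics, and signal once the slowest recipient is within the commit threshold. A stripped-down synchronous sketch of that logic (the real monitor chains cancellable ExecutorFutures; queryRemainingTimes and onReadyToCommit are hypothetical stand-ins):

    struct RemainingTimesSketch {
        Milliseconds min;
        Milliseconds max;
    };

    // Synchronous sketch of the commit-monitor loop shown above.
    void runCommitMonitorSketch(const std::function<RemainingTimesSketch()>& queryRemainingTimes,
                                ReshardingMetrics* metrics,
                                Milliseconds threshold,
                                Milliseconds maxDelayBetweenQueries,
                                const std::function<void()>& onReadyToCommit) {
        while (true) {
            const auto remaining = queryRemainingTimes();
            metrics->setCoordinatorHighEstimateRemainingTimeMillis(remaining.max);
            metrics->setCoordinatorLowEstimateRemainingTimeMillis(remaining.min);
            if (remaining.max <= threshold) {
                onReadyToCommit();  // all recipient shards are within the threshold
                return;
            }
            sleepFor(maxDelayBetweenQueries);  // real code schedules a delayed task
        }
    }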
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.h b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.h
index fb9f55d614f..aa3ff0727e9 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.h
+++ b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.h
@@ -33,7 +33,7 @@
#include <vector>
#include "mongo/db/namespace_string.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/executor/task_executor.h"
#include "mongo/s/shard_id.h"
#include "mongo/util/cancellation.h"
@@ -69,7 +69,7 @@ public:
Milliseconds max;
};
- CoordinatorCommitMonitor(std::shared_ptr<ReshardingMetricsNew> metricsNew,
+ CoordinatorCommitMonitor(std::shared_ptr<ReshardingMetrics> metrics,
NamespaceString ns,
std::vector<ShardId> recipientShards,
TaskExecutorPtr executor,
@@ -95,7 +95,7 @@ private:
static constexpr auto kDiagnosticLogLevel = 0;
static constexpr auto kMaxDelayBetweenQueries = Seconds(30);
- std::shared_ptr<ReshardingMetricsNew> _metricsNew;
+ std::shared_ptr<ReshardingMetrics> _metrics;
const NamespaceString _ns;
const std::vector<ShardId> _recipientShards;
const TaskExecutorPtr _executor;
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor_test.cpp
index 1cc717b7aec..d8740053c80 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor_test.cpp
@@ -40,7 +40,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/s/resharding/resharding_coordinator_commit_monitor.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/s/resharding/resharding_server_parameters_gen.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/logv2/log.h"
@@ -109,7 +109,7 @@ private:
boost::optional<Callback> _runOnMockingNextResponse;
ShardingDataTransformCumulativeMetrics _cumulativeMetrics{"dummyForTest"};
- std::shared_ptr<ReshardingMetricsNew> _metrics;
+ std::shared_ptr<ReshardingMetrics> _metrics;
};
auto makeExecutor() {
@@ -151,7 +151,7 @@ void CoordinatorCommitMonitorTest::setUp() {
_cancellationSource = std::make_unique<CancellationSource>();
auto clockSource = getServiceContext()->getFastClockSource();
- _metrics = std::make_shared<ReshardingMetricsNew>(
+ _metrics = std::make_shared<ReshardingMetrics>(
UUID::gen(),
BSON("y" << 1),
_ns,
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp b/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp
index 5f78cac592c..da457d8eab3 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp
@@ -112,7 +112,7 @@ bool stateTransistionsComplete(WithLock lk,
template <class TParticipant>
Status getStatusFromAbortReasonWithShardInfo(const TParticipant& participant,
StringData participantType) {
- return getStatusFromAbortReason(participant.getMutableState())
+ return resharding::getStatusFromAbortReason(participant.getMutableState())
.withContext("{} shard {} reached an unrecoverable error"_format(
participantType, participant.getId().toString()));
}
@@ -128,7 +128,7 @@ boost::optional<Status> getAbortReasonIfExists(
if (updatedStateDoc.getAbortReason()) {
// Note: the absence of context specifying which shard the abortReason originates from
// implies the abortReason originates from the coordinator.
- return getStatusFromAbortReason(updatedStateDoc);
+ return resharding::getStatusFromAbortReason(updatedStateDoc);
}
for (const auto& donorShard : updatedStateDoc.getDonorShards()) {
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_observer_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_observer_test.cpp
index 0f3803ab04e..bd893bd6dee 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_observer_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_observer_test.cpp
@@ -53,7 +53,7 @@ protected:
auto coordinatorDoc = ReshardingCoordinatorDocument();
coordinatorDoc.setRecipientShards(std::move(recipients));
coordinatorDoc.setDonorShards(std::move(donors));
- emplaceTruncatedAbortReasonIfExists(coordinatorDoc, abortReason);
+ resharding::emplaceTruncatedAbortReasonIfExists(coordinatorDoc, abortReason);
return coordinatorDoc;
}
@@ -62,9 +62,9 @@ protected:
boost::optional<Timestamp> timestamp = boost::none,
boost::optional<Status> abortReason = boost::none) {
// The mock state here is simulating only one donor shard having errored locally.
- return {makeDonorShard(ShardId{"s1"}, donorState, timestamp),
- makeDonorShard(ShardId{"s2"}, donorState, timestamp, abortReason),
- makeDonorShard(ShardId{"s3"}, donorState, timestamp)};
+ return {resharding::makeDonorShard(ShardId{"s1"}, donorState, timestamp),
+ resharding::makeDonorShard(ShardId{"s2"}, donorState, timestamp, abortReason),
+ resharding::makeDonorShard(ShardId{"s3"}, donorState, timestamp)};
}
std::vector<RecipientShardEntry> makeMockRecipientsInState(
@@ -72,9 +72,9 @@ protected:
boost::optional<Timestamp> timestamp = boost::none,
boost::optional<Status> abortReason = boost::none) {
// The mock state here is simulating only one recipient shard having errored locally.
- return {makeRecipientShard(ShardId{"s1"}, recipientState),
- makeRecipientShard(ShardId{"s2"}, recipientState, abortReason),
- makeRecipientShard(ShardId{"s3"}, recipientState)};
+ return {resharding::makeRecipientShard(ShardId{"s1"}, recipientState),
+ resharding::makeRecipientShard(ShardId{"s2"}, recipientState, abortReason),
+ resharding::makeRecipientShard(ShardId{"s3"}, recipientState)};
}
};
@@ -85,15 +85,15 @@ TEST_F(ReshardingCoordinatorObserverTest, onReshardingParticipantTransitionSucce
auto donorShards = makeMockDonorsInState(DonorStateEnum::kDonatingInitialData, Timestamp(1, 1));
std::vector<RecipientShardEntry> recipientShards0{
- makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kCloning),
- makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kApplying)};
+ resharding::makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kCloning),
+ resharding::makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kApplying)};
auto coordinatorDoc0 = makeCoordinatorDocWithRecipientsAndDonors(recipientShards0, donorShards);
reshardingObserver->onReshardingParticipantTransition(coordinatorDoc0);
ASSERT_FALSE(fut.isReady());
std::vector<RecipientShardEntry> recipientShards1{
- makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kApplying),
- makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kApplying)};
+ resharding::makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kApplying),
+ resharding::makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kApplying)};
auto coordinatorDoc1 = makeCoordinatorDocWithRecipientsAndDonors(recipientShards1, donorShards);
reshardingObserver->onReshardingParticipantTransition(coordinatorDoc1);
ASSERT_TRUE(fut.isReady());
@@ -110,25 +110,25 @@ TEST_F(ReshardingCoordinatorObserverTest, onReshardingParticipantTransitionTwoOu
auto donorShards = makeMockDonorsInState(DonorStateEnum::kDonatingInitialData, Timestamp(1, 1));
std::vector<RecipientShardEntry> recipientShards0{
- {makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kCloning)},
- {makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kApplying)},
- {makeRecipientShard(ShardId{"s3"}, RecipientStateEnum::kApplying)}};
+ {resharding::makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kCloning)},
+ {resharding::makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kApplying)},
+ {resharding::makeRecipientShard(ShardId{"s3"}, RecipientStateEnum::kApplying)}};
auto coordinatorDoc0 = makeCoordinatorDocWithRecipientsAndDonors(recipientShards0, donorShards);
reshardingObserver->onReshardingParticipantTransition(coordinatorDoc0);
ASSERT_FALSE(fut.isReady());
std::vector<RecipientShardEntry> recipientShards1{
- {makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kCloning)},
- {makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kApplying)},
- {makeRecipientShard(ShardId{"s3"}, RecipientStateEnum::kCloning)}};
+ {resharding::makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kCloning)},
+ {resharding::makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kApplying)},
+ {resharding::makeRecipientShard(ShardId{"s3"}, RecipientStateEnum::kCloning)}};
auto coordinatorDoc1 = makeCoordinatorDocWithRecipientsAndDonors(recipientShards1, donorShards);
reshardingObserver->onReshardingParticipantTransition(coordinatorDoc1);
ASSERT_FALSE(fut.isReady());
std::vector<RecipientShardEntry> recipientShards2{
- {makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kApplying)},
- {makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kApplying)},
- {makeRecipientShard(ShardId{"s3"}, RecipientStateEnum::kApplying)}};
+ {resharding::makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kApplying)},
+ {resharding::makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kApplying)},
+ {resharding::makeRecipientShard(ShardId{"s3"}, RecipientStateEnum::kApplying)}};
auto coordinatorDoc2 = makeCoordinatorDocWithRecipientsAndDonors(recipientShards2, donorShards);
reshardingObserver->onReshardingParticipantTransition(coordinatorDoc2);
ASSERT_TRUE(fut.isReady());
@@ -145,11 +145,11 @@ TEST_F(ReshardingCoordinatorObserverTest, participantReportsError) {
auto donorShards = makeMockDonorsInState(DonorStateEnum::kDonatingInitialData, Timestamp(1, 1));
std::vector<RecipientShardEntry> recipientShards{
- {makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kCloning)},
- {makeRecipientShard(ShardId{"s2"},
- RecipientStateEnum::kError,
- Status{ErrorCodes::InternalError, "We gotta abort"})},
- {makeRecipientShard(ShardId{"s3"}, RecipientStateEnum::kApplying)}};
+ {resharding::makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kCloning)},
+ {resharding::makeRecipientShard(ShardId{"s2"},
+ RecipientStateEnum::kError,
+ Status{ErrorCodes::InternalError, "We gotta abort"})},
+ {resharding::makeRecipientShard(ShardId{"s3"}, RecipientStateEnum::kApplying)}};
auto coordinatorDoc = makeCoordinatorDocWithRecipientsAndDonors(recipientShards, donorShards);
reshardingObserver->onReshardingParticipantTransition(coordinatorDoc);
auto resp = fut.getNoThrow();
@@ -173,9 +173,11 @@ TEST_F(ReshardingCoordinatorObserverTest, participantsDoneAborting) {
// donor who hasn't seen there was an error yet.
auto recipientShards = makeMockRecipientsInState(RecipientStateEnum::kDone, Timestamp(1, 1));
std::vector<DonorShardEntry> donorShards0{
- {makeDonorShard(ShardId{"s1"}, DonorStateEnum::kDone, Timestamp(1, 1), abortReason)},
- {makeDonorShard(ShardId{"s2"}, DonorStateEnum::kDonatingOplogEntries, Timestamp(1, 1))},
- {makeDonorShard(ShardId{"s3"}, DonorStateEnum::kDone, Timestamp(1, 1))}};
+ {resharding::makeDonorShard(
+ ShardId{"s1"}, DonorStateEnum::kDone, Timestamp(1, 1), abortReason)},
+ {resharding::makeDonorShard(
+ ShardId{"s2"}, DonorStateEnum::kDonatingOplogEntries, Timestamp(1, 1))},
+ {resharding::makeDonorShard(ShardId{"s3"}, DonorStateEnum::kDone, Timestamp(1, 1))}};
auto coordinatorDoc0 =
makeCoordinatorDocWithRecipientsAndDonors(recipientShards, donorShards0, abortReason);
reshardingObserver->onReshardingParticipantTransition(coordinatorDoc0);
@@ -183,9 +185,10 @@ TEST_F(ReshardingCoordinatorObserverTest, participantsDoneAborting) {
// All participants are done.
std::vector<DonorShardEntry> donorShards1{
- {makeDonorShard(ShardId{"s1"}, DonorStateEnum::kDone, Timestamp(1, 1), abortReason)},
- {makeDonorShard(ShardId{"s2"}, DonorStateEnum::kDone, Timestamp(1, 1))},
- {makeDonorShard(ShardId{"s3"}, DonorStateEnum::kDone, Timestamp(1, 1))}};
+ {resharding::makeDonorShard(
+ ShardId{"s1"}, DonorStateEnum::kDone, Timestamp(1, 1), abortReason)},
+ {resharding::makeDonorShard(ShardId{"s2"}, DonorStateEnum::kDone, Timestamp(1, 1))},
+ {resharding::makeDonorShard(ShardId{"s3"}, DonorStateEnum::kDone, Timestamp(1, 1))}};
auto coordinatorDoc1 =
makeCoordinatorDocWithRecipientsAndDonors(recipientShards, donorShards1, abortReason);
reshardingObserver->onReshardingParticipantTransition(coordinatorDoc1);
@@ -206,15 +209,15 @@ TEST_F(ReshardingCoordinatorObserverTest, onReshardingRecipientsOutOfSync) {
auto donorShards = makeMockDonorsInState(DonorStateEnum::kDonatingInitialData, Timestamp(1, 1));
std::vector<RecipientShardEntry> recipientShards0{
- makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kUnused),
- makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kStrictConsistency)};
+ resharding::makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kUnused),
+ resharding::makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kStrictConsistency)};
auto coordinatorDoc0 = makeCoordinatorDocWithRecipientsAndDonors(recipientShards0, donorShards);
reshardingObserver->onReshardingParticipantTransition(coordinatorDoc0);
ASSERT_FALSE(fut.isReady());
std::vector<RecipientShardEntry> recipientShards1{
- makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kApplying),
- makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kStrictConsistency)};
+ resharding::makeRecipientShard(ShardId{"s1"}, RecipientStateEnum::kApplying),
+ resharding::makeRecipientShard(ShardId{"s2"}, RecipientStateEnum::kStrictConsistency)};
auto coordinatorDoc1 = makeCoordinatorDocWithRecipientsAndDonors(recipientShards1, donorShards);
reshardingObserver->onReshardingParticipantTransition(coordinatorDoc1);
ASSERT_TRUE(fut.isReady());
@@ -231,15 +234,18 @@ TEST_F(ReshardingCoordinatorObserverTest, onDonorsReportedMinFetchTimestamp) {
auto recipientShards = makeMockRecipientsInState(RecipientStateEnum::kUnused);
std::vector<DonorShardEntry> donorShards0{
- {makeDonorShard(ShardId{"s1"}, DonorStateEnum::kDonatingInitialData, Timestamp(1, 1))},
- {makeDonorShard(ShardId{"s2"}, DonorStateEnum::kPreparingToDonate)}};
+ {resharding::makeDonorShard(
+ ShardId{"s1"}, DonorStateEnum::kDonatingInitialData, Timestamp(1, 1))},
+ {resharding::makeDonorShard(ShardId{"s2"}, DonorStateEnum::kPreparingToDonate)}};
auto coordinatorDoc0 = makeCoordinatorDocWithRecipientsAndDonors(recipientShards, donorShards0);
reshardingObserver->onReshardingParticipantTransition(coordinatorDoc0);
ASSERT_FALSE(fut.isReady());
std::vector<DonorShardEntry> donorShards1{
- {makeDonorShard(ShardId{"s1"}, DonorStateEnum::kDonatingInitialData, Timestamp(1, 1))},
- {makeDonorShard(ShardId{"s2"}, DonorStateEnum::kDonatingInitialData, Timestamp(1, 1))}};
+ {resharding::makeDonorShard(
+ ShardId{"s1"}, DonorStateEnum::kDonatingInitialData, Timestamp(1, 1))},
+ {resharding::makeDonorShard(
+ ShardId{"s2"}, DonorStateEnum::kDonatingInitialData, Timestamp(1, 1))}};
auto coordinatorDoc1 = makeCoordinatorDocWithRecipientsAndDonors(recipientShards, donorShards1);
reshardingObserver->onReshardingParticipantTransition(coordinatorDoc1);
ASSERT_TRUE(fut.isReady());
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index cbd6232a5d1..9aa5ed7c223 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/resharding/resharding_coordinator_service.h"
#include "mongo/base/string_data.h"
@@ -79,7 +76,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding
-
namespace mongo {
namespace {
@@ -150,12 +146,12 @@ using resharding_metrics::getIntervalStartFieldName;
using DocT = ReshardingCoordinatorDocument;
const auto metricsPrefix = resharding_metrics::getMetricsPrefix<DocT>();
-void buildStateDocumentCloneMetricsForUpdate(BSONObjBuilder& bob, ReshardingMetricsNew* metrics) {
+void buildStateDocumentCloneMetricsForUpdate(BSONObjBuilder& bob, ReshardingMetrics* metrics) {
bob.append(getIntervalStartFieldName<DocT>(ReshardingRecipientMetrics::kDocumentCopyFieldName),
metrics->getCopyingBegin());
}
-void buildStateDocumentApplyMetricsForUpdate(BSONObjBuilder& bob, ReshardingMetricsNew* metrics) {
+void buildStateDocumentApplyMetricsForUpdate(BSONObjBuilder& bob, ReshardingMetrics* metrics) {
bob.append(getIntervalEndFieldName<DocT>(ReshardingRecipientMetrics::kDocumentCopyFieldName),
metrics->getCopyingEnd());
bob.append(
@@ -164,14 +160,14 @@ void buildStateDocumentApplyMetricsForUpdate(BSONObjBuilder& bob, ReshardingMetr
}
void buildStateDocumentBlockingWritesMetricsForUpdate(BSONObjBuilder& bob,
- ReshardingMetricsNew* metrics) {
+ ReshardingMetrics* metrics) {
bob.append(
getIntervalEndFieldName<DocT>(ReshardingRecipientMetrics::kOplogApplicationFieldName),
metrics->getApplyingEnd());
}
void buildStateDocumentMetricsForUpdate(BSONObjBuilder& bob,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
CoordinatorStateEnum newState) {
switch (newState) {
case CoordinatorStateEnum::kCloning:
@@ -189,7 +185,7 @@ void buildStateDocumentMetricsForUpdate(BSONObjBuilder& bob,
}
void writeToCoordinatorStateNss(OperationContext* opCtx,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
const ReshardingCoordinatorDocument& coordinatorDoc,
TxnNumber txnNumber) {
BatchedCommandRequest request([&] {
@@ -295,9 +291,9 @@ TypeCollectionRecipientFields constructRecipientFields(
coordinatorDoc.getSourceNss(),
resharding::gReshardingMinimumOperationDurationMillis.load());
- emplaceCloneTimestampIfExists(recipientFields, coordinatorDoc.getCloneTimestamp());
- emplaceApproxBytesToCopyIfExists(recipientFields,
- coordinatorDoc.getReshardingApproxCopySizeStruct());
+ resharding::emplaceCloneTimestampIfExists(recipientFields, coordinatorDoc.getCloneTimestamp());
+ resharding::emplaceApproxBytesToCopyIfExists(
+ recipientFields, coordinatorDoc.getReshardingApproxCopySizeStruct());
return recipientFields;
}
@@ -323,10 +319,10 @@ BSONObj createReshardingFieldsUpdateForOriginalNss(
<< CollectionType::kAllowMigrationsFieldName << false));
}
case CoordinatorStateEnum::kPreparingToDonate: {
- TypeCollectionDonorFields donorFields(
- coordinatorDoc.getTempReshardingNss(),
- coordinatorDoc.getReshardingKey(),
- extractShardIdsFromParticipantEntries(coordinatorDoc.getRecipientShards()));
+ TypeCollectionDonorFields donorFields(coordinatorDoc.getTempReshardingNss(),
+ coordinatorDoc.getReshardingKey(),
+ resharding::extractShardIdsFromParticipantEntries(
+ coordinatorDoc.getRecipientShards()));
BSONObjBuilder updateBuilder;
{
@@ -394,7 +390,7 @@ BSONObj createReshardingFieldsUpdateForOriginalNss(
// If the abortReason exists, include it in the update.
setBuilder.append("reshardingFields.abortReason", *abortReason);
- auto abortStatus = getStatusFromAbortReason(coordinatorDoc);
+ auto abortStatus = resharding::getStatusFromAbortReason(coordinatorDoc);
setBuilder.append("reshardingFields.userCanceled",
abortStatus == ErrorCodes::ReshardCollectionAborted);
}
@@ -504,7 +500,7 @@ void writeToConfigCollectionsForTempNss(OperationContext* opCtx,
if (auto abortReason = coordinatorDoc.getAbortReason()) {
setBuilder.append("reshardingFields.abortReason", *abortReason);
- auto abortStatus = getStatusFromAbortReason(coordinatorDoc);
+ auto abortStatus = resharding::getStatusFromAbortReason(coordinatorDoc);
setBuilder.append("reshardingFields.userCanceled",
abortStatus == ErrorCodes::ReshardCollectionAborted);
}
@@ -608,8 +604,8 @@ BSONObj makeFlushRoutingTableCacheUpdatesCmd(const NamespaceString& nss) {
BSON(WriteConcernOptions::kWriteConcernField << kMajorityWriteConcern.toBSON()));
}
-ReshardingMetricsNew::CoordinatorState toMetricsState(CoordinatorStateEnum state) {
- return ReshardingMetricsNew::CoordinatorState(state);
+ReshardingMetrics::CoordinatorState toMetricsState(CoordinatorStateEnum state) {
+ return ReshardingMetrics::CoordinatorState(state);
}
} // namespace
@@ -664,7 +660,7 @@ void cleanupSourceConfigCollections(OperationContext* opCtx,
}
void writeDecisionPersistedState(OperationContext* opCtx,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
const ReshardingCoordinatorDocument& coordinatorDoc,
OID newCollectionEpoch,
Timestamp newCollectionTimestamp) {
@@ -691,7 +687,7 @@ void writeDecisionPersistedState(OperationContext* opCtx,
}
void insertCoordDocAndChangeOrigCollEntry(OperationContext* opCtx,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
const ReshardingCoordinatorDocument& coordinatorDoc) {
ShardingCatalogManager::get(opCtx)->bumpCollectionVersionAndChangeMetadataInTxn(
opCtx,
@@ -741,7 +737,7 @@ void insertCoordDocAndChangeOrigCollEntry(OperationContext* opCtx,
void writeParticipantShardsAndTempCollInfo(
OperationContext* opCtx,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
const ReshardingCoordinatorDocument& updatedCoordinatorDoc,
std::vector<ChunkType> initialChunks,
std::vector<BSONObj> zones) {
@@ -770,7 +766,7 @@ void writeParticipantShardsAndTempCollInfo(
void writeStateTransitionAndCatalogUpdatesThenBumpShardVersions(
OperationContext* opCtx,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
const ReshardingCoordinatorDocument& coordinatorDoc) {
// Run updates to config.reshardingOperations and config.collections in a transaction
auto nextState = coordinatorDoc.getState();
@@ -804,7 +800,7 @@ void writeStateTransitionAndCatalogUpdatesThenBumpShardVersions(
}
void removeCoordinatorDocAndReshardingFields(OperationContext* opCtx,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
const ReshardingCoordinatorDocument& coordinatorDoc,
boost::optional<Status> abortReason) {
// If the coordinator needs to abort and isn't in kInitializing, additional collections need to
@@ -857,7 +853,7 @@ ChunkVersion ReshardingCoordinatorExternalState::calculateChunkVersionForInitial
OperationContext* opCtx) {
const auto now = VectorClock::get(opCtx)->getTime();
const auto timestamp = now.clusterTime().asTimestamp();
- return ChunkVersion(1, 0, OID::gen(), timestamp);
+ return ChunkVersion({OID::gen(), timestamp}, {1, 0});
}
std::vector<DonorShardEntry> constructDonorShardEntries(const std::set<ShardId>& donorShardIds) {
@@ -1036,7 +1032,7 @@ ReshardingCoordinatorService::ReshardingCoordinator::ReshardingCoordinator(
: PrimaryOnlyService::TypedInstance<ReshardingCoordinator>(),
_id(coordinatorDoc.getReshardingUUID().toBSON()),
_coordinatorService(coordinatorService),
- _metricsNew{ReshardingMetricsNew::initializeFrom(coordinatorDoc, getGlobalServiceContext())},
+ _metrics{ReshardingMetrics::initializeFrom(coordinatorDoc, getGlobalServiceContext())},
_metadata(coordinatorDoc.getCommonReshardingMetadata()),
_coordinatorDoc(coordinatorDoc),
_markKilledExecutor(std::make_shared<ThreadPool>([] {
@@ -1055,7 +1051,7 @@ ReshardingCoordinatorService::ReshardingCoordinator::ReshardingCoordinator(
_reshardingCoordinatorObserver->onReshardingParticipantTransition(coordinatorDoc);
}
- _metricsNew->onStateTransition(boost::none, toMetricsState(coordinatorDoc.getState()));
+ _metrics->onStateTransition(boost::none, toMetricsState(coordinatorDoc.getState()));
}
void ReshardingCoordinatorService::ReshardingCoordinator::installCoordinatorDoc(
@@ -1080,8 +1076,8 @@ void ReshardingCoordinatorService::ReshardingCoordinator::installCoordinatorDoc(
const auto previousState = _coordinatorDoc.getState();
_coordinatorDoc = doc;
- _metricsNew->onStateTransition(toMetricsState(previousState),
- toMetricsState(_coordinatorDoc.getState()));
+ _metrics->onStateTransition(toMetricsState(previousState),
+ toMetricsState(_coordinatorDoc.getState()));
ShardingLogging::get(opCtx)->logChange(opCtx,
"resharding.coordinator.transition",
@@ -1090,7 +1086,7 @@ void ReshardingCoordinatorService::ReshardingCoordinator::installCoordinatorDoc(
kMajorityWriteConcern);
}
-void markCompleted(const Status& status, ReshardingMetricsNew* metrics) {
+void markCompleted(const Status& status, ReshardingMetrics* metrics) {
if (status.isOK()) {
metrics->onSuccess();
} else if (status == ErrorCodes::ReshardCollectionAborted) {
@@ -1320,7 +1316,7 @@ ReshardingCoordinatorService::ReshardingCoordinator::_commitAndFinishReshardOper
})
.then([this, executor] { return _awaitAllParticipantShardsDone(executor); })
.then([this, executor] {
- _metricsNew->onCriticalSectionEnd();
+ _metrics->onCriticalSectionEnd();
// Best-effort attempt to trigger a refresh on the participant shards so
// they see the collection metadata without reshardingFields and no longer
@@ -1403,6 +1399,14 @@ SemiFuture<void> ReshardingCoordinatorService::ReshardingCoordinator::run(
.onCompletion([outerStatus](Status) { return outerStatus; });
})
.onCompletion([this, self = shared_from_this()](Status status) {
+ _metrics->onStateTransition(toMetricsState(_coordinatorDoc.getState()), boost::none);
+
+ // Destroy the metrics object early so that its lifetime is not tied to the lifetime
+ // of this state machine. This is needed because future callbacks copy shared pointers
+ // to this state machine, causing it to live longer than expected and potentially
+ // overlap with a newer instance when stepping up.
+ _metrics.reset();
+
if (!status.isOK()) {
{
auto lg = stdx::lock_guard(_fulfillmentMutex);
@@ -1416,8 +1420,6 @@ SemiFuture<void> ReshardingCoordinatorService::ReshardingCoordinator::run(
}
_reshardingCoordinatorObserver->interrupt(status);
}
-
- _metricsNew->onStateTransition(toMetricsState(_coordinatorDoc.getState()), boost::none);
})
.semi();
}
@@ -1432,12 +1434,12 @@ ExecutorFuture<void> ReshardingCoordinatorService::ReshardingCoordinator::_onAbo
auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc());
// Notify metrics as the operation is now complete for external observers.
- markCompleted(status, _metricsNew.get());
+ markCompleted(status, _metrics.get());
// The temporary collection and its corresponding entries were never created. Only
// the coordinator document and reshardingFields require cleanup.
resharding::removeCoordinatorDocAndReshardingFields(
- opCtx.get(), _metricsNew.get(), _coordinatorDoc, status);
+ opCtx.get(), _metrics.get(), _coordinatorDoc, status);
return status;
})
.onTransientError([](const Status& retryStatus) {
@@ -1506,7 +1508,7 @@ void ReshardingCoordinatorService::ReshardingCoordinator::abort() {
boost::optional<BSONObj> ReshardingCoordinatorService::ReshardingCoordinator::reportForCurrentOp(
MongoProcessInterface::CurrentOpConnectionsMode,
MongoProcessInterface::CurrentOpSessionsMode) noexcept {
- return _metricsNew->reportForCurrentOp();
+ return _metrics->reportForCurrentOp();
}
std::shared_ptr<ReshardingCoordinatorObserver>
@@ -1561,13 +1563,13 @@ void ReshardingCoordinatorService::ReshardingCoordinator::_insertCoordDocAndChan
ReshardingCoordinatorDocument updatedCoordinatorDoc = _coordinatorDoc;
updatedCoordinatorDoc.setState(CoordinatorStateEnum::kInitializing);
resharding::insertCoordDocAndChangeOrigCollEntry(
- opCtx.get(), _metricsNew.get(), updatedCoordinatorDoc);
+ opCtx.get(), _metrics.get(), updatedCoordinatorDoc);
installCoordinatorDoc(opCtx.get(), updatedCoordinatorDoc);
{
// Note: don't put blocking or interruptible code in this block.
_coordinatorDocWrittenPromise.emplaceValue();
- _metricsNew->onStarted();
+ _metrics->onStarted();
}
pauseBeforeInsertCoordinatorDoc.pauseWhileSet();
@@ -1592,14 +1594,14 @@ void ReshardingCoordinatorService::ReshardingCoordinator::
// the possibility of the document reaching the BSONObj size constraint.
std::vector<BSONObj> zones;
if (updatedCoordinatorDoc.getZones()) {
- zones = buildTagsDocsFromZones(updatedCoordinatorDoc.getTempReshardingNss(),
- *updatedCoordinatorDoc.getZones());
+ zones = resharding::buildTagsDocsFromZones(updatedCoordinatorDoc.getTempReshardingNss(),
+ *updatedCoordinatorDoc.getZones());
}
updatedCoordinatorDoc.setPresetReshardedChunks(boost::none);
updatedCoordinatorDoc.setZones(boost::none);
resharding::writeParticipantShardsAndTempCollInfo(opCtx.get(),
- _metricsNew.get(),
+ _metrics.get(),
updatedCoordinatorDoc,
std::move(shardsAndChunks.initialChunks),
std::move(zones));
@@ -1652,14 +1654,14 @@ ReshardingCoordinatorService::ReshardingCoordinator::_awaitAllDonorsReadyToDonat
opCtx.get(), _ctHolder->getAbortToken());
}
- auto highestMinFetchTimestamp =
- getHighestMinFetchTimestamp(coordinatorDocChangedOnDisk.getDonorShards());
+ auto highestMinFetchTimestamp = resharding::getHighestMinFetchTimestamp(
+ coordinatorDocChangedOnDisk.getDonorShards());
_updateCoordinatorDocStateAndCatalogEntries(
CoordinatorStateEnum::kCloning,
coordinatorDocChangedOnDisk,
highestMinFetchTimestamp,
computeApproxCopySize(coordinatorDocChangedOnDisk));
- _metricsNew->onCopyingBegin();
+ _metrics->onCopyingBegin();
})
.then([this] { return _waitForMajority(_ctHolder->getAbortToken()); });
}
@@ -1678,8 +1680,8 @@ ReshardingCoordinatorService::ReshardingCoordinator::_awaitAllRecipientsFinished
.then([this](ReshardingCoordinatorDocument coordinatorDocChangedOnDisk) {
this->_updateCoordinatorDocStateAndCatalogEntries(CoordinatorStateEnum::kApplying,
coordinatorDocChangedOnDisk);
- _metricsNew->onCopyingEnd();
- _metricsNew->onApplyingBegin();
+ _metrics->onCopyingEnd();
+ _metrics->onApplyingBegin();
})
.then([this] { return _waitForMajority(_ctHolder->getAbortToken()); });
}
@@ -1691,9 +1693,9 @@ void ReshardingCoordinatorService::ReshardingCoordinator::_startCommitMonitor(
}
_commitMonitor = std::make_shared<resharding::CoordinatorCommitMonitor>(
- _metricsNew,
+ _metrics,
_coordinatorDoc.getSourceNss(),
- extractShardIdsFromParticipantEntries(_coordinatorDoc.getRecipientShards()),
+ resharding::extractShardIdsFromParticipantEntries(_coordinatorDoc.getRecipientShards()),
**executor,
_ctHolder->getCommitMonitorToken());
@@ -1718,9 +1720,22 @@ ReshardingCoordinatorService::ReshardingCoordinator::_awaitAllRecipientsFinished
_startCommitMonitor(executor);
LOGV2(5391602, "Resharding operation waiting for an okay to enter critical section");
- return future_util::withCancellation(_canEnterCritical.getFuture(),
- _ctHolder->getAbortToken())
+
+ // The _reshardingCoordinatorObserver->awaitAllRecipientsInStrictConsistency() future
+ // reports recipient shard errors encountered during the Applying phase, which in
+ // turn aborts the resharding operation.
+ // In all other cases, the _canEnterCritical.getFuture() future resolves first, and
+ // the operation then proceeds to enter the critical section depending on the
+ // returned status.
+ return future_util::withCancellation(
+ whenAny(
+ _canEnterCritical.getFuture().thenRunOn(**executor),
+ _reshardingCoordinatorObserver->awaitAllRecipientsInStrictConsistency()
+ .thenRunOn(**executor)
+ .ignoreValue()),
+ _ctHolder->getAbortToken())
.thenRunOn(**executor)
+ .then([](auto result) { return result.result; })
.onCompletion([this](Status status) {
_ctHolder->cancelCommitMonitor();
if (status.isOK()) {
@@ -1738,8 +1753,8 @@ ReshardingCoordinatorService::ReshardingCoordinator::_awaitAllRecipientsFinished
this->_updateCoordinatorDocStateAndCatalogEntries(CoordinatorStateEnum::kBlockingWrites,
_coordinatorDoc);
- _metricsNew->onApplyingEnd();
- _metricsNew->onCriticalSectionBegin();
+ _metrics->onApplyingEnd();
+ _metrics->onCriticalSectionBegin();
})
.then([this] { return _waitForMajority(_ctHolder->getAbortToken()); })
.thenRunOn(**executor)
@@ -1805,7 +1820,7 @@ Future<void> ReshardingCoordinatorService::ReshardingCoordinator::_commit(
resharding::writeDecisionPersistedState(opCtx.get(),
- _metricsNew.get(),
+ _metrics.get(),
updatedCoordinatorDoc,
std::move(newCollectionEpoch),
std::move(newCollectionTimestamp));
@@ -1836,7 +1851,7 @@ ReshardingCoordinatorService::ReshardingCoordinator::_awaitAllParticipantShardsD
boost::optional<Status> abortReason;
if (coordinatorDoc.getAbortReason()) {
- abortReason = getStatusFromAbortReason(coordinatorDoc);
+ abortReason = resharding::getStatusFromAbortReason(coordinatorDoc);
}
if (!abortReason) {
@@ -1849,40 +1864,18 @@ ReshardingCoordinatorService::ReshardingCoordinator::_awaitAllParticipantShardsD
const auto cmdObj =
ShardsvrDropCollectionIfUUIDNotMatchingRequest(nss, notMatchingThisUUID)
.toBSON({});
-
- try {
- sharding_ddl_util::sendAuthenticatedCommandToShards(
- opCtx.get(), nss.db(), cmdObj, allShardIds, **executor);
- } catch (const DBException& ex) {
- if (ex.code() == ErrorCodes::CommandNotFound) {
- // TODO SERVER-60531 get rid of the catch logic
- // Cleanup failed because at least one shard could be using a binary
- // not supporting the ShardsvrDropCollectionIfUUIDNotMatching command.
- LOGV2_INFO(5423100,
- "Resharding coordinator couldn't guarantee older incarnations "
- "of the collection were dropped. A chunk migration to a shard "
- "with an older incarnation of the collection will fail",
- "namespace"_attr = nss.ns());
- } else if (opCtx->checkForInterruptNoAssert().isOK()) {
- LOGV2_INFO(
- 5423101,
- "Resharding coordinator failed while trying to drop possible older "
- "incarnations of the collection. A chunk migration to a shard with "
- "an older incarnation of the collection will fail",
- "namespace"_attr = nss.ns(),
- "error"_attr = redact(ex.toStatus()));
- }
- }
+ _reshardingCoordinatorExternalState->sendCommandToShards(
+ opCtx.get(), nss.db(), cmdObj, allShardIds, **executor);
}
reshardingPauseCoordinatorBeforeRemovingStateDoc.pauseWhileSetAndNotCanceled(
opCtx.get(), _ctHolder->getStepdownToken());
// Notify metrics as the operation is now complete for external observers.
- markCompleted(abortReason ? *abortReason : Status::OK(), _metricsNew.get());
+ markCompleted(abortReason ? *abortReason : Status::OK(), _metrics.get());
resharding::removeCoordinatorDocAndReshardingFields(
- opCtx.get(), _metricsNew.get(), coordinatorDoc, abortReason);
+ opCtx.get(), _metrics.get(), coordinatorDoc, abortReason);
});
}
@@ -1896,13 +1889,13 @@ void ReshardingCoordinatorService::ReshardingCoordinator::
// Build new state doc for coordinator state update
ReshardingCoordinatorDocument updatedCoordinatorDoc = coordinatorDoc;
updatedCoordinatorDoc.setState(nextState);
- emplaceApproxBytesToCopyIfExists(updatedCoordinatorDoc, std::move(approxCopySize));
- emplaceCloneTimestampIfExists(updatedCoordinatorDoc, std::move(cloneTimestamp));
- emplaceTruncatedAbortReasonIfExists(updatedCoordinatorDoc, abortReason);
+ resharding::emplaceApproxBytesToCopyIfExists(updatedCoordinatorDoc, std::move(approxCopySize));
+ resharding::emplaceCloneTimestampIfExists(updatedCoordinatorDoc, std::move(cloneTimestamp));
+ resharding::emplaceTruncatedAbortReasonIfExists(updatedCoordinatorDoc, abortReason);
auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc());
resharding::writeStateTransitionAndCatalogUpdatesThenBumpShardVersions(
- opCtx.get(), _metricsNew.get(), updatedCoordinatorDoc);
+ opCtx.get(), _metrics.get(), updatedCoordinatorDoc);
// Update in-memory coordinator doc
installCoordinatorDoc(opCtx.get(), updatedCoordinatorDoc);
@@ -1911,9 +1904,10 @@ void ReshardingCoordinatorService::ReshardingCoordinator::
void ReshardingCoordinatorService::ReshardingCoordinator::_sendCommandToAllParticipants(
const std::shared_ptr<executor::ScopedTaskExecutor>& executor, const BSONObj& command) {
auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc());
- auto donorShardIds = extractShardIdsFromParticipantEntries(_coordinatorDoc.getDonorShards());
+ auto donorShardIds =
+ resharding::extractShardIdsFromParticipantEntries(_coordinatorDoc.getDonorShards());
auto recipientShardIds =
- extractShardIdsFromParticipantEntries(_coordinatorDoc.getRecipientShards());
+ resharding::extractShardIdsFromParticipantEntries(_coordinatorDoc.getRecipientShards());
std::set<ShardId> participantShardIds{donorShardIds.begin(), donorShardIds.end()};
participantShardIds.insert(recipientShardIds.begin(), recipientShardIds.end());
@@ -1929,7 +1923,7 @@ void ReshardingCoordinatorService::ReshardingCoordinator::_sendCommandToAllRecip
const std::shared_ptr<executor::ScopedTaskExecutor>& executor, const BSONObj& command) {
auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc());
auto recipientShardIds =
- extractShardIdsFromParticipantEntries(_coordinatorDoc.getRecipientShards());
+ resharding::extractShardIdsFromParticipantEntries(_coordinatorDoc.getRecipientShards());
_reshardingCoordinatorExternalState->sendCommandToShards(
opCtx.get(),
@@ -1942,7 +1936,8 @@ void ReshardingCoordinatorService::ReshardingCoordinator::_sendCommandToAllRecip
void ReshardingCoordinatorService::ReshardingCoordinator::_sendCommandToAllDonors(
const std::shared_ptr<executor::ScopedTaskExecutor>& executor, const BSONObj& command) {
auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc());
- auto donorShardIds = extractShardIdsFromParticipantEntries(_coordinatorDoc.getDonorShards());
+ auto donorShardIds =
+ resharding::extractShardIdsFromParticipantEntries(_coordinatorDoc.getDonorShards());
_reshardingCoordinatorExternalState->sendCommandToShards(
opCtx.get(),
@@ -2036,7 +2031,7 @@ void ReshardingCoordinatorService::ReshardingCoordinator::_updateChunkImbalanceM
auto imbalanceCount =
getMaxChunkImbalanceCount(routingInfo, allShardsWithOpTime.value, zoneInfo);
- _metricsNew->setLastOpEndingChunkImbalance(imbalanceCount);
+ _metrics->setLastOpEndingChunkImbalance(imbalanceCount);
} catch (const DBException& ex) {
LOGV2_WARNING(5543000,
"Encountered error while trying to update resharding chunk imbalance metrics",
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.h b/src/mongo/db/s/resharding/resharding_coordinator_service.h
index d24c23f6b68..6f0eb95c79a 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.h
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.h
@@ -33,7 +33,7 @@
#include "mongo/db/repl/primary_only_service.h"
#include "mongo/db/s/resharding/coordinator_document_gen.h"
#include "mongo/db/s/resharding/resharding_coordinator_observer.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
@@ -55,28 +55,28 @@ void cleanupSourceConfigCollections(OperationContext* opCtx,
const ReshardingCoordinatorDocument& coordinatorDoc);
void writeDecisionPersistedState(OperationContext* opCtx,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
const ReshardingCoordinatorDocument& coordinatorDoc,
OID newCollectionEpoch,
Timestamp newCollectionTimestamp);
void insertCoordDocAndChangeOrigCollEntry(OperationContext* opCtx,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
const ReshardingCoordinatorDocument& coordinatorDoc);
void writeParticipantShardsAndTempCollInfo(OperationContext* opCtx,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
const ReshardingCoordinatorDocument& coordinatorDoc,
std::vector<ChunkType> initialChunks,
std::vector<BSONObj> zones);
void writeStateTransitionAndCatalogUpdatesThenBumpShardVersions(
OperationContext* opCtx,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
const ReshardingCoordinatorDocument& coordinatorDoc);
void removeCoordinatorDocAndReshardingFields(OperationContext* opCtx,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
const ReshardingCoordinatorDocument& coordinatorDoc,
boost::optional<Status> abortReason = boost::none);
} // namespace resharding
@@ -513,7 +513,7 @@ private:
// The primary-only service instance corresponding to the coordinator instance. Not owned.
const ReshardingCoordinatorService* const _coordinatorService;
- std::shared_ptr<ReshardingMetricsNew> _metricsNew;
+ std::shared_ptr<ReshardingMetrics> _metrics;
// The in-memory representation of the immutable portion of the document in
// config.reshardingOperations.
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
index dc16d5fe271..1fc380093bf 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <boost/optional.hpp>
#include <functional>
@@ -59,7 +56,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -203,7 +199,7 @@ public:
{DonorShardEntry(ShardId("shard0000"), {})},
{RecipientShardEntry(ShardId("shard0001"), {})});
doc.setCommonReshardingMetadata(meta);
- emplaceCloneTimestampIfExists(doc, cloneTimestamp);
+ resharding::emplaceCloneTimestampIfExists(doc, cloneTimestamp);
return doc;
}
@@ -372,10 +368,11 @@ public:
TypeCollectionReshardingFields reshardingFields(coordinatorDoc.getReshardingUUID());
reshardingFields.setState(coordinatorDoc.getState());
- reshardingFields.setDonorFields(TypeCollectionDonorFields(
- coordinatorDoc.getTempReshardingNss(),
- coordinatorDoc.getReshardingKey(),
- extractShardIdsFromParticipantEntries(coordinatorDoc.getRecipientShards())));
+ reshardingFields.setDonorFields(
+ TypeCollectionDonorFields(coordinatorDoc.getTempReshardingNss(),
+ coordinatorDoc.getReshardingKey(),
+ resharding::extractShardIdsFromParticipantEntries(
+ coordinatorDoc.getRecipientShards())));
auto originalNssCatalogEntry = makeOriginalCollectionCatalogEntry(
coordinatorDoc,
@@ -414,7 +411,7 @@ public:
_newShardKey.isShardKey(shardKey.toBSON()) ? _newChunkRanges : _oldChunkRanges;
// Create two chunks, one on each shard with the given namespace and epoch
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(uuid, chunkRanges[0], version, ShardId("shard0000"));
chunk1.setName(ids[0]);
version.incMinor();
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
index da56d0d8cb5..35ffa75b31a 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
@@ -27,10 +27,6 @@
* it in the license file.
*/
-#include "mongo/db/s/resharding/coordinator_document_gen.h"
-
-#include "mongo/platform/basic.h"
-
#include <boost/optional.hpp>
#include "mongo/client/remote_command_targeter_mock.h"
@@ -40,6 +36,7 @@
#include "mongo/db/repl/storage_interface_mock.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/s/config/index_on_config.h"
+#include "mongo/db/s/resharding/coordinator_document_gen.h"
#include "mongo/db/s/resharding/resharding_coordinator_service.h"
#include "mongo/db/s/resharding/resharding_util.h"
#include "mongo/db/s/transaction_coordinator_service.h"
@@ -52,7 +49,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -85,13 +81,12 @@ protected:
TransactionCoordinatorService::get(operationContext())
->onShardingInitialization(operationContext(), true);
- _metrics =
- ReshardingMetricsNew::makeInstance(_originalUUID,
- _newShardKey.toBSON(),
- _originalNss,
- ReshardingMetricsNew::Role::kCoordinator,
- getServiceContext()->getFastClockSource()->now(),
- getServiceContext());
+ _metrics = ReshardingMetrics::makeInstance(_originalUUID,
+ _newShardKey.toBSON(),
+ _originalNss,
+ ReshardingMetrics::Role::kCoordinator,
+ getServiceContext()->getFastClockSource()->now(),
+ getServiceContext());
}
void tearDown() override {
@@ -180,7 +175,7 @@ protected:
_newShardKey.isShardKey(shardKey.toBSON()) ? _newChunkRanges : _oldChunkRanges;
// Create two chunks, one on each shard with the given namespace and epoch
- ChunkVersion version(1, 0, epoch, Timestamp(1, 2));
+ ChunkVersion version({epoch, Timestamp(1, 2)}, {1, 0});
ChunkType chunk1(uuid, chunkRanges[0], version, ShardId("shard0000"));
chunk1.setName(ids[0]);
version.incMinor();
@@ -227,7 +222,7 @@ protected:
client.insert(CollectionType::ConfigNS.ns(), originalNssCatalogEntry.toBSON());
auto tempNssCatalogEntry = createTempReshardingCollectionType(
- opCtx, coordinatorDoc, ChunkVersion(1, 1, OID::gen(), Timestamp(1, 2)), BSONObj());
+ opCtx, coordinatorDoc, ChunkVersion({OID::gen(), Timestamp(1, 2)}, {1, 1}), BSONObj());
client.insert(CollectionType::ConfigNS.ns(), tempNssCatalogEntry.toBSON());
return coordinatorDoc;
@@ -519,11 +514,11 @@ protected:
// collection should have been removed.
boost::optional<CollectionType> expectedTempCollType = boost::none;
if (expectedCoordinatorDoc.getState() < CoordinatorStateEnum::kCommitting) {
- expectedTempCollType =
- createTempReshardingCollectionType(opCtx,
- expectedCoordinatorDoc,
- ChunkVersion(1, 1, OID::gen(), Timestamp(1, 2)),
- BSONObj());
+ expectedTempCollType = createTempReshardingCollectionType(
+ opCtx,
+ expectedCoordinatorDoc,
+ ChunkVersion({OID::gen(), Timestamp(1, 2)}, {1, 1}),
+ BSONObj());
// It's necessary to add the userCanceled field because the call into
// createTempReshardingCollectionType assumes that the collection entry is
@@ -723,7 +718,7 @@ protected:
ShardKeyPattern _oldShardKey = ShardKeyPattern(BSON("oldSK" << 1));
ShardKeyPattern _newShardKey = ShardKeyPattern(BSON("newSK" << 1));
- std::unique_ptr<ReshardingMetricsNew> _metrics;
+ std::unique_ptr<ReshardingMetrics> _metrics;
const std::vector<ChunkRange> _oldChunkRanges = {
ChunkRange(_oldShardKey.getKeyPattern().globalMin(), BSON("oldSK" << 12345)),
diff --git a/src/mongo/db/s/resharding/resharding_data_replication.cpp b/src/mongo/db/s/resharding/resharding_data_replication.cpp
index 4143c8c0c76..ff7bc064939 100644
--- a/src/mongo/db/s/resharding/resharding_data_replication.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_replication.cpp
@@ -81,12 +81,12 @@ void ensureFulfilledPromise(SharedPromise<void>& sp, Status error) {
} // namespace
std::unique_ptr<ReshardingCollectionCloner> ReshardingDataReplication::_makeCollectionCloner(
- ReshardingMetricsNew* metricsNew,
+ ReshardingMetrics* metrics,
const CommonReshardingMetadata& metadata,
const ShardId& myShardId,
Timestamp cloneTimestamp) {
return std::make_unique<ReshardingCollectionCloner>(
- metricsNew,
+ metrics,
ShardKeyPattern{metadata.getReshardingKey()},
metadata.getSourceNss(),
metadata.getSourceUUID(),
@@ -112,7 +112,7 @@ std::vector<std::unique_ptr<ReshardingTxnCloner>> ReshardingDataReplication::_ma
std::vector<std::unique_ptr<ReshardingOplogFetcher>> ReshardingDataReplication::_makeOplogFetchers(
OperationContext* opCtx,
- ReshardingMetricsNew* metricsNew,
+ ReshardingMetrics* metrics,
const CommonReshardingMetadata& metadata,
const std::vector<DonorShardFetchTimestamp>& donorShards,
const ShardId& myShardId) {
@@ -121,14 +121,14 @@ std::vector<std::unique_ptr<ReshardingOplogFetcher>> ReshardingDataReplication::
for (const auto& donor : donorShards) {
auto oplogBufferNss =
- getLocalOplogBufferNamespace(metadata.getSourceUUID(), donor.getShardId());
+ resharding::getLocalOplogBufferNamespace(metadata.getSourceUUID(), donor.getShardId());
auto minFetchTimestamp = *donor.getMinFetchTimestamp();
auto idToResumeFrom = getOplogFetcherResumeId(
opCtx, metadata.getReshardingUUID(), oplogBufferNss, minFetchTimestamp);
invariant((idToResumeFrom >= ReshardingDonorOplogId{minFetchTimestamp, minFetchTimestamp}));
oplogFetchers.emplace_back(std::make_unique<ReshardingOplogFetcher>(
- std::make_unique<ReshardingOplogFetcher::Env>(opCtx->getServiceContext(), metricsNew),
+ std::make_unique<ReshardingOplogFetcher::Env>(opCtx->getServiceContext(), metrics),
metadata.getReshardingUUID(),
metadata.getSourceUUID(),
// The recipient fetches oplog entries from the donor starting from the largest _id
@@ -182,7 +182,7 @@ std::vector<std::unique_ptr<ReshardingOplogApplier>> ReshardingDataReplication::
invariant((idToResumeFrom >= ReshardingDonorOplogId{minFetchTimestamp, minFetchTimestamp}));
const auto& oplogBufferNss =
- getLocalOplogBufferNamespace(metadata.getSourceUUID(), donorShardId);
+ resharding::getLocalOplogBufferNamespace(metadata.getSourceUUID(), donorShardId);
auto applierMetrics = (*applierMetricsMap)[donorShardId].get();
oplogAppliers.emplace_back(std::make_unique<ReshardingOplogApplier>(
@@ -206,7 +206,7 @@ std::vector<std::unique_ptr<ReshardingOplogApplier>> ReshardingDataReplication::
std::unique_ptr<ReshardingDataReplicationInterface> ReshardingDataReplication::make(
OperationContext* opCtx,
- ReshardingMetricsNew* metricsNew,
+ ReshardingMetrics* metrics,
ReshardingApplierMetricsMap* applierMetricsMap,
CommonReshardingMetadata metadata,
const std::vector<DonorShardFetchTimestamp>& donorShards,
@@ -218,11 +218,11 @@ std::unique_ptr<ReshardingDataReplicationInterface> ReshardingDataReplication::m
std::vector<std::unique_ptr<ReshardingTxnCloner>> txnCloners;
if (!cloningDone) {
- collectionCloner = _makeCollectionCloner(metricsNew, metadata, myShardId, cloneTimestamp);
+ collectionCloner = _makeCollectionCloner(metrics, metadata, myShardId, cloneTimestamp);
txnCloners = _makeTxnCloners(metadata, donorShards);
}
- auto oplogFetchers = _makeOplogFetchers(opCtx, metricsNew, metadata, donorShards, myShardId);
+ auto oplogFetchers = _makeOplogFetchers(opCtx, metrics, metadata, donorShards, myShardId);
auto oplogFetcherExecutor = _makeOplogFetcherExecutor(donorShards.size());
@@ -456,7 +456,7 @@ ReshardingDonorOplogId ReshardingDataReplication::getOplogFetcherResumeId(
if (highestOplogBufferId) {
auto oplogEntry = repl::OplogEntry{highestOplogBufferId->toBson()};
- if (isFinalOplog(oplogEntry, reshardingUUID)) {
+ if (resharding::isFinalOplog(oplogEntry, reshardingUUID)) {
return ReshardingOplogFetcher::kFinalOpAlreadyFetched;
}
diff --git a/src/mongo/db/s/resharding/resharding_data_replication.h b/src/mongo/db/s/resharding/resharding_data_replication.h
index f8348646758..2e44a5d2a21 100644
--- a/src/mongo/db/s/resharding/resharding_data_replication.h
+++ b/src/mongo/db/s/resharding/resharding_data_replication.h
@@ -140,7 +140,7 @@ private:
public:
static std::unique_ptr<ReshardingDataReplicationInterface> make(
OperationContext* opCtx,
- ReshardingMetricsNew* metricsNew,
+ ReshardingMetrics* metrics,
ReshardingApplierMetricsMap* applierMetricsMap,
CommonReshardingMetadata metadata,
const std::vector<DonorShardFetchTimestamp>& donorShards,
@@ -196,7 +196,7 @@ public:
private:
static std::unique_ptr<ReshardingCollectionCloner> _makeCollectionCloner(
- ReshardingMetricsNew* metricsNew,
+ ReshardingMetrics* metrics,
const CommonReshardingMetadata& metadata,
const ShardId& myShardId,
Timestamp cloneTimestamp);
@@ -207,7 +207,7 @@ private:
static std::vector<std::unique_ptr<ReshardingOplogFetcher>> _makeOplogFetchers(
OperationContext* opCtx,
- ReshardingMetricsNew* metricsNew,
+ ReshardingMetrics* metrics,
const CommonReshardingMetadata& metadata,
const std::vector<DonorShardFetchTimestamp>& donorShards,
const ShardId& myShardId);
diff --git a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
index f71ce9f0356..f5f588ac948 100644
--- a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
@@ -27,12 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
-#include <memory>
-#include <vector>
-
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/persistent_task_store.h"
#include "mongo/db/query/collation/collator_factory_mock.h"
@@ -50,7 +44,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -78,7 +71,7 @@ public:
std::vector<ChunkType> chunks = {ChunkType{
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_myDonorId}};
auto rt = RoutingTableHistory::makeNew(_sourceNss,
@@ -193,7 +186,7 @@ TEST_F(ReshardingDataReplicationTest, GetOplogFetcherResumeId) {
auto opCtx = makeOperationContext();
const auto reshardingUUID = UUID::gen();
- auto oplogBufferNss = getLocalOplogBufferNamespace(reshardingUUID, {"shard0"});
+ auto oplogBufferNss = resharding::getLocalOplogBufferNamespace(reshardingUUID, {"shard0"});
const auto minFetchTimestamp = Timestamp{10, 0};
const auto oplogId1 = ReshardingDonorOplogId{{20, 0}, {18, 0}};
diff --git a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
index d95f0fdc23e..632b387a817 100644
--- a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/dbdirectclient.h"
@@ -55,7 +52,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -164,11 +160,11 @@ protected:
const std::string& shardKey) {
auto range1 = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << 5));
ChunkType chunk1(
- uuid, range1, ChunkVersion(1, 0, epoch, timestamp), kShardList[0].getName());
+ uuid, range1, ChunkVersion({epoch, timestamp}, {1, 0}), kShardList[0].getName());
auto range2 = ChunkRange(BSON(shardKey << 5), BSON(shardKey << MAXKEY));
ChunkType chunk2(
- uuid, range2, ChunkVersion(1, 0, epoch, timestamp), kShardList[1].getName());
+ uuid, range2, ChunkVersion({epoch, timestamp}, {1, 0}), kShardList[1].getName());
return {chunk1, chunk2};
}
@@ -199,7 +195,7 @@ protected:
ReshardingEnv env(CollectionCatalog::get(opCtx)->lookupUUIDByNSS(opCtx, kNss).value());
env.destShard = kShardList[1].getName();
- env.version = ChunkVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ env.version = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
env.tempNss =
NamespaceString(kNss.db(),
fmt::format("{}{}",
diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
index 5213b170753..0a9027deea2 100644
--- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp
@@ -129,7 +129,7 @@ std::vector<repl::OplogEntry> ReshardingDonorOplogIterator::_fillBatch(Pipeline&
numBytes += obj.objsize();
- if (isFinalOplog(entry)) {
+ if (resharding::isFinalOplog(entry)) {
// The ReshardingOplogFetcher should never insert documents after the reshardFinalOp
// entry. We defensively check each oplog entry for being the reshardFinalOp and confirm
// the pipeline has been exhausted.
@@ -185,7 +185,7 @@ ExecutorFuture<std::vector<repl::OplogEntry>> ReshardingDonorOplogIterator::getN
const auto& lastEntryInBatch = batch.back();
_resumeToken = getId(lastEntryInBatch);
- if (isFinalOplog(lastEntryInBatch)) {
+ if (resharding::isFinalOplog(lastEntryInBatch)) {
_hasSeenFinalOplogEntry = true;
// Skip returning the final oplog entry because it is known to be a no-op.
batch.pop_back();
diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp
index 26b7646283f..a0491b06e7c 100644
--- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp
@@ -95,7 +95,7 @@ public:
const BSONObj oField(BSON("msg"
<< "Created temporary resharding collection"));
const BSONObj o2Field(
- BSON("type" << kReshardFinalOpLogType << "reshardingUUID" << UUID::gen()));
+ BSON("type" << resharding::kReshardFinalOpLogType << "reshardingUUID" << UUID::gen()));
return makeOplog(_crudNss, _uuid, repl::OpTypeEnum::kNoop, oField, o2Field, oplogId);
}
@@ -103,7 +103,7 @@ public:
ReshardingDonorOplogId oplogId(ts, ts);
const BSONObj oField(BSON("msg"
<< "Latest oplog ts from donor's cursor response"));
- const BSONObj o2Field(BSON("type" << kReshardProgressMark));
+ const BSONObj o2Field(BSON("type" << resharding::kReshardProgressMark));
return makeOplog(_crudNss, _uuid, repl::OpTypeEnum::kNoop, oField, o2Field, oplogId);
}
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp
index 43d91e83b97..27157f82b66 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp
@@ -332,7 +332,12 @@ void clearFilteringMetadata(OperationContext* opCtx, bool scheduleAsyncRefresh)
return true;
});
}
+ clearFilteringMetadata(opCtx, namespacesToRefresh, scheduleAsyncRefresh);
+}
+void clearFilteringMetadata(OperationContext* opCtx,
+ stdx::unordered_set<NamespaceString> namespacesToRefresh,
+ bool scheduleAsyncRefresh) {
for (const auto& nss : namespacesToRefresh) {
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
CollectionShardingRuntime::get(opCtx, nss)->clearFilteringMetadata(opCtx);
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common.h b/src/mongo/db/s/resharding/resharding_donor_recipient_common.h
index 2efba26f659..10be195c586 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common.h
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common.h
@@ -77,6 +77,10 @@ void processReshardingFieldsForCollection(OperationContext* opCtx,
void clearFilteringMetadata(OperationContext* opCtx, bool scheduleAsyncRefresh);
+void clearFilteringMetadata(OperationContext* opCtx,
+ stdx::unordered_set<NamespaceString> namespacesToRefresh,
+ bool scheduleAsyncRefresh);
+
void refreshShardVersion(OperationContext* opCtx, const NamespaceString& nss);
} // namespace resharding
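
The new overload lets a caller scope the clearing to a single operation's namespaces instead of every namespace with an active resharding state document; the boolean-only overload above now just collects all such namespaces and delegates. A usage sketch mirroring the donor service change later in this patch (names as they appear in that code):

    stdx::unordered_set<NamespaceString> namespacesToRefresh{
        metadata.getSourceNss(), metadata.getTempReshardingNss()};
    // false: clear only; true would also schedule an async refresh per namespace.
    resharding::clearFilteringMetadata(
        opCtx, namespacesToRefresh, false /* scheduleAsyncRefresh */);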
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
index 3fccff9812c..e5bd8defdbd 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
@@ -58,7 +58,7 @@ public:
const NamespaceString kOriginalNss = NamespaceString("db", "foo");
const NamespaceString kTemporaryReshardingNss =
- constructTemporaryReshardingNss("db", kExistingUUID);
+ resharding::constructTemporaryReshardingNss("db", kExistingUUID);
const std::string kOriginalShardKey = "oldKey";
const BSONObj kOriginalShardKeyPattern = BSON(kOriginalShardKey << 1);
const std::string kReshardingKey = "newKey";
@@ -111,8 +111,10 @@ protected:
const OID& epoch,
const ShardId& shardThatChunkExistsOn) {
auto range = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << MAXKEY));
- auto chunk = ChunkType(
- uuid, std::move(range), ChunkVersion(1, 0, epoch, timestamp), shardThatChunkExistsOn);
+ auto chunk = ChunkType(uuid,
+ std::move(range),
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ shardThatChunkExistsOn);
ChunkManager cm(kThisShard.getShardId(),
DatabaseVersion(uuid, timestamp),
makeStandaloneRoutingTableHistory(
@@ -133,17 +135,18 @@ protected:
return CollectionMetadata(std::move(cm), kThisShard.getShardId());
}
- ReshardingDonorDocument makeDonorStateDoc() {
+ ReshardingDonorDocument makeDonorStateDoc(NamespaceString sourceNss,
+ NamespaceString tempReshardingNss,
+ BSONObj reshardingKey,
+ std::vector<mongo::ShardId> recipientShards) {
DonorShardContext donorCtx;
donorCtx.setState(DonorStateEnum::kPreparingToDonate);
- ReshardingDonorDocument doc(std::move(donorCtx),
- {kThisShard.getShardId(), kOtherShard.getShardId()});
+ ReshardingDonorDocument doc(std::move(donorCtx), recipientShards);
- NamespaceString sourceNss = kOriginalNss;
auto sourceUUID = UUID::gen();
auto commonMetadata = CommonReshardingMetadata(
- UUID::gen(), sourceNss, sourceUUID, kTemporaryReshardingNss, kReshardingKeyPattern);
+ UUID::gen(), sourceNss, sourceUUID, tempReshardingNss, reshardingKey);
doc.setCommonReshardingMetadata(std::move(commonMetadata));
return doc;
@@ -194,7 +197,7 @@ protected:
const boost::optional<Timestamp>& cloneTimestamp = boost::none) {
auto recipientFields =
TypeCollectionRecipientFields(donorShards, existingUUID, originalNss, 5000);
- emplaceCloneTimestampIfExists(recipientFields, cloneTimestamp);
+ resharding::emplaceCloneTimestampIfExists(recipientFields, cloneTimestamp);
fields.setRecipientFields(std::move(recipientFields));
}
@@ -262,6 +265,19 @@ protected:
ASSERT(donorShardMap.empty());
}
+ void addFilteringMetadata(OperationContext* opCtx, NamespaceString sourceNss, ShardId shardId) {
+ AutoGetCollection autoColl(opCtx, sourceNss, LockMode::MODE_IS);
+ const auto metadata{makeShardedMetadataForOriginalCollection(opCtx, shardId)};
+ ScopedSetShardRole scopedSetShardRole{opCtx,
+ sourceNss,
+ metadata.getShardVersion() /* shardVersion */,
+ boost::none /* databaseVersion */};
+
+ auto csr = CollectionShardingRuntime::get(opCtx, sourceNss);
+ csr->setFilteringMetadata(opCtx, metadata);
+ ASSERT(csr->getCurrentMetadataIfKnown());
+ }
+
private:
DonorShardFetchTimestamp makeDonorShardFetchTimestamp(
ShardId shardId, boost::optional<Timestamp> fetchTimestamp) {
@@ -553,34 +569,10 @@ TEST_F(ReshardingDonorRecipientCommonInternalsTest, ClearReshardingFilteringMeta
}
// Add filtering metadata for the collection being resharded.
- {
- AutoGetCollection autoColl(opCtx, kOriginalNss, LockMode::MODE_IS);
- const auto metadata{
- makeShardedMetadataForOriginalCollection(opCtx, kThisShard.getShardId())};
- ScopedSetShardRole scopedSetShardRole{opCtx,
- kOriginalNss,
- metadata.getShardVersion() /* shardVersion */,
- boost::none /* databaseVersion */};
-
- auto csr = CollectionShardingRuntime::get(opCtx, kOriginalNss);
- csr->setFilteringMetadata(opCtx, metadata);
- ASSERT(csr->getCurrentMetadataIfKnown());
- }
+ addFilteringMetadata(opCtx, kOriginalNss, kThisShard.getShardId());
// Add filtering metadata for the temporary resharding namespace.
- {
- AutoGetCollection autoColl(opCtx, kTemporaryReshardingNss, LockMode::MODE_IS);
- const auto metadata{makeShardedMetadataForTemporaryReshardingCollection(
- opCtx, kThisShard.getShardId())};
- ScopedSetShardRole scopedSetShardRole{opCtx,
- kTemporaryReshardingNss,
- metadata.getShardVersion() /* shardVersion */,
- boost::none /* databaseVersion */};
-
- auto csr = CollectionShardingRuntime::get(opCtx, kTemporaryReshardingNss);
- csr->setFilteringMetadata(opCtx, metadata);
- ASSERT(csr->getCurrentMetadataIfKnown());
- }
+ addFilteringMetadata(opCtx, kTemporaryReshardingNss, kThisShard.getShardId());
// Prior to adding a resharding document, assert that attempting to clear filtering does
// nothing.
@@ -595,7 +587,11 @@ TEST_F(ReshardingDonorRecipientCommonInternalsTest, ClearReshardingFilteringMeta
doSetupFunc();
// Add a resharding donor document that targets the namespaces involved in resharding.
- ReshardingDonorDocument donorDoc = makeDonorStateDoc();
+ ReshardingDonorDocument donorDoc =
+ makeDonorStateDoc(kOriginalNss,
+ kTemporaryReshardingNss,
+ kReshardingKeyPattern,
+ {kThisShard.getShardId(), kOtherShard.getShardId()});
ReshardingDonorService::DonorStateMachine::insertStateDocument(opCtx, donorDoc);
// Clear the filtering metadata (without scheduling a refresh) and assert the metadata is gone.
@@ -622,5 +618,49 @@ TEST_F(ReshardingDonorRecipientCommonInternalsTest, ClearReshardingFilteringMeta
}
}
+TEST_F(ReshardingDonorRecipientCommonInternalsTest, ClearReshardingFilteringMetaDataForActiveOp) {
+ OperationContext* opCtx = operationContext();
+ NamespaceString sourceNss1 = NamespaceString("db", "one");
+ NamespaceString tempReshardingNss1 =
+ resharding::constructTemporaryReshardingNss(sourceNss1.db(), UUID::gen());
+ NamespaceString sourceNss2 = NamespaceString("db", "two");
+ NamespaceString tempReshardingNss2 =
+ resharding::constructTemporaryReshardingNss(sourceNss2.db(), UUID::gen());
+ ShardId shardId1 = ShardId{"recipient1"};
+ ShardId shardId2 = ShardId{"recipient2"};
+ ReshardingDonorDocument doc1 =
+ makeDonorStateDoc(sourceNss1, tempReshardingNss1, BSON("newKey1" << 1), {shardId1});
+ ReshardingDonorDocument doc2 =
+ makeDonorStateDoc(sourceNss2, tempReshardingNss2, BSON("newKey2" << 1), {shardId2});
+
+ ReshardingDonorService::DonorStateMachine::insertStateDocument(opCtx, doc1);
+ ReshardingDonorService::DonorStateMachine::insertStateDocument(opCtx, doc2);
+
+    // Add filtering metadata for the collections being resharded.
+ addFilteringMetadata(opCtx, sourceNss1, {shardId1});
+ addFilteringMetadata(opCtx, sourceNss2, {shardId2});
+
+    // Add filtering metadata for the temporary resharding namespaces.
+ addFilteringMetadata(opCtx, tempReshardingNss1, {shardId1});
+ addFilteringMetadata(opCtx, tempReshardingNss2, {shardId2});
+
+    // Clear the filtering metadata (without scheduling a refresh) for only the namespaces
+    // related to a single operation.
+ resharding::clearFilteringMetadata(opCtx, {sourceNss1, tempReshardingNss1}, false);
+
+ for (auto const& nss : {sourceNss1, tempReshardingNss1}) {
+ AutoGetCollection autoColl(opCtx, nss, LockMode::MODE_IS);
+ auto csr = CollectionShardingRuntime::get(opCtx, nss);
+ ASSERT(csr->getCurrentMetadataIfKnown() == boost::none);
+ }
+
+    // Assert that the filtering metadata is not cleared for the other operation's namespaces.
+ for (auto const& nss : {sourceNss2, tempReshardingNss2}) {
+ AutoGetCollection autoColl(opCtx, nss, LockMode::MODE_IS);
+ auto csr = CollectionShardingRuntime::get(opCtx, nss);
+ ASSERT(csr->getCurrentMetadataIfKnown() != boost::none);
+ }
+}
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/s/resharding/resharding_donor_service.cpp b/src/mongo/db/s/resharding/resharding_donor_service.cpp
index 7f870033a1f..40b1f17f179 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service.cpp
@@ -178,13 +178,17 @@ public:
}
}
- void clearFilteringMetadata(OperationContext* opCtx) {
- resharding::clearFilteringMetadata(opCtx, true /* scheduleAsyncRefresh */);
+ void clearFilteringMetadata(OperationContext* opCtx,
+ const NamespaceString& sourceNss,
+ const NamespaceString& tempReshardingNss) {
+ stdx::unordered_set<NamespaceString> namespacesToRefresh{sourceNss, tempReshardingNss};
+ resharding::clearFilteringMetadata(
+ opCtx, namespacesToRefresh, true /* scheduleAsyncRefresh */);
}
};
-ReshardingMetricsNew::DonorState toMetricsState(DonorStateEnum state) {
- return ReshardingMetricsNew::DonorState(state);
+ReshardingMetrics::DonorState toMetricsState(DonorStateEnum state) {
+ return ReshardingMetrics::DonorState(state);
}
} // namespace
@@ -209,7 +213,7 @@ ReshardingDonorService::DonorStateMachine::DonorStateMachine(
std::unique_ptr<DonorStateMachineExternalState> externalState)
: repl::PrimaryOnlyService::TypedInstance<DonorStateMachine>(),
_donorService(donorService),
- _metricsNew{ReshardingMetricsNew::initializeFrom(donorDoc, getGlobalServiceContext())},
+ _metrics{ReshardingMetrics::initializeFrom(donorDoc, getGlobalServiceContext())},
_metadata{donorDoc.getCommonReshardingMetadata()},
_recipientShardIds{donorDoc.getRecipientShards()},
_donorCtx{donorDoc.getMutableState()},
@@ -233,7 +237,7 @@ ReshardingDonorService::DonorStateMachine::DonorStateMachine(
}()) {
invariant(_externalState);
- _metricsNew->onStateTransition(boost::none, toMetricsState(_donorCtx.getState()));
+ _metrics->onStateTransition(boost::none, toMetricsState(_donorCtx.getState()));
}
ExecutorFuture<void> ReshardingDonorService::DonorStateMachine::_runUntilBlockingWritesOrErrored(
@@ -375,8 +379,8 @@ ExecutorFuture<void> ReshardingDonorService::DonorStateMachine::_finishReshardin
{
auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc());
-
- _externalState->clearFilteringMetadata(opCtx.get());
+ _externalState->clearFilteringMetadata(
+ opCtx.get(), _metadata.getSourceNss(), _metadata.getTempReshardingNss());
RecoverableCriticalSectionService::get(opCtx.get())
->releaseRecoverableCriticalSection(
@@ -385,7 +389,7 @@ ExecutorFuture<void> ReshardingDonorService::DonorStateMachine::_finishReshardin
_critSecReason,
ShardingCatalogClient::kLocalWriteConcern);
- _metricsNew->onCriticalSectionEnd();
+ _metrics->onCriticalSectionEnd();
}
auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc());
@@ -410,6 +414,14 @@ ExecutorFuture<void> ReshardingDonorService::DonorStateMachine::_finishReshardin
Status ReshardingDonorService::DonorStateMachine::_runMandatoryCleanup(
Status status, const CancellationToken& stepdownToken) {
+ _metrics->onStateTransition(toMetricsState(_donorCtx.getState()), boost::none);
+
+    // Destroy the metrics object early so its lifetime is not tied to the lifetime of this
+    // state machine. Future callbacks copy shared pointers to this state machine, which can
+    // cause it to live longer than expected and potentially overlap with a newer instance
+    // when stepping up.
+ _metrics.reset();
+
if (!status.isOK()) {
// If the stepdownToken was triggered, it takes priority in order to make sure that
// the promise is set with an error that can be retried with. If it ran into an
@@ -427,8 +439,6 @@ Status ReshardingDonorService::DonorStateMachine::_runMandatoryCleanup(
ensureFulfilledPromise(lk, _completionPromise, statusForPromise);
}
- _metricsNew->onStateTransition(toMetricsState(_donorCtx.getState()), boost::none);
-
return status;
}
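
The reordering in this hunk is deliberate: the final state transition is recorded and _metrics is reset at the top of cleanup, before the completion promise is fulfilled, rather than afterwards. Since retained future callbacks copy shared pointers to the state machine, the instance can outlive cleanup, and a metrics object destroyed only with the instance could overlap a newer instance's metrics after step-up. A self-contained sketch of the lifetime pattern with stand-in types (not the mongo classes):

    #include <functional>
    #include <iostream>
    #include <memory>

    // Stand-in for ReshardingMetrics: registers/deregisters itself on
    // construction/destruction.
    struct Metrics {
        Metrics() { std::cout << "metrics registered\n"; }
        ~Metrics() { std::cout << "metrics deregistered\n"; }
    };

    struct StateMachine : std::enable_shared_from_this<StateMachine> {
        std::unique_ptr<Metrics> metrics = std::make_unique<Metrics>();

        std::function<void()> retainedCallback() {
            // A retained callback copies shared_from_this(), so the state
            // machine itself may outlive cleanup.
            auto self = shared_from_this();
            return [self] { std::cout << "late callback still holds the instance\n"; };
        }

        void runMandatoryCleanup() {
            metrics.reset();  // deregister now, not at state-machine destruction
        }
    };

    int main() {
        auto sm = std::make_shared<StateMachine>();
        auto lateCallback = sm->retainedCallback();  // keeps a copy of sm alive
        sm->runMandatoryCleanup();                   // metrics deregistered here...
        sm.reset();
        lateCallback();  // ...even though the instance is still alive
    }

Run as-is, it prints "metrics deregistered" before the late callback fires, showing the metrics lifetime is bounded by cleanup rather than by the last shared_ptr copy.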
@@ -493,7 +503,7 @@ void ReshardingDonorService::DonorStateMachine::interrupt(Status status) {}
boost::optional<BSONObj> ReshardingDonorService::DonorStateMachine::reportForCurrentOp(
MongoProcessInterface::CurrentOpConnectionsMode connMode,
MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- return _metricsNew->reportForCurrentOp();
+ return _metrics->reportForCurrentOp();
}
void ReshardingDonorService::DonorStateMachine::onReshardingFieldsChanges(
@@ -521,11 +531,11 @@ void ReshardingDonorService::DonorStateMachine::onReshardingFieldsChanges(
}
void ReshardingDonorService::DonorStateMachine::onWriteDuringCriticalSection() {
- _metricsNew->onWriteDuringCriticalSection();
+ _metrics->onWriteDuringCriticalSection();
}
void ReshardingDonorService::DonorStateMachine::onReadDuringCriticalSection() {
- _metricsNew->onReadDuringCriticalSection();
+ _metrics->onReadDuringCriticalSection();
}
SharedSemiFuture<void> ReshardingDonorService::DonorStateMachine::awaitCriticalSectionAcquired() {
@@ -690,7 +700,7 @@ void ReshardingDonorService::DonorStateMachine::
_critSecReason,
ShardingCatalogClient::kLocalWriteConcern);
- _metricsNew->onCriticalSectionBegin();
+ _metrics->onCriticalSectionBegin();
}
{
@@ -711,7 +721,7 @@ void ReshardingDonorService::DonorStateMachine::
oplog.setObject(
BSON("msg" << fmt::format("Writes to {} are temporarily blocked for resharding.",
_metadata.getSourceNss().toString())));
- oplog.setObject2(BSON("type" << kReshardFinalOpLogType << "reshardingUUID"
+ oplog.setObject2(BSON("type" << resharding::kReshardFinalOpLogType << "reshardingUUID"
<< _metadata.getReshardingUUID()));
oplog.setOpTime(OplogSlot());
oplog.setWallClockTime(opCtx->getServiceContext()->getFastClockSource()->now());
@@ -828,7 +838,7 @@ void ReshardingDonorService::DonorStateMachine::_transitionState(DonorShardConte
_updateDonorDocument(std::move(newDonorCtx));
- _metricsNew->onStateTransition(toMetricsState(oldState), toMetricsState(newState));
+ _metrics->onStateTransition(toMetricsState(oldState), toMetricsState(newState));
LOGV2_INFO(5279505,
"Transitioned resharding donor state",
@@ -852,7 +862,7 @@ void ReshardingDonorService::DonorStateMachine::_transitionToDonatingInitialData
void ReshardingDonorService::DonorStateMachine::_transitionToError(Status abortReason) {
auto newDonorCtx = _donorCtx;
newDonorCtx.setState(DonorStateEnum::kError);
- emplaceTruncatedAbortReasonIfExists(newDonorCtx, abortReason);
+ resharding::emplaceTruncatedAbortReasonIfExists(newDonorCtx, abortReason);
_transitionState(std::move(newDonorCtx));
}
diff --git a/src/mongo/db/s/resharding/resharding_donor_service.h b/src/mongo/db/s/resharding/resharding_donor_service.h
index f2f4d99d2e8..3f3d88965db 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service.h
+++ b/src/mongo/db/s/resharding/resharding_donor_service.h
@@ -32,7 +32,7 @@
#include "mongo/db/cancelable_operation_context.h"
#include "mongo/db/repl/primary_only_service.h"
#include "mongo/db/s/resharding/donor_document_gen.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/s/resharding/type_collection_fields_gen.h"
namespace mongo {
@@ -218,7 +218,7 @@ private:
// The primary-only service instance corresponding to the donor instance. Not owned.
const ReshardingDonorService* const _donorService;
- std::unique_ptr<ReshardingMetricsNew> _metricsNew;
+ std::unique_ptr<ReshardingMetrics> _metrics;
// The in-memory representation of the immutable portion of the document in
// config.localReshardingOperations.donor.
@@ -297,7 +297,9 @@ public:
const BSONObj& query,
const BSONObj& update) = 0;
- virtual void clearFilteringMetadata(OperationContext* opCtx) = 0;
+ virtual void clearFilteringMetadata(OperationContext* opCtx,
+ const NamespaceString& sourceNss,
+ const NamespaceString& tempReshardingNss) = 0;
};
} // namespace mongo
diff --git a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
index 0f40919d14d..4d83cfe5e44 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
@@ -85,7 +85,9 @@ public:
const BSONObj& query,
const BSONObj& update) override {}
- void clearFilteringMetadata(OperationContext* opCtx) override {}
+ void clearFilteringMetadata(OperationContext* opCtx,
+ const NamespaceString& sourceNss,
+ const NamespaceString& tempReshardingNss) override {}
};
class DonorOpObserverForTest : public OpObserverForTest {
@@ -148,12 +150,12 @@ public:
NamespaceString sourceNss("sourcedb.sourcecollection");
auto sourceUUID = UUID::gen();
- auto commonMetadata =
- CommonReshardingMetadata(UUID::gen(),
- sourceNss,
- sourceUUID,
- constructTemporaryReshardingNss(sourceNss.db(), sourceUUID),
- BSON("newKey" << 1));
+ auto commonMetadata = CommonReshardingMetadata(
+ UUID::gen(),
+ sourceNss,
+ sourceUUID,
+ resharding::constructTemporaryReshardingNss(sourceNss.db(), sourceUUID),
+ BSON("newKey" << 1));
commonMetadata.setStartTime(getServiceContext()->getFastClockSource()->now());
doc.setCommonReshardingMetadata(std::move(commonMetadata));
@@ -348,7 +350,7 @@ TEST_F(ReshardingDonorServiceTest, WritesFinalReshardOpOplogEntriesWhileWritesBl
DBDirectClient client(opCtx.get());
FindCommandRequest findRequest{NamespaceString::kRsOplogNamespace};
- findRequest.setFilter(BSON("o2.type" << kReshardFinalOpLogType));
+ findRequest.setFilter(BSON("o2.type" << resharding::kReshardFinalOpLogType));
auto cursor = client.find(std::move(findRequest));
ASSERT_TRUE(cursor->more()) << "Found no oplog entries for source collection";
@@ -710,7 +712,7 @@ TEST_F(ReshardingDonorServiceTest, TruncatesXLErrorOnDonorDocument) {
// to the primitive truncation algorithm - Check that the total size is less than
// kReshardErrorMaxBytes + a couple additional bytes to provide a buffer for the field
// name sizes.
- int maxReshardErrorBytesCeiling = kReshardErrorMaxBytes + 200;
+ int maxReshardErrorBytesCeiling = resharding::kReshardErrorMaxBytes + 200;
ASSERT_LT(persistedAbortReasonBSON->objsize(), maxReshardErrorBytesCeiling);
ASSERT_EQ(persistedAbortReasonBSON->getIntField("code"),
ErrorCodes::ReshardCollectionTruncatedError);
diff --git a/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp b/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp
index 9c2b78385fa..74911c8518f 100644
--- a/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp
+++ b/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp
@@ -48,8 +48,9 @@ namespace {
std::vector<ShardId> getAllParticipantsFromCoordDoc(const ReshardingCoordinatorDocument& doc) {
std::vector<ShardId> participants;
- auto donorShards = extractShardIdsFromParticipantEntriesAsSet(doc.getDonorShards());
- auto recipientShards = extractShardIdsFromParticipantEntriesAsSet(doc.getRecipientShards());
+ auto donorShards = resharding::extractShardIdsFromParticipantEntriesAsSet(doc.getDonorShards());
+ auto recipientShards =
+ resharding::extractShardIdsFromParticipantEntriesAsSet(doc.getRecipientShards());
std::set_union(donorShards.begin(),
donorShards.end(),
recipientShards.begin(),
diff --git a/src/mongo/db/s/resharding/resharding_metrics_new.cpp b/src/mongo/db/s/resharding/resharding_metrics.cpp
index e07468ad1b9..610ef970475 100644
--- a/src/mongo/db/s/resharding/resharding_metrics_new.cpp
+++ b/src/mongo/db/s/resharding/resharding_metrics.cpp
@@ -26,15 +26,15 @@
* exception statement from all source files in the program, then also delete
* it in the license file.
*/
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/exec/document_value/document.h"
namespace mongo {
namespace {
-inline ReshardingMetricsNew::State getDefaultState(ReshardingMetricsNew::Role role) {
- using Role = ReshardingMetricsNew::Role;
+inline ReshardingMetrics::State getDefaultState(ReshardingMetrics::Role role) {
+ using Role = ReshardingMetrics::Role;
switch (role) {
case Role::kCoordinator:
return CoordinatorStateEnum::kUnused;
@@ -70,14 +70,13 @@ Date_t readStartTime(const CommonReshardingMetadata& metadata, ClockSource* fall
} // namespace
-ReshardingMetricsNew::ReshardingMetricsNew(
- UUID instanceId,
- BSONObj shardKey,
- NamespaceString nss,
- Role role,
- Date_t startTime,
- ClockSource* clockSource,
- ShardingDataTransformCumulativeMetrics* cumulativeMetrics)
+ReshardingMetrics::ReshardingMetrics(UUID instanceId,
+ BSONObj shardKey,
+ NamespaceString nss,
+ Role role,
+ Date_t startTime,
+ ClockSource* clockSource,
+ ShardingDataTransformCumulativeMetrics* cumulativeMetrics)
: ShardingDataTransformInstanceMetrics{std::move(instanceId),
createOriginalCommand(nss, std::move(shardKey)),
nss,
@@ -87,44 +86,42 @@ ReshardingMetricsNew::ReshardingMetricsNew(
cumulativeMetrics},
_state{getDefaultState(role)} {}
-ReshardingMetricsNew::ReshardingMetricsNew(
- const CommonReshardingMetadata& metadata,
- Role role,
- ClockSource* clockSource,
- ShardingDataTransformCumulativeMetrics* cumulativeMetrics)
- : ReshardingMetricsNew{metadata.getReshardingUUID(),
- metadata.getReshardingKey().toBSON(),
- metadata.getSourceNss(),
- role,
- readStartTime(metadata, clockSource),
- clockSource,
- cumulativeMetrics} {}
-
-std::string ReshardingMetricsNew::createOperationDescription() const noexcept {
+ReshardingMetrics::ReshardingMetrics(const CommonReshardingMetadata& metadata,
+ Role role,
+ ClockSource* clockSource,
+ ShardingDataTransformCumulativeMetrics* cumulativeMetrics)
+ : ReshardingMetrics{metadata.getReshardingUUID(),
+ metadata.getReshardingKey().toBSON(),
+ metadata.getSourceNss(),
+ role,
+ readStartTime(metadata, clockSource),
+ clockSource,
+ cumulativeMetrics} {}
+
+std::string ReshardingMetrics::createOperationDescription() const noexcept {
return fmt::format("ReshardingMetrics{}Service {}",
ShardingDataTransformMetrics::getRoleName(_role),
_instanceId.toString());
}
-std::unique_ptr<ReshardingMetricsNew> ReshardingMetricsNew::makeInstance(
- UUID instanceId,
- BSONObj shardKey,
- NamespaceString nss,
- Role role,
- Date_t startTime,
- ServiceContext* serviceContext) {
+std::unique_ptr<ReshardingMetrics> ReshardingMetrics::makeInstance(UUID instanceId,
+ BSONObj shardKey,
+ NamespaceString nss,
+ Role role,
+ Date_t startTime,
+ ServiceContext* serviceContext) {
auto cumulativeMetrics =
ShardingDataTransformCumulativeMetrics::getForResharding(serviceContext);
- return std::make_unique<ReshardingMetricsNew>(instanceId,
- createOriginalCommand(nss, std::move(shardKey)),
- std::move(nss),
- role,
- startTime,
- serviceContext->getFastClockSource(),
- cumulativeMetrics);
+ return std::make_unique<ReshardingMetrics>(instanceId,
+ createOriginalCommand(nss, std::move(shardKey)),
+ std::move(nss),
+ role,
+ startTime,
+ serviceContext->getFastClockSource(),
+ cumulativeMetrics);
}
-StringData ReshardingMetricsNew::getStateString() const noexcept {
+StringData ReshardingMetrics::getStateString() const noexcept {
return stdx::visit(
visit_helper::Overloaded{
[](CoordinatorStateEnum state) { return CoordinatorState_serializer(state); },
@@ -133,7 +130,7 @@ StringData ReshardingMetricsNew::getStateString() const noexcept {
_state.load());
}
-void ReshardingMetricsNew::accumulateFrom(const ReshardingOplogApplierProgress& progressDoc) {
+void ReshardingMetrics::accumulateFrom(const ReshardingOplogApplierProgress& progressDoc) {
invariant(_role == Role::kRecipient);
accumulateValues(progressDoc.getInsertsApplied(),
@@ -142,7 +139,7 @@ void ReshardingMetricsNew::accumulateFrom(const ReshardingOplogApplierProgress&
progressDoc.getWritesToStashCollections());
}
-void ReshardingMetricsNew::restoreRecipientSpecificFields(
+void ReshardingMetrics::restoreRecipientSpecificFields(
const ReshardingRecipientDocument& document) {
auto metrics = document.getMetrics();
if (!metrics) {
@@ -161,14 +158,14 @@ void ReshardingMetricsNew::restoreRecipientSpecificFields(
restorePhaseDurationFields(document);
}
-void ReshardingMetricsNew::restoreCoordinatorSpecificFields(
+void ReshardingMetrics::restoreCoordinatorSpecificFields(
const ReshardingCoordinatorDocument& document) {
restorePhaseDurationFields(document);
}
-ReshardingMetricsNew::DonorState::DonorState(DonorStateEnum enumVal) : _enumVal(enumVal) {}
+ReshardingMetrics::DonorState::DonorState(DonorStateEnum enumVal) : _enumVal(enumVal) {}
-ShardingDataTransformCumulativeMetrics::DonorStateEnum ReshardingMetricsNew::DonorState::toMetrics()
+ShardingDataTransformCumulativeMetrics::DonorStateEnum ReshardingMetrics::DonorState::toMetrics()
const {
using MetricsEnum = ShardingDataTransformCumulativeMetrics::DonorStateEnum;
@@ -204,15 +201,14 @@ ShardingDataTransformCumulativeMetrics::DonorStateEnum ReshardingMetricsNew::Don
}
}
-DonorStateEnum ReshardingMetricsNew::DonorState::getState() const {
+DonorStateEnum ReshardingMetrics::DonorState::getState() const {
return _enumVal;
}
-ReshardingMetricsNew::RecipientState::RecipientState(RecipientStateEnum enumVal)
- : _enumVal(enumVal) {}
+ReshardingMetrics::RecipientState::RecipientState(RecipientStateEnum enumVal) : _enumVal(enumVal) {}
ShardingDataTransformCumulativeMetrics::RecipientStateEnum
-ReshardingMetricsNew::RecipientState::toMetrics() const {
+ReshardingMetrics::RecipientState::toMetrics() const {
using MetricsEnum = ShardingDataTransformCumulativeMetrics::RecipientStateEnum;
switch (_enumVal) {
@@ -248,15 +244,15 @@ ReshardingMetricsNew::RecipientState::toMetrics() const {
}
}
-RecipientStateEnum ReshardingMetricsNew::RecipientState::getState() const {
+RecipientStateEnum ReshardingMetrics::RecipientState::getState() const {
return _enumVal;
}
-ReshardingMetricsNew::CoordinatorState::CoordinatorState(CoordinatorStateEnum enumVal)
+ReshardingMetrics::CoordinatorState::CoordinatorState(CoordinatorStateEnum enumVal)
: _enumVal(enumVal) {}
ShardingDataTransformCumulativeMetrics::CoordinatorStateEnum
-ReshardingMetricsNew::CoordinatorState::toMetrics() const {
+ReshardingMetrics::CoordinatorState::toMetrics() const {
switch (_enumVal) {
case CoordinatorStateEnum::kUnused:
return ShardingDataTransformCumulativeMetrics::CoordinatorStateEnum::kUnused;
@@ -292,7 +288,7 @@ ReshardingMetricsNew::CoordinatorState::toMetrics() const {
}
}
-CoordinatorStateEnum ReshardingMetricsNew::CoordinatorState::getState() const {
+CoordinatorStateEnum ReshardingMetrics::CoordinatorState::getState() const {
return _enumVal;
}
diff --git a/src/mongo/db/s/resharding/resharding_metrics_new.h b/src/mongo/db/s/resharding/resharding_metrics.h
index b8e96698b0d..a1faa5a96da 100644
--- a/src/mongo/db/s/resharding/resharding_metrics_new.h
+++ b/src/mongo/db/s/resharding/resharding_metrics.h
@@ -38,7 +38,7 @@
namespace mongo {
-class ReshardingMetricsNew : public ShardingDataTransformInstanceMetrics {
+class ReshardingMetrics : public ShardingDataTransformInstanceMetrics {
public:
using State = stdx::variant<CoordinatorStateEnum, RecipientStateEnum, DonorStateEnum>;
@@ -78,24 +78,24 @@ public:
CoordinatorStateEnum _enumVal;
};
- ReshardingMetricsNew(UUID instanceId,
- BSONObj shardKey,
- NamespaceString nss,
- Role role,
- Date_t startTime,
- ClockSource* clockSource,
- ShardingDataTransformCumulativeMetrics* cumulativeMetrics);
- ReshardingMetricsNew(const CommonReshardingMetadata& metadata,
- Role role,
- ClockSource* clockSource,
- ShardingDataTransformCumulativeMetrics* cumulativeMetrics);
-
- static std::unique_ptr<ReshardingMetricsNew> makeInstance(UUID instanceId,
- BSONObj shardKey,
- NamespaceString nss,
- Role role,
- Date_t startTime,
- ServiceContext* serviceContext);
+ ReshardingMetrics(UUID instanceId,
+ BSONObj shardKey,
+ NamespaceString nss,
+ Role role,
+ Date_t startTime,
+ ClockSource* clockSource,
+ ShardingDataTransformCumulativeMetrics* cumulativeMetrics);
+ ReshardingMetrics(const CommonReshardingMetadata& metadata,
+ Role role,
+ ClockSource* clockSource,
+ ShardingDataTransformCumulativeMetrics* cumulativeMetrics);
+
+ static std::unique_ptr<ReshardingMetrics> makeInstance(UUID instanceId,
+ BSONObj shardKey,
+ NamespaceString nss,
+ Role role,
+ Date_t startTime,
+ ServiceContext* serviceContext);
template <typename T>
static auto initializeFrom(const T& document,
@@ -103,10 +103,10 @@ public:
ShardingDataTransformCumulativeMetrics* cumulativeMetrics) {
static_assert(resharding_metrics::isStateDocument<T>);
auto result =
- std::make_unique<ReshardingMetricsNew>(document.getCommonReshardingMetadata(),
- resharding_metrics::getRoleForStateDocument<T>(),
- clockSource,
- cumulativeMetrics);
+ std::make_unique<ReshardingMetrics>(document.getCommonReshardingMetadata(),
+ resharding_metrics::getRoleForStateDocument<T>(),
+ clockSource,
+ cumulativeMetrics);
result->setState(resharding_metrics::getState(document));
result->restoreRoleSpecificFields(document);
return result;
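
initializeFrom deduces the metrics Role from the state-document type at compile time (the static_assert rejects non-state-document types), restores the persisted state, then backfills role-specific fields via restoreRoleSpecificFields. A usage sketch matching the donor service call site earlier in this patch; donorDoc and the state handling are illustrative:

    // donorDoc is a ReshardingDonorDocument, e.g. recovered on step-up;
    // the role is deduced as kDonor from the document type.
    auto metrics = ReshardingMetrics::initializeFrom(donorDoc, getGlobalServiceContext());
    metrics->onStateTransition(boost::none, toMetricsState(donorDoc.getMutableState().getState()));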
diff --git a/src/mongo/db/s/resharding/resharding_metrics_new_test.cpp b/src/mongo/db/s/resharding/resharding_metrics_test.cpp
index 82bcba56d43..e57581cf8dd 100644
--- a/src/mongo/db/s/resharding/resharding_metrics_new_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_metrics_test.cpp
@@ -30,7 +30,7 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/s/resharding/resharding_service_test_helpers.h"
#include "mongo/db/s/resharding/resharding_util.h"
#include "mongo/db/s/sharding_data_transform_cumulative_metrics.h"
@@ -49,16 +49,16 @@ const auto kShardKey = BSON("newKey" << 1);
class ReshardingMetricsTest : public ShardingDataTransformMetricsTestFixture {
public:
- std::unique_ptr<ReshardingMetricsNew> createInstanceMetrics(ClockSource* clockSource,
- UUID instanceId = UUID::gen(),
- Role role = Role::kDonor) {
- return std::make_unique<ReshardingMetricsNew>(instanceId,
- BSON("y" << 1),
- kTestNamespace,
- role,
- clockSource->now(),
- clockSource,
- &_cumulativeMetrics);
+ std::unique_ptr<ReshardingMetrics> createInstanceMetrics(ClockSource* clockSource,
+ UUID instanceId = UUID::gen(),
+ Role role = Role::kDonor) {
+ return std::make_unique<ReshardingMetrics>(instanceId,
+ BSON("y" << 1),
+ kTestNamespace,
+ role,
+ clockSource->now(),
+ clockSource,
+ &_cumulativeMetrics);
}
const UUID& getSourceCollectionId() {
@@ -69,7 +69,7 @@ public:
template <typename T>
BSONObj getReportFromStateDocument(T document) {
auto metrics =
- ReshardingMetricsNew::initializeFrom(document, getClockSource(), &_cumulativeMetrics);
+ ReshardingMetrics::initializeFrom(document, getClockSource(), &_cumulativeMetrics);
return metrics->reportForCurrentOp();
}
@@ -98,12 +98,12 @@ public:
}
CommonReshardingMetadata createCommonReshardingMetadata(const UUID& operationId) {
- CommonReshardingMetadata metadata{
- operationId,
- kTestNamespace,
- getSourceCollectionId(),
- constructTemporaryReshardingNss(kTestNamespace.db(), getSourceCollectionId()),
- kShardKey};
+ CommonReshardingMetadata metadata{operationId,
+ kTestNamespace,
+ getSourceCollectionId(),
+ resharding::constructTemporaryReshardingNss(
+ kTestNamespace.db(), getSourceCollectionId()),
+ kShardKey};
metadata.setStartTime(getClockSource()->now() - kRunningTime);
return metadata;
}
@@ -169,7 +169,7 @@ public:
doc.setMetrics(metricsDoc);
auto metrics =
- ReshardingMetricsNew::initializeFrom(doc, getClockSource(), &_cumulativeMetrics);
+ ReshardingMetrics::initializeFrom(doc, getClockSource(), &_cumulativeMetrics);
clock->advance(kInterval);
auto report = metrics->reportForCurrentOp();
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
index 1478a3ec30c..9a643ef819e 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
@@ -252,7 +252,7 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt
// First, query the conflict stash collection using [op _id] as the query. If a doc exists,
// apply rule #1 and run a replacement update on the stash collection.
- auto stashCollDoc = _queryStashCollById(opCtx, db, stashColl, idQuery);
+ auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery);
if (!stashCollDoc.isEmpty()) {
auto request = UpdateRequest();
request.setNamespaceString(_myStashNss);
@@ -348,7 +348,7 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt
// First, query the conflict stash collection using [op _id] as the query. If a doc exists,
// apply rule #1 and update the doc from the stash collection.
- auto stashCollDoc = _queryStashCollById(opCtx, db, stashColl, idQuery);
+ auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery);
if (!stashCollDoc.isEmpty()) {
auto request = UpdateRequest();
request.setNamespaceString(_myStashNss);
@@ -430,7 +430,7 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock(OperationContext* opCt
// First, query the conflict stash collection using [op _id] as the query. If a doc exists,
// apply rule #1 and delete the doc from the stash collection.
- auto stashCollDoc = _queryStashCollById(opCtx, db, stashColl, idQuery);
+ auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery);
if (!stashCollDoc.isEmpty()) {
auto nDeleted = deleteObjects(opCtx, stashColl, _myStashNss, idQuery, true /* justOne */);
invariant(nDeleted != 0);
@@ -543,7 +543,6 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock(OperationContext* opCt
}
BSONObj ReshardingOplogApplicationRules::_queryStashCollById(OperationContext* opCtx,
- Database* db,
const CollectionPtr& coll,
const BSONObj& idQuery) const {
const IndexCatalog* indexCatalog = coll->getIndexCatalog();
@@ -552,7 +551,7 @@ BSONObj ReshardingOplogApplicationRules::_queryStashCollById(OperationContext* o
indexCatalog->haveIdIndex(opCtx));
BSONObj result;
- Helpers::findById(opCtx, db, _myStashNss.ns(), idQuery, result);
+ Helpers::findById(opCtx, _myStashNss.ns(), idQuery, result);
return result;
}
} // namespace mongo
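
The stash lookup drops the Database* parameter in step with the Helpers::findById overload used here, which needs only the operation context and the namespace string; the header hunk below removes the parameter from the declaration to match. Call-site sketch:

    // Before: Helpers::findById(opCtx, db, _myStashNss.ns(), idQuery, result);
    // After: the database handle is implicit in the operation context.
    BSONObj result;
    Helpers::findById(opCtx, _myStashNss.ns(), idQuery, result);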
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.h b/src/mongo/db/s/resharding/resharding_oplog_application.h
index b8bd3942b40..4e00a62a269 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.h
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.h
@@ -96,7 +96,6 @@ private:
// Queries '_stashNss' using 'idQuery'.
BSONObj _queryStashCollById(OperationContext* opCtx,
- Database* db,
const CollectionPtr& coll,
const BSONObj& idQuery) const;
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
index cf449c4c00c..d9edf786371 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
@@ -271,7 +271,7 @@ NamespaceString ReshardingOplogApplier::ensureStashCollectionExists(
const UUID& existingUUID,
const ShardId& donorShardId,
const CollectionOptions& options) {
- auto nss = getLocalConflictStashNamespace(existingUUID, donorShardId);
+ auto nss = resharding::getLocalConflictStashNamespace(existingUUID, donorShardId);
resharding::data_copy::ensureCollectionExists(opCtx, nss, options);
return nss;
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier.h b/src/mongo/db/s/resharding/resharding_oplog_applier.h
index 56a7e9d3a0a..f1df65219cc 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier.h
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier.h
@@ -36,7 +36,7 @@
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/s/resharding/donor_oplog_id_gen.h"
#include "mongo/db/s/resharding/resharding_donor_oplog_iterator.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/s/resharding/resharding_oplog_application.h"
#include "mongo/db/s/resharding/resharding_oplog_applier_progress_gen.h"
#include "mongo/db/s/resharding/resharding_oplog_batch_applier.h"
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.cpp
index 31bb6ca8dd6..7a474b7edf1 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.cpp
@@ -34,8 +34,8 @@
namespace mongo {
ReshardingOplogApplierMetrics::ReshardingOplogApplierMetrics(
- ReshardingMetricsNew* metricsNew, boost::optional<ReshardingOplogApplierProgress> progressDoc)
- : _metricsNew(metricsNew) {
+ ReshardingMetrics* metrics, boost::optional<ReshardingOplogApplierProgress> progressDoc)
+ : _metrics(metrics) {
if (progressDoc) {
_insertsApplied = progressDoc->getInsertsApplied();
_updatesApplied = progressDoc->getUpdatesApplied();
@@ -46,35 +46,35 @@ ReshardingOplogApplierMetrics::ReshardingOplogApplierMetrics(
void ReshardingOplogApplierMetrics::onInsertApplied() {
_insertsApplied++;
- _metricsNew->onInsertApplied();
+ _metrics->onInsertApplied();
}
void ReshardingOplogApplierMetrics::onUpdateApplied() {
_updatesApplied++;
- _metricsNew->onUpdateApplied();
+ _metrics->onUpdateApplied();
}
void ReshardingOplogApplierMetrics::onDeleteApplied() {
_deletesApplied++;
- _metricsNew->onDeleteApplied();
+ _metrics->onDeleteApplied();
}
void ReshardingOplogApplierMetrics::onBatchRetrievedDuringOplogApplying(Milliseconds elapsed) {
- _metricsNew->onBatchRetrievedDuringOplogApplying(elapsed);
+ _metrics->onBatchRetrievedDuringOplogApplying(elapsed);
}
void ReshardingOplogApplierMetrics::onOplogLocalBatchApplied(Milliseconds elapsed) {
- _metricsNew->onOplogLocalBatchApplied(elapsed);
+ _metrics->onOplogLocalBatchApplied(elapsed);
}
void ReshardingOplogApplierMetrics::onOplogEntriesApplied(int64_t numEntries) {
_oplogEntriesApplied += numEntries;
- _metricsNew->onOplogEntriesApplied(numEntries);
+ _metrics->onOplogEntriesApplied(numEntries);
}
void ReshardingOplogApplierMetrics::onWriteToStashCollections() {
_writesToStashCollections++;
- _metricsNew->onWriteToStashedCollections();
+ _metrics->onWriteToStashedCollections();
}
int64_t ReshardingOplogApplierMetrics::getInsertsApplied() const {
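
Note the two-level pattern these renamed wrappers preserve: each event bumps a per-applier counter (seeded from the ReshardingOplogApplierProgress document on construction) and forwards to the shared per-operation ReshardingMetrics rollup, which is why every on*() method above touches both a member and the _metrics pointer. A minimal usage sketch, with construction as in the tests below:

    // metrics is the per-operation ReshardingMetrics instance; no prior
    // progress document, so the local counters start at zero.
    ReshardingOplogApplierMetrics applierMetrics(metrics.get(), boost::none);
    applierMetrics.onInsertApplied();         // local counter + rollup
    applierMetrics.onOplogEntriesApplied(3);  // adds to both tallies
    invariant(applierMetrics.getInsertsApplied() == 1);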
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.h b/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.h
index 28830da1bfc..14347ce0b6b 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.h
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.h
@@ -29,7 +29,7 @@
#pragma once
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/s/resharding/resharding_oplog_applier_progress_gen.h"
#include "mongo/util/duration.h"
@@ -40,7 +40,7 @@ namespace mongo {
*/
class ReshardingOplogApplierMetrics {
public:
- ReshardingOplogApplierMetrics(ReshardingMetricsNew* metricsNew,
+ ReshardingOplogApplierMetrics(ReshardingMetrics* metrics,
boost::optional<ReshardingOplogApplierProgress> progressDoc);
void onInsertApplied();
@@ -59,7 +59,7 @@ public:
int64_t getWritesToStashCollections() const;
private:
- ReshardingMetricsNew* _metricsNew;
+ ReshardingMetrics* _metrics;
int64_t _insertsApplied{0};
int64_t _updatesApplied{0};
int64_t _deletesApplied{0};
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_metrics_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_metrics_test.cpp
index 44ea5efb842..7c04439713a 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_metrics_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_metrics_test.cpp
@@ -42,14 +42,14 @@ namespace {
class ReshardingOplogApplierMetricsTest : public ShardingDataTransformMetricsTestFixture {
public:
- std::unique_ptr<ReshardingMetricsNew> createInstanceMetrics() {
- return std::make_unique<ReshardingMetricsNew>(UUID::gen(),
- kTestCommand,
- kTestNamespace,
- ReshardingMetricsNew::Role::kRecipient,
- getClockSource()->now(),
- getClockSource(),
- &_cumulativeMetrics);
+ std::unique_ptr<ReshardingMetrics> createInstanceMetrics() {
+ return std::make_unique<ReshardingMetrics>(UUID::gen(),
+ kTestCommand,
+ kTestNamespace,
+ ReshardingMetrics::Role::kRecipient,
+ getClockSource()->now(),
+ getClockSource(),
+ &_cumulativeMetrics);
}
};
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
index 0e3f5a87504..d2313684ff9 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <fmt/format.h>
#include "mongo/db/cancelable_operation_context.h"
@@ -64,7 +61,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -158,13 +154,12 @@ public:
_cm = createChunkManagerForOriginalColl();
- _metrics =
- ReshardingMetricsNew::makeInstance(kCrudUUID,
- BSON("y" << 1),
- kCrudNs,
- ReshardingMetricsNew::Role::kRecipient,
- getServiceContext()->getFastClockSource()->now(),
- getServiceContext());
+ _metrics = ReshardingMetrics::makeInstance(kCrudUUID,
+ BSON("y" << 1),
+ kCrudNs,
+ ReshardingMetrics::Role::kRecipient,
+ getServiceContext()->getFastClockSource()->now(),
+ getServiceContext());
_applierMetrics =
std::make_unique<ReshardingOplogApplierMetrics>(_metrics.get(), boost::none);
@@ -195,17 +190,17 @@ public:
kCrudUUID,
ChunkRange{BSON(kOriginalShardKey << MINKEY),
BSON(kOriginalShardKey << -std::numeric_limits<double>::infinity())},
- ChunkVersion(1, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
_sourceId.getShardId()},
ChunkType{
kCrudUUID,
ChunkRange{BSON(kOriginalShardKey << -std::numeric_limits<double>::infinity()),
BSON(kOriginalShardKey << 0)},
- ChunkVersion(1, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
kOtherShardId},
ChunkType{kCrudUUID,
ChunkRange{BSON(kOriginalShardKey << 0), BSON(kOriginalShardKey << MAXKEY)},
- ChunkVersion(1, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
_sourceId.getShardId()}};
auto rt = RoutingTableHistory::makeNew(kCrudNs,
@@ -363,7 +358,7 @@ protected:
boost::optional<ChunkManager> _cm;
const ReshardingSourceId _sourceId{UUID::gen(), kMyShardId};
- std::unique_ptr<ReshardingMetricsNew> _metrics;
+ std::unique_ptr<ReshardingMetrics> _metrics;
std::unique_ptr<ReshardingOplogApplierMetrics> _applierMetrics;
std::shared_ptr<executor::ThreadPoolTaskExecutor> _executor;
diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
index f8af8d80998..ca596e65e16 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <boost/optional/optional_io.hpp>
#include <memory>
#include <vector>
@@ -46,7 +43,7 @@
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/db/s/op_observer_sharding_impl.h"
#include "mongo/db/s/resharding/resharding_data_copy_util.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/s/resharding/resharding_oplog_application.h"
#include "mongo/db/s/resharding/resharding_oplog_batch_applier.h"
#include "mongo/db/s/resharding/resharding_oplog_session_application.h"
@@ -66,7 +63,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -111,15 +107,15 @@ public:
opCtx.get(), nss, CollectionOptions{});
}
- _metricsNew =
- ReshardingMetricsNew::makeInstance(UUID::gen(),
- BSON("y" << 1),
- _outputNss,
- ShardingDataTransformMetrics::Role::kRecipient,
- serviceContext->getFastClockSource()->now(),
- serviceContext);
+ _metrics =
+ ReshardingMetrics::makeInstance(UUID::gen(),
+ BSON("y" << 1),
+ _outputNss,
+ ShardingDataTransformMetrics::Role::kRecipient,
+ serviceContext->getFastClockSource()->now(),
+ serviceContext);
_applierMetrics =
- std::make_unique<ReshardingOplogApplierMetrics>(_metricsNew.get(), boost::none);
+ std::make_unique<ReshardingOplogApplierMetrics>(_metrics.get(), boost::none);
_crudApplication = std::make_unique<ReshardingOplogApplicationRules>(
_outputNss,
std::vector<NamespaceString>{_myStashNss, _otherStashNss},
@@ -318,7 +314,7 @@ private:
std::vector<ChunkType> chunks = {ChunkType{
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_myDonorId}};
auto rt = RoutingTableHistory::makeNew(_sourceNss,
@@ -356,13 +352,15 @@ private:
const ShardId _otherDonorId{"otherDonorId"};
const NamespaceString _outputNss =
- constructTemporaryReshardingNss(_sourceNss.db(), _sourceUUID);
- const NamespaceString _myStashNss = getLocalConflictStashNamespace(_sourceUUID, _myDonorId);
+ resharding::constructTemporaryReshardingNss(_sourceNss.db(), _sourceUUID);
+ const NamespaceString _myStashNss =
+ resharding::getLocalConflictStashNamespace(_sourceUUID, _myDonorId);
const NamespaceString _otherStashNss =
- getLocalConflictStashNamespace(_sourceUUID, _otherDonorId);
- const NamespaceString _myOplogBufferNss = getLocalOplogBufferNamespace(_sourceUUID, _myDonorId);
+ resharding::getLocalConflictStashNamespace(_sourceUUID, _otherDonorId);
+ const NamespaceString _myOplogBufferNss =
+ resharding::getLocalOplogBufferNamespace(_sourceUUID, _myDonorId);
- std::unique_ptr<ReshardingMetricsNew> _metricsNew;
+ std::unique_ptr<ReshardingMetrics> _metrics;
std::unique_ptr<ReshardingOplogApplierMetrics> _applierMetrics;
std::unique_ptr<ReshardingOplogApplicationRules> _crudApplication;
diff --git a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
index 4114100a5bc..9c09f5ebcf0 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
@@ -27,12 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
-#include <memory>
-#include <vector>
-
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/catalog_raii.h"
@@ -47,7 +41,7 @@
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/op_observer_sharding_impl.h"
#include "mongo/db/s/resharding/resharding_data_copy_util.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/s/resharding/resharding_oplog_application.h"
#include "mongo/db/s/resharding/resharding_util.h"
#include "mongo/db/s/sharding_state.h"
@@ -112,15 +106,15 @@ public:
CollectionMetadata(makeChunkManagerForOutputCollection(), _myDonorId));
}
- _metricsNew =
- ReshardingMetricsNew::makeInstance(_sourceUUID,
- BSON(_newShardKey << 1),
- _outputNss,
- ShardingDataTransformMetrics::Role::kRecipient,
- serviceContext->getFastClockSource()->now(),
- serviceContext);
+ _metrics =
+ ReshardingMetrics::makeInstance(_sourceUUID,
+ BSON(_newShardKey << 1),
+ _outputNss,
+ ShardingDataTransformMetrics::Role::kRecipient,
+ serviceContext->getFastClockSource()->now(),
+ serviceContext);
_oplogApplierMetrics =
- std::make_unique<ReshardingOplogApplierMetrics>(_metricsNew.get(), boost::none);
+ std::make_unique<ReshardingOplogApplierMetrics>(_metrics.get(), boost::none);
_applier = std::make_unique<ReshardingOplogApplicationRules>(
_outputNss,
std::vector<NamespaceString>{_myStashNss, _otherStashNss},
@@ -289,16 +283,16 @@ private:
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY),
BSON(_currentShardKey << -std::numeric_limits<double>::infinity())},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_myDonorId},
ChunkType{_sourceUUID,
ChunkRange{BSON(_currentShardKey << -std::numeric_limits<double>::infinity()),
BSON(_currentShardKey << 0)},
- ChunkVersion(100, 1, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 1}),
_otherDonorId},
ChunkType{_sourceUUID,
ChunkRange{BSON(_currentShardKey << 0), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 2, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 2}),
_myDonorId}};
return makeChunkManager(
@@ -311,7 +305,7 @@ private:
std::vector<ChunkType> chunks = {
ChunkType{outputUuid,
ChunkRange{BSON(_newShardKey << MINKEY), BSON(_newShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_myDonorId}};
return makeChunkManager(
@@ -335,13 +329,14 @@ private:
const ShardId _otherDonorId{"otherDonorId"};
const NamespaceString _outputNss =
- constructTemporaryReshardingNss(_sourceNss.db(), _sourceUUID);
- const NamespaceString _myStashNss = getLocalConflictStashNamespace(_sourceUUID, _myDonorId);
+ resharding::constructTemporaryReshardingNss(_sourceNss.db(), _sourceUUID);
+ const NamespaceString _myStashNss =
+ resharding::getLocalConflictStashNamespace(_sourceUUID, _myDonorId);
const NamespaceString _otherStashNss =
- getLocalConflictStashNamespace(_sourceUUID, _otherDonorId);
+ resharding::getLocalConflictStashNamespace(_sourceUUID, _otherDonorId);
std::unique_ptr<ReshardingOplogApplicationRules> _applier;
- std::unique_ptr<ReshardingMetricsNew> _metricsNew;
+ std::unique_ptr<ReshardingMetrics> _metrics;
std::unique_ptr<ReshardingOplogApplierMetrics> _oplogApplierMetrics;
};
diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp b/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp
index 41f87420e70..ac62a1cee4d 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp
@@ -45,7 +45,7 @@
#include "mongo/db/pipeline/aggregate_command_gen.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/read_concern_level.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/s/resharding/resharding_util.h"
#include "mongo/db/s/sharding_data_transform_cumulative_metrics.h"
#include "mongo/db/storage/write_unit_of_work.h"
@@ -272,9 +272,9 @@ AggregateCommandRequest ReshardingOplogFetcher::_makeAggregateCommandRequest(
auto opCtx = opCtxRaii.get();
auto expCtx = _makeExpressionContext(opCtx);
- auto serializedPipeline =
- createOplogFetchingPipelineForResharding(expCtx, _startAt, _collUUID, _recipientShard)
- ->serializeToBson();
+ auto serializedPipeline = resharding::createOplogFetchingPipelineForResharding(
+ expCtx, _startAt, _collUUID, _recipientShard)
+ ->serializeToBson();
AggregateCommandRequest aggRequest(NamespaceString::kRsOplogNamespace,
std::move(serializedPipeline));
@@ -326,8 +326,8 @@ bool ReshardingOplogFetcher::consume(Client* client,
[this, &batchesProcessed, &moreToCome, &opCtxRaii, &batchFetchTimer, factory](
const std::vector<BSONObj>& batch,
const boost::optional<BSONObj>& postBatchResumeToken) {
- _env->metricsNew()->onOplogEntriesFetched(batch.size(),
- Milliseconds(batchFetchTimer.millis()));
+ _env->metrics()->onOplogEntriesFetched(batch.size(),
+ Milliseconds(batchFetchTimer.millis()));
ThreadClient client(fmt::format("ReshardingFetcher-{}-{}",
_reshardingUUID.toString(),
@@ -354,7 +354,7 @@ bool ReshardingOplogFetcher::consume(Client* client,
uassertStatusOK(toWriteTo->insertDocument(opCtx, InsertStatement{doc}, nullptr));
wuow.commit();
- _env->metricsNew()->onLocalInsertDuringOplogFetching(
+ _env->metrics()->onLocalInsertDuringOplogFetching(
Milliseconds(insertTimer.millis()));
++_numOplogEntriesCopied;
@@ -368,7 +368,7 @@ bool ReshardingOplogFetcher::consume(Client* client,
_onInsertFuture = std::move(f);
}
- if (isFinalOplog(nextOplog, _reshardingUUID)) {
+ if (resharding::isFinalOplog(nextOplog, _reshardingUUID)) {
moreToCome = false;
return false;
}
@@ -392,7 +392,7 @@ bool ReshardingOplogFetcher::consume(Client* client,
oplog.set_id(Value(startAt.toBSON()));
oplog.setObject(BSON("msg"
<< "Latest oplog ts from donor's cursor response"));
- oplog.setObject2(BSON("type" << kReshardProgressMark));
+ oplog.setObject2(BSON("type" << resharding::kReshardProgressMark));
oplog.setOpTime(OplogSlot());
oplog.setWallClockTime(opCtx->getServiceContext()->getFastClockSource()->now());
@@ -402,7 +402,7 @@ bool ReshardingOplogFetcher::consume(Client* client,
// Also include synthetic oplog in the fetched count so it can match up with the
// total oplog applied count in the end.
- _env->metricsNew()->onOplogEntriesFetched(1, Milliseconds(0));
+ _env->metrics()->onOplogEntriesFetched(1, Milliseconds(0));
auto [p, f] = makePromiseFuture<void>();
{
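The fetcher's synthetic progress-mark entry is now tagged through the `resharding` namespace and counted toward the fetched total. A sketch of the entry's shape as built above, assuming `oplog` is the fetcher's mutable oplog entry object:

    oplog.setObject(BSON("msg" << "Latest oplog ts from donor's cursor response"));
    oplog.setObject2(BSON("type" << resharding::kReshardProgressMark));
    // Counted as fetched so the fetched and applied totals match at the end:
    _env->metrics()->onOplogEntriesFetched(1, Milliseconds(0));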
diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher.h b/src/mongo/db/s/resharding/resharding_oplog_fetcher.h
index 5772c6bdfaa..37f5090f0e2 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_fetcher.h
+++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher.h
@@ -50,25 +50,25 @@
namespace mongo {
-class ReshardingMetricsNew;
+class ReshardingMetrics;
class ReshardingOplogFetcher : public resharding::OnInsertAwaitable {
public:
class Env {
public:
- Env(ServiceContext* service, ReshardingMetricsNew* metricsNew)
- : _service(service), _metricsNew(metricsNew) {}
+ Env(ServiceContext* service, ReshardingMetrics* metrics)
+ : _service(service), _metrics(metrics) {}
ServiceContext* service() const {
return _service;
}
- ReshardingMetricsNew* metricsNew() const {
- return _metricsNew;
+ ReshardingMetrics* metrics() const {
+ return _metrics;
}
private:
ServiceContext* _service;
- ReshardingMetricsNew* _metricsNew;
+ ReshardingMetrics* _metrics;
};
// Special value to use for startAt to indicate there are no more oplog entries needing to be
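Callers interact with the renamed accessor exactly as before, just under the new name; a usage sketch, assuming a valid `ReshardingMetrics*` (here `metricsPtr`) is available:

    ReshardingOplogFetcher::Env env(getGlobalServiceContext(), metricsPtr);
    env.metrics()->onOplogEntriesFetched(batch.size(), Milliseconds(0));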
diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
index 17624acced9..68523519f41 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp
@@ -45,7 +45,7 @@
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/db/repl/wait_for_majority_service.h"
#include "mongo/db/s/operation_sharding_state.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/s/resharding/resharding_oplog_fetcher.h"
#include "mongo/db/s/resharding/resharding_util.h"
#include "mongo/db/s/shard_server_test_fixture.h"
@@ -98,13 +98,12 @@ public:
OldClientContext ctx(_opCtx, NamespaceString::kRsOplogNamespace.ns());
}
- _metrics =
- ReshardingMetricsNew::makeInstance(_reshardingUUID,
- BSON("y" << 1),
- NamespaceString{""},
- ReshardingMetricsNew::Role::kRecipient,
- getServiceContext()->getFastClockSource()->now(),
- getServiceContext());
+ _metrics = ReshardingMetrics::makeInstance(_reshardingUUID,
+ BSON("y" << 1),
+ NamespaceString{""},
+ ReshardingMetrics::Role::kRecipient,
+ getServiceContext()->getFastClockSource()->now(),
+ getServiceContext());
for (const auto& shardId : kTwoShardIdList) {
auto shardTargeter = RemoteCommandTargeterMock::get(
@@ -299,7 +298,8 @@ public:
BSON(
"msg" << fmt::format("Writes to {} are temporarily blocked for resharding.",
dataColl.getCollection()->ns().toString())),
- BSON("type" << kReshardFinalOpLogType << "reshardingUUID" << _reshardingUUID),
+ BSON("type" << resharding::kReshardFinalOpLogType << "reshardingUUID"
+ << _reshardingUUID),
boost::none,
boost::none,
boost::none,
@@ -343,7 +343,7 @@ protected:
Timestamp _fetchTimestamp;
ShardId _donorShard;
ShardId _destinationShard;
- std::unique_ptr<ReshardingMetricsNew> _metrics;
+ std::unique_ptr<ReshardingMetrics> _metrics;
private:
static HostAndPort makeHostAndPort(const ShardId& shardId) {
diff --git a/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp
index bd3d602f3a7..5be42b0c30d 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp
@@ -705,6 +705,16 @@ TEST_F(ReshardingOplogSessionApplicationTest,
TxnNumber internalTxnTxnNumber = 1;
StmtId stmtId = 2;
+ // Make two in-progress transactions so the one started by resharding must block.
+ {
+ auto newClientOwned = getServiceContext()->makeClient("newClient");
+ AlternativeClientRegion acr(newClientOwned);
+ auto newOpCtx = cc().makeOperationContext();
+ makeInProgressTxn(newOpCtx.get(),
+ makeLogicalSessionIdWithTxnNumberAndUUIDForTest(retryableWriteLsid,
+ retryableWriteTxnNumber),
+ internalTxnTxnNumber);
+ }
{
auto opCtx = makeOperationContext();
makeInProgressTxn(opCtx.get(), internalTxnLsid, internalTxnTxnNumber);
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.cpp b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
index baaf64fc5e3..5b66d19e8bd 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service.cpp
@@ -120,12 +120,12 @@ using resharding_metrics::getIntervalStartFieldName;
using DocT = ReshardingRecipientDocument;
const auto metricsPrefix = resharding_metrics::getMetricsPrefix<DocT>();
-void buildStateDocumentCloneMetricsForUpdate(BSONObjBuilder& bob, ReshardingMetricsNew* metrics) {
+void buildStateDocumentCloneMetricsForUpdate(BSONObjBuilder& bob, ReshardingMetrics* metrics) {
bob.append(getIntervalStartFieldName<DocT>(ReshardingRecipientMetrics::kDocumentCopyFieldName),
metrics->getCopyingBegin());
}
-void buildStateDocumentApplyMetricsForUpdate(BSONObjBuilder& bob, ReshardingMetricsNew* metrics) {
+void buildStateDocumentApplyMetricsForUpdate(BSONObjBuilder& bob, ReshardingMetrics* metrics) {
bob.append(getIntervalEndFieldName<DocT>(ReshardingRecipientMetrics::kDocumentCopyFieldName),
metrics->getCopyingEnd());
bob.append(
@@ -138,14 +138,14 @@ void buildStateDocumentApplyMetricsForUpdate(BSONObjBuilder& bob, ReshardingMetr
}
void buildStateDocumentStrictConsistencyMetricsForUpdate(BSONObjBuilder& bob,
- ReshardingMetricsNew* metrics) {
+ ReshardingMetrics* metrics) {
bob.append(
getIntervalEndFieldName<DocT>(ReshardingRecipientMetrics::kOplogApplicationFieldName),
metrics->getApplyingEnd());
}
void buildStateDocumentMetricsForUpdate(BSONObjBuilder& bob,
- ReshardingMetricsNew* metrics,
+ ReshardingMetrics* metrics,
RecipientStateEnum newState) {
switch (newState) {
case RecipientStateEnum::kCloning:
@@ -162,8 +162,8 @@ void buildStateDocumentMetricsForUpdate(BSONObjBuilder& bob,
}
}
-ReshardingMetricsNew::RecipientState toMetricsState(RecipientStateEnum state) {
- return ReshardingMetricsNew::RecipientState(state);
+ReshardingMetrics::RecipientState toMetricsState(RecipientStateEnum state) {
+ return ReshardingMetrics::RecipientState(state);
}
} // namespace
@@ -190,7 +190,7 @@ ReshardingRecipientService::RecipientStateMachine::RecipientStateMachine(
ReshardingDataReplicationFactory dataReplicationFactory)
: repl::PrimaryOnlyService::TypedInstance<RecipientStateMachine>(),
_recipientService{recipientService},
- _metricsNew{ReshardingMetricsNew::initializeFrom(recipientDoc, getGlobalServiceContext())},
+ _metrics{ReshardingMetrics::initializeFrom(recipientDoc, getGlobalServiceContext())},
_metadata{recipientDoc.getCommonReshardingMetadata()},
_minimumOperationDuration{Milliseconds{recipientDoc.getMinimumOperationDurationMillis()}},
_recipientCtx{recipientDoc.getMutableState()},
@@ -219,7 +219,7 @@ ReshardingRecipientService::RecipientStateMachine::RecipientStateMachine(
}()) {
invariant(_externalState);
- _metricsNew->onStateTransition(boost::none, toMetricsState(_recipientCtx.getState()));
+ _metrics->onStateTransition(boost::none, toMetricsState(_recipientCtx.getState()));
}
ExecutorFuture<void>
@@ -370,7 +370,9 @@ ExecutorFuture<void> ReshardingRecipientService::RecipientStateMachine::_finishR
if (!_isAlsoDonor) {
auto opCtx = factory.makeOperationContext(&cc());
- _externalState->clearFilteringMetadata(opCtx.get());
+ _externalState->clearFilteringMetadata(opCtx.get(),
+ _metadata.getSourceNss(),
+ _metadata.getTempReshardingNss());
RecoverableCriticalSectionService::get(opCtx.get())
->releaseRecoverableCriticalSection(
@@ -417,7 +419,13 @@ ExecutorFuture<void> ReshardingRecipientService::RecipientStateMachine::_runMand
self = shared_from_this(),
outerStatus = status,
isCanceled = stepdownToken.isCanceled()](Status dataReplicationHaltStatus) {
- _metricsNew->onStateTransition(toMetricsState(_recipientCtx.getState()), boost::none);
+ _metrics->onStateTransition(toMetricsState(_recipientCtx.getState()), boost::none);
+
+ // Destroy metrics early so its lifetime will not be tied to the lifetime of this
+ // state machine. This is because future callbacks copy shared pointers to this
+ // state machine, which causes it to live longer than expected and can overlap
+ // with a newer instance when stepping up.
+ _metrics.reset();
// If the stepdownToken was triggered, it takes priority in order to make sure that
// the promise is set with an error that the coordinator can retry with. If it ran into
@@ -432,7 +440,6 @@ ExecutorFuture<void> ReshardingRecipientService::RecipientStateMachine::_runMand
// replication errors because resharding is known to have failed already.
stdx::lock_guard<Latch> lk(_mutex);
ensureFulfilledPromise(lk, _completionPromise, outerStatus);
-
return outerStatus;
});
}
@@ -504,7 +511,7 @@ void ReshardingRecipientService::RecipientStateMachine::interrupt(Status status)
boost::optional<BSONObj> ReshardingRecipientService::RecipientStateMachine::reportForCurrentOp(
MongoProcessInterface::CurrentOpConnectionsMode,
MongoProcessInterface::CurrentOpSessionsMode) noexcept {
- return _metricsNew->reportForCurrentOp();
+ return _metrics->reportForCurrentOp();
}
void ReshardingRecipientService::RecipientStateMachine::onReshardingFieldsChanges(
@@ -550,8 +557,8 @@ ExecutorFuture<void> ReshardingRecipientService::RecipientStateMachine::
ReshardingRecipientService::RecipientStateMachine::CloneDetails cloneDetails) {
_transitionToCreatingCollection(
cloneDetails, (*executor)->now() + _minimumOperationDuration, factory);
- _metricsNew->setDocumentsToCopyCounts(cloneDetails.approxDocumentsToCopy,
- cloneDetails.approxBytesToCopy);
+ _metrics->setDocumentsToCopyCounts(cloneDetails.approxDocumentsToCopy,
+ cloneDetails.approxBytesToCopy);
});
}
@@ -616,7 +623,7 @@ ReshardingRecipientService::RecipientStateMachine::_makeDataReplication(Operatio
for (const auto& donor : _donorShards) {
_applierMetricsMap.emplace(
donor.getShardId(),
- std::make_unique<ReshardingOplogApplierMetrics>(_metricsNew.get(), boost::none));
+ std::make_unique<ReshardingOplogApplierMetrics>(_metrics.get(), boost::none));
}
} else {
invariant(_applierMetricsMap.size() == _donorShards.size(),
@@ -625,7 +632,7 @@ ReshardingRecipientService::RecipientStateMachine::_makeDataReplication(Operatio
}
return _dataReplicationFactory(opCtx,
- _metricsNew.get(),
+ _metrics.get(),
&_applierMetricsMap,
_metadata,
_donorShards,
@@ -726,8 +733,8 @@ ExecutorFuture<void> ReshardingRecipientService::RecipientStateMachine::
.then([this, &factory] {
auto opCtx = factory.makeOperationContext(&cc());
for (const auto& donor : _donorShards) {
- auto stashNss =
- getLocalConflictStashNamespace(_metadata.getSourceUUID(), donor.getShardId());
+ auto stashNss = resharding::getLocalConflictStashNamespace(
+ _metadata.getSourceUUID(), donor.getShardId());
AutoGetCollection stashColl(opCtx.get(), stashNss, MODE_IS);
uassert(5356800,
"Resharding completed with non-empty stash collections",
@@ -846,7 +853,7 @@ void ReshardingRecipientService::RecipientStateMachine::_transitionState(
_updateRecipientDocument(
std::move(newRecipientCtx), std::move(cloneDetails), std::move(configStartTime), factory);
- _metricsNew->onStateTransition(toMetricsState(oldState), toMetricsState(newState));
+ _metrics->onStateTransition(toMetricsState(oldState), toMetricsState(newState));
LOGV2_INFO(5279506,
"Transitioned resharding recipient state",
@@ -871,7 +878,7 @@ void ReshardingRecipientService::RecipientStateMachine::_transitionToCreatingCol
void ReshardingRecipientService::RecipientStateMachine::_transitionToCloning(
const CancelableOperationContextFactory& factory) {
- _metricsNew->onCopyingBegin();
+ _metrics->onCopyingBegin();
auto newRecipientCtx = _recipientCtx;
newRecipientCtx.setState(RecipientStateEnum::kCloning);
_transitionState(std::move(newRecipientCtx), boost::none, boost::none, factory);
@@ -883,8 +890,8 @@ void ReshardingRecipientService::RecipientStateMachine::_transitionToApplying(
newRecipientCtx.setState(RecipientStateEnum::kApplying);
_transitionState(std::move(newRecipientCtx), boost::none, boost::none, factory);
- _metricsNew->onCopyingEnd();
- _metricsNew->onApplyingBegin();
+ _metrics->onCopyingEnd();
+ _metrics->onApplyingBegin();
}
void ReshardingRecipientService::RecipientStateMachine::_transitionToStrictConsistency(
@@ -893,14 +900,14 @@ void ReshardingRecipientService::RecipientStateMachine::_transitionToStrictConsi
newRecipientCtx.setState(RecipientStateEnum::kStrictConsistency);
_transitionState(std::move(newRecipientCtx), boost::none, boost::none, factory);
- _metricsNew->onApplyingEnd();
+ _metrics->onApplyingEnd();
}
void ReshardingRecipientService::RecipientStateMachine::_transitionToError(
Status abortReason, const CancelableOperationContextFactory& factory) {
auto newRecipientCtx = _recipientCtx;
newRecipientCtx.setState(RecipientStateEnum::kError);
- emplaceTruncatedAbortReasonIfExists(newRecipientCtx, abortReason);
+ resharding::emplaceTruncatedAbortReasonIfExists(newRecipientCtx, abortReason);
_transitionState(std::move(newRecipientCtx), boost::none, boost::none, factory);
}
@@ -1052,8 +1059,7 @@ void ReshardingRecipientService::RecipientStateMachine::_updateRecipientDocument
*configStartTime);
}
- buildStateDocumentMetricsForUpdate(
- setBuilder, _metricsNew.get(), newRecipientCtx.getState());
+ buildStateDocumentMetricsForUpdate(setBuilder, _metrics.get(), newRecipientCtx.getState());
setBuilder.doneFast();
}
@@ -1156,7 +1162,7 @@ void ReshardingRecipientService::RecipientStateMachine::_restoreMetrics(
// metrics section of the recipient state document and restored during metrics
// initialization. This is so that applied oplog entries that add or remove documents do
// not affect the cloning metrics.
- _metricsNew->restoreDocumentsCopied(documentCountCopied, documentBytesCopied);
+ _metrics->restoreDocumentsCopied(documentCountCopied, documentBytesCopied);
}
}
@@ -1167,10 +1173,10 @@ void ReshardingRecipientService::RecipientStateMachine::_restoreMetrics(
progressDocList;
for (const auto& donor : _donorShards) {
{
- AutoGetCollection oplogBufferColl(
- opCtx.get(),
- getLocalOplogBufferNamespace(_metadata.getSourceUUID(), donor.getShardId()),
- MODE_IS);
+ AutoGetCollection oplogBufferColl(opCtx.get(),
+ resharding::getLocalOplogBufferNamespace(
+ _metadata.getSourceUUID(), donor.getShardId()),
+ MODE_IS);
if (oplogBufferColl) {
oplogEntriesFetched += oplogBufferColl->numRecords(opCtx.get());
}
@@ -1208,19 +1214,19 @@ void ReshardingRecipientService::RecipientStateMachine::_restoreMetrics(
if (!progressDoc) {
_applierMetricsMap.emplace(
shardId,
- std::make_unique<ReshardingOplogApplierMetrics>(_metricsNew.get(), boost::none));
+ std::make_unique<ReshardingOplogApplierMetrics>(_metrics.get(), boost::none));
continue;
}
- _metricsNew->accumulateFrom(*progressDoc);
+ _metrics->accumulateFrom(*progressDoc);
auto applierMetrics =
- std::make_unique<ReshardingOplogApplierMetrics>(_metricsNew.get(), progressDoc);
+ std::make_unique<ReshardingOplogApplierMetrics>(_metrics.get(), progressDoc);
_applierMetricsMap.emplace(shardId, std::move(applierMetrics));
}
- _metricsNew->restoreOplogEntriesFetched(oplogEntriesFetched);
- _metricsNew->restoreOplogEntriesApplied(oplogEntriesApplied);
+ _metrics->restoreOplogEntriesFetched(oplogEntriesFetched);
+ _metrics->restoreOplogEntriesApplied(oplogEntriesApplied);
}
CancellationToken ReshardingRecipientService::RecipientStateMachine::_initAbortSource(
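The early `_metrics.reset()` above is the notable behavioral change in this file: it frees the metrics object in the completion continuation rather than waiting for the destructor, because pending futures hold `shared_ptr` copies that can keep the state machine alive past step-up. A minimal sketch of the pattern, with hypothetical `StateMachine` and `Metrics` names:

    struct StateMachine : std::enable_shared_from_this<StateMachine> {
        std::unique_ptr<Metrics> metrics;
        void onCompletion() {
            // Release now; waiting for the destructor could overlap with a
            // newer instance's metrics after step-up.
            metrics.reset();
        }
    };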
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.h b/src/mongo/db/s/resharding/resharding_recipient_service.h
index fc41ba0e9ee..5dab490b96f 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service.h
+++ b/src/mongo/db/s/resharding/resharding_recipient_service.h
@@ -33,7 +33,7 @@
#include "mongo/db/s/resharding/recipient_document_gen.h"
#include "mongo/db/s/resharding/resharding_data_replication.h"
#include "mongo/db/s/resharding/resharding_future_util.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/s/resharding/resharding_oplog_applier_metrics.h"
#include "mongo/db/s/resharding/resharding_util.h"
#include "mongo/s/resharding/type_collection_fields_gen.h"
@@ -163,9 +163,9 @@ public:
return _metadata;
}
- inline const ReshardingMetricsNew& getMetrics() const {
- invariant(_metricsNew);
- return *_metricsNew;
+ inline const ReshardingMetrics& getMetrics() const {
+ invariant(_metrics);
+ return *_metrics;
}
boost::optional<BSONObj> reportForCurrentOp(
@@ -289,7 +289,7 @@ private:
// The primary-only service instance corresponding to the recipient instance. Not owned.
const ReshardingRecipientService* const _recipientService;
- std::unique_ptr<ReshardingMetricsNew> _metricsNew;
+ std::unique_ptr<ReshardingMetrics> _metrics;
ReshardingApplierMetricsMap _applierMetricsMap;
// The in-memory representation of the immutable portion of the document in
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp
index 3e929815454..222a2c6f86a 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp
@@ -186,8 +186,12 @@ void RecipientStateMachineExternalStateImpl::updateCoordinatorDocument(Operation
}
}
-void RecipientStateMachineExternalStateImpl::clearFilteringMetadata(OperationContext* opCtx) {
- resharding::clearFilteringMetadata(opCtx, true /* scheduleAsyncRefresh */);
+void RecipientStateMachineExternalStateImpl::clearFilteringMetadata(
+ OperationContext* opCtx,
+ const NamespaceString& sourceNss,
+ const NamespaceString& tempReshardingNss) {
+ stdx::unordered_set<NamespaceString> namespacesToRefresh{sourceNss, tempReshardingNss};
+ resharding::clearFilteringMetadata(opCtx, namespacesToRefresh, true /* scheduleAsyncRefresh */);
}
} // namespace mongo
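Recipients now name the namespaces whose filtering metadata must be cleared instead of relying on an implicit set; a call-site sketch matching the widened signature, as used by the recipient state machine above:

    _externalState->clearFilteringMetadata(opCtx.get(),
                                           _metadata.getSourceNss(),
                                           _metadata.getTempReshardingNss());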
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_external_state.h b/src/mongo/db/s/resharding/resharding_recipient_service_external_state.h
index c1597da7f7c..0a2749a66fc 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_external_state.h
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_external_state.h
@@ -90,7 +90,9 @@ public:
const BSONObj& query,
const BSONObj& update) = 0;
- virtual void clearFilteringMetadata(OperationContext* opCtx) = 0;
+ virtual void clearFilteringMetadata(OperationContext* opCtx,
+ const NamespaceString& sourceNss,
+ const NamespaceString& tempReshardingNss) = 0;
/**
* Creates the temporary resharding collection locally.
@@ -137,7 +139,9 @@ public:
const BSONObj& query,
const BSONObj& update) override;
- void clearFilteringMetadata(OperationContext* opCtx) override;
+ void clearFilteringMetadata(OperationContext* opCtx,
+ const NamespaceString& sourceNss,
+ const NamespaceString& tempReshardingNss) override;
private:
template <typename Callable>
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp
index c4e193e6897..62776bba466 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/unordered_fields_bsonobj_comparator.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/dbdirectclient.h"
@@ -48,9 +45,6 @@
#include "mongo/s/database_version.h"
#include "mongo/s/stale_exception.h"
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
-
namespace mongo {
namespace {
@@ -168,7 +162,7 @@ public:
reshardingFields.setRecipientFields(recipientFields);
coll.setReshardingFields(reshardingFields);
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk(uuid,
{skey.getKeyPattern().globalMin(), skey.getKeyPattern().globalMax()},
@@ -193,7 +187,7 @@ public:
CollectionType coll(
origNss, epoch, timestamp, Date_t::now(), uuid, skey.getKeyPattern());
- ChunkVersion version(2, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {2, 0});
ChunkType chunk(uuid,
{skey.getKeyPattern().globalMin(), skey.getKeyPattern().globalMax()},
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index 78316aacca7..4e6a5489f71 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -83,7 +83,7 @@ public:
std::vector<ChunkType> chunks = {ChunkType{
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_someDonorId}};
auto rt = RoutingTableHistory::makeNew(_sourceNss,
@@ -136,7 +136,9 @@ public:
const BSONObj& query,
const BSONObj& update) override {}
- void clearFilteringMetadata(OperationContext* opCtx) override {}
+ void clearFilteringMetadata(OperationContext* opCtx,
+ const NamespaceString& sourceNss,
+ const NamespaceString& tempReshardingNss) override {}
private:
RoutingTableHistoryValueHandle _makeStandaloneRoutingTableHistory(RoutingTableHistory rt) {
@@ -250,12 +252,12 @@ public:
NamespaceString sourceNss("sourcedb", "sourcecollection");
auto sourceUUID = UUID::gen();
- auto commonMetadata =
- CommonReshardingMetadata(UUID::gen(),
- sourceNss,
- sourceUUID,
- constructTemporaryReshardingNss(sourceNss.db(), sourceUUID),
- newShardKeyPattern());
+ auto commonMetadata = CommonReshardingMetadata(
+ UUID::gen(),
+ sourceNss,
+ sourceUUID,
+ resharding::constructTemporaryReshardingNss(sourceNss.db(), sourceUUID),
+ newShardKeyPattern());
commonMetadata.setStartTime(getServiceContext()->getFastClockSource()->now());
doc.setCommonReshardingMetadata(std::move(commonMetadata));
@@ -625,7 +627,8 @@ TEST_F(ReshardingRecipientServiceTest, WritesNoopOplogEntryOnReshardDoneCatchUp)
ErrorCodes::InterruptedDueToReplStateChange);
DBDirectClient client(opCtx.get());
- NamespaceString sourceNss = constructTemporaryReshardingNss("sourcedb", doc.getSourceUUID());
+ NamespaceString sourceNss =
+ resharding::constructTemporaryReshardingNss("sourcedb", doc.getSourceUUID());
FindCommandRequest findRequest{NamespaceString::kRsOplogNamespace};
findRequest.setFilter(
@@ -671,7 +674,8 @@ TEST_F(ReshardingRecipientServiceTest, WritesNoopOplogEntryForImplicitShardColle
ErrorCodes::InterruptedDueToReplStateChange);
DBDirectClient client(opCtx.get());
- NamespaceString sourceNss = constructTemporaryReshardingNss("sourcedb", doc.getSourceUUID());
+ NamespaceString sourceNss =
+ resharding::constructTemporaryReshardingNss("sourcedb", doc.getSourceUUID());
FindCommandRequest findRequest{NamespaceString::kRsOplogNamespace};
findRequest.setFilter(
@@ -739,7 +743,7 @@ TEST_F(ReshardingRecipientServiceTest, TruncatesXLErrorOnRecipientDocument) {
// to the primitive truncation algorithm - Check that the total size is less than
// kReshardErrorMaxBytes + a couple additional bytes to provide a buffer for the field
// name sizes.
- int maxReshardErrorBytesCeiling = kReshardErrorMaxBytes + 200;
+ int maxReshardErrorBytesCeiling = resharding::kReshardErrorMaxBytes + 200;
ASSERT_LT(persistedAbortReasonBSON->objsize(), maxReshardErrorBytesCeiling);
ASSERT_EQ(persistedAbortReasonBSON->getIntField("code"),
ErrorCodes::ReshardCollectionTruncatedError);
@@ -815,7 +819,8 @@ TEST_F(ReshardingRecipientServiceTest, RestoreMetricsAfterStepUp) {
for (const auto& donor : donorShards) {
// Setup oplogBuffer collection.
ReshardingDonorOplogId donorOplogId{{20, i}, {19, 0}};
- insertFn(getLocalOplogBufferNamespace(doc.getSourceUUID(), donor.getShardId()),
+ insertFn(resharding::getLocalOplogBufferNamespace(doc.getSourceUUID(),
+ donor.getShardId()),
InsertStatement{BSON("_id" << donorOplogId.toBSON())});
++i;
@@ -923,7 +928,7 @@ TEST_F(ReshardingRecipientServiceTest, RestoreMetricsAfterStepUpWithMissingProgr
// Setup oplogBuffer collection.
ReshardingDonorOplogId donorOplogId{{20, i}, {19, 0}};
- insertFn(getLocalOplogBufferNamespace(doc.getSourceUUID(), donor.getShardId()),
+ insertFn(resharding::getLocalOplogBufferNamespace(doc.getSourceUUID(), donor.getShardId()),
InsertStatement{BSON("_id" << donorOplogId.toBSON())});
// Setup reshardingApplierProgress collection.
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
index 478c0272c7d..1f074af6f75 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
@@ -35,7 +35,6 @@
#include <vector>
#include "mongo/bson/bsonmisc.h"
-#include "mongo/client/query.h"
#include "mongo/client/read_preference.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/d_concurrency.h"
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
index 1e22bc5a4a7..24045678550 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
@@ -1004,6 +1004,16 @@ TEST_F(ReshardingTxnClonerTest,
retryableWriteTxnNumber);
TxnNumber internalTxnTxnNumber = 1;
+ // Make two in-progress transactions so the one started by resharding must block.
+ {
+ auto newClientOwned = getServiceContext()->makeClient("newClient");
+ AlternativeClientRegion acr(newClientOwned);
+ auto newOpCtx = cc().makeOperationContext();
+ makeInProgressTxn(newOpCtx.get(),
+ makeLogicalSessionIdWithTxnNumberAndUUIDForTest(retryableWriteLsid,
+ retryableWriteTxnNumber),
+ internalTxnTxnNumber);
+ }
makeInProgressTxn(operationContext(), internalTxnLsid, internalTxnTxnNumber);
auto lastOplogTs = getLatestOplogTimestamp(operationContext());
@@ -1096,6 +1106,16 @@ TEST_F(ReshardingTxnClonerTest, CancelableWhileWaitingOnInProgressInternalTxnFor
retryableWriteTxnNumber);
TxnNumber internalTxnTxnNumber = 1;
+ // Make two in-progress transactions so the one started by resharding must block.
+ {
+ auto newClientOwned = getServiceContext()->makeClient("newClient");
+ AlternativeClientRegion acr(newClientOwned);
+ auto newOpCtx = cc().makeOperationContext();
+ makeInProgressTxn(newOpCtx.get(),
+ makeLogicalSessionIdWithTxnNumberAndUUIDForTest(retryableWriteLsid,
+ retryableWriteTxnNumber),
+ internalTxnTxnNumber);
+ }
makeInProgressTxn(operationContext(), internalTxnLsid, internalTxnTxnNumber);
ON_BLOCK_EXIT([&] { abortTxn(operationContext(), internalTxnLsid, internalTxnTxnNumber); });
diff --git a/src/mongo/db/s/resharding/resharding_util.cpp b/src/mongo/db/s/resharding/resharding_util.cpp
index d9a04064d3c..873fc7ce5d5 100644
--- a/src/mongo/db/s/resharding/resharding_util.cpp
+++ b/src/mongo/db/s/resharding/resharding_util.cpp
@@ -48,7 +48,7 @@
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/resharding/document_source_resharding_add_resume_id.h"
#include "mongo/db/s/resharding/document_source_resharding_iterate_transaction.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/storage/write_unit_of_work.h"
#include "mongo/logv2/log.h"
@@ -63,6 +63,7 @@
namespace mongo {
+namespace resharding {
namespace {
/**
@@ -414,4 +415,5 @@ boost::optional<Milliseconds> estimateRemainingRecipientTime(bool applyingBegan,
return {};
}
+} // namespace resharding
} // namespace mongo
diff --git a/src/mongo/db/s/resharding/resharding_util.h b/src/mongo/db/s/resharding/resharding_util.h
index 194381e7e78..0d8aacbe3f7 100644
--- a/src/mongo/db/s/resharding/resharding_util.h
+++ b/src/mongo/db/s/resharding/resharding_util.h
@@ -50,6 +50,7 @@
#include "mongo/util/str.h"
namespace mongo {
+namespace resharding {
constexpr auto kReshardFinalOpLogType = "reshardFinalOp"_sd;
constexpr auto kReshardProgressMark = "reshardProgressMark"_sd;
@@ -324,5 +325,6 @@ std::vector<std::shared_ptr<Instance>> getReshardingStateMachines(OperationConte
return result;
}
+} // namespace resharding
} // namespace mongo
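With these helpers moved into `namespace resharding`, call sites outside that namespace must qualify them, as the service and test changes above show:

    auto tempNss = resharding::constructTemporaryReshardingNss(sourceNss.db(), sourceUUID);
    bool done = resharding::isFinalOplog(nextOplog, reshardingUUID);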
diff --git a/src/mongo/db/s/resharding/resharding_util_test.cpp b/src/mongo/db/s/resharding/resharding_util_test.cpp
index 5fd40fd86b7..12e5e15ddcd 100644
--- a/src/mongo/db/s/resharding/resharding_util_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_util_test.cpp
@@ -52,6 +52,7 @@
namespace mongo {
+namespace resharding {
namespace {
class ReshardingUtilTest : public ConfigServerTestFixture {
@@ -309,4 +310,7 @@ TEST_F(ReshardingTxnCloningPipelineTest, TxnPipelineAfterID) {
}
} // namespace
+
+} // namespace resharding
+
} // namespace mongo
diff --git a/src/mongo/db/s/resharding_test_commands.cpp b/src/mongo/db/s/resharding_test_commands.cpp
index 61fa835829f..74688928784 100644
--- a/src/mongo/db/s/resharding_test_commands.cpp
+++ b/src/mongo/db/s/resharding_test_commands.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/resharding/resharding_collection_cloner.h"
-#include "mongo/db/s/resharding/resharding_metrics_new.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
#include "mongo/db/s/resharding_test_commands_gen.h"
#include "mongo/db/vector_clock_metadata_hook.h"
#include "mongo/executor/network_interface_factory.h"
@@ -79,11 +79,11 @@ public:
}
};
- auto metrics = ReshardingMetricsNew::makeInstance(
+ auto metrics = ReshardingMetrics::makeInstance(
request().getUuid(),
request().getShardKey(),
ns(),
- ReshardingMetricsNew::Role::kRecipient,
+ ReshardingMetrics::Role::kRecipient,
opCtx->getServiceContext()->getFastClockSource()->now(),
opCtx->getServiceContext());
diff --git a/src/mongo/db/s/sessions_collection_config_server.cpp b/src/mongo/db/s/sessions_collection_config_server.cpp
index 60c72dcab47..4376166a365 100644
--- a/src/mongo/db/s/sessions_collection_config_server.cpp
+++ b/src/mongo/db/s/sessions_collection_config_server.cpp
@@ -125,8 +125,10 @@ void SessionsCollectionConfigServer::setupSessionsCollection(OperationContext* o
auto filterQuery =
BSON("_id" << NamespaceString::kLogicalSessionsNamespace.ns()
<< CollectionType::kMaxChunkSizeBytesFieldName << BSON("$exists" << false));
- auto updateQuery =
- BSON("$set" << BSON(CollectionType::kMaxChunkSizeBytesFieldName << kMaxChunkSizeBytes));
+ auto updateQuery = BSON("$set" << BSON(CollectionType::kMaxChunkSizeBytesFieldName
+ << kMaxChunkSizeBytes
+ << CollectionType::kNoAutoSplitFieldName << true));
+
uassertStatusOK(Grid::get(opCtx)->catalogClient()->updateConfigDocument(
opCtx,
CollectionType::ConfigNS,
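The sessions collection update now also disables auto-splitting alongside pinning the max chunk size. Assuming the IDL field names serialize unchanged, the resulting modifier is roughly:

    { $set: { maxChunkSizeBytes: <kMaxChunkSizeBytes>, noAutoSplit: true } }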
diff --git a/src/mongo/db/s/set_allow_migrations_coordinator.cpp b/src/mongo/db/s/set_allow_migrations_coordinator.cpp
index 3395aa7f465..d8cb15afb2e 100644
--- a/src/mongo/db/s/set_allow_migrations_coordinator.cpp
+++ b/src/mongo/db/s/set_allow_migrations_coordinator.cpp
@@ -50,14 +50,6 @@ bool isCollectionSharded(OperationContext* opCtx, const NamespaceString& nss) {
}
}
-SetAllowMigrationsCoordinator::SetAllowMigrationsCoordinator(ShardingDDLCoordinatorService* service,
- const BSONObj& initialState)
- : ShardingDDLCoordinator(service, initialState),
- _doc(SetAllowMigrationsCoordinatorDocument::parse(
- IDLParserErrorContext("SetAllowMigrationsCoordinatorDocument"), initialState)),
- _allowMigrations(_doc.getAllowMigrations()) {}
-
-
void SetAllowMigrationsCoordinator::checkIfOptionsConflict(const BSONObj& doc) const {
// If we have two set allow migrations on the same namespace, then the arguments must be the
// same.
@@ -72,23 +64,9 @@ void SetAllowMigrationsCoordinator::checkIfOptionsConflict(const BSONObj& doc) c
otherDoc.getSetAllowMigrationsRequest().toBSON()));
}
-boost::optional<BSONObj> SetAllowMigrationsCoordinator::reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept {
- BSONObjBuilder cmdBob;
- if (const auto& optComment = getForwardableOpMetadata().getComment()) {
- cmdBob.append(optComment.get().firstElement());
- }
- cmdBob.appendElements(_doc.getSetAllowMigrationsRequest().toBSON());
-
- BSONObjBuilder bob;
- bob.append("type", "op");
- bob.append("desc", "SetAllowMigrationsCoordinator");
- bob.append("op", "command");
- bob.append("ns", nss().toString());
- bob.append("command", cmdBob.obj());
- bob.append("active", true);
- return bob.obj();
+void SetAllowMigrationsCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
+ stdx::lock_guard lk{_docMutex};
+ cmdInfoBuilder->appendElements(_doc.getSetAllowMigrationsRequest().toBSON());
}
ExecutorFuture<void> SetAllowMigrationsCoordinator::_runImpl(
diff --git a/src/mongo/db/s/set_allow_migrations_coordinator.h b/src/mongo/db/s/set_allow_migrations_coordinator.h
index cf8e14348d7..78d2e03696a 100644
--- a/src/mongo/db/s/set_allow_migrations_coordinator.h
+++ b/src/mongo/db/s/set_allow_migrations_coordinator.h
@@ -38,31 +38,27 @@
namespace mongo {
-class SetAllowMigrationsCoordinator final : public ShardingDDLCoordinator {
+class SetAllowMigrationsCoordinator final
+ : public ShardingDDLCoordinatorImpl<SetAllowMigrationsCoordinatorDocument> {
public:
SetAllowMigrationsCoordinator(ShardingDDLCoordinatorService* service,
- const BSONObj& initialState);
+ const BSONObj& initialState)
+ : ShardingDDLCoordinatorImpl(service, "SetAllowMigrationsCoordinator", initialState),
+ _allowMigrations(_doc.getAllowMigrations()) {}
void checkIfOptionsConflict(const BSONObj& coorDoc) const override;
- boost::optional<BSONObj> reportForCurrentOp(
- MongoProcessInterface::CurrentOpConnectionsMode connMode,
- MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override;
+ void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override;
bool canAlwaysStartWhenUserWritesAreDisabled() const override {
return true;
}
private:
- ShardingDDLCoordinatorMetadata const& metadata() const override {
- return _doc.getShardingDDLCoordinatorMetadata();
- }
-
ExecutorFuture<void> _runImpl(std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept override;
- SetAllowMigrationsCoordinatorDocument _doc;
const bool _allowMigrations;
};
} // namespace mongo
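The coordinator now derives from the document-typed `ShardingDDLCoordinatorImpl`, which supplies `metadata()`, the `_doc` storage, and the generic currentOp report; subclasses contribute only their command payload. A sketch of the override pattern, with a hypothetical `MyCoordinator` and request getter:

    void MyCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {
        stdx::lock_guard lk{_docMutex};
        cmdInfoBuilder->appendElements(_doc.getMyRequest().toBSON());
    }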
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
deleted file mode 100644
index 0c8e2da5037..00000000000
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ /dev/null
@@ -1,340 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/auth/action_set.h"
-#include "mongo/db/auth/action_type.h"
-#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/auth/privilege.h"
-#include "mongo/db/catalog/collection_catalog.h"
-#include "mongo/db/catalog_raii.h"
-#include "mongo/db/client.h"
-#include "mongo/db/commands.h"
-#include "mongo/db/not_primary_error_tracker.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/db/s/collection_sharding_runtime.h"
-#include "mongo/db/s/shard_filtering_metadata_refresh.h"
-#include "mongo/db/s/sharding_state.h"
-#include "mongo/logv2/log.h"
-#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/grid.h"
-#include "mongo/s/request_types/set_shard_version_request.h"
-#include "mongo/util/str.h"
-
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
-
-namespace mongo {
-namespace {
-
-class SetShardVersion : public ErrmsgCommandDeprecated {
-public:
- SetShardVersion() : ErrmsgCommandDeprecated("setShardVersion") {}
-
- std::string help() const override {
- return "internal";
- }
-
- bool adminOnly() const override {
- return true;
- }
-
- AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
- return AllowedOnSecondary::kAlways;
- }
-
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
- return false;
- }
-
- void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) const override {
- ActionSet actions;
- actions.addAction(ActionType::internal);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
-
- bool errmsgRun(OperationContext* opCtx,
- const std::string&,
- const BSONObj& cmdObj,
- std::string& errmsg,
- BSONObjBuilder& result) {
- uassert(ErrorCodes::IllegalOperation,
- "can't issue setShardVersion from 'eval'",
- !opCtx->getClient()->isInDirectClient());
-
- auto const shardingState = ShardingState::get(opCtx);
- uassertStatusOK(shardingState->canAcceptShardedCommands());
-
- // Steps
- // 1. Set the `authoritative` variable from the command object.
- //
- // 2. Validate all command parameters against the info in our ShardingState, and return an
- // error if they do not match.
- //
- // 3. If the sent shardVersion is compatible with our shardVersion, return.
- //
- // 4. If the sent shardVersion indicates a drop, jump to step 6.
- //
- // 5. If the sent shardVersion is staler than ours, return a stale config error.
- //
- // 6. If the sent shardVersion is newer than ours (or indicates a drop), reload our metadata
- // and compare the sent shardVersion with what we reloaded. If the sent shardVersion is
- // staler than what we reloaded, return a stale config error, as in step 5.
-
- // Step 1
-
- Client* client = opCtx->getClient();
- NotPrimaryErrorTracker::get(client).disable();
-
- const bool authoritative = cmdObj.getBoolField("authoritative");
-
- // Step 2
-
- // Validate namespace parameter.
- const NamespaceString nss(cmdObj["setShardVersion"].String());
- uassert(ErrorCodes::InvalidNamespace,
- str::stream() << "Invalid namespace " << nss.ns(),
- nss.isValid());
-
- // Validate chunk version parameter.
- auto requestedVersion = ChunkVersion::parse(cmdObj[SetShardVersionRequest::kVersion]);
-
- // Step 3
-
- {
- boost::optional<AutoGetDb> autoDb;
- autoDb.emplace(opCtx, nss.db(), MODE_IS);
-
- // Secondary nodes cannot support set shard version
- uassert(ErrorCodes::NotWritablePrimary,
- str::stream() << "setShardVersion with collection version is only supported "
- "against primary nodes, but it was received for namespace "
- << nss.ns(),
- repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx,
- nss.db()));
-
- boost::optional<Lock::CollectionLock> collLock;
- collLock.emplace(opCtx, nss, MODE_IS);
-
- // Views do not require a shard version check. We do not care about invalid system views
- // for this check, only to validate if a view already exists for this namespace.
- if (autoDb->getDb() &&
- !CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss) &&
- CollectionCatalog::get(opCtx)->lookupViewWithoutValidatingDurable(opCtx, nss)) {
- return true;
- }
-
- auto* const csr = CollectionShardingRuntime::get(opCtx, nss);
- const ChunkVersion collectionShardVersion = [&] {
- auto optMetadata = csr->getCurrentMetadataIfKnown();
- return (optMetadata && optMetadata->isSharded()) ? optMetadata->getShardVersion()
- : ChunkVersion::UNSHARDED();
- }();
-
- if (requestedVersion.isWriteCompatibleWith(collectionShardVersion)) {
- return true;
- }
-
- // Step 4
-
- const bool isDropRequested =
- !requestedVersion.isSet() && collectionShardVersion.isSet();
-
- if (isDropRequested) {
- if (!authoritative) {
- result.appendBool("need_authoritative", true);
- result.append("ns", nss.ns());
- collectionShardVersion.appendLegacyWithField(&result, "globalVersion");
- errmsg = "dropping needs to be authoritative";
- return false;
- }
-
- // Fall through to metadata reload below
- } else {
- // Not Dropping
-
- // Step 5
-
- const auto kTenSeconds = Milliseconds(10000);
-
- if (requestedVersion.isOlderThan(collectionShardVersion)) {
- auto critSecSignal = csr->getCriticalSectionSignal(
- opCtx, ShardingMigrationCriticalSection::kWrite);
- if (critSecSignal) {
- collLock.reset();
- autoDb.reset();
- LOGV2(22056, "waiting till out of critical section");
- auto deadline = opCtx->getServiceContext()->getFastClockSource()->now() +
- std::min(opCtx->getRemainingMaxTimeMillis(), kTenSeconds);
-
- opCtx->runWithDeadline(deadline, ErrorCodes::ExceededTimeLimit, [&] {
- critSecSignal->wait(opCtx);
- });
- }
-
- errmsg = str::stream() << "shard global version for collection is higher "
- << "than trying to set to '" << nss.ns() << "'";
- result.append("ns", nss.ns());
- requestedVersion.appendLegacyWithField(&result, "version");
- collectionShardVersion.appendLegacyWithField(&result, "globalVersion");
- result.appendBool("reloadConfig", true);
- return false;
- }
-
- if (!collectionShardVersion.isSet() && !authoritative) {
- // Needed b/c when the last chunk is moved off a shard, the version gets reset
- // to zero, which should require a reload.
- auto critSecSignal = csr->getCriticalSectionSignal(
- opCtx, ShardingMigrationCriticalSection::kWrite);
- if (critSecSignal) {
- collLock.reset();
- autoDb.reset();
- LOGV2(22057, "waiting till out of critical section");
-
- auto deadline = opCtx->getServiceContext()->getFastClockSource()->now() +
- std::min(opCtx->getRemainingMaxTimeMillis(), kTenSeconds);
-
- opCtx->runWithDeadline(deadline, ErrorCodes::ExceededTimeLimit, [&] {
- critSecSignal->wait(opCtx);
- });
- }
-
- // need authoritative for first look
- result.append("ns", nss.ns());
- result.appendBool("need_authoritative", true);
- errmsg = str::stream() << "first time for collection '" << nss.ns() << "'";
- return false;
- }
-
- // Fall through to metadata reload below
- }
- }
-
- // Step 6
-
- const auto status = [&] {
- try {
- // TODO (SERVER-50812) remove this if-else: just call onShardVersionMismatch
- if (requestedVersion == ChunkVersion::UNSHARDED()) {
- forceShardFilteringMetadataRefresh(opCtx, nss);
- } else {
- onShardVersionMismatch(opCtx, nss, requestedVersion);
- }
- } catch (const DBException& ex) {
- return ex.toStatus();
- }
- return Status::OK();
- }();
-
- {
- // Avoid using AutoGetCollection() as it returns the InvalidViewDefinition error code
- // if an invalid view is in the 'system.views' collection.
- AutoGetDb autoDb(opCtx, nss.db(), MODE_IS);
- Lock::CollectionLock collLock(opCtx, nss, MODE_IS);
-
- const ChunkVersion currVersion = [&] {
- auto* const csr = CollectionShardingRuntime::get(opCtx, nss);
- auto optMetadata = csr->getCurrentMetadataIfKnown();
- return (optMetadata && optMetadata->isSharded()) ? optMetadata->getShardVersion()
- : ChunkVersion::UNSHARDED();
- }();
-
- if (!status.isOK()) {
- // The reload itself was interrupted or confused here
- LOGV2_WARNING(
- 22058,
- "Could not refresh metadata for the namespace {namespace} with the requested "
- "shard version {requestedShardVersion}; the current shard version is "
- "{currentShardVersion}: {error}",
- "Could not refresh metadata",
- "namespace"_attr = nss.ns(),
- "requestedShardVersion"_attr = requestedVersion,
- "currentShardVersion"_attr = currVersion,
- "error"_attr = redact(status));
-
- result.append("ns", nss.ns());
- status.serializeErrorToBSON(&result);
- requestedVersion.appendLegacyWithField(&result, "version");
- currVersion.appendLegacyWithField(&result, "globalVersion");
- result.appendBool("reloadConfig", true);
-
- return false;
- } else if (!requestedVersion.isWriteCompatibleWith(currVersion)) {
- // We reloaded a version that doesn't match the version mongos was trying to
- // set.
- static Occasionally sampler;
- if (sampler.tick()) {
- LOGV2_WARNING(
- 22059,
- "Requested shard version differs from the authoritative (current) shard "
- "version for the namespace {namespace}; the requested version is "
- "{requestedShardVersion}, but the current version is "
- "{currentShardVersion}",
- "Requested shard version differs from the authoritative (current) shard "
- "version for this namespace",
- "namespace"_attr = nss.ns(),
- "requestedShardVersion"_attr = requestedVersion,
- "currentShardVersion"_attr = currVersion);
- }
-
- // WARNING: the exact fields below are important for compatibility with mongos
- // version reload.
-
- result.append("ns", nss.ns());
- currVersion.appendLegacyWithField(&result, "globalVersion");
-
- // If this was a reset of a collection or the last chunk moved out, inform mongos to
- // do a full reload.
- if (currVersion.epoch() != requestedVersion.epoch() || !currVersion.isSet()) {
- result.appendBool("reloadConfig", true);
- // Zero-version also needed to trigger full mongos reload, sadly
- // TODO: Make this saner, and less impactful (full reload on last chunk is bad)
- ChunkVersion::UNSHARDED().appendLegacyWithField(&result, "version");
- // For debugging
- requestedVersion.appendLegacyWithField(&result, "origVersion");
- } else {
- requestedVersion.appendLegacyWithField(&result, "version");
- }
-
- return false;
- }
- }
-
- return true;
- }
-
-} setShardVersionCmd;
-
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/db/s/shard_key_index_util.cpp b/src/mongo/db/s/shard_key_index_util.cpp
index 9b3b6371a4a..1cdd4f99008 100644
--- a/src/mongo/db/s/shard_key_index_util.cpp
+++ b/src/mongo/db/s/shard_key_index_util.cpp
@@ -48,7 +48,8 @@ boost::optional<ShardKeyIndex> _findShardKeyPrefixedIndex(
const IndexCatalog* indexCatalog,
const boost::optional<std::string>& excludeName,
const BSONObj& shardKey,
- bool requireSingleKey) {
+ bool requireSingleKey,
+ std::string* errMsg = nullptr) {
if (collection->isClustered() &&
clustered_util::matchesClusterKey(shardKey, collection->getClusteredInfo())) {
auto clusteredIndexSpec = collection->getClusteredInfo()->getIndexSpec();
@@ -67,7 +68,8 @@ boost::optional<ShardKeyIndex> _findShardKeyPrefixedIndex(
continue;
}
- if (isCompatibleWithShardKey(opCtx, collection, indexEntry, shardKey, requireSingleKey)) {
+ if (isCompatibleWithShardKey(
+ opCtx, collection, indexEntry, shardKey, requireSingleKey, errMsg)) {
if (!indexEntry->isMultikey(opCtx, collection)) {
return ShardKeyIndex(indexDescriptor);
}
@@ -108,26 +110,72 @@ bool isCompatibleWithShardKey(OperationContext* opCtx,
const CollectionPtr& collection,
const IndexCatalogEntry* indexEntry,
const BSONObj& shardKey,
- bool requireSingleKey) {
+ bool requireSingleKey,
+ std::string* errMsg) {
+ // Accumulate the reasons an index that shares a prefix with shardKey still cannot be
+ // used for sharding, so a descriptive error can be appended to 'errMsg' on failure.
+ const int kErrorPartial = 0x01;
+ const int kErrorSparse = 0x02;
+ const int kErrorMultikey = 0x04;
+ const int kErrorCollation = 0x08;
+ const int kErrorNotPrefix = 0x10;
+ int reasons = 0;
+
auto desc = indexEntry->descriptor();
bool hasSimpleCollation = desc->collation().isEmpty();
- if (desc->isPartial() || desc->isSparse()) {
- return false;
+ if (desc->isPartial()) {
+ reasons |= kErrorPartial;
+ }
+
+ if (desc->isSparse()) {
+ reasons |= kErrorSparse;
}
if (!shardKey.isPrefixOf(desc->keyPattern(), SimpleBSONElementComparator::kInstance)) {
- return false;
+ reasons |= kErrorNotPrefix;
}
- if (!indexEntry->isMultikey(opCtx, collection) && hasSimpleCollation) {
- return true;
+ if (reasons == 0) { // Not partial, not sparse, and the shard key is a prefix of the index key.
+ if (!indexEntry->isMultikey(opCtx, collection)) {
+ if (hasSimpleCollation) {
+ return true;
+ }
+ } else {
+ reasons |= kErrorMultikey;
+ }
+ if (!requireSingleKey && hasSimpleCollation) {
+ return true;
+ }
}
- if (!requireSingleKey && hasSimpleCollation) {
- return true;
+ if (!hasSimpleCollation) {
+ reasons |= kErrorCollation;
}
+ if (errMsg && reasons != 0) {
+ std::string errors = "Index " + indexEntry->descriptor()->indexName() +
+ " cannot be used for sharding because:";
+ if (reasons & kErrorPartial) {
+ errors += " Index key is partial.";
+ }
+ if (reasons & kErrorSparse) {
+ errors += " Index key is sparse.";
+ }
+ if (reasons & kErrorMultikey) {
+ errors += " Index key is multikey.";
+ }
+ if (reasons & kErrorCollation) {
+ errors += " Index has a non-simple collation.";
+ }
+ if (reasons & kErrorNotPrefix) {
+ errors += " Shard key is not a prefix of index key.";
+ }
+ if (!errMsg->empty()) {
+ *errMsg += "\n";
+ }
+ *errMsg += errors;
+ }
return false;
}
@@ -145,9 +193,10 @@ boost::optional<ShardKeyIndex> findShardKeyPrefixedIndex(OperationContext* opCtx
const CollectionPtr& collection,
const IndexCatalog* indexCatalog,
const BSONObj& shardKey,
- bool requireSingleKey) {
+ bool requireSingleKey,
+ std::string* errMsg) {
return _findShardKeyPrefixedIndex(
- opCtx, collection, indexCatalog, boost::none, shardKey, requireSingleKey);
+ opCtx, collection, indexCatalog, boost::none, shardKey, requireSingleKey, errMsg);
}
} // namespace mongo
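
The rewritten isCompatibleWithShardKey() above collects every disqualifying property into a
small bitmask and renders one combined diagnostic instead of returning on the first failure.
A minimal standalone sketch of that pattern, with illustrative names rather than the real
MongoDB types:

    #include <iostream>
    #include <string>

    constexpr int kPartial = 0x01;
    constexpr int kSparse = 0x02;
    constexpr int kMultikey = 0x04;

    // Sketch only: collect all failure reasons, then build a single message.
    bool indexUsable(bool partial, bool sparse, bool multikey, std::string* errMsg) {
        int reasons = 0;
        if (partial)
            reasons |= kPartial;
        if (sparse)
            reasons |= kSparse;
        if (multikey)
            reasons |= kMultikey;
        if (reasons == 0)
            return true;
        if (errMsg) {
            *errMsg += "Index cannot be used for sharding because:";
            if (reasons & kPartial)
                *errMsg += " Index key is partial.";
            if (reasons & kSparse)
                *errMsg += " Index key is sparse.";
            if (reasons & kMultikey)
                *errMsg += " Index key is multikey.";
        }
        return false;
    }

    int main() {
        std::string err;
        if (!indexUsable(true, false, true, &err))
            std::cout << err << '\n';  // Prints both the partial and multikey reasons.
    }
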
diff --git a/src/mongo/db/s/shard_key_index_util.h b/src/mongo/db/s/shard_key_index_util.h
index 515523b0803..c474363d8ac 100644
--- a/src/mongo/db/s/shard_key_index_util.h
+++ b/src/mongo/db/s/shard_key_index_util.h
@@ -67,12 +67,16 @@ private:
/**
* Returns true if the given index is compatible with the shard key pattern.
+ *
+ * If return value is false and errMsg is non-null, the reasons that the existing index is
+ * incompatible will be appended to errMsg.
*/
bool isCompatibleWithShardKey(OperationContext* opCtx,
const CollectionPtr& collection,
const IndexCatalogEntry* indexEntry,
const BSONObj& shardKey,
- bool requireSingleKey);
+ bool requireSingleKey,
+ std::string* errMsg = nullptr);
/**
* Returns an index suitable for shard key range scans if it exists.
@@ -89,7 +93,8 @@ boost::optional<ShardKeyIndex> findShardKeyPrefixedIndex(OperationContext* opCtx
const CollectionPtr& collection,
const IndexCatalog* indexCatalog,
const BSONObj& shardKey,
- bool requireSingleKey);
+ bool requireSingleKey,
+ std::string* errMsg = nullptr);
/**
* Returns true if the given index name is the last remaining index that is compatible with the
diff --git a/src/mongo/db/s/shard_key_util.cpp b/src/mongo/db/s/shard_key_util.cpp
index 5a0acaeb2a4..a0363a907d4 100644
--- a/src/mongo/db/s/shard_key_util.cpp
+++ b/src/mongo/db/s/shard_key_util.cpp
@@ -107,7 +107,8 @@ bool validShardKeyIndexExists(OperationContext* opCtx,
const ShardKeyPattern& shardKeyPattern,
const boost::optional<BSONObj>& defaultCollation,
bool requiresUnique,
- const ShardKeyValidationBehaviors& behaviors) {
+ const ShardKeyValidationBehaviors& behaviors,
+ std::string* errMsg) {
auto indexes = behaviors.loadIndexes(nss);
// 1. Verify consistency with existing unique indexes
@@ -124,7 +125,9 @@ bool validShardKeyIndexExists(OperationContext* opCtx,
// 2. Check for a useful index
bool hasUsefulIndexForKey = false;
+ std::string allReasons;
for (const auto& idx : indexes) {
+ std::string reasons;
BSONObj currentKey = idx["key"].embeddedObject();
// Check 2.i. and 2.ii.
if (!idx["sparse"].trueValue() && idx["filter"].eoo() && idx["collation"].eoo() &&
@@ -143,6 +146,19 @@ bool validShardKeyIndexExists(OperationContext* opCtx,
idx["seed"].numberInt() == BSONElementHasher::DEFAULT_HASH_SEED);
hasUsefulIndexForKey = true;
}
+ if (idx["sparse"].trueValue()) {
+ reasons += " Index key is sparse.";
+ }
+ if (idx["filter"].ok()) {
+ reasons += " Index key is partial.";
+ }
+ if (idx["collation"].ok()) {
+ reasons += " Index has a non-simple collation.";
+ }
+ if (!reasons.empty()) {
+ allReasons +=
+ " Index " + idx["name"] + " cannot be used for sharding because [" + reasons + " ]";
+ }
}
// 3. If proposed key is required to be unique, additionally check for exact match.
@@ -173,6 +189,10 @@ bool validShardKeyIndexExists(OperationContext* opCtx,
}
}
+ if (errMsg && !allReasons.empty()) {
+ *errMsg += allReasons;
+ }
+
if (hasUsefulIndexForKey) {
// Check 2.iii Make sure that there is a useful, non-multikey index available.
behaviors.verifyUsefulNonMultiKeyIndex(nss, shardKeyPattern.toBSON());
@@ -188,17 +208,19 @@ bool validateShardKeyIndexExistsOrCreateIfPossible(OperationContext* opCtx,
bool unique,
bool enforceUniquenessCheck,
const ShardKeyValidationBehaviors& behaviors) {
+ std::string errMsg;
if (validShardKeyIndexExists(opCtx,
nss,
shardKeyPattern,
defaultCollation,
unique && enforceUniquenessCheck,
- behaviors)) {
+ behaviors,
+ &errMsg)) {
return false;
}
// 4. If no useful index, verify we can create one.
- behaviors.verifyCanCreateShardKeyIndex(nss);
+ behaviors.verifyCanCreateShardKeyIndex(nss, &errMsg);
// 5. If no useful index exists and we can create one, create one on proposedKey. Only need
// to call ensureIndex on primary shard, since indexes get copied to receiving shard
@@ -271,11 +293,12 @@ void ValidationBehaviorsShardCollection::verifyUsefulNonMultiKeyIndex(
uassert(ErrorCodes::InvalidOptions, res["errmsg"].str(), success);
}
-void ValidationBehaviorsShardCollection::verifyCanCreateShardKeyIndex(
- const NamespaceString& nss) const {
+void ValidationBehaviorsShardCollection::verifyCanCreateShardKeyIndex(const NamespaceString& nss,
+ std::string* errMsg) const {
uassert(ErrorCodes::InvalidOptions,
- "Please create an index that starts with the proposed shard key before "
- "sharding the collection",
+ str::stream() << "Please create an index that starts with the proposed shard key before"
+ " sharding the collection. "
+ << *errMsg,
_localClient->findOne(nss, BSONObj{}).isEmpty());
}
@@ -334,11 +357,13 @@ void ValidationBehaviorsRefineShardKey::verifyUsefulNonMultiKeyIndex(
uassertStatusOK(checkShardingIndexRes.commandStatus);
}
-void ValidationBehaviorsRefineShardKey::verifyCanCreateShardKeyIndex(
- const NamespaceString& nss) const {
- uasserted(ErrorCodes::InvalidOptions,
- "Please create an index that starts with the proposed shard key before "
- "refining the shard key of the collection");
+void ValidationBehaviorsRefineShardKey::verifyCanCreateShardKeyIndex(const NamespaceString& nss,
+ std::string* errMsg) const {
+ uasserted(
+ ErrorCodes::InvalidOptions,
+ str::stream() << "Please create an index that starts with the proposed shard key before"
+ " sharding the collection. "
+ << *errMsg);
}
void ValidationBehaviorsRefineShardKey::createShardKeyIndex(
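
validShardKeyIndexExists() now threads an optional errMsg out-parameter through its checks,
and both verifyCanCreateShardKeyIndex() overrides fold the accumulated text into the error
they raise. A self-contained sketch of that flow, under illustrative names:

    #include <cstdio>
    #include <stdexcept>
    #include <string>

    // Appends the reasons no index qualified; returns false to signal "not found".
    bool validIndexExists(std::string* errMsg) {
        if (errMsg)
            *errMsg += " Index a_1 cannot be used for sharding because [ Index key is sparse. ]";
        return false;
    }

    // Raises an error that carries whatever reasons were accumulated upstream.
    void verifyCanCreateIndex(const std::string& errMsg) {
        throw std::runtime_error(
            "Please create an index that starts with the proposed shard key. " + errMsg);
    }

    int main() {
        std::string errMsg;
        try {
            if (!validIndexExists(&errMsg))
                verifyCanCreateIndex(errMsg);
        } catch (const std::exception& e) {
            std::puts(e.what());  // The final message explains why each index was rejected.
        }
    }
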
diff --git a/src/mongo/db/s/shard_key_util.h b/src/mongo/db/s/shard_key_util.h
index 5d20a013bef..55905e7beb7 100644
--- a/src/mongo/db/s/shard_key_util.h
+++ b/src/mongo/db/s/shard_key_util.h
@@ -51,7 +51,8 @@ public:
virtual void verifyUsefulNonMultiKeyIndex(const NamespaceString& nss,
const BSONObj& proposedKey) const = 0;
- virtual void verifyCanCreateShardKeyIndex(const NamespaceString& nss) const = 0;
+ virtual void verifyCanCreateShardKeyIndex(const NamespaceString& nss,
+ std::string* errMsg) const = 0;
virtual void createShardKeyIndex(const NamespaceString& nss,
const BSONObj& proposedKey,
@@ -72,7 +73,8 @@ public:
void verifyUsefulNonMultiKeyIndex(const NamespaceString& nss,
const BSONObj& proposedKey) const override;
- void verifyCanCreateShardKeyIndex(const NamespaceString& nss) const override;
+ void verifyCanCreateShardKeyIndex(const NamespaceString& nss,
+ std::string* errMsg) const override;
void createShardKeyIndex(const NamespaceString& nss,
const BSONObj& proposedKey,
@@ -95,7 +97,8 @@ public:
void verifyUsefulNonMultiKeyIndex(const NamespaceString& nss,
const BSONObj& proposedKey) const override;
- void verifyCanCreateShardKeyIndex(const NamespaceString& nss) const override;
+ void verifyCanCreateShardKeyIndex(const NamespaceString& nss,
+ std::string* errMsg) const override;
void createShardKeyIndex(const NamespaceString& nss,
const BSONObj& proposedKey,
@@ -165,7 +168,8 @@ bool validShardKeyIndexExists(OperationContext* opCtx,
const ShardKeyPattern& shardKeyPattern,
const boost::optional<BSONObj>& defaultCollation,
bool requiresUnique,
- const ShardKeyValidationBehaviors& behaviors);
+ const ShardKeyValidationBehaviors& behaviors,
+ std::string* errMsg = nullptr);
void validateShardKeyIsNotEncrypted(OperationContext* opCtx,
const NamespaceString& nss,
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index 1651cfc167e..e52a5e28d1a 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -27,13 +27,8 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/shard_metadata_util.h"
-#include <memory>
-
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/ops/write_ops.h"
#include "mongo/db/s/type_shard_collection.h"
@@ -49,7 +44,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace shardmetadatautil {
namespace {
@@ -105,8 +99,9 @@ Status unsetPersistedRefreshFlags(OperationContext* opCtx,
// Set 'refreshing' to false and update the last refreshed collection version.
BSONObjBuilder updateBuilder;
updateBuilder.append(ShardCollectionType::kRefreshingFieldName, false);
- updateBuilder.appendTimestamp(ShardCollectionType::kLastRefreshedCollectionVersionFieldName,
- refreshedVersion.toLong());
+ updateBuilder.appendTimestamp(
+ ShardCollectionType::kLastRefreshedCollectionMajorMinorVersionFieldName,
+ refreshedVersion.toLong());
return updateShardCollectionsEntry(opCtx,
BSON(ShardCollectionType::kNssFieldName << nss.ns()),
@@ -141,12 +136,11 @@ StatusWith<RefreshState> getPersistedRefreshFlags(OperationContext* opCtx,
entry.getRefreshing() ? *entry.getRefreshing() : true,
entry.getLastRefreshedCollectionVersion()
? *entry.getLastRefreshedCollectionVersion()
- : ChunkVersion(0, 0, entry.getEpoch(), entry.getTimestamp())};
+ : ChunkVersion({entry.getEpoch(), entry.getTimestamp()}, {0, 0})};
}
StatusWith<ShardCollectionType> readShardCollectionsEntry(OperationContext* opCtx,
const NamespaceString& nss) {
-
try {
DBDirectClient client(opCtx);
FindCommandRequest findRequest{NamespaceString::kShardConfigCollectionsNamespace};
@@ -211,7 +205,8 @@ Status updateShardCollectionsEntry(OperationContext* opCtx,
if (upsert) {
// If upserting, this should be an update from the config server that does not have shard
// refresh / migration inc signal information.
- invariant(!update.hasField(ShardCollectionType::kLastRefreshedCollectionVersionFieldName));
+ invariant(!update.hasField(
+ ShardCollectionType::kLastRefreshedCollectionMajorMinorVersionFieldName));
}
try {
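
The constructor migration visible here (and throughout the tests below) moves ChunkVersion
call sites from the flat (major, minor, epoch, timestamp) form to a grouped
({epoch, timestamp}, {major, minor}) form. A simplified, self-contained mirror of the new
shape, using stand-ins for the real types:

    #include <cstdint>
    #include <iostream>
    #include <utility>

    struct CollectionGeneration {
        uint64_t epoch;      // stand-in for OID
        uint64_t timestamp;  // stand-in for Timestamp
    };

    struct ChunkVersionSketch {
        ChunkVersionSketch(CollectionGeneration g, std::pair<uint32_t, uint32_t> v)
            : gen(g), major(v.first), minor(v.second) {}
        CollectionGeneration gen;
        uint32_t major;
        uint32_t minor;
    };

    int main() {
        // Old shape (removed): ChunkVersion(0, 0, epoch, timestamp)
        // New shape (added):   ChunkVersion({epoch, timestamp}, {0, 0})
        ChunkVersionSketch v({/*epoch*/ 1, /*timestamp*/ 2}, {0, 0});
        std::cout << v.major << "." << v.minor << '\n';
    }
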
diff --git a/src/mongo/db/s/shard_metadata_util.h b/src/mongo/db/s/shard_metadata_util.h
index 52f043a0b9a..a23efa4b577 100644
--- a/src/mongo/db/s/shard_metadata_util.h
+++ b/src/mongo/db/s/shard_metadata_util.h
@@ -32,7 +32,7 @@
#include <string>
#include <vector>
-#include "mongo/base/status.h"
+#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/oid.h"
#include "mongo/s/chunk_version.h"
@@ -40,17 +40,11 @@
namespace mongo {
class ChunkType;
-class CollectionMetadata;
class NamespaceString;
class OperationContext;
class ShardCollectionType;
class ShardDatabaseType;
-template <typename T>
-class StatusWith;
-/**
- * Function helpers to locally, using a DBDirectClient, read and write sharding metadata on a shard.
- */
namespace shardmetadatautil {
/**
@@ -62,25 +56,6 @@ struct QueryAndSort {
};
/**
- * Subset of the shard's collections collection document that relates to refresh state.
- */
-struct RefreshState {
- bool operator==(const RefreshState& other) const;
-
- std::string toString() const;
-
- // The current generation of the collection.
- CollectionGeneration generation;
-
- // Whether a refresh is currently in progress.
- bool refreshing;
-
- // The collection version after the last complete refresh. Indicates change if refreshing has
- // started and finished since last loaded.
- ChunkVersion lastRefreshedCollectionVersion;
-};
-
-/**
* Returns the query needed to find incremental changes to the chunks collection on a shard server.
*
* The query has to find all the chunks $gte the current max version. Currently, any splits, merges
@@ -115,6 +90,26 @@ Status unsetPersistedRefreshFlags(OperationContext* opCtx,
const ChunkVersion& refreshedVersion);
/**
+ * Represents a subset of a collection's config.cache.collections entry that relates to refresh
+ * state.
+ */
+struct RefreshState {
+ bool operator==(const RefreshState& other) const;
+
+ std::string toString() const;
+
+ // The current generation of the collection.
+ CollectionGeneration generation;
+
+ // Whether a refresh is currently in progress.
+ bool refreshing;
+
+ // The collection version after the last complete refresh. Indicates change if refreshing has
+ // started and finished since last loaded.
+ ChunkVersion lastRefreshedCollectionVersion;
+};
+
+/**
* Reads the persisted refresh signal for 'nss' and returns those settings.
*/
StatusWith<RefreshState> getPersistedRefreshFlags(OperationContext* opCtx,
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index 6bad5d66ac1..af35cf373e8 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -27,14 +27,10 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/s/shard_metadata_util.h"
-
-#include "mongo/base/status.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/s/shard_metadata_util.h"
#include "mongo/db/s/shard_server_test_fixture.h"
#include "mongo/db/s/type_shard_collection.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -159,7 +155,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
}
}
- ChunkVersion maxCollVersion{0, 0, OID::gen(), Timestamp(1, 1)};
+ ChunkVersion maxCollVersion{{OID::gen(), Timestamp(1, 1)}, {0, 0}};
const KeyPattern keyPattern{BSON("a" << 1)};
const BSONObj defaultCollation{BSON("locale"
<< "fr_CA")};
@@ -216,7 +212,7 @@ TEST_F(ShardMetadataUtilTest, PersistedRefreshSignalStartAndFinish) {
ASSERT(state.generation.isSameCollection(maxCollVersion));
ASSERT_EQUALS(state.refreshing, true);
ASSERT_EQUALS(state.lastRefreshedCollectionVersion,
- ChunkVersion(0, 0, maxCollVersion.epoch(), maxCollVersion.getTimestamp()));
+ ChunkVersion({maxCollVersion.epoch(), maxCollVersion.getTimestamp()}, {0, 0}));
// Signal refresh finish
ASSERT_OK(unsetPersistedRefreshFlags(operationContext(), kNss, maxCollVersion));
@@ -235,7 +231,7 @@ TEST_F(ShardMetadataUtilTest, WriteAndReadChunks) {
// read all the chunks
QueryAndSort allChunkDiff = createShardChunkDiffQuery(
- ChunkVersion(0, 0, maxCollVersion.epoch(), maxCollVersion.getTimestamp()));
+ ChunkVersion({maxCollVersion.epoch(), maxCollVersion.getTimestamp()}, {0, 0}));
std::vector<ChunkType> readChunks = assertGet(readShardChunks(operationContext(),
kNss,
allChunkDiff.query,
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index e344e20b5e6..93a685475d4 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -27,17 +27,8 @@
* it in the license file.
*/
-
-#define LOGV2_FOR_CATALOG_REFRESH(ID, DLEVEL, MESSAGE, ...) \
- LOGV2_DEBUG_OPTIONS( \
- ID, DLEVEL, {logv2::LogComponent::kShardingCatalogRefresh}, MESSAGE, ##__VA_ARGS__)
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/shard_server_catalog_cache_loader.h"
-#include <memory>
-
#include "mongo/db/catalog/rename_collection.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
@@ -57,7 +48,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
using namespace shardmetadatautil;
@@ -66,7 +56,6 @@ using CollectionAndChangedChunks = CatalogCacheLoader::CollectionAndChangedChunk
namespace {
-MONGO_FAIL_POINT_DEFINE(hangPersistCollectionAndChangedChunksAfterDropChunks);
MONGO_FAIL_POINT_DEFINE(hangCollectionFlush);
AtomicWord<unsigned long long> taskIdGenerator{0};
@@ -85,11 +74,6 @@ void dropChunksIfEpochChanged(OperationContext* opCtx,
// Drop the 'config.cache.chunks.<ns>' collection
dropChunks(opCtx, nss);
- if (MONGO_unlikely(hangPersistCollectionAndChangedChunksAfterDropChunks.shouldFail())) {
- LOGV2(22093, "Hit hangPersistCollectionAndChangedChunksAfterDropChunks failpoint");
- hangPersistCollectionAndChangedChunksAfterDropChunks.pauseWhileSet(opCtx);
- }
-
LOGV2(5990400,
"Dropped persisted chunk metadata due to epoch change",
"namespace"_attr = nss,
@@ -131,7 +115,6 @@ Status persistCollectionAndChangedChunks(OperationContext* opCtx,
return status;
}
- // Update the chunk metadata.
try {
dropChunksIfEpochChanged(opCtx, maxLoaderVersion, collAndChunks.epoch, nss);
} catch (const DBException& ex) {
@@ -211,13 +194,13 @@ ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const Namespac
return ChunkVersion::UNSHARDED();
}
- auto statusWithChunk = shardmetadatautil::readShardChunks(opCtx,
- nss,
- BSONObj(),
- BSON(ChunkType::lastmod() << -1),
- 1LL,
- cachedCollection.getEpoch(),
- cachedCollection.getTimestamp());
+ auto statusWithChunk = readShardChunks(opCtx,
+ nss,
+ BSONObj(),
+ BSON(ChunkType::lastmod() << -1),
+ 1LL,
+ cachedCollection.getEpoch(),
+ cachedCollection.getTimestamp());
uassertStatusOKWithContext(
statusWithChunk,
str::stream() << "Failed to read highest version persisted chunk for collection '"
@@ -247,11 +230,9 @@ CollectionAndChangedChunks getPersistedMetadataSinceVersion(OperationContext* op
// If the epochs are the same we can safely take the timestamp from the shard coll entry.
ChunkVersion startingVersion = version.isSameCollection({shardCollectionEntry.getEpoch(),
shardCollectionEntry.getTimestamp()})
- ? ChunkVersion(version.majorVersion(),
- version.minorVersion(),
- version.epoch(),
- shardCollectionEntry.getTimestamp())
- : ChunkVersion(0, 0, shardCollectionEntry.getEpoch(), shardCollectionEntry.getTimestamp());
+ ? version
+ : ChunkVersion({shardCollectionEntry.getEpoch(), shardCollectionEntry.getTimestamp()},
+ {0, 0});
QueryAndSort diff = createShardChunkDiffQuery(startingVersion);
@@ -647,7 +628,14 @@ StatusWith<CollectionAndChangedChunks> ShardServerCatalogCacheLoader::_runSecond
const NamespaceString& nss,
const ChunkVersion& catalogCacheSinceVersion) {
+ Timer t;
forcePrimaryCollectionRefreshAndWaitForReplication(opCtx, nss);
+ LOGV2_FOR_CATALOG_REFRESH(5965800,
+ 2,
+ "Cache loader on secondary successfully waited for primary refresh "
+ "and replication of collection",
+ "namespace"_attr = nss,
+ "duration"_attr = Milliseconds(t.millis()));
// Read the local metadata.
@@ -776,7 +764,14 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
StatusWith<DatabaseType> ShardServerCatalogCacheLoader::_runSecondaryGetDatabase(
OperationContext* opCtx, StringData dbName) {
+ Timer t;
forcePrimaryDatabaseRefreshAndWaitForReplication(opCtx, dbName);
+ LOGV2_FOR_CATALOG_REFRESH(5965801,
+ 2,
+ "Cache loader on secondary successfully waited for primary refresh "
+ "and replication of database",
+ "db"_attr = dbName,
+ "duration"_attr = Milliseconds(t.millis()));
return readShardDatabasesEntry(opCtx, dbName);
}
@@ -1280,16 +1275,7 @@ ShardServerCatalogCacheLoader::CollAndChunkTask::CollAndChunkTask(
if (statusWithCollectionAndChangedChunks.isOK()) {
collectionAndChangedChunks = std::move(statusWithCollectionAndChangedChunks.getValue());
invariant(!collectionAndChangedChunks->changedChunks.empty());
- const auto highestVersion = collectionAndChangedChunks->changedChunks.back().getVersion();
- // Note that due to the way Phase 1 of the FCV upgrade writes timestamps to chunks
- // (non-atomically), it is possible that chunks exist with timestamps, but the
- // corresponding config.collections entry doesn't. In this case, the chunks timestamp
- // should be ignored when computing the max query version and we should use the
- // timestamp that comes from config.collections.
- maxQueryVersion = ChunkVersion(highestVersion.majorVersion(),
- highestVersion.minorVersion(),
- highestVersion.epoch(),
- collectionAndChangedChunks->timestamp);
+ maxQueryVersion = collectionAndChangedChunks->changedChunks.back().getVersion();
} else {
invariant(statusWithCollectionAndChangedChunks == ErrorCodes::NamespaceNotFound);
dropped = true;
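
Both secondary refresh paths above now time the wait on the primary and emit a debug-level
log line with the elapsed duration. The pattern, sketched with std::chrono standing in for
mongo's Timer and LOGV2 macros:

    #include <chrono>
    #include <iostream>
    #include <thread>

    int main() {
        auto start = std::chrono::steady_clock::now();
        // Stand-in for forcePrimaryCollectionRefreshAndWaitForReplication(opCtx, nss).
        std::this_thread::sleep_for(std::chrono::milliseconds(25));
        auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::steady_clock::now() - start);
        std::cout << "waited for primary refresh and replication: " << elapsed.count() << "ms\n";
    }
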
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
index 9f2f1ddf8d0..a111b9bf592 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include <boost/optional/optional_io.hpp>
#include "mongo/db/s/shard_server_catalog_cache_loader.h"
@@ -203,7 +201,7 @@ CollectionType ShardServerCatalogCacheLoaderTest::makeCollectionType(
std::pair<CollectionType, vector<ChunkType>>
ShardServerCatalogCacheLoaderTest::setUpChunkLoaderWithFiveChunks() {
- ChunkVersion collectionVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ ChunkVersion collectionVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
@@ -371,7 +369,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindNewEpoch)
// Then refresh again and find that the collection has been dropped and recreated.
- ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), Timestamp(2, 0));
+ ChunkVersion collVersionWithNewEpoch({OID::gen(), Timestamp(2, 0)}, {1, 0});
CollectionType collectionTypeWithNewEpoch = makeCollectionType(collVersionWithNewEpoch);
vector<ChunkType> chunksWithNewEpoch = makeFiveChunks(collVersionWithNewEpoch);
_remoteLoaderMock->setCollectionRefreshReturnValue(collectionTypeWithNewEpoch);
@@ -398,7 +396,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindMixedChun
// Then refresh again and retrieve chunks from the config server that have mixed epochs, as
// if the chunks were read around a drop and recreate of the collection.
- ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), Timestamp(2, 0));
+ ChunkVersion collVersionWithNewEpoch({OID::gen(), Timestamp(2, 0)}, {1, 0});
CollectionType collectionTypeWithNewEpoch = makeCollectionType(collVersionWithNewEpoch);
vector<ChunkType> chunksWithNewEpoch = makeFiveChunks(collVersionWithNewEpoch);
vector<ChunkType> mixedChunks;
@@ -441,7 +439,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindMixedChun
}
TEST_F(ShardServerCatalogCacheLoaderTest, TimeseriesFieldsAreProperlyPropagatedOnSSCCL) {
- ChunkVersion collectionVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ ChunkVersion collectionVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
@@ -483,7 +481,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, TimeseriesFieldsAreProperlyPropagatedO
}
void ShardServerCatalogCacheLoaderTest::refreshCollectionEpochOnRemoteLoader() {
- ChunkVersion collectionVersion(1, 2, OID::gen(), Timestamp(1, 1));
+ ChunkVersion collectionVersion({OID::gen(), Timestamp(1, 1)}, {1, 2});
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
_remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
diff --git a/src/mongo/db/s/sharding_data_transform_cumulative_metrics.cpp b/src/mongo/db/s/sharding_data_transform_cumulative_metrics.cpp
index add2ac6f728..dca4b07d7cf 100644
--- a/src/mongo/db/s/sharding_data_transform_cumulative_metrics.cpp
+++ b/src/mongo/db/s/sharding_data_transform_cumulative_metrics.cpp
@@ -50,8 +50,8 @@ constexpr auto kCountFailed = "countFailed";
constexpr auto kCountCanceled = "countCanceled";
constexpr auto kLastOpEndingChunkImbalance = "lastOpEndingChunkImbalance";
constexpr auto kActive = "active";
-constexpr auto kDocumentsProcessed = "documentsProcessed";
-constexpr auto kBytesWritten = "bytesWritten";
+constexpr auto kDocumentsCopied = "documentsCopied";
+constexpr auto kBytesCopied = "bytesCopied";
constexpr auto kOplogEntriesFetched = "oplogEntriesFetched";
constexpr auto kOplogEntriesApplied = "oplogEntriesApplied";
constexpr auto kInsertsApplied = "insertsApplied";
@@ -240,8 +240,8 @@ void ShardingDataTransformCumulativeMetrics::reportForServerStatus(BSONObjBuilde
void ShardingDataTransformCumulativeMetrics::reportActive(BSONObjBuilder* bob) const {
BSONObjBuilder s(bob->subobjStart(kActive));
- s.append(kDocumentsProcessed, _documentsProcessed.load());
- s.append(kBytesWritten, _bytesWritten.load());
+ s.append(kDocumentsCopied, _documentsCopied.load());
+ s.append(kBytesCopied, _bytesCopied.load());
s.append(kOplogEntriesFetched, _oplogEntriesFetched.load());
s.append(kOplogEntriesApplied, _oplogEntriesApplied.load());
s.append(kInsertsApplied, _insertsApplied.load());
@@ -422,8 +422,8 @@ const char* ShardingDataTransformCumulativeMetrics::fieldNameFor(
void ShardingDataTransformCumulativeMetrics::onInsertsDuringCloning(
int64_t count, int64_t bytes, const Milliseconds& elapsedTime) {
_collectionCloningTotalLocalBatchInserts.fetchAndAdd(1);
- _documentsProcessed.fetchAndAdd(count);
- _bytesWritten.fetchAndAdd(bytes);
+ _documentsCopied.fetchAndAdd(count);
+ _bytesCopied.fetchAndAdd(bytes);
_collectionCloningTotalLocalInsertTimeMillis.fetchAndAdd(
durationCount<Milliseconds>(elapsedTime));
}
diff --git a/src/mongo/db/s/sharding_data_transform_cumulative_metrics.h b/src/mongo/db/s/sharding_data_transform_cumulative_metrics.h
index dfd8c989628..5e6949cf001 100644
--- a/src/mongo/db/s/sharding_data_transform_cumulative_metrics.h
+++ b/src/mongo/db/s/sharding_data_transform_cumulative_metrics.h
@@ -191,8 +191,8 @@ private:
AtomicWord<int64_t> _totalBatchRetrievedDuringCloneMillis{0};
AtomicWord<int64_t> _oplogBatchApplied{0};
AtomicWord<int64_t> _oplogBatchAppliedMillis{0};
- AtomicWord<int64_t> _documentsProcessed{0};
- AtomicWord<int64_t> _bytesWritten{0};
+ AtomicWord<int64_t> _documentsCopied{0};
+ AtomicWord<int64_t> _bytesCopied{0};
AtomicWord<int64_t> _lastOpEndingChunkImbalance{0};
AtomicWord<int64_t> _readsDuringCriticalSection{0};
diff --git a/src/mongo/db/s/sharding_data_transform_cumulative_metrics_test.cpp b/src/mongo/db/s/sharding_data_transform_cumulative_metrics_test.cpp
index 99a221b10ba..5d6603c954c 100644
--- a/src/mongo/db/s/sharding_data_transform_cumulative_metrics_test.cpp
+++ b/src/mongo/db/s/sharding_data_transform_cumulative_metrics_test.cpp
@@ -330,8 +330,8 @@ TEST_F(ShardingDataTransformCumulativeMetricsTest, ReportContainsInsertsDuringCl
ASSERT_EQ(latencySection.getIntField("collectionCloningTotalLocalInsertTimeMillis"), 0);
auto activeSection = getActiveSection(_cumulativeMetrics);
- ASSERT_EQ(activeSection.getIntField("documentsProcessed"), 0);
- ASSERT_EQ(activeSection.getIntField("bytesWritten"), 0);
+ ASSERT_EQ(activeSection.getIntField("documentsCopied"), 0);
+ ASSERT_EQ(activeSection.getIntField("bytesCopied"), 0);
_cumulativeMetrics.onInsertsDuringCloning(140, 20763, Milliseconds(15));
@@ -340,8 +340,8 @@ TEST_F(ShardingDataTransformCumulativeMetricsTest, ReportContainsInsertsDuringCl
ASSERT_EQ(latencySection.getIntField("collectionCloningTotalLocalInsertTimeMillis"), 15);
activeSection = getActiveSection(_cumulativeMetrics);
- ASSERT_EQ(activeSection.getIntField("documentsProcessed"), 140);
- ASSERT_EQ(activeSection.getIntField("bytesWritten"), 20763);
+ ASSERT_EQ(activeSection.getIntField("documentsCopied"), 140);
+ ASSERT_EQ(activeSection.getIntField("bytesCopied"), 20763);
}
TEST_F(ShardingDataTransformCumulativeMetricsTest, ReportContainsInsertsDuringFetching) {
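
After this rename, the active metrics subsection reported by these classes would look
roughly as follows; the field names come from the constants above, while the values (taken
from the test) and the surrounding document shape are illustrative:

    "active" : {
        "documentsCopied" : NumberLong(140),
        "bytesCopied" : NumberLong(20763),
        "oplogEntriesFetched" : NumberLong(0),
        "oplogEntriesApplied" : NumberLong(0),
        "insertsApplied" : NumberLong(0),
        ...
    }
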
diff --git a/src/mongo/db/s/sharding_data_transform_instance_metrics.cpp b/src/mongo/db/s/sharding_data_transform_instance_metrics.cpp
index e74155e374b..807195c689d 100644
--- a/src/mongo/db/s/sharding_data_transform_instance_metrics.cpp
+++ b/src/mongo/db/s/sharding_data_transform_instance_metrics.cpp
@@ -85,11 +85,11 @@ ShardingDataTransformInstanceMetrics::ShardingDataTransformInstanceMetrics(
_originalCommand{std::move(originalCommand)},
_sourceNs{std::move(sourceNs)},
_role{role},
+ _startTime{startTime},
_clockSource{clockSource},
_observer{std::move(observer)},
_cumulativeMetrics{cumulativeMetrics},
_deregister{_cumulativeMetrics->registerInstanceMetrics(_observer.get())},
- _startTime{startTime},
_copyingStartTime{kNoDate},
_copyingEndTime{kNoDate},
_approxDocumentsToCopy{0},
@@ -118,7 +118,8 @@ ShardingDataTransformInstanceMetrics::~ShardingDataTransformInstanceMetrics() {
Milliseconds ShardingDataTransformInstanceMetrics::getHighEstimateRemainingTimeMillis() const {
switch (_role) {
case Role::kRecipient: {
- auto estimate = estimateRemainingRecipientTime(_applyingStartTime.load() != kNoDate,
+ auto estimate =
+ resharding::estimateRemainingRecipientTime(_applyingStartTime.load() != kNoDate,
_bytesCopied.load(),
_approxBytesToCopy.load(),
getCopyingElapsedTimeSecs(),
diff --git a/src/mongo/db/s/sharding_data_transform_instance_metrics.h b/src/mongo/db/s/sharding_data_transform_instance_metrics.h
index 6c508bbafd8..dbf81eabffb 100644
--- a/src/mongo/db/s/sharding_data_transform_instance_metrics.h
+++ b/src/mongo/db/s/sharding_data_transform_instance_metrics.h
@@ -164,13 +164,13 @@ protected:
"allShardsHighestRemainingOperationTimeEstimatedSecs";
private:
+ const Date_t _startTime;
+
ClockSource* _clockSource;
ObserverPtr _observer;
ShardingDataTransformCumulativeMetrics* _cumulativeMetrics;
ShardingDataTransformCumulativeMetrics::DeregistrationFunction _deregister;
- const Date_t _startTime;
-
AtomicWord<Date_t> _copyingStartTime;
AtomicWord<Date_t> _copyingEndTime;
AtomicWord<int32_t> _approxDocumentsToCopy;
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.h b/src/mongo/db/s/sharding_ddl_coordinator.h
index 5972c7ce9e6..51dcc023f60 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.h
+++ b/src/mongo/db/s/sharding_ddl_coordinator.h
@@ -40,8 +40,11 @@
#include "mongo/db/s/sharding_ddl_coordinator_gen.h"
#include "mongo/db/s/sharding_ddl_coordinator_service.h"
#include "mongo/executor/task_executor.h"
+#include "mongo/logv2/log.h"
#include "mongo/util/future.h"
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
namespace mongo {
ShardingDDLCoordinatorMetadata extractShardingDDLCoordinatorMetadata(const BSONObj& coorDoc);
@@ -117,74 +120,6 @@ protected:
virtual ShardingDDLCoordinatorMetadata const& metadata() const = 0;
- template <typename StateDoc>
- StateDoc _insertStateDocument(StateDoc&& newDoc) {
- auto copyMetadata = newDoc.getShardingDDLCoordinatorMetadata();
- copyMetadata.setRecoveredFromDisk(true);
- newDoc.setShardingDDLCoordinatorMetadata(copyMetadata);
-
- auto opCtx = cc().makeOperationContext();
- PersistentTaskStore<StateDoc> store(NamespaceString::kShardingDDLCoordinatorsNamespace);
- try {
- store.add(opCtx.get(), newDoc, WriteConcerns::kMajorityWriteConcernNoTimeout);
- } catch (const ExceptionFor<ErrorCodes::DuplicateKey>&) {
- // A series of step-up and step-down events can cause a node to try and insert the
- // document when it has already been persisted locally, but we must still wait for
- // majority commit.
- const auto replCoord = repl::ReplicationCoordinator::get(opCtx.get());
- const auto lastLocalOpTime = replCoord->getMyLastAppliedOpTime();
- WaitForMajorityService::get(opCtx->getServiceContext())
- .waitUntilMajority(lastLocalOpTime, opCtx.get()->getCancellationToken())
- .get(opCtx.get());
- }
-
- return std::move(newDoc);
- }
-
- template <typename StateDoc>
- StateDoc _updateStateDocument(OperationContext* opCtx, StateDoc&& newDoc) {
- PersistentTaskStore<StateDoc> store(NamespaceString::kShardingDDLCoordinatorsNamespace);
- invariant(newDoc.getShardingDDLCoordinatorMetadata().getRecoveredFromDisk());
- store.update(opCtx,
- BSON(StateDoc::kIdFieldName << newDoc.getId().toBSON()),
- newDoc.toBSON(),
- WriteConcerns::kMajorityWriteConcernNoTimeout);
- return std::move(newDoc);
- }
-
- // lazily acqiure Logical Session ID and a txn number
- template <typename StateDoc>
- StateDoc _updateSession(OperationContext* opCtx, StateDoc const& doc) {
- auto newShardingDDLCoordinatorMetadata = doc.getShardingDDLCoordinatorMetadata();
-
- auto optSession = newShardingDDLCoordinatorMetadata.getSession();
- if (optSession) {
- auto txnNumber = optSession->getTxnNumber();
- optSession->setTxnNumber(++txnNumber);
- newShardingDDLCoordinatorMetadata.setSession(optSession);
- } else {
- auto session = InternalSessionPool::get(opCtx)->acquireSystemSession();
- newShardingDDLCoordinatorMetadata.setSession(
- ShardingDDLSession(session.getSessionId(), session.getTxnNumber()));
- }
-
- StateDoc newDoc(doc);
- newDoc.setShardingDDLCoordinatorMetadata(std::move(newShardingDDLCoordinatorMetadata));
- return _updateStateDocument(opCtx, std::move(newDoc));
- }
-
- template <typename StateDoc>
- OperationSessionInfo getCurrentSession(StateDoc const& doc) const {
- invariant(doc.getShardingDDLCoordinatorMetadata().getSession());
- ShardingDDLSession shardingDDLSession =
- *doc.getShardingDDLCoordinatorMetadata().getSession();
-
- OperationSessionInfo osi;
- osi.setSessionId(shardingDDLSession.getLsid());
- osi.setTxnNumber(shardingDDLSession.getTxnNumber());
- return osi;
- }
-
/*
* Performs a noop write on all shards and the configsvr using the sessionId and txnNumber
* specified in 'osi'.
@@ -237,4 +172,204 @@ private:
std::stack<DistLockManager::ScopedLock> _scopedLocks;
};
+template <class StateDoc>
+class ShardingDDLCoordinatorImpl : public ShardingDDLCoordinator {
+public:
+ boost::optional<BSONObj> reportForCurrentOp(
+ MongoProcessInterface::CurrentOpConnectionsMode connMode,
+ MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override {
+ return basicReportBuilder().obj();
+ }
+
+protected:
+ ShardingDDLCoordinatorImpl(ShardingDDLCoordinatorService* service,
+ const std::string& name,
+ const BSONObj& initialStateDoc)
+ : ShardingDDLCoordinator(service, initialStateDoc),
+ _coordinatorName(name),
+ _initialState(initialStateDoc.getOwned()),
+ _doc(StateDoc::parse(IDLParserErrorContext("CoordinatorDocument"), _initialState)) {}
+
+ ShardingDDLCoordinatorMetadata const& metadata() const override {
+ return _doc.getShardingDDLCoordinatorMetadata();
+ }
+
+
+ virtual void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const {}
+
+ virtual BSONObjBuilder basicReportBuilder() const noexcept {
+ BSONObjBuilder bob;
+
+ // Append static info
+ bob.append("type", "op");
+ bob.append("ns", nss().toString());
+ bob.append("desc", _coordinatorName);
+ bob.append("op", "command");
+ bob.append("active", true);
+
+ // Create command description
+ BSONObjBuilder cmdInfoBuilder;
+ {
+ stdx::lock_guard lk{_docMutex};
+ if (const auto& optComment = getForwardableOpMetadata().getComment()) {
+ cmdInfoBuilder.append(optComment.get().firstElement());
+ }
+ }
+ appendCommandInfo(&cmdInfoBuilder);
+ bob.append("command", cmdInfoBuilder.obj());
+
+ return bob;
+ }
+
+ const std::string _coordinatorName;
+ const BSONObj _initialState;
+ mutable Mutex _docMutex = MONGO_MAKE_LATCH("ShardingDDLCoordinator::_docMutex");
+ StateDoc _doc;
+};
+
+template <class StateDoc, class Phase>
+class RecoverableShardingDDLCoordinator : public ShardingDDLCoordinatorImpl<StateDoc> {
+protected:
+ using ShardingDDLCoordinatorImpl<StateDoc>::_doc;
+ using ShardingDDLCoordinatorImpl<StateDoc>::_docMutex;
+
+ RecoverableShardingDDLCoordinator(ShardingDDLCoordinatorService* service,
+ const std::string& name,
+ const BSONObj& initialStateDoc)
+ : ShardingDDLCoordinatorImpl<StateDoc>(service, name, initialStateDoc) {}
+
+ virtual StringData serializePhase(const Phase& phase) const = 0;
+
+ template <typename Func>
+ auto _executePhase(const Phase& newPhase, Func&& func) {
+ return [=] {
+ const auto& currPhase = _doc.getPhase();
+
+ if (currPhase > newPhase) {
+ // Do not execute this phase if we already reached a subsequent one.
+ return;
+ }
+ if (currPhase < newPhase) {
+ // Persist the new phase if this is the first time we are executing it.
+ _enterPhase(newPhase);
+ }
+ return func();
+ };
+ }
+
+ void _enterPhase(const Phase& newPhase) {
+ auto newDoc = [&] {
+ stdx::lock_guard lk{_docMutex};
+ return _doc;
+ }();
+
+ newDoc.setPhase(newPhase);
+
+ LOGV2_DEBUG(5390501,
+ 2,
+ "DDL coordinator phase transition",
+ "coordinatorId"_attr = _doc.getId(),
+ "newPhase"_attr = serializePhase(newDoc.getPhase()),
+ "oldPhase"_attr = serializePhase(_doc.getPhase()));
+
+ auto opCtx = cc().makeOperationContext();
+
+ if (_doc.getPhase() == Phase::kUnset) {
+ _insertStateDocument(opCtx.get(), std::move(newDoc));
+ } else {
+ _updateStateDocument(opCtx.get(), std::move(newDoc));
+ }
+ }
+
+ BSONObjBuilder basicReportBuilder() const noexcept override {
+ auto baseReportBuilder = ShardingDDLCoordinatorImpl<StateDoc>::basicReportBuilder();
+
+ const auto currPhase = [&]() {
+ stdx::lock_guard l{_docMutex};
+ return _doc.getPhase();
+ }();
+
+ baseReportBuilder.append("currentPhase", serializePhase(currPhase));
+ return baseReportBuilder;
+ }
+
+ void _insertStateDocument(OperationContext* opCtx, StateDoc&& newDoc) {
+ auto copyMetadata = newDoc.getShardingDDLCoordinatorMetadata();
+ copyMetadata.setRecoveredFromDisk(true);
+ newDoc.setShardingDDLCoordinatorMetadata(copyMetadata);
+
+ PersistentTaskStore<StateDoc> store(NamespaceString::kShardingDDLCoordinatorsNamespace);
+ try {
+ store.add(opCtx, newDoc, WriteConcerns::kMajorityWriteConcernNoTimeout);
+ } catch (const ExceptionFor<ErrorCodes::DuplicateKey>&) {
+ // A series of step-up and step-down events can cause a node to try and insert the
+ // document when it has already been persisted locally, but we must still wait for
+ // majority commit.
+ const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ const auto lastLocalOpTime = replCoord->getMyLastAppliedOpTime();
+ WaitForMajorityService::get(opCtx->getServiceContext())
+ .waitUntilMajority(lastLocalOpTime, opCtx->getCancellationToken())
+ .get(opCtx);
+ }
+
+ {
+ stdx::lock_guard lk{_docMutex};
+ _doc = std::move(newDoc);
+ }
+ }
+
+ void _updateStateDocument(OperationContext* opCtx, StateDoc&& newDoc) {
+ PersistentTaskStore<StateDoc> store(NamespaceString::kShardingDDLCoordinatorsNamespace);
+ invariant(newDoc.getShardingDDLCoordinatorMetadata().getRecoveredFromDisk());
+ store.update(opCtx,
+ BSON(StateDoc::kIdFieldName << newDoc.getId().toBSON()),
+ newDoc.toBSON(),
+ WriteConcerns::kMajorityWriteConcernNoTimeout);
+
+ {
+ stdx::lock_guard lk{_docMutex};
+ _doc = std::move(newDoc);
+ }
+ }
+
+ // Lazily acquire a logical session ID and a txn number.
+ void _updateSession(OperationContext* opCtx) {
+ auto newDoc = [&] {
+ stdx::lock_guard lk{_docMutex};
+ return _doc;
+ }();
+ auto newShardingDDLCoordinatorMetadata = newDoc.getShardingDDLCoordinatorMetadata();
+
+ auto optSession = newShardingDDLCoordinatorMetadata.getSession();
+ if (optSession) {
+ auto txnNumber = optSession->getTxnNumber();
+ optSession->setTxnNumber(++txnNumber);
+ newShardingDDLCoordinatorMetadata.setSession(optSession);
+ } else {
+ auto session = InternalSessionPool::get(opCtx)->acquireSystemSession();
+ newShardingDDLCoordinatorMetadata.setSession(
+ ShardingDDLSession(session.getSessionId(), session.getTxnNumber()));
+ }
+
+ newDoc.setShardingDDLCoordinatorMetadata(std::move(newShardingDDLCoordinatorMetadata));
+ _updateStateDocument(opCtx, std::move(newDoc));
+ }
+
+ OperationSessionInfo getCurrentSession() const {
+ auto optSession = [&] {
+ stdx::lock_guard lk{_docMutex};
+ return _doc.getShardingDDLCoordinatorMetadata().getSession();
+ }();
+
+ invariant(optSession);
+
+ OperationSessionInfo osi;
+ osi.setSessionId(optSession->getLsid());
+ osi.setTxnNumber(optSession->getTxnNumber());
+ return osi;
+ }
+};
+
+#undef MONGO_LOGV2_DEFAULT_COMPONENT
+
} // namespace mongo
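
The new RecoverableShardingDDLCoordinator centralizes phase gating: a phase body runs only
if the persisted state document has not already advanced past it, and the phase is persisted
before first execution so recovery after a failover resumes at the right step. A
self-contained sketch of that control flow (illustrative names; no persistence or locking):

    #include <functional>
    #include <iostream>

    enum class Phase { kUnset, kCopy, kCommit };

    struct CoordinatorSketch {
        Phase persisted = Phase::kUnset;

        void enterPhase(Phase p) {
            persisted = p;  // Stand-in for the majority-committed document update.
        }

        std::function<void()> executePhase(Phase newPhase, std::function<void()> body) {
            return [=] {
                if (persisted > newPhase)
                    return;  // Already past this phase (e.g. resumed after failover): skip.
                if (persisted < newPhase)
                    enterPhase(newPhase);  // First execution: persist before running.
                body();
            };
        }
    };

    int main() {
        CoordinatorSketch c;
        c.executePhase(Phase::kCopy, [] { std::cout << "copy\n"; })();
        c.executePhase(Phase::kCommit, [] { std::cout << "commit\n"; })();
        c.executePhase(Phase::kCopy, [] { std::cout << "copy\n"; })();  // Skipped: already past kCopy.
    }
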
diff --git a/src/mongo/db/s/sharding_ddl_coordinator.idl b/src/mongo/db/s/sharding_ddl_coordinator.idl
index ce42c66a6e4..3a6b35e3eb4 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator.idl
+++ b/src/mongo/db/s/sharding_ddl_coordinator.idl
@@ -47,13 +47,9 @@ enums:
kDropDatabase: "dropDatabase"
kDropCollection: "dropCollection"
kRenameCollection: "renameCollection"
- # TODO SERVER-64720 remove once 6.0 becomes last LTS
- kCreateCollectionPre60Compatible: "createCollection"
kCreateCollection: "createCollection_V2"
kRefineCollectionShardKey: "refineCollectionShardKey"
kSetAllowMigrations: "setAllowMigrations"
- # TODO (SERVER-62325): Remove pre60 compatible collMod coordinator after 6.0 branching.
- kCollModPre60Compatible: "collMod"
kCollMod: "collMod_V2"
kReshardCollection: "reshardCollection"
kReshardCollectionNoResilient: "reshardCollectionNoResilient"
diff --git a/src/mongo/db/s/sharding_ddl_coordinator_service.cpp b/src/mongo/db/s/sharding_ddl_coordinator_service.cpp
index 4073c70fc58..f4494ace7eb 100644
--- a/src/mongo/db/s/sharding_ddl_coordinator_service.cpp
+++ b/src/mongo/db/s/sharding_ddl_coordinator_service.cpp
@@ -38,7 +38,6 @@
#include "mongo/db/pipeline/document_source_count.h"
#include "mongo/db/pipeline/expression_context.h"
#include "mongo/db/s/collmod_coordinator.h"
-#include "mongo/db/s/collmod_coordinator_pre60_compatible.h"
#include "mongo/db/s/compact_structured_encryption_data_coordinator.h"
#include "mongo/db/s/create_collection_coordinator.h"
#include "mongo/db/s/database_sharding_state.h"
@@ -76,10 +75,6 @@ std::shared_ptr<ShardingDDLCoordinator> constructShardingDDLCoordinatorInstance(
break;
case DDLCoordinatorTypeEnum::kRenameCollection:
return std::make_shared<RenameCollectionCoordinator>(service, std::move(initialState));
- case DDLCoordinatorTypeEnum::kCreateCollectionPre60Compatible:
- return std::make_shared<CreateCollectionCoordinatorPre60Compatible>(
- service, std::move(initialState));
- break;
case DDLCoordinatorTypeEnum::kCreateCollection:
return std::make_shared<CreateCollectionCoordinator>(service, std::move(initialState));
break;
@@ -94,10 +89,6 @@ std::shared_ptr<ShardingDDLCoordinator> constructShardingDDLCoordinatorInstance(
case DDLCoordinatorTypeEnum::kCollMod:
return std::make_shared<CollModCoordinator>(service, std::move(initialState));
break;
- case DDLCoordinatorTypeEnum::kCollModPre60Compatible:
- return std::make_shared<CollModCoordinatorPre60Compatible>(service,
- std::move(initialState));
- break;
case DDLCoordinatorTypeEnum::kReshardCollection:
return std::make_shared<ReshardCollectionCoordinator>(service, std::move(initialState));
break;
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index 8b5c2113adf..89eb4107f60 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -340,14 +340,7 @@ void shardedRenameMetadata(OperationContext* opCtx,
auto now = VectorClock::get(opCtx)->getTime();
auto newTimestamp = now.clusterTime().asTimestamp();
fromCollType.setTimestamp(newTimestamp);
- {
- // Only bump the epoch if the whole cluster is in FCV 5.0, so chunks do not contain epochs.
- FixedFCVRegion fixedFCVRegion(opCtx);
- if (serverGlobalParams.featureCompatibility.isGreaterThanOrEqualTo(
- multiversion::FeatureCompatibilityVersion::kFullyDowngradedTo_5_0)) {
- fromCollType.setEpoch(OID::gen());
- }
- }
+ fromCollType.setEpoch(OID::gen());
// Insert the TO collection entry
uassertStatusOK(catalogClient->insertConfigDocument(
@@ -506,16 +499,8 @@ void sendDropCollectionParticipantCommandToShards(OperationContext* opCtx,
const auto cmdObj =
CommandHelpers::appendMajorityWriteConcern(dropCollectionParticipant.toBSON({}));
- try {
- sharding_ddl_util::sendAuthenticatedCommandToShards(
- opCtx, nss.db(), cmdObj.addFields(osi.toBSON()), shardIds, executor);
- } catch (const ExceptionFor<ErrorCodes::NotARetryableWriteCommand>&) {
- // Older 5.0 binaries don't support running the _shardsvrDropCollectionParticipant
- // command as a retryable write yet. In that case, retry without attaching session
- // info.
- sharding_ddl_util::sendAuthenticatedCommandToShards(
- opCtx, nss.db(), cmdObj, shardIds, executor);
- }
+ sharding_ddl_util::sendAuthenticatedCommandToShards(
+ opCtx, nss.db(), cmdObj.addFields(osi.toBSON()), shardIds, executor);
}
} // namespace sharding_ddl_util
diff --git a/src/mongo/db/s/sharding_ddl_util_test.cpp b/src/mongo/db/s/sharding_ddl_util_test.cpp
index fd4e3905980..2ff3925c53e 100644
--- a/src/mongo/db/s/sharding_ddl_util_test.cpp
+++ b/src/mongo/db/s/sharding_ddl_util_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/logical_session_cache_noop.h"
#include "mongo/db/namespace_string.h"
@@ -47,7 +44,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
@@ -119,7 +115,7 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
const int nChunks = 10;
std::vector<ChunkType> chunks;
for (int i = 0; i < nChunks; i++) {
- ChunkVersion chunkVersion(1, i, fromEpoch, collTimestamp);
+ ChunkVersion chunkVersion({fromEpoch, collTimestamp}, {1, uint32_t(i)});
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUUID);
@@ -138,7 +134,7 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
const auto toEpoch = OID::gen();
const auto toUUID = UUID::gen();
for (int i = 0; i < nChunks; i++) {
- ChunkVersion chunkVersion(1, i, toEpoch, Timestamp(2));
+ ChunkVersion chunkVersion({toEpoch, Timestamp(2)}, {1, uint32_t(i)});
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(toUUID);
@@ -215,7 +211,7 @@ TEST_F(ShardingDDLUtilTest, RenamePreconditionsAreMet) {
opCtx, false /* sourceIsSharded */, kToNss, false /* dropTarget */);
// Initialize a chunk
- ChunkVersion chunkVersion(1, 1, OID::gen(), Timestamp(2, 1));
+ ChunkVersion chunkVersion({OID::gen(), Timestamp(2, 1)}, {1, 1});
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
@@ -256,7 +252,7 @@ TEST_F(ShardingDDLUtilTest, RenamePreconditionsTargetCollectionExists) {
auto opCtx = operationContext();
// Initialize a chunk
- ChunkVersion chunkVersion(1, 1, OID::gen(), Timestamp(2, 1));
+ ChunkVersion chunkVersion({OID::gen(), Timestamp(2, 1)}, {1, 1});
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
diff --git a/src/mongo/db/s/sharding_mongod_test_fixture.cpp b/src/mongo/db/s/sharding_mongod_test_fixture.cpp
index a05fddaa213..c7b078c89e0 100644
--- a/src/mongo/db/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/db/s/sharding_mongod_test_fixture.cpp
@@ -71,7 +71,6 @@
#include "mongo/s/client/shard_remote.h"
#include "mongo/s/grid.h"
#include "mongo/s/query/cluster_cursor_manager.h"
-#include "mongo/s/request_types/set_shard_version_request.h"
#include "mongo/util/clock_source_mock.h"
#include "mongo/util/tick_source_mock.h"
diff --git a/src/mongo/db/s/sharding_server_status.cpp b/src/mongo/db/s/sharding_server_status.cpp
index 8d560454382..82de4cfc5c9 100644
--- a/src/mongo/db/s/sharding_server_status.cpp
+++ b/src/mongo/db/s/sharding_server_status.cpp
@@ -73,14 +73,20 @@ public:
result.append("configsvrConnectionString",
shardRegistry->getConfigServerConnectionString().toString());
+ const auto vcTime = VectorClock::get(opCtx)->getTime();
+
const auto configOpTime = [&]() {
- const auto vcTime = VectorClock::get(opCtx)->getTime();
const auto vcConfigTimeTs = vcTime.configTime().asTimestamp();
return mongo::repl::OpTime(vcConfigTimeTs, mongo::repl::OpTime::kUninitializedTerm);
}();
-
configOpTime.append(&result, "lastSeenConfigServerOpTime");
+ const auto topologyOpTime = [&]() {
+ const auto vcTopologyTimeTs = vcTime.topologyTime().asTimestamp();
+ return mongo::repl::OpTime(vcTopologyTimeTs, mongo::repl::OpTime::kUninitializedTerm);
+ }();
+ topologyOpTime.append(&result, "lastSeenTopologyOpTime");
+
const long long maxChunkSizeInBytes =
grid->getBalancerConfiguration()->getMaxChunkSizeBytes();
result.append("maxChunkSizeInBytes", maxChunkSizeInBytes);
diff --git a/src/mongo/db/s/sharding_util.cpp b/src/mongo/db/s/sharding_util.cpp
index fde594f35cb..c082038d714 100644
--- a/src/mongo/db/s/sharding_util.cpp
+++ b/src/mongo/db/s/sharding_util.cpp
@@ -28,18 +28,12 @@
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/sharding_util.h"
#include <fmt/format.h>
#include "mongo/db/commands.h"
-#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/repl/repl_client_info.h"
-#include "mongo/db/s/type_shard_collection.h"
#include "mongo/logv2/log.h"
-#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/request_types/flush_routing_table_cache_updates_gen.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
@@ -113,45 +107,5 @@ std::vector<AsyncRequestsSender::Response> sendCommandToShards(
return responses;
}
-void downgradeCollectionBalancingFieldsToPre53(OperationContext* opCtx) {
- const NamespaceString collNss = [&]() {
- if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
- return NamespaceString::kShardConfigCollectionsNamespace;
- } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- return CollectionType::ConfigNS;
- }
- MONGO_UNREACHABLE;
- }();
-
- write_ops::UpdateCommandRequest updateOp(collNss);
- updateOp.setUpdates({[&] {
- write_ops::UpdateOpEntry entry;
- BSONObjBuilder updateCmd;
- BSONObjBuilder unsetBuilder(updateCmd.subobjStart("$unset"));
- unsetBuilder.append(CollectionType::kMaxChunkSizeBytesFieldName, 0);
- if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- unsetBuilder.append(CollectionType::kNoAutoSplitFieldName, 0);
- } else {
- unsetBuilder.append(ShardCollectionTypeBase::kAllowAutoSplitFieldName, 0);
- }
- unsetBuilder.doneFast();
- entry.setQ({});
- const BSONObj update = updateCmd.obj();
- entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(update));
- entry.setUpsert(false);
- entry.setMulti(true);
- return entry;
- }()});
-
- DBDirectClient client(opCtx);
- client.update(updateOp);
-
- const WriteConcernOptions majorityWC{
- WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, Seconds(0)};
- WriteConcernResult ignoreResult;
- auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
- uassertStatusOK(waitForWriteConcern(opCtx, latestOpTime, majorityWC, &ignoreResult));
-}
-
} // namespace sharding_util
} // namespace mongo
diff --git a/src/mongo/db/s/sharding_util.h b/src/mongo/db/s/sharding_util.h
index c5021b4d46f..783c6703138 100644
--- a/src/mongo/db/s/sharding_util.h
+++ b/src/mongo/db/s/sharding_util.h
@@ -61,14 +61,5 @@ std::vector<AsyncRequestsSender::Response> sendCommandToShards(
const std::shared_ptr<executor::TaskExecutor>& executor,
bool throwOnError = true);
-/**
- * Unset the `noAutosplit` and `maxChunkSizeBytes` fields from:
- * - `config.collections` on the CSRS
- * - `config.cache.collections` on shards
- *
- * TODO SERVER-62693 remove this method and all its usages once 6.0 branches out
- */
-void downgradeCollectionBalancingFieldsToPre53(OperationContext* opCtx);
-
} // namespace sharding_util
} // namespace mongo
diff --git a/src/mongo/db/s/sharding_write_router_bm.cpp b/src/mongo/db/s/sharding_write_router_bm.cpp
index 7a47c6eed21..6d20ad82215 100644
--- a/src/mongo/db/s/sharding_write_router_bm.cpp
+++ b/src/mongo/db/s/sharding_write_router_bm.cpp
@@ -103,7 +103,7 @@ std::pair<std::vector<mongo::ChunkType>, mongo::ChunkManager> createChunks(
for (uint32_t i = 0; i < nChunks; ++i) {
chunks.emplace_back(collIdentifier,
getRangeForChunk(i, nChunks),
- ChunkVersion{i + 1, 0, collEpoch, collTimestamp},
+ ChunkVersion({collEpoch, collTimestamp}, {i + 1, 0}),
pessimalShardSelector(i, nShards, nChunks));
}
diff --git a/src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp b/src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp
index 4e95395faaa..f0918cc5766 100644
--- a/src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp
+++ b/src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp
@@ -99,7 +99,7 @@ public:
// If abort actually went through, the resharding documents should be cleaned up.
// If they still exists, it could be because that it was interrupted or it is no
// longer primary.
- doNoopWrite(opCtx, "_shardsvrAbortReshardCollection no-op", ns());
+ resharding::doNoopWrite(opCtx, "_shardsvrAbortReshardCollection no-op", ns());
PersistentTaskStore<CommonReshardingMetadata> donorReshardingOpStore(
NamespaceString::kDonorReshardingOperationsNamespace);
uassert(5563802,
diff --git a/src/mongo/db/s/shardsvr_collmod_command.cpp b/src/mongo/db/s/shardsvr_collmod_command.cpp
index f0564913aa1..3df3e521579 100644
--- a/src/mongo/db/s/shardsvr_collmod_command.cpp
+++ b/src/mongo/db/s/shardsvr_collmod_command.cpp
@@ -33,19 +33,12 @@
#include "mongo/db/coll_mod_gen.h"
#include "mongo/db/coll_mod_reply_validation.h"
#include "mongo/db/commands.h"
-#include "mongo/db/commands/feature_compatibility_version.h"
#include "mongo/db/curop.h"
#include "mongo/db/s/collmod_coordinator.h"
-#include "mongo/db/s/collmod_coordinator_pre60_compatible.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/sharding_util.h"
#include "mongo/db/timeseries/catalog_helper.h"
-#include "mongo/db/timeseries/timeseries_commands_conversion_helper.h"
#include "mongo/logv2/log.h"
-#include "mongo/s/chunk_manager_targeter.h"
-#include "mongo/s/cluster_commands_helpers.h"
-#include "mongo/s/grid.h"
-#include "mongo/util/fail_point.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
@@ -53,9 +46,6 @@
namespace mongo {
namespace {
-MONGO_FAIL_POINT_DEFINE(collModPrimaryDispatching);
-MONGO_FAIL_POINT_DEFINE(collModCoordinatorPre60Compatible);
-
class ShardsvrCollModCommand final : public BasicCommandWithRequestParser<ShardsvrCollModCommand> {
public:
using Request = ShardsvrCollMod;
@@ -112,29 +102,6 @@ public:
CurOp::get(opCtx)->raiseDbProfileLevel(
CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(cmd.getNamespace().dbName()));
- boost::optional<FixedFCVRegion> fcvRegion;
- fcvRegion.emplace(opCtx);
-
- if (MONGO_unlikely(collModPrimaryDispatching.shouldFail())) {
- return runWithDispatchingCommands(opCtx, result, cmd);
- } else if (MONGO_unlikely(collModCoordinatorPre60Compatible.shouldFail())) {
- return runWithDDLCoordinatorPre60Compatible(opCtx, result, cmd, fcvRegion);
- }
-
- if (fcvRegion.get()->isLessThan(multiversion::FeatureCompatibilityVersion::kVersion_5_3)) {
- return runWithDispatchingCommands(opCtx, result, cmd);
- } else if (fcvRegion.get()->isLessThan(
- multiversion::FeatureCompatibilityVersion::kVersion_6_0)) {
- return runWithDDLCoordinatorPre60Compatible(opCtx, result, cmd, fcvRegion);
- } else {
- return runWithDDLCoordinator(opCtx, result, cmd, fcvRegion);
- }
- }
-
- bool runWithDDLCoordinator(OperationContext* opCtx,
- BSONObjBuilder& result,
- const ShardsvrCollMod& cmd,
- boost::optional<FixedFCVRegion>& fcvRegion) {
auto coordinatorDoc = CollModCoordinatorDocument();
coordinatorDoc.setCollModRequest(cmd.getCollModRequest());
coordinatorDoc.setShardingDDLCoordinatorMetadata(
@@ -142,73 +109,10 @@ public:
auto service = ShardingDDLCoordinatorService::getService(opCtx);
auto collModCoordinator = checked_pointer_cast<CollModCoordinator>(
service->getOrCreateInstance(opCtx, coordinatorDoc.toBSON()));
- fcvRegion = boost::none;
- result.appendElements(collModCoordinator->getResult(opCtx));
- return true;
- }
-
- bool runWithDDLCoordinatorPre60Compatible(OperationContext* opCtx,
- BSONObjBuilder& result,
- const ShardsvrCollMod& cmd,
- boost::optional<FixedFCVRegion>& fcvRegion) {
- auto coordinatorDoc = CollModCoordinatorDocument();
- coordinatorDoc.setCollModRequest(cmd.getCollModRequest());
- coordinatorDoc.setShardingDDLCoordinatorMetadata(
- {{cmd.getNamespace(), DDLCoordinatorTypeEnum::kCollModPre60Compatible}});
- auto service = ShardingDDLCoordinatorService::getService(opCtx);
- auto collModCoordinator = checked_pointer_cast<CollModCoordinatorPre60Compatible>(
- service->getOrCreateInstance(opCtx, coordinatorDoc.toBSON()));
- fcvRegion = boost::none;
result.appendElements(collModCoordinator->getResult(opCtx));
return true;
}
- bool runWithDispatchingCommands(OperationContext* opCtx,
- BSONObjBuilder& result,
- const ShardsvrCollMod& cmd) {
- const auto& nss = cmd.getNamespace();
- auto collModCmd = CollMod(nss);
- collModCmd.setCollModRequest(cmd.getCollModRequest());
- auto collModCmdObj = collModCmd.toBSON({});
-
- const auto targeter = ChunkManagerTargeter(opCtx, nss);
- const auto& routingInfo = targeter.getRoutingInfo();
- if (targeter.timeseriesNamespaceNeedsRewrite(nss)) {
- collModCmdObj =
- timeseries::makeTimeseriesCommand(collModCmdObj,
- nss,
- CollMod::kCommandName,
- CollMod::kIsTimeseriesNamespaceFieldName);
- }
-
- std::set<ShardId> participants;
- if (routingInfo.isSharded()) {
- std::unique_ptr<CollatorInterface> collator;
- const auto expCtx =
- make_intrusive<ExpressionContext>(opCtx, std::move(collator), targeter.getNS());
- routingInfo.getShardIdsForQuery(
- expCtx, {} /* query */, {} /* collation */, &participants);
- } else {
- participants.insert(routingInfo.dbPrimary());
- }
-
- auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor();
- const auto& responses = sharding_util::sendCommandToShards(
- opCtx,
- targeter.getNS().db(),
- CommandHelpers::appendMajorityWriteConcern(collModCmdObj, opCtx->getWriteConcern()),
- {std::make_move_iterator(participants.begin()),
- std::make_move_iterator(participants.end())},
- executor);
-
- std::string errmsg;
- auto ok = appendRawResponses(opCtx, &errmsg, &result, std::move(responses)).responseOK;
- if (!errmsg.empty()) {
- CommandHelpers::appendSimpleCommandStatus(result, ok, errmsg);
- }
- return ok;
- }
-
void validateResult(const BSONObj& resultObj) final {
StringDataSet ignorableFields({"raw", "ok", "errmsg"});
auto reply = Response::parse(IDLParserErrorContext("CollModReply"),
diff --git a/src/mongo/db/s/shardsvr_collmod_participant_command.cpp b/src/mongo/db/s/shardsvr_collmod_participant_command.cpp
index b321236caf1..2a7e78886b2 100644
--- a/src/mongo/db/s/shardsvr_collmod_participant_command.cpp
+++ b/src/mongo/db/s/shardsvr_collmod_participant_command.cpp
@@ -69,6 +69,10 @@ public:
return Command::AllowedOnSecondary::kNever;
}
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
+
class Invocation final : public InvocationBase {
public:
using InvocationBase::InvocationBase;
diff --git a/src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp b/src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp
index f4240c1eb0a..3d9be030fcb 100644
--- a/src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp
+++ b/src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp
@@ -107,7 +107,7 @@ public:
// If commit actually went through, the resharding documents will be cleaned up. If
// documents still exist, it could be because the commit was interrupted or because the
// underlying replica set node is no longer primary.
- doNoopWrite(opCtx, "_shardsvrCommitReshardCollection no-op", ns());
+ resharding::doNoopWrite(opCtx, "_shardsvrCommitReshardCollection no-op", ns());
PersistentTaskStore<CommonReshardingMetadata> donorReshardingOpStore(
NamespaceString::kDonorReshardingOperationsNamespace);
uassert(5795302,
diff --git a/src/mongo/db/s/shardsvr_create_collection_command.cpp b/src/mongo/db/s/shardsvr_create_collection_command.cpp
index bcc2e17a9fd..3769e253b7b 100644
--- a/src/mongo/db/s/shardsvr_create_collection_command.cpp
+++ b/src/mongo/db/s/shardsvr_create_collection_command.cpp
@@ -144,21 +144,11 @@ public:
FixedFCVRegion fixedFcvRegion(opCtx);
auto coordinatorDoc = [&] {
- if (serverGlobalParams.featureCompatibility.isLessThan(
- multiversion::FeatureCompatibilityVersion::kVersion_6_0)) {
- auto doc = CreateCollectionCoordinatorDocumentPre60Compatible();
- doc.setShardingDDLCoordinatorMetadata(
- {{std::move(nss),
- DDLCoordinatorTypeEnum::kCreateCollectionPre60Compatible}});
- doc.setCreateCollectionRequest(std::move(createCmdRequest));
- return doc.toBSON();
- } else {
- auto doc = CreateCollectionCoordinatorDocument();
- doc.setShardingDDLCoordinatorMetadata(
- {{std::move(nss), DDLCoordinatorTypeEnum::kCreateCollection}});
- doc.setCreateCollectionRequest(std::move(createCmdRequest));
- return doc.toBSON();
- }
+ auto doc = CreateCollectionCoordinatorDocument();
+ doc.setShardingDDLCoordinatorMetadata(
+ {{std::move(nss), DDLCoordinatorTypeEnum::kCreateCollection}});
+ doc.setCreateCollectionRequest(std::move(createCmdRequest));
+ return doc.toBSON();
}();
auto service = ShardingDDLCoordinatorService::getService(opCtx);
diff --git a/src/mongo/db/s/shardsvr_create_collection_participant_command.cpp b/src/mongo/db/s/shardsvr_create_collection_participant_command.cpp
index fd7c8217403..4157f1145f8 100644
--- a/src/mongo/db/s/shardsvr_create_collection_participant_command.cpp
+++ b/src/mongo/db/s/shardsvr_create_collection_participant_command.cpp
@@ -65,6 +65,10 @@ public:
return AllowedOnSecondary::kNever;
}
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
+
class Invocation final : public InvocationBase {
public:
using InvocationBase::InvocationBase;
@@ -76,6 +80,11 @@ public:
CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
opCtx->getWriteConcern());
+ const auto txnParticipant = TransactionParticipant::get(opCtx);
+ uassert(6077300,
+ str::stream() << Request::kCommandName << " must be run as a retryable write",
+ txnParticipant);
+
opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
MigrationDestinationManager::cloneCollectionIndexesAndOptions(
@@ -86,23 +95,15 @@ public:
request().getIdIndex(),
request().getOptions()});
- // The txnParticipant will only be missing when the command was sent from a coordinator
- // running an old 5.0.0 binary that didn't attach a sessionId & txnNumber.
- // TODO SERVER-60773: Once 6.0 has branched out, txnParticipant must always exist. Add a
- // uassert for that.
- auto txnParticipant = TransactionParticipant::get(opCtx);
- if (txnParticipant) {
- // Since no write that generated a retryable write oplog entry with this sessionId
- // and txnNumber happened, we need to make a dummy write so that the session gets
- // durably persisted on the oplog. This must be the last operation done on this
- // command.
- DBDirectClient client(opCtx);
- client.update(NamespaceString::kServerConfigurationNamespace.ns(),
- BSON("_id" << Request::kCommandName),
- BSON("$inc" << BSON("count" << 1)),
- true /* upsert */,
- false /* multi */);
- }
+ // Since no write happened that generated a retryable write oplog entry with this
+ // sessionId and txnNumber, we need to make a dummy write so that the session gets
+ // durably persisted in the oplog. This must be the last operation done by this command.
+ DBDirectClient client(opCtx);
+ client.update(NamespaceString::kServerConfigurationNamespace.ns(),
+ BSON("_id" << Request::kCommandName),
+ BSON("$inc" << BSON("count" << 1)),
+ true /* upsert */,
+ false /* multi */);
}
private:
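This file introduces the participant-command pattern that repeats in the
drop-collection and rename-collection participants below: the command advertises
supportsRetryableWrite(), asserts up front that a TransactionParticipant (i.e. a
sessionId and txnNumber) was attached, and ends with a dummy upsert so the session
is durably persisted in the oplog even though no other retryable write occurred.
A condensed sketch of the pattern; the uassert error code is per-command and the
counter document is keyed by the command name:

    // Reject callers that did not send the command as a retryable write.
    const auto txnParticipant = TransactionParticipant::get(opCtx);
    uassert(6077300,  // each command uses its own error code
            str::stream() << Request::kCommandName << " must be run as a retryable write",
            txnParticipant);

    // ... command-specific work ...

    // No write so far produced a retryable-write oplog entry for this session, so
    // upsert a dummy document to persist it. This must be the last operation.
    DBDirectClient client(opCtx);
    client.update(NamespaceString::kServerConfigurationNamespace.ns(),
                  BSON("_id" << Request::kCommandName),
                  BSON("$inc" << BSON("count" << 1)),
                  true /* upsert */,
                  false /* multi */);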
diff --git a/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp b/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp
index 658c894a209..31c19139c38 100644
--- a/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp
@@ -64,6 +64,10 @@ public:
"directly. Participates in droping a collection.";
}
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
+
using Request = ShardsvrDropCollectionParticipant;
class Invocation final : public InvocationBase {
@@ -75,6 +79,11 @@ public:
CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
opCtx->getWriteConcern());
+ const auto txnParticipant = TransactionParticipant::get(opCtx);
+ uassert(6077301,
+ str::stream() << Request::kCommandName << " must be run as a retryable write",
+ txnParticipant);
+
opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
try {
@@ -86,23 +95,16 @@ public:
"namespace"_attr = ns());
}
- // The txnParticipant will only be missing when the command was sent from a coordinator
- // running an old 5.0.0 binary that didn't attach a sessionId & txnNumber.
- // TODO SERVER-60773: Once 6.0 has branched out, txnParticipant must always exist. Add a
- // uassert for that.
- auto txnParticipant = TransactionParticipant::get(opCtx);
- if (txnParticipant) {
- // Since no write that generated a retryable write oplog entry with this sessionId
- // and txnNumber happened, we need to make a dummy write so that the session gets
- // durably persisted on the oplog. This must be the last operation done on this
- // command.
- DBDirectClient client(opCtx);
- client.update(NamespaceString::kServerConfigurationNamespace.ns(),
- BSON("_id" << Request::kCommandName),
- BSON("$inc" << BSON("count" << 1)),
- true /* upsert */,
- false /* multi */);
- }
+
+ // Since no write happened that generated a retryable write oplog entry with this
+ // sessionId and txnNumber, we need to make a dummy write so that the session gets
+ // durably persisted in the oplog. This must be the last operation done by this command.
+ DBDirectClient client(opCtx);
+ client.update(NamespaceString::kServerConfigurationNamespace.ns(),
+ BSON("_id" << Request::kCommandName),
+ BSON("$inc" << BSON("count" << 1)),
+ true /* upsert */,
+ false /* multi */);
}
private:
diff --git a/src/mongo/db/s/shardsvr_merge_chunks_command.cpp b/src/mongo/db/s/shardsvr_merge_chunks_command.cpp
index c3971e7afd6..8b3892a907b 100644
--- a/src/mongo/db/s/shardsvr_merge_chunks_command.cpp
+++ b/src/mongo/db/s/shardsvr_merge_chunks_command.cpp
@@ -149,8 +149,7 @@ void mergeChunks(OperationContext* opCtx,
auto shardVersionReceived = [&]() -> boost::optional<ChunkVersion> {
// Old versions might not have the shardVersion field
if (cmdResponse.response[ChunkVersion::kShardVersionField]) {
- return ChunkVersion::fromBSONPositionalOrNewerFormat(
- cmdResponse.response[ChunkVersion::kShardVersionField]);
+ return ChunkVersion::parse(cmdResponse.response[ChunkVersion::kShardVersionField]);
}
return boost::none;
}();
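Here and in split_chunk.cpp further down, the format-sniffing helper
ChunkVersion::fromBSONPositionalOrNewerFormat gives way to the plain
ChunkVersion::parse, now that only the current BSON format is expected in replies.
The presence check stays because older binaries may omit the shardVersion field
entirely. A sketch of the call-site shape, assuming cmdResponse.response holds the
shard's reply:

    boost::optional<ChunkVersion> shardVersion;
    if (cmdResponse.response[ChunkVersion::kShardVersionField]) {
        // parse() handles only the current format; the positional fallback is gone.
        shardVersion = ChunkVersion::parse(cmdResponse.response[ChunkVersion::kShardVersionField]);
    }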
diff --git a/src/mongo/db/s/shardsvr_participant_block_command.cpp b/src/mongo/db/s/shardsvr_participant_block_command.cpp
index 9ff5f58127c..c6774bd7bec 100644
--- a/src/mongo/db/s/shardsvr_participant_block_command.cpp
+++ b/src/mongo/db/s/shardsvr_participant_block_command.cpp
@@ -62,6 +62,10 @@ public:
return Command::AllowedOnSecondary::kNever;
}
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
+
class Invocation final : public InvocationBase {
public:
using InvocationBase::InvocationBase;
diff --git a/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp b/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp
index 73a182754e5..16d75a2bfb9 100644
--- a/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp
+++ b/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp
@@ -68,6 +68,10 @@ public:
return AllowedOnSecondary::kNever;
}
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
+
class Invocation final : public InvocationBase {
public:
using InvocationBase::InvocationBase;
@@ -76,6 +80,11 @@ public:
CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
opCtx->getWriteConcern());
+ const auto txnParticipant = TransactionParticipant::get(opCtx);
+ uassert(6077302,
+ str::stream() << Request::kCommandName << " must be run as a retryable write",
+ txnParticipant);
+
auto const shardingState = ShardingState::get(opCtx);
uassertStatusOK(shardingState->canAcceptShardedCommands());
auto const& req = request();
@@ -100,23 +109,15 @@ public:
renameCollectionParticipant->getBlockCRUDAndRenameCompletionFuture().get(opCtx);
- // The txnParticipant will only be missing when the command was sent from a coordinator
- // running an old 5.0.0 binary that didn't attach a sessionId & txnNumber.
- // TODO SERVER-60773: Once 6.0 has branched out, txnParticipant must always exist. Add a
- // uassert for that.
- auto txnParticipant = TransactionParticipant::get(opCtx);
- if (txnParticipant) {
- // Since no write that generated a retryable write oplog entry with this sessionId
- // and txnNumber happened, we need to make a dummy write so that the session gets
- // durably persisted on the oplog. This must be the last operation done on this
- // command.
- DBDirectClient client(opCtx);
- client.update(NamespaceString::kServerConfigurationNamespace.ns(),
- BSON("_id" << Request::kCommandName),
- BSON("$inc" << BSON("count" << 1)),
- true /* upsert */,
- false /* multi */);
- }
+ // Since no write happened that generated a retryable write oplog entry with this
+ // sessionId and txnNumber, we need to make a dummy write so that the session gets
+ // durably persisted in the oplog. This must be the last operation done by this command.
+ DBDirectClient client(opCtx);
+ client.update(NamespaceString::kServerConfigurationNamespace.ns(),
+ BSON("_id" << Request::kCommandName),
+ BSON("$inc" << BSON("count" << 1)),
+ true /* upsert */,
+ false /* multi */);
}
private:
@@ -162,6 +163,10 @@ public:
return AllowedOnSecondary::kNever;
}
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
+
class Invocation final : public InvocationBase {
public:
using InvocationBase::InvocationBase;
@@ -170,6 +175,11 @@ public:
CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
opCtx->getWriteConcern());
+ const auto txnParticipant = TransactionParticipant::get(opCtx);
+ uassert(6077303,
+ str::stream() << Request::kCommandName << " must be run as a retryable write",
+ txnParticipant);
+
auto const shardingState = ShardingState::get(opCtx);
uassertStatusOK(shardingState->canAcceptShardedCommands());
@@ -187,23 +197,16 @@ public:
optRenameCollectionParticipant.get()->getUnblockCrudFuture().get(opCtx);
}
- // The txnParticipant will only be missing when the command was sent from a coordinator
- // running an old 5.0.0 binary that didn't attach a sessionId & txnNumber.
- // TODO SERVER-60773: Once 6.0 has branched out, txnParticipant must always exist. Add a
- // uassert for that.
- auto txnParticipant = TransactionParticipant::get(opCtx);
- if (txnParticipant) {
- // Since no write that generated a retryable write oplog entry with this sessionId
- // and txnNumber happened, we need to make a dummy write so that the session gets
- // durably persisted on the oplog. This must be the last operation done on this
- // command.
- DBDirectClient client(opCtx);
- client.update(NamespaceString::kServerConfigurationNamespace.ns(),
- BSON("_id" << Request::kCommandName),
- BSON("$inc" << BSON("count" << 1)),
- true /* upsert */,
- false /* multi */);
- }
+ // Since no write happened that generated a retryable write oplog entry with this
+ // sessionId and txnNumber, we need to make a dummy write so that the session gets
+ // durably persisted in the oplog. This must be the last operation done by this
+ // command.
+ DBDirectClient client(opCtx);
+ client.update(NamespaceString::kServerConfigurationNamespace.ns(),
+ BSON("_id" << Request::kCommandName),
+ BSON("$inc" << BSON("count" << 1)),
+ true /* upsert */,
+ false /* multi */);
}
private:
diff --git a/src/mongo/db/s/shardsvr_resharding_operation_time_command.cpp b/src/mongo/db/s/shardsvr_resharding_operation_time_command.cpp
index 4c3e05a7879..56bf7b644f3 100644
--- a/src/mongo/db/s/shardsvr_resharding_operation_time_command.cpp
+++ b/src/mongo/db/s/shardsvr_resharding_operation_time_command.cpp
@@ -108,10 +108,9 @@ public:
}
Response typedRun(OperationContext* opCtx) {
- auto instances =
- getReshardingStateMachines<ReshardingRecipientService,
- ReshardingRecipientService::RecipientStateMachine>(opCtx,
- ns());
+ auto instances = resharding::getReshardingStateMachines<
+ ReshardingRecipientService,
+ ReshardingRecipientService::RecipientStateMachine>(opCtx, ns());
if (instances.empty()) {
return Response{boost::none, boost::none};
}
diff --git a/src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp b/src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp
index d5d2593bdf2..e8ed9e14277 100644
--- a/src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp
+++ b/src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp
@@ -127,6 +127,10 @@ public:
AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
return AllowedOnSecondary::kNever;
}
+
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
} shardsvrSetClusterParameterCmd;
} // namespace
diff --git a/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp b/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp
index 49bdc1b90bb..ceecece4027 100644
--- a/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp
+++ b/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp
@@ -198,6 +198,10 @@ public:
AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
return AllowedOnSecondary::kNever;
}
+
+ bool supportsRetryableWrite() const final {
+ return true;
+ }
} shardsvrSetUserWriteBlockModeCmd;
} // namespace
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index 062f5b47752..964871d3740 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -255,8 +255,7 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(
boost::optional<ChunkVersion> shardVersionReceived = [&]() -> boost::optional<ChunkVersion> {
// old versions might not have the shardVersion field
if (cmdResponse.response[ChunkVersion::kShardVersionField]) {
- return ChunkVersion::fromBSONPositionalOrNewerFormat(
- cmdResponse.response[ChunkVersion::kShardVersionField]);
+ return ChunkVersion::parse(cmdResponse.response[ChunkVersion::kShardVersionField]);
}
return boost::none;
}();
diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp
index 41b758cffec..c317922c251 100644
--- a/src/mongo/db/s/transaction_coordinator_service.cpp
+++ b/src/mongo/db/s/transaction_coordinator_service.cpp
@@ -379,6 +379,10 @@ TransactionCoordinatorService::getAllRemovalFuturesForCoordinatorsForInternalTra
std::shared_ptr<CatalogAndScheduler> cas = _getCatalogAndScheduler(opCtx);
auto& catalog = cas->catalog;
+ // On step up, we want to wait until the catalog has recovered all active transaction
+ // coordinators before getting the removal futures.
+ cas->recoveryTaskCompleted->get(opCtx);
+
auto predicate = [](const LogicalSessionId lsid,
const TxnNumberAndRetryCounter txnNumberAndRetryCounter,
const std::shared_ptr<TransactionCoordinator> transactionCoordinator) {
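The added wait closes a step-up race: removal futures must not be collected until
the catalog has been repopulated with the coordinators recovered from disk,
otherwise the predicate below would scan a partially recovered catalog. A generic
sketch of the gate using standard-library types (not the actual mongo ones):

    #include <future>

    struct CatalogAndScheduler {
        // Fulfilled once step-up recovery has re-inserted every active
        // transaction coordinator into the catalog.
        std::shared_future<void> recoveryTaskCompleted;
    };

    void collectRemovalFutures(CatalogAndScheduler& cas) {
        cas.recoveryTaskCompleted.get();  // block until recovery completes
        // ... safe to iterate the catalog and gather removal futures ...
    }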
diff --git a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
index e60c2ad339e..dd4b94aae1c 100644
--- a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
+++ b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp
@@ -59,6 +59,14 @@ public:
return true;
}
+ bool isTransactionCommand() const final {
+ return true;
+ }
+
+ bool allowedInTransactions() const final {
+ return true;
+ }
+
class PrepareTimestamp {
public:
PrepareTimestamp(Timestamp timestamp) : _timestamp(std::move(timestamp)) {}
@@ -383,6 +391,18 @@ public:
return AllowedOnSecondary::kNever;
}
+ bool isTransactionCommand() const final {
+ return true;
+ }
+
+ bool shouldCheckoutSession() const final {
+ return false;
+ }
+
+ bool allowedInTransactions() const final {
+ return true;
+ }
+
} coordinateCommitTransactionCmd;
} // namespace
diff --git a/src/mongo/db/s/type_shard_collection.cpp b/src/mongo/db/s/type_shard_collection.cpp
index 4dbb0b1c2f8..2628297e0d7 100644
--- a/src/mongo/db/s/type_shard_collection.cpp
+++ b/src/mongo/db/s/type_shard_collection.cpp
@@ -52,15 +52,6 @@ ShardCollectionType::ShardCollectionType(const BSONObj& obj) {
uassert(ErrorCodes::ShardKeyNotFound,
str::stream() << "Empty shard key. Failed to parse: " << obj.toString(),
!getKeyPattern().toBSON().isEmpty());
-
- // Last refreshed collection version is stored as a timestamp in the BSON representation of
- // shard collection type for legacy reasons. We therefore explicitly convert this timestamp, if
- // it exists, into a chunk version.
- if (getLastRefreshedCollectionVersion()) {
- ChunkVersion version = *getLastRefreshedCollectionVersion();
- setLastRefreshedCollectionVersion(ChunkVersion(
- version.majorVersion(), version.minorVersion(), getEpoch(), getTimestamp()));
- }
}
BSONObj ShardCollectionType::toBSON() const {
@@ -83,4 +74,15 @@ void ShardCollectionType::setAllowMigrations(bool allowMigrations) {
setPre50CompatibleAllowMigrations(false);
}
+boost::optional<ChunkVersion> ShardCollectionType::getLastRefreshedCollectionVersion() const {
+ // Last refreshed collection version is stored as a timestamp in the BSON representation of
+ // shard collection type for legacy reasons. We therefore explicitly convert this timestamp, if
+ // it exists, into a chunk version.
+ if (!getLastRefreshedCollectionMajorMinorVersion())
+ return boost::none;
+
+ Timestamp majorMinor = *getLastRefreshedCollectionMajorMinorVersion();
+ return ChunkVersion({getEpoch(), getTimestamp()}, {majorMinor.getSecs(), majorMinor.getInc()});
+}
+
} // namespace mongo
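The conversion rule the new getter implements: the persisted Timestamp's seconds
field carries the major version and its increment carries the minor version, while
the epoch and timestamp components come from the document's own fields. As the
updated test at the bottom of this patch checks, a stored Timestamp(123, 45)
round-trips to major version 123 and minor version 45 (sketch, inside
ShardCollectionType):

    Timestamp majorMinor(123, 45);
    ChunkVersion version({getEpoch(), getTimestamp()},
                         {majorMinor.getSecs(), majorMinor.getInc()});
    // version.majorVersion() == 123, version.minorVersion() == 45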
diff --git a/src/mongo/db/s/type_shard_collection.h b/src/mongo/db/s/type_shard_collection.h
index 8180358174a..de6e56eb784 100644
--- a/src/mongo/db/s/type_shard_collection.h
+++ b/src/mongo/db/s/type_shard_collection.h
@@ -42,7 +42,7 @@ public:
using ShardCollectionTypeBase::kEnterCriticalSectionCounterFieldName;
using ShardCollectionTypeBase::kEpochFieldName;
using ShardCollectionTypeBase::kKeyPatternFieldName;
- using ShardCollectionTypeBase::kLastRefreshedCollectionVersionFieldName;
+ using ShardCollectionTypeBase::kLastRefreshedCollectionMajorMinorVersionFieldName;
using ShardCollectionTypeBase::kNssFieldName;
using ShardCollectionTypeBase::kRefreshingFieldName;
using ShardCollectionTypeBase::kReshardingFieldsFieldName;
@@ -57,7 +57,6 @@ public:
using ShardCollectionTypeBase::getEnterCriticalSectionCounter;
using ShardCollectionTypeBase::getEpoch;
using ShardCollectionTypeBase::getKeyPattern;
- using ShardCollectionTypeBase::getLastRefreshedCollectionVersion;
using ShardCollectionTypeBase::getMaxChunkSizeBytes;
using ShardCollectionTypeBase::getNss;
using ShardCollectionTypeBase::getRefreshing;
@@ -94,6 +93,8 @@ public:
return getPre50CompatibleAllowMigrations().value_or(true);
}
void setAllowMigrations(bool allowMigrations);
+
+ boost::optional<ChunkVersion> getLastRefreshedCollectionVersion() const;
};
} // namespace mongo
diff --git a/src/mongo/db/s/type_shard_collection.idl b/src/mongo/db/s/type_shard_collection.idl
index 051a6de35d3..d56b231e302 100644
--- a/src/mongo/db/s/type_shard_collection.idl
+++ b/src/mongo/db/s/type_shard_collection.idl
@@ -80,15 +80,6 @@ imports:
- "mongo/s/resharding/type_collection_fields.idl"
- "mongo/s/type_collection_common_types.idl"
-types:
- ChunkVersionLegacy:
- bson_serialization_type: any
- description: "An object representing a chunk version for a collection. Ignores the
- component in the chunk version for legacy reasons."
- cpp_type: ChunkVersion
- serializer: ChunkVersion::serialiseMajorMinorVersionOnlyForShardCollectionType
- deserializer: ChunkVersion::parseMajorMinorVersionOnlyFromShardCollectionType
-
structs:
ShardCollectionTypeBase:
description: "Represents the layout and contents of documents contained in the shard
@@ -102,11 +93,12 @@ structs:
optional: false
epoch:
type: objectid
+ optional: false
description: "Uniquely identifies this instance of the collection, in case of
drop/create or shard key refine."
- optional: false
timestamp:
type: timestamp
+ optional: false
description: "Uniquely identifies this incarnation of the collection. Only changes
in case of drop and create, or shard key refine.
This field will store the ClusterTime of the Config Server when the
@@ -141,7 +133,8 @@ structs:
chunk metadata."
optional: true
lastRefreshedCollectionVersion:
- type: ChunkVersionLegacy
+ type: timestamp
+ cpp_name: lastRefreshedCollectionMajorMinorVersion
description: "Set by primaries and used by shard secondaries to safely refresh chunk
metadata. Indicates the collection version of the last complete chunk
metadata refresh, and is used to indicate if a refresh occurred if the
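With the custom ChunkVersionLegacy IDL type gone, the field is persisted as a plain
timestamp, and cpp_name renames the generated accessors so the hand-written
getLastRefreshedCollectionVersion() wrapper in type_shard_collection.h can keep the
old name. Roughly, the generated struct now exposes accessors of this shape (a
sketch of IDL output, not verbatim):

    boost::optional<Timestamp> getLastRefreshedCollectionMajorMinorVersion() const;
    void setLastRefreshedCollectionMajorMinorVersion(boost::optional<Timestamp> v);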
diff --git a/src/mongo/db/s/type_shard_collection_test.cpp b/src/mongo/db/s/type_shard_collection_test.cpp
index 59a85b1e13c..f21418cc206 100644
--- a/src/mongo/db/s/type_shard_collection_test.cpp
+++ b/src/mongo/db/s/type_shard_collection_test.cpp
@@ -67,25 +67,12 @@ TEST(ShardCollectionType, FromBSONEpochMatchesLastRefreshedCollectionVersionWhen
<< ShardCollectionType::kUuidFieldName << UUID::gen()
<< ShardCollectionType::kKeyPatternFieldName << kKeyPattern
<< ShardCollectionType::kUniqueFieldName << true
- << ShardCollectionType::kLastRefreshedCollectionVersionFieldName << Timestamp(1, 1)));
- ASSERT_EQ(epoch, shardCollType.getLastRefreshedCollectionVersion()->epoch());
- ASSERT_EQ(timestamp, shardCollType.getLastRefreshedCollectionVersion()->getTimestamp());
-}
-
-TEST(ShardCollectionType, FromBSONEpochMatchesLastRefreshedCollectionVersionWhenDate) {
- OID epoch = OID::gen();
- Timestamp timestamp(1, 1);
-
- ShardCollectionType shardCollType(
- BSON(ShardCollectionType::kNssFieldName
- << kNss.ns() << ShardCollectionType::kEpochFieldName << epoch
- << ShardCollectionType::kUuidFieldName << UUID::gen()
- << ShardCollectionType::kTimestampFieldName << timestamp
- << ShardCollectionType::kKeyPatternFieldName << kKeyPattern
- << ShardCollectionType::kUniqueFieldName << true
- << ShardCollectionType::kLastRefreshedCollectionVersionFieldName << Date_t()));
+ << ShardCollectionType::kLastRefreshedCollectionMajorMinorVersionFieldName
+ << Timestamp(123, 45)));
ASSERT_EQ(epoch, shardCollType.getLastRefreshedCollectionVersion()->epoch());
ASSERT_EQ(timestamp, shardCollType.getLastRefreshedCollectionVersion()->getTimestamp());
+ ASSERT_EQ(Timestamp(123, 45),
+ Timestamp(shardCollType.getLastRefreshedCollectionVersion()->toLong()));
}
TEST(ShardCollectionType, ToBSONEmptyDefaultCollationNotIncluded) {