author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2022-06-22 16:40:43 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2022-06-22 17:51:57 +0000
commit     6564aa70bbd0b1c2a2ba29c6e71cf0e65366f7f1 (patch)
tree       f3d1e036b2d99d9f1ec12243770b1acfab8723e3
parent     b618f09f9bf4f285266232ba4eb70d84c97355f9 (diff)
download   mongo-6564aa70bbd0b1c2a2ba29c6e71cf0e65366f7f1.tar.gz
SERVER-64449 Get rid of the gFeatureFlagNewWriteErrorExceptionFormat flag
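With the flag removed, WriteError::parse() and WriteError::serialize() (see the write_ops.cpp hunk below) always use the new format: the error's code, errmsg and any extra info are stored at the top level of the error object, instead of being rewritten as OBSOLETE_StaleShardVersion with the StaleConfig details nested under "errInfo". As a rough, illustrative sketch (not part of the commit; the StaleConfigInfo fields are elided), a StaleConfig write error changes shape roughly like this:

    // Legacy shape, emitted only while the feature flag was disabled:
    { index: 0, code: <OBSOLETE_StaleShardVersion>, errmsg: "...", errInfo: { <StaleConfigInfo fields> } }

    // Shape kept by this commit, matching how a Status is serialised in a regular command response:
    { index: 0, code: <StaleConfig>, errmsg: "...", <StaleConfigInfo fields inlined> }

Most of the remaining churn in the test files below updates ChunkVersion construction to the ChunkVersion({epoch, timestamp}, {major, minor}) form.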
-rw-r--r--  src/mongo/db/ops/SConscript  1
-rw-r--r--  src/mongo/db/ops/new_write_error_exception_format_feature_flag.idl  41
-rw-r--r--  src/mongo/db/ops/write_ops.cpp  39
-rw-r--r--  src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp  6
-rw-r--r--  src/mongo/db/pipeline/sharded_union_test.cpp  50
-rw-r--r--  src/mongo/db/s/balancer/balance_stats_test.cpp  4
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp  10
-rw-r--r--  src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp  10
-rw-r--r--  src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp  3
-rw-r--r--  src/mongo/db/s/balancer/type_migration_test.cpp  17
-rw-r--r--  src/mongo/db/s/collection_metadata_filtering_test.cpp  4
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp  4
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp  47
-rw-r--r--  src/mongo/db/s/config/initial_split_policy_test.cpp  6
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp  10
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp  120
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp  27
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp  29
-rw-r--r--  src/mongo/db/s/metadata_manager_test.cpp  4
-rw-r--r--  src/mongo/db/s/operation_sharding_state_test.cpp  10
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp  6
-rw-r--r--  src/mongo/db/s/resharding/resharding_data_replication_test.cpp  9
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp  6
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp  14
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service_test.cpp  2
-rw-r--r--  src/mongo/db/s/shard_metadata_util.cpp  3
-rw-r--r--  src/mongo/db/s/shard_metadata_util_test.cpp  12
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp  12
-rw-r--r--  src/mongo/db/s/sharding_ddl_util_test.cpp  12
-rw-r--r--  src/mongo/db/s/sharding_write_router_bm.cpp  2
-rw-r--r--  src/mongo/s/append_raw_responses_test.cpp  7
-rw-r--r--  src/mongo/s/catalog/type_chunk.cpp  10
-rw-r--r--  src/mongo/s/catalog/type_chunk_test.cpp  30
-rw-r--r--  src/mongo/s/catalog_cache_refresh_test.cpp  36
-rw-r--r--  src/mongo/s/catalog_cache_test.cpp  15
-rw-r--r--  src/mongo/s/catalog_cache_test_fixture.cpp  10
-rw-r--r--  src/mongo/s/chunk_manager_query_test.cpp  6
-rw-r--r--  src/mongo/s/chunk_manager_refresh_bm.cpp  6
-rw-r--r--  src/mongo/s/chunk_map_test.cpp  11
-rw-r--r--  src/mongo/s/chunk_test.cpp  13
-rw-r--r--  src/mongo/s/chunk_version.h  4
-rw-r--r--  src/mongo/s/chunk_version_test.cpp  46
-rw-r--r--  src/mongo/s/comparable_chunk_version_test.cpp  50
-rw-r--r--  src/mongo/s/query/sharded_agg_test_fixture.h  2
-rw-r--r--  src/mongo/s/request_types/move_chunk_request_test.cpp  6
-rw-r--r--  src/mongo/s/routing_table_history_test.cpp  94
-rw-r--r--  src/mongo/s/stale_shard_version_helpers_test.cpp  8
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec_test.cpp  220
-rw-r--r--  src/mongo/s/write_ops/batched_command_request_test.cpp  6
-rw-r--r--  src/mongo/s/write_ops/batched_command_response_test.cpp  41
-rw-r--r--  src/mongo/s/write_ops/write_op_test.cpp  34
51 files changed, 494 insertions, 681 deletions
diff --git a/src/mongo/db/ops/SConscript b/src/mongo/db/ops/SConscript
index 0b736897acc..983698e5060 100644
--- a/src/mongo/db/ops/SConscript
+++ b/src/mongo/db/ops/SConscript
@@ -34,7 +34,6 @@ env.Library(
env.Library(
target='write_ops_parsers',
source=[
- 'new_write_error_exception_format_feature_flag.idl',
'write_ops.cpp',
'write_ops.idl',
],
diff --git a/src/mongo/db/ops/new_write_error_exception_format_feature_flag.idl b/src/mongo/db/ops/new_write_error_exception_format_feature_flag.idl
deleted file mode 100644
index f5fb71095b0..00000000000
--- a/src/mongo/db/ops/new_write_error_exception_format_feature_flag.idl
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (C) 2022-present MongoDB, Inc.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the Server Side Public License, version 1,
-# as published by MongoDB, Inc.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# Server Side Public License for more details.
-#
-# You should have received a copy of the Server Side Public License
-# along with this program. If not, see
-# <http://www.mongodb.com/licensing/server-side-public-license>.
-#
-# As a special exception, the copyright holders give permission to link the
-# code of portions of this program with the OpenSSL library under certain
-# conditions as described in each individual source file and distribute
-# linked combinations including the program with the OpenSSL library. You
-# must comply with the Server Side Public License in all respects for
-# all of the code used other than as permitted herein. If you modify file(s)
-# with this exception, you may extend this exception to your version of the
-# file(s), but you are not obligated to do so. If you do not wish to do so,
-# delete this exception statement from your version. If you delete this
-# exception statement from all source files in the program, then also delete
-# it in the license file.
-#
-
-global:
- cpp_namespace: "mongo::feature_flags"
-
-imports:
- - "mongo/idl/basic_types.idl"
-
-feature_flags:
- featureFlagNewWriteErrorExceptionFormat:
- description: Feature flag for enabling the new write error format which avoids serialising
- StaleShardVersion with the information of StaleConfig.
- cpp_varname: gFeatureFlagNewWriteErrorExceptionFormat
- default: true
- version: 6.0
diff --git a/src/mongo/db/ops/write_ops.cpp b/src/mongo/db/ops/write_ops.cpp
index 54cef4d3d2a..92d0478a541 100644
--- a/src/mongo/db/ops/write_ops.cpp
+++ b/src/mongo/db/ops/write_ops.cpp
@@ -30,7 +30,6 @@
#include "mongo/db/ops/write_ops.h"
#include "mongo/db/dbmessage.h"
-#include "mongo/db/ops/new_write_error_exception_format_feature_flag_gen.h"
#include "mongo/db/pipeline/aggregation_request_helper.h"
#include "mongo/db/update/update_oplog_entry_serialization.h"
#include "mongo/db/update/update_oplog_entry_version.h"
@@ -295,18 +294,6 @@ WriteError WriteError::parse(const BSONObj& obj) {
auto code = ErrorCodes::Error(obj[WriteError::kCodeFieldName].Int());
auto errmsg = obj[WriteError::kErrmsgFieldName].valueStringDataSafe();
- // At least up to FCV 5.x, the write commands operation used to convert StaleConfig errors
- // into StaleShardVersion and store the extra info of StaleConfig in a sub-field called
- // "errInfo".
- //
- // TODO (SERVER-64449): This special parsing should be removed in the stable version
- // following the resolution of this ticket.
- if (code == ErrorCodes::OBSOLETE_StaleShardVersion) {
- return Status(ErrorCodes::StaleConfig,
- std::move(errmsg),
- obj[WriteError::kErrInfoFieldName].Obj());
- }
-
// All remaining errors have the error stored at the same level as the code and errmsg (in
// the same way that Status is serialised as part of regular command response)
return Status(code, std::move(errmsg), obj);
@@ -319,28 +306,10 @@ BSONObj WriteError::serialize() const {
BSONObjBuilder errBuilder;
errBuilder.append(WriteError::kIndexFieldName, _index);
- // At least up to FCV 5.x, the write commands operation used to convert StaleConfig errors into
- // StaleShardVersion and store the extra info of StaleConfig in a sub-field called "errInfo".
- // This logic preserves this for backwards compatibility.
- //
- // TODO (SERVER-64449): This special serialisation should be removed in the stable version
- // following the resolution of this ticket.
- if (_status == ErrorCodes::StaleConfig &&
- !feature_flags::gFeatureFlagNewWriteErrorExceptionFormat.isEnabled(
- serverGlobalParams.featureCompatibility)) {
- errBuilder.append(WriteError::kCodeFieldName,
- int32_t(ErrorCodes::OBSOLETE_StaleShardVersion));
- errBuilder.append(WriteError::kErrmsgFieldName, _status.reason());
- auto extraInfo = _status.extraInfo();
- invariant(extraInfo);
- BSONObjBuilder extraInfoBuilder(errBuilder.subobjStart(WriteError::kErrInfoFieldName));
- extraInfo->serialize(&extraInfoBuilder);
- } else {
- errBuilder.append(WriteError::kCodeFieldName, int32_t(_status.code()));
- errBuilder.append(WriteError::kErrmsgFieldName, _status.reason());
- if (auto extraInfo = _status.extraInfo()) {
- extraInfo->serialize(&errBuilder);
- }
+ errBuilder.append(WriteError::kCodeFieldName, int32_t(_status.code()));
+ errBuilder.append(WriteError::kErrmsgFieldName, _status.reason());
+ if (auto extraInfo = _status.extraInfo()) {
+ extraInfo->serialize(&errBuilder);
}
return errBuilder.obj();
diff --git a/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp b/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp
index a0e9bd5e572..a8ca2a48896 100644
--- a/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp
+++ b/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/pipeline/aggregation_context_fixture.h"
#include "mongo/db/pipeline/process_interface/standalone_process_interface.h"
#include "mongo/unittest/unittest.h"
@@ -67,7 +65,7 @@ TEST_F(ProcessInterfaceStandaloneTest,
FailsToEnsureFieldsUniqueIfTargetCollectionVersionIsSpecifiedOnMongos) {
auto expCtx = getExpCtx();
auto targetCollectionVersion =
- boost::make_optional(ChunkVersion(0, 0, OID::gen(), Timestamp(1, 1)));
+ boost::make_optional(ChunkVersion({OID::gen(), Timestamp(1, 1)}, {0, 0}));
auto processInterface = makeProcessInterface();
// Test that 'targetCollectionVersion' is not accepted if not from mongos.
@@ -90,7 +88,7 @@ TEST_F(ProcessInterfaceStandaloneTest,
TEST_F(ProcessInterfaceStandaloneTest, FailsToEnsureFieldsUniqueIfJoinFieldsAreNotSentFromMongos) {
auto expCtx = getExpCtx();
auto targetCollectionVersion =
- boost::make_optional(ChunkVersion(0, 0, OID::gen(), Timestamp(1, 1)));
+ boost::make_optional(ChunkVersion({OID::gen(), Timestamp(1, 1)}, {0, 0}));
auto processInterface = makeProcessInterface();
expCtx->fromMongos = true;
diff --git a/src/mongo/db/pipeline/sharded_union_test.cpp b/src/mongo/db/pipeline/sharded_union_test.cpp
index 79863fc7f14..a8d15b8dbbe 100644
--- a/src/mongo/db/pipeline/sharded_union_test.cpp
+++ b/src/mongo/db/pipeline/sharded_union_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/exec/document_value/document_value_test_util.h"
#include "mongo/db/pipeline/document_source_group.h"
#include "mongo/db/pipeline/document_source_match.h"
@@ -163,10 +161,12 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
onCommand([&](const executor::RemoteCommandRequest& request) {
OID epoch{OID::gen()};
Timestamp timestamp{1, 0};
- return createErrorCursorResponse(Status{
- StaleConfigInfo(
- kTestAggregateNss, ChunkVersion(1, 0, epoch, timestamp), boost::none, ShardId{"0"}),
- "Mock error: shard version mismatch"});
+ return createErrorCursorResponse(
+ Status{StaleConfigInfo(kTestAggregateNss,
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ boost::none,
+ ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
// Mock the expected config server queries.
@@ -175,7 +175,7 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
const Timestamp timestamp(1, 1);
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
@@ -246,10 +246,12 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
OID epoch{OID::gen()};
Timestamp timestamp{1, 0};
- return createErrorCursorResponse(Status{
- StaleConfigInfo(
- kTestAggregateNss, ChunkVersion(1, 0, epoch, timestamp), boost::none, ShardId{"0"}),
- "Mock error: shard version mismatch"});
+ return createErrorCursorResponse(
+ Status{StaleConfigInfo(kTestAggregateNss,
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ boost::none,
+ ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
// Mock the expected config server queries. Update the distribution as if a chunk [0, 10] was
@@ -259,7 +261,7 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
const Timestamp timestamp(1, 1);
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
@@ -337,23 +339,27 @@ TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNo
Timestamp timestamp{1, 1};
onCommand([&](const executor::RemoteCommandRequest& request) {
- return createErrorCursorResponse(Status{
- StaleConfigInfo(
- kTestAggregateNss, ChunkVersion(1, 0, epoch, timestamp), boost::none, ShardId{"0"}),
- "Mock error: shard version mismatch"});
+ return createErrorCursorResponse(
+ Status{StaleConfigInfo(kTestAggregateNss,
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ boost::none,
+ ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
onCommand([&](const executor::RemoteCommandRequest& request) {
- return createErrorCursorResponse(Status{
- StaleConfigInfo(
- kTestAggregateNss, ChunkVersion(1, 0, epoch, timestamp), boost::none, ShardId{"0"}),
- "Mock error: shard version mismatch"});
+ return createErrorCursorResponse(
+ Status{StaleConfigInfo(kTestAggregateNss,
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ boost::none,
+ ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
// Mock the expected config server queries. Update the distribution so that all chunks are on
// the same shard.
const UUID uuid = UUID::gen();
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(
cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), shardKeyPattern.getKeyPattern().globalMax()},
@@ -412,7 +418,7 @@ TEST_F(ShardedUnionTest, IncorporatesViewDefinitionAndRetriesWhenViewErrorReceiv
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
const Timestamp timestamp(1, 1);
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
diff --git a/src/mongo/db/s/balancer/balance_stats_test.cpp b/src/mongo/db/s/balancer/balance_stats_test.cpp
index 9381e0a2da6..aa7b056ae34 100644
--- a/src/mongo/db/s/balancer/balance_stats_test.cpp
+++ b/src/mongo/db/s/balancer/balance_stats_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/oid.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/balancer/balance_stats.h"
@@ -79,7 +77,7 @@ private:
const Timestamp _timestamp{Timestamp(1, 1)};
const ShardId _shardPrimary{"dummyShardPrimary"};
const DatabaseVersion _dbVersion{UUID::gen(), _timestamp};
- ChunkVersion _nextVersion{1, 0, _epoch, _timestamp};
+ ChunkVersion _nextVersion{{_epoch, _timestamp}, {1, 0}};
};
TEST_F(BalanceStatsTest, SingleChunkNoZones) {
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
index bf22d67619e..8b50d3d002f 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/commands.h"
#include "mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h"
#include "mongo/db/s/balancer/cluster_statistics_impl.h"
@@ -133,7 +131,7 @@ TEST_F(BalancerChunkSelectionTest, TagRangesOverlap) {
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
- ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
setUpDatabase(kDbName, kShardId0);
setUpCollection(kNamespace, collUUID, version);
@@ -192,7 +190,7 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
- ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
setUpDatabase(kDbName, kShardId0);
setUpCollection(kNamespace, collUUID, version);
@@ -251,7 +249,7 @@ TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeAutoSplitted
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
- ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
setUpDatabase(kDbName, kShardId0);
TypeCollectionTimeseriesFields tsFields;
@@ -302,7 +300,7 @@ TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeBalanced) {
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
- ChunkVersion version(2, 0, OID::gen(), Timestamp(42));
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0});
setUpDatabase(kDbName, kShardId0);
TypeCollectionTimeseriesFields tsFields;
diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
index 678e5f63f9f..72e86413aa9 100644
--- a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/s/balancer/balancer_commands_scheduler.h"
#include "mongo/db/s/balancer/balancer_commands_scheduler_impl.h"
@@ -65,7 +63,7 @@ public:
chunk.setMax(BSON("x" << min + 10));
chunk.setJumbo(false);
chunk.setShard(shardId);
- chunk.setVersion(ChunkVersion(1, 1, OID::gen(), Timestamp(10)));
+ chunk.setVersion(ChunkVersion({OID::gen(), Timestamp(10)}, {1, 1}));
return chunk;
}
@@ -76,7 +74,7 @@ public:
kUuid,
BSON("x" << min),
BSON("x" << min + 10),
- ChunkVersion(1, 1, OID::gen(), Timestamp(10)),
+ ChunkVersion({OID::gen(), Timestamp(10)}, {1, 1}),
MoveChunkRequest::ForceJumbo::kDoNotForce);
}
@@ -234,7 +232,7 @@ TEST_F(BalancerCommandsSchedulerTest, SuccessfulMergeChunkCommand) {
_scheduler.start(operationContext(), getMigrationRecoveryDefaultValues());
ChunkRange range(BSON("x" << 0), BSON("x" << 20));
- ChunkVersion version(1, 1, OID::gen(), Timestamp(10));
+ ChunkVersion version({OID::gen(), Timestamp(10)}, {1, 1});
auto futureResponse =
_scheduler.requestMergeChunks(operationContext(), kNss, kShardId0, range, version);
ASSERT_OK(futureResponse.getNoThrow());
@@ -246,7 +244,7 @@ TEST_F(BalancerCommandsSchedulerTest, MergeChunkNonexistentShard) {
auto remoteResponsesFuture = setRemoteResponses();
_scheduler.start(operationContext(), getMigrationRecoveryDefaultValues());
ChunkRange range(BSON("x" << 0), BSON("x" << 20));
- ChunkVersion version(1, 1, OID::gen(), Timestamp(10));
+ ChunkVersion version({OID::gen(), Timestamp(10)}, {1, 1});
auto futureResponse = _scheduler.requestMergeChunks(
operationContext(), kNss, ShardId("nonexistent"), range, version);
auto shardNotFoundError = Status{ErrorCodes::ShardNotFound, "Shard nonexistent not found"};
diff --git a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
index 607e57dab44..94b6e874cbf 100644
--- a/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
+++ b/src/mongo/db/s/balancer/cluster_chunks_resize_policy_test.cpp
@@ -30,6 +30,7 @@
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/s/balancer/cluster_chunks_resize_policy_impl.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
+
namespace mongo {
namespace {
@@ -37,7 +38,7 @@ class ClusterChunksResizePolicyTest : public ConfigServerTestFixture {
protected:
const NamespaceString kNss{"testDb.testColl"};
const UUID kUuid = UUID::gen();
- const ChunkVersion kCollectionVersion = ChunkVersion(1, 1, OID::gen(), Timestamp(10));
+ const ChunkVersion kCollectionVersion = ChunkVersion({OID::gen(), Timestamp(10)}, {1, 1});
const ShardId kShardId0 = ShardId("shard0");
const ShardId kShardId1 = ShardId("shard1");
diff --git a/src/mongo/db/s/balancer/type_migration_test.cpp b/src/mongo/db/s/balancer/type_migration_test.cpp
index f605983fe2c..610e150c963 100644
--- a/src/mongo/db/s/balancer/type_migration_test.cpp
+++ b/src/mongo/db/s/balancer/type_migration_test.cpp
@@ -27,12 +27,9 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/jsobj.h"
#include "mongo/db/s/balancer/type_migration.h"
#include "mongo/s/catalog/type_chunk.h"
-
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -48,7 +45,7 @@ const ShardId kToShard("shard0001");
const bool kWaitForDelete{true};
TEST(MigrationTypeTest, FromAndToBSONWithoutOptionalFields) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -68,7 +65,7 @@ TEST(MigrationTypeTest, FromAndToBSONWithoutOptionalFields) {
}
TEST(MigrationTypeTest, FromAndToBSONWitOptionalFields) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
const auto secondaryThrottle =
MigrationSecondaryThrottleOptions::createWithWriteConcern(WriteConcernOptions(
"majority", WriteConcernOptions::SyncMode::JOURNAL, Milliseconds(60000)));
@@ -94,7 +91,7 @@ TEST(MigrationTypeTest, FromAndToBSONWitOptionalFields) {
}
TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::min(), kMin);
@@ -111,7 +108,7 @@ TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
}
TEST(MigrationTypeTest, MissingRequiredMinField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -128,7 +125,7 @@ TEST(MigrationTypeTest, MissingRequiredMinField) {
}
TEST(MigrationTypeTest, MissingRequiredMaxField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -145,7 +142,7 @@ TEST(MigrationTypeTest, MissingRequiredMaxField) {
}
TEST(MigrationTypeTest, MissingRequiredFromShardField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
@@ -162,7 +159,7 @@ TEST(MigrationTypeTest, MissingRequiredFromShardField) {
}
TEST(MigrationTypeTest, MissingRequiredToShardField) {
- const ChunkVersion version(1, 2, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 2});
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index e2e3081b436..74dc6a9e655 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/catalog_raii.h"
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/operation_sharding_state.h"
@@ -79,7 +77,7 @@ protected:
boost::none,
true,
[&] {
- ChunkVersion version(1, 0, epoch, Timestamp(1, 1));
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunk1(uuid,
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index c9ed5d77272..4084fe8e9e2 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/base/status.h"
#include "mongo/db/range_arithmetic.h"
#include "mongo/db/s/collection_metadata.h"
@@ -62,7 +60,7 @@ CollectionMetadata makeCollectionMetadataImpl(
std::vector<ChunkType> allChunks;
auto nextMinKey = shardKeyPattern.globalMin();
- ChunkVersion version{1, 0, epoch, timestamp};
+ ChunkVersion version({epoch, timestamp}, {1, 0});
for (const auto& myNextChunk : thisShardsChunks) {
if (SimpleBSONObjComparator::kInstance.evaluate(nextMinKey < myNextChunk.first)) {
// Need to add a chunk to the other shard from nextMinKey to myNextChunk.first.
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 1be2dd486fb..0b2ab1b0474 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/config/initial_split_policy.h"
#include "mongo/client/read_preference.h"
@@ -50,7 +47,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
@@ -73,17 +69,11 @@ void appendChunk(const SplitPolicyParams& params,
const BSONObj& min,
const BSONObj& max,
ChunkVersion* version,
- const Timestamp& creationTimestamp,
const ShardId& shardId,
std::vector<ChunkType>* chunks) {
- chunks->emplace_back(
- params.collectionUUID,
- ChunkRange(min, max),
- ChunkVersion(
- version->majorVersion(), version->minorVersion(), version->epoch(), creationTimestamp),
- shardId);
+ chunks->emplace_back(params.collectionUUID, ChunkRange(min, max), *version, shardId);
auto& chunk = chunks->back();
- chunk.setHistory({ChunkHistory(creationTimestamp, shardId)});
+ chunk.setHistory({ChunkHistory(version->getTimestamp(), shardId)});
version->incMinor();
}
@@ -238,7 +228,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::generateShardColle
finalSplitPoints.push_back(splitPoint);
}
- ChunkVersion version(1, 0, OID::gen(), validAfter);
+ ChunkVersion version({OID::gen(), validAfter}, {1, 0});
const auto& keyPattern(shardKeyPattern.getKeyPattern());
std::vector<ChunkType> chunks;
@@ -254,7 +244,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::generateShardColle
? params.primaryShardId
: allShardIds[(i / numContiguousChunksPerShard) % allShardIds.size()];
- appendChunk(params, min, max, &version, validAfter, shardId, &chunks);
+ appendChunk(params, min, max, &version, shardId, &chunks);
}
return {std::move(chunks)};
@@ -327,14 +317,13 @@ InitialSplitPolicy::ShardCollectionConfig SingleChunkOnPrimarySplitPolicy::creat
const auto currentTime = VectorClock::get(opCtx)->getTime();
const auto validAfter = currentTime.clusterTime().asTimestamp();
- ChunkVersion version(1, 0, OID::gen(), validAfter);
+ ChunkVersion version({OID::gen(), validAfter}, {1, 0});
const auto& keyPattern = shardKeyPattern.getKeyPattern();
std::vector<ChunkType> chunks;
appendChunk(params,
keyPattern.globalMin(),
keyPattern.globalMax(),
&version,
- validAfter,
params.primaryShardId,
&chunks);
@@ -421,19 +410,14 @@ InitialSplitPolicy::ShardCollectionConfig AbstractTagsBasedSplitPolicy::createFi
return shardIds[indx++ % shardIds.size()];
};
- ChunkVersion version(1, 0, OID::gen(), validAfter);
+ ChunkVersion version({OID::gen(), validAfter}, {1, 0});
auto lastChunkMax = keyPattern.globalMin();
std::vector<ChunkType> chunks;
for (const auto& tag : _tags) {
// Create a chunk for the hole [lastChunkMax, tag.getMinKey)
if (tag.getMinKey().woCompare(lastChunkMax) > 0) {
- appendChunk(params,
- lastChunkMax,
- tag.getMinKey(),
- &version,
- validAfter,
- nextShardIdForHole(),
- &chunks);
+ appendChunk(
+ params, lastChunkMax, tag.getMinKey(), &version, nextShardIdForHole(), &chunks);
}
// Create chunk for the actual tag - [tag.getMinKey, tag.getMaxKey)
const auto it = tagToShards.find(tag.getTag());
@@ -470,7 +454,7 @@ InitialSplitPolicy::ShardCollectionConfig AbstractTagsBasedSplitPolicy::createFi
const BSONObj max = (splitPointIdx == splitInfo.splitPoints.size())
? tag.getMaxKey()
: splitInfo.splitPoints[splitPointIdx];
- appendChunk(params, min, max, &version, validAfter, targetShard, &chunks);
+ appendChunk(params, min, max, &version, targetShard, &chunks);
}
}
lastChunkMax = tag.getMaxKey();
@@ -478,13 +462,8 @@ InitialSplitPolicy::ShardCollectionConfig AbstractTagsBasedSplitPolicy::createFi
// Create a chunk for the hole [lastChunkMax, MaxKey]
if (lastChunkMax.woCompare(keyPattern.globalMax()) < 0) {
- appendChunk(params,
- lastChunkMax,
- keyPattern.globalMax(),
- &version,
- validAfter,
- nextShardIdForHole(),
- &chunks);
+ appendChunk(
+ params, lastChunkMax, keyPattern.globalMax(), &version, nextShardIdForHole(), &chunks);
}
return {std::move(chunks)};
@@ -765,13 +744,13 @@ InitialSplitPolicy::ShardCollectionConfig ReshardingSplitPolicy::createFirstChun
const auto currentTime = VectorClock::get(opCtx)->getTime();
const auto validAfter = currentTime.clusterTime().asTimestamp();
- ChunkVersion version(1, 0, OID::gen(), validAfter);
+ ChunkVersion version({OID::gen(), validAfter}, {1, 0});
splitPoints.insert(keyPattern.globalMax());
for (const auto& splitPoint : splitPoints) {
auto bestShard = selectBestShard(
chunkDistribution, zoneInfo, zoneToShardMap, {lastChunkMax, splitPoint});
- appendChunk(params, lastChunkMax, splitPoint, &version, validAfter, bestShard, &chunks);
+ appendChunk(params, lastChunkMax, splitPoint, &version, bestShard, &chunks);
lastChunkMax = splitPoint;
chunkDistribution[bestShard]++;
diff --git a/src/mongo/db/s/config/initial_split_policy_test.cpp b/src/mongo/db/s/config/initial_split_policy_test.cpp
index 2eea0b6905f..9fc9a5576d0 100644
--- a/src/mongo/db/s/config/initial_split_policy_test.cpp
+++ b/src/mongo/db/s/config/initial_split_policy_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/json.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/s/config/initial_split_policy.h"
@@ -40,7 +37,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -208,7 +204,7 @@ public:
std::vector<ChunkType> chunks;
for (unsigned long i = 0; i < chunkRanges.size(); ++i) {
- ChunkVersion version(1, 0, OID::gen(), Timestamp(1, 1));
+ ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 0});
ChunkType chunk(_uuid, chunkRanges[i], version, shardIds[i]);
chunk.setHistory({ChunkHistory(timeStamp, shardIds[i])});
chunks.push_back(chunk);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
index 762961eaac3..9f883997a3d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/client/read_preference.h"
@@ -72,7 +70,7 @@ protected:
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- chunk.setVersion({12, 7, epoch, timestamp});
+ chunk.setVersion(ChunkVersion({epoch, timestamp}, {12, 7}));
chunk.setShard(_shardName);
chunk.setMin(jumboChunk().getMin());
chunk.setMax(jumboChunk().getMax());
@@ -81,7 +79,7 @@ protected:
ChunkType otherChunk;
otherChunk.setName(OID::gen());
otherChunk.setCollectionUUID(collUuid);
- otherChunk.setVersion({14, 7, epoch, timestamp});
+ otherChunk.setVersion(ChunkVersion({epoch, timestamp}, {14, 7}));
otherChunk.setShard(_shardName);
otherChunk.setMin(nonJumboChunk().getMin());
otherChunk.setMax(nonJumboChunk().getMax());
@@ -107,7 +105,7 @@ TEST_F(ClearJumboFlagTest, ClearJumboShouldBumpVersion) {
operationContext(), collUuid, jumboChunk().getMin(), collEpoch, collTimestamp));
ASSERT_FALSE(chunkDoc.getJumbo());
auto chunkVersion = chunkDoc.getVersion();
- ASSERT_EQ(ChunkVersion(15, 0, collEpoch, collTimestamp), chunkVersion);
+ ASSERT_EQ(ChunkVersion({collEpoch, collTimestamp}, {15, 0}), chunkVersion);
};
test(_nss2, Timestamp(42));
@@ -125,7 +123,7 @@ TEST_F(ClearJumboFlagTest, ClearJumboShouldNotBumpVersionIfChunkNotJumbo) {
auto chunkDoc = uassertStatusOK(getChunkDoc(
operationContext(), collUuid, nonJumboChunk().getMin(), collEpoch, collTimestamp));
ASSERT_FALSE(chunkDoc.getJumbo());
- ASSERT_EQ(ChunkVersion(14, 7, collEpoch, collTimestamp), chunkDoc.getVersion());
+ ASSERT_EQ(ChunkVersion({collEpoch, collTimestamp}, {14, 7}), chunkDoc.getVersion());
};
test(_nss2, Timestamp(42));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
index 20e8b2ecc6a..8921d0c2e8b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
@@ -95,7 +93,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoCollectionFoundReturnsSuccess) {
const auto requestedChunkType =
generateChunkType(_nss,
_collUuid,
- ChunkVersion(10, 2, OID::gen(), Timestamp(1, 1)),
+ ChunkVersion({OID::gen(), Timestamp(1, 1)}, {10, 2}),
ShardId(_shardName),
BSON("a" << 1),
BSON("a" << 10));
@@ -112,12 +110,13 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundRetu
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
// Min key is different.
@@ -140,12 +139,13 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundRetu
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
// Max key is different.
@@ -169,20 +169,22 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
const auto existingChunkType = requestedChunkType;
- const auto highestChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(20, 3, collEpoch, collTimestamp),
- ShardId("shard0001"),
- BSON("a" << 11),
- BSON("a" << 20));
+ const auto highestChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {20, 3}),
+ ShardId("shard0001"),
+ BSON("a" << 11),
+ BSON("a" << 20));
setupCollection(_nss, _keyPattern, {existingChunkType, highestChunkType});
ShardingCatalogManager::get(operationContext())
@@ -195,8 +197,8 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
assertChunkVersionWasBumpedTo(
existingChunkType,
getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp),
- ChunkVersion(
- highestChunkType.getVersion().majorVersion() + 1, 0, collEpoch, collTimestamp));
+ ChunkVersion({collEpoch, collTimestamp},
+ {highestChunkType.getVersion().majorVersion() + 1, 0}));
}
TEST_F(EnsureChunkVersionIsGreaterThanTest,
@@ -204,20 +206,22 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
const auto existingChunkType = requestedChunkType;
- const auto highestChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(20, 3, collEpoch, collTimestamp),
- ShardId("shard0001"),
- BSON("a" << 11),
- BSON("a" << 20));
+ const auto highestChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {20, 3}),
+ ShardId("shard0001"),
+ BSON("a" << 11),
+ BSON("a" << 20));
setupCollection(_nss, _keyPattern, {existingChunkType, highestChunkType});
ShardingCatalogManager::get(operationContext())
@@ -230,8 +234,8 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
assertChunkVersionWasBumpedTo(
existingChunkType,
getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp),
- ChunkVersion(
- highestChunkType.getVersion().majorVersion() + 1, 0, collEpoch, collTimestamp));
+ ChunkVersion({collEpoch, collTimestamp},
+ {highestChunkType.getVersion().majorVersion() + 1, 0}));
}
TEST_F(
@@ -240,15 +244,16 @@ TEST_F(
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
- existingChunkType.setVersion(ChunkVersion(11, 1, collEpoch, collTimestamp));
+ existingChunkType.setVersion(ChunkVersion({collEpoch, collTimestamp}, {11, 1}));
setupCollection(_nss, _keyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
@@ -269,15 +274,16 @@ TEST_F(
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(_nss,
- _collUuid,
- ChunkVersion(10, 2, collEpoch, collTimestamp),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
+ const auto requestedChunkType =
+ generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion({collEpoch, collTimestamp}, {10, 2}),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
- existingChunkType.setVersion(ChunkVersion(11, 1, collEpoch, collTimestamp));
+ existingChunkType.setVersion(ChunkVersion({collEpoch, collTimestamp}, {11, 1}));
setupCollection(_nss, _keyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 15ae1c5eb5c..9d7e68c9a93 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/client/read_preference.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/logical_session_cache_noop.h"
@@ -85,7 +83,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(_shardId);
@@ -126,10 +124,9 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
ASSERT_EQ(collVersion, shardVersion);
// Check for increment on mergedChunk's minor version
- auto expectedShardVersion = ChunkVersion(origVersion.majorVersion(),
- origVersion.minorVersion() + 1,
- origVersion.epoch(),
- origVersion.getTimestamp());
+ auto expectedShardVersion =
+ ChunkVersion({origVersion.epoch(), origVersion.getTimestamp()},
+ {origVersion.majorVersion(), origVersion.minorVersion() + 1});
ASSERT_EQ(expectedShardVersion, shardVersion);
@@ -170,7 +167,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(_shardId);
@@ -251,7 +248,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
otherChunk.setName(OID::gen());
otherChunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 2});
chunk.setVersion(origVersion);
chunk.setShard(_shardId);
@@ -273,7 +270,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
ChunkRange rangeToBeMerged(chunk.getMin(), chunk2.getMax());
// Set up other chunk with competing version
- auto competingVersion = ChunkVersion(2, 1, collEpoch, collTimestamp);
+ auto competingVersion = ChunkVersion({collEpoch, collTimestamp}, {2, 1});
otherChunk.setVersion(competingVersion);
otherChunk.setShard(_shardId);
otherChunk.setMin(BSON("a" << 10));
@@ -334,7 +331,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 2});
chunk.setVersion(origVersion);
chunk.setShard(shardId);
@@ -415,7 +412,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
// Construct chunk to be merged
@@ -457,7 +454,7 @@ TEST_F(MergeChunkTest, NonMatchingUUIDsOfChunkAndRequestErrors) {
ChunkType chunk;
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(_shardId);
@@ -503,7 +500,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
ChunkRange rangeToBeMerged(chunkMin, chunkMax);
// Store a chunk that matches the range that will be requested
- auto mergedVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto mergedVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
mergedVersion.incMinor();
ChunkType mergedChunk;
mergedChunk.setVersion(mergedVersion);
@@ -559,7 +556,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
chunk1.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk1.setVersion(origVersion);
chunk1.setShard(_shardId);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index bcb5557ff53..9b9e48cfe0b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -80,7 +80,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -112,8 +112,9 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ASSERT_EQ(collVersion, shardVersion);
// Check for increment on mergedChunk's minor version
- auto expectedShardVersion = ChunkVersion(
- origVersion.majorVersion(), origVersion.minorVersion() + 2, collEpoch, collTimestamp);
+ auto expectedShardVersion =
+ ChunkVersion({collEpoch, collTimestamp},
+ {origVersion.majorVersion(), origVersion.minorVersion() + 2});
ASSERT_EQ(expectedShardVersion, shardVersion);
ASSERT_EQ(shardVersion, collVersion);
@@ -163,7 +164,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -255,7 +256,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
chunk2.setCollectionUUID(collUuid);
// set up first chunk
- auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 2});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -269,7 +270,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
splitPoints.push_back(chunkSplitPoint);
// set up second chunk (chunk2)
- auto competingVersion = ChunkVersion(2, 1, collEpoch, collTimestamp);
+ auto competingVersion = ChunkVersion({collEpoch, collTimestamp}, {2, 1});
chunk2.setVersion(competingVersion);
chunk2.setShard(ShardId(_shardName));
chunk2.setMin(BSON("a" << 10));
@@ -323,7 +324,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -361,7 +362,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -397,7 +398,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -433,7 +434,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -469,7 +470,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -506,7 +507,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -542,7 +543,7 @@ TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
ChunkType chunk;
chunk.setCollectionUUID(UUID::gen());
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto origVersion = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(origVersion);
chunk.setShard(ShardId(_shardName));
@@ -587,7 +588,7 @@ TEST_F(SplitChunkTest, CantCommitSplitFromChunkSplitterDuringDefragmentation) {
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUuid);
- auto version = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ auto version = ChunkVersion({collEpoch, collTimestamp}, {1, 0});
chunk.setVersion(version);
chunk.setShard(ShardId(_shardName));
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 2f39ef09147..a842e4cfe03 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include <boost/optional.hpp>
#include "mongo/bson/bsonobjbuilder.h"
@@ -93,7 +91,7 @@ protected:
boost::none,
boost::none /* chunkSizeBytes */,
true,
- {ChunkType{uuid, range, ChunkVersion(1, 0, epoch, Timestamp(1, 1)), kOtherShard}});
+ {ChunkType{uuid, range, ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}), kOtherShard}});
return CollectionMetadata(ChunkManager(kThisShard,
DatabaseVersion(UUID::gen(), Timestamp(1, 1)),
diff --git a/src/mongo/db/s/operation_sharding_state_test.cpp b/src/mongo/db/s/operation_sharding_state_test.cpp
index 0c4732b51ab..9c275398f85 100644
--- a/src/mongo/db/s/operation_sharding_state_test.cpp
+++ b/src/mongo/db/s/operation_sharding_state_test.cpp
@@ -47,7 +47,7 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleDbVersion) {
}
TEST_F(OperationShardingStateTest, ScopedSetShardRoleShardVersion) {
- ChunkVersion shardVersion(1, 0, OID::gen(), Timestamp(1, 0));
+ ChunkVersion shardVersion({OID::gen(), Timestamp(1, 0)}, {1, 0});
ScopedSetShardRole scopedSetShardRole(operationContext(), kNss, shardVersion, boost::none);
auto& oss = OperationShardingState::get(operationContext());
@@ -58,13 +58,13 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleChangeShardVersionSameNames
auto& oss = OperationShardingState::get(operationContext());
{
- ChunkVersion shardVersion1(1, 0, OID::gen(), Timestamp(10, 0));
+ ChunkVersion shardVersion1({OID::gen(), Timestamp(10, 0)}, {1, 0});
ScopedSetShardRole scopedSetShardRole1(
operationContext(), kNss, shardVersion1, boost::none);
ASSERT_EQ(shardVersion1, *oss.getShardVersion(kNss));
}
{
- ChunkVersion shardVersion2(1, 0, OID::gen(), Timestamp(20, 0));
+ ChunkVersion shardVersion2({OID::gen(), Timestamp(20, 0)}, {1, 0});
ScopedSetShardRole scopedSetShardRole2(
operationContext(), kNss, shardVersion2, boost::none);
ASSERT_EQ(shardVersion2, *oss.getShardVersion(kNss));
@@ -72,8 +72,8 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleChangeShardVersionSameNames
}
TEST_F(OperationShardingStateTest, ScopedSetShardRoleRecursiveShardVersionDifferentNamespaces) {
- ChunkVersion shardVersion1(1, 0, OID::gen(), Timestamp(10, 0));
- ChunkVersion shardVersion2(1, 0, OID::gen(), Timestamp(20, 0));
+ ChunkVersion shardVersion1({OID::gen(), Timestamp(10, 0)}, {1, 0});
+ ChunkVersion shardVersion2({OID::gen(), Timestamp(20, 0)}, {1, 0});
ScopedSetShardRole scopedSetShardRole1(operationContext(), kNss, shardVersion1, boost::none);
ScopedSetShardRole scopedSetShardRole2(
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
index 52c52654e89..1fc380093bf 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <boost/optional.hpp>
#include <functional>
@@ -59,7 +56,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -415,7 +411,7 @@ public:
_newShardKey.isShardKey(shardKey.toBSON()) ? _newChunkRanges : _oldChunkRanges;
// Create two chunks, one on each shard with the given namespace and epoch
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(uuid, chunkRanges[0], version, ShardId("shard0000"));
chunk1.setName(ids[0]);
version.incMinor();
diff --git a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
index d02c0babe27..f5f588ac948 100644
--- a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
@@ -27,12 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
-#include <memory>
-#include <vector>
-
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/persistent_task_store.h"
#include "mongo/db/query/collation/collator_factory_mock.h"
@@ -50,7 +44,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -78,7 +71,7 @@ public:
std::vector<ChunkType> chunks = {ChunkType{
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_myDonorId}};
auto rt = RoutingTableHistory::makeNew(_sourceNss,
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
index bb47fe20b83..e5bd8defdbd 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
@@ -111,8 +111,10 @@ protected:
const OID& epoch,
const ShardId& shardThatChunkExistsOn) {
auto range = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << MAXKEY));
- auto chunk = ChunkType(
- uuid, std::move(range), ChunkVersion(1, 0, epoch, timestamp), shardThatChunkExistsOn);
+ auto chunk = ChunkType(uuid,
+ std::move(range),
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ shardThatChunkExistsOn);
ChunkManager cm(kThisShard.getShardId(),
DatabaseVersion(uuid, timestamp),
makeStandaloneRoutingTableHistory(
diff --git a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
index a9a60cd9aa0..9c09f5ebcf0 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
@@ -27,12 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
-#include <memory>
-#include <vector>
-
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/catalog_raii.h"
@@ -289,16 +283,16 @@ private:
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY),
BSON(_currentShardKey << -std::numeric_limits<double>::infinity())},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_myDonorId},
ChunkType{_sourceUUID,
ChunkRange{BSON(_currentShardKey << -std::numeric_limits<double>::infinity()),
BSON(_currentShardKey << 0)},
- ChunkVersion(100, 1, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 1}),
_otherDonorId},
ChunkType{_sourceUUID,
ChunkRange{BSON(_currentShardKey << 0), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 2, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 2}),
_myDonorId}};
return makeChunkManager(
@@ -311,7 +305,7 @@ private:
std::vector<ChunkType> chunks = {
ChunkType{outputUuid,
ChunkRange{BSON(_newShardKey << MINKEY), BSON(_newShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_myDonorId}};
return makeChunkManager(
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index b72f0ad34e8..4e6a5489f71 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -83,7 +83,7 @@ public:
std::vector<ChunkType> chunks = {ChunkType{
_sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
- ChunkVersion(100, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 0}),
_someDonorId}};
auto rt = RoutingTableHistory::makeNew(_sourceNss,
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index d7d8386d10b..e52a5e28d1a 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -136,12 +136,11 @@ StatusWith<RefreshState> getPersistedRefreshFlags(OperationContext* opCtx,
entry.getRefreshing() ? *entry.getRefreshing() : true,
entry.getLastRefreshedCollectionVersion()
? *entry.getLastRefreshedCollectionVersion()
- : ChunkVersion(0, 0, entry.getEpoch(), entry.getTimestamp())};
+ : ChunkVersion({entry.getEpoch(), entry.getTimestamp()}, {0, 0})};
}
StatusWith<ShardCollectionType> readShardCollectionsEntry(OperationContext* opCtx,
const NamespaceString& nss) {
-
try {
DBDirectClient client(opCtx);
FindCommandRequest findRequest{NamespaceString::kShardConfigCollectionsNamespace};
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index 6bad5d66ac1..af35cf373e8 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -27,14 +27,10 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/s/shard_metadata_util.h"
-
-#include "mongo/base/status.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/s/shard_metadata_util.h"
#include "mongo/db/s/shard_server_test_fixture.h"
#include "mongo/db/s/type_shard_collection.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -159,7 +155,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
}
}
- ChunkVersion maxCollVersion{0, 0, OID::gen(), Timestamp(1, 1)};
+ ChunkVersion maxCollVersion{{OID::gen(), Timestamp(1, 1)}, {0, 0}};
const KeyPattern keyPattern{BSON("a" << 1)};
const BSONObj defaultCollation{BSON("locale"
<< "fr_CA")};
@@ -216,7 +212,7 @@ TEST_F(ShardMetadataUtilTest, PersistedRefreshSignalStartAndFinish) {
ASSERT(state.generation.isSameCollection(maxCollVersion));
ASSERT_EQUALS(state.refreshing, true);
ASSERT_EQUALS(state.lastRefreshedCollectionVersion,
- ChunkVersion(0, 0, maxCollVersion.epoch(), maxCollVersion.getTimestamp()));
+ ChunkVersion({maxCollVersion.epoch(), maxCollVersion.getTimestamp()}, {0, 0}));
// Signal refresh finish
ASSERT_OK(unsetPersistedRefreshFlags(operationContext(), kNss, maxCollVersion));
@@ -235,7 +231,7 @@ TEST_F(ShardMetadataUtilTest, WriteAndReadChunks) {
// read all the chunks
QueryAndSort allChunkDiff = createShardChunkDiffQuery(
- ChunkVersion(0, 0, maxCollVersion.epoch(), maxCollVersion.getTimestamp()));
+ ChunkVersion({maxCollVersion.epoch(), maxCollVersion.getTimestamp()}, {0, 0}));
std::vector<ChunkType> readChunks = assertGet(readShardChunks(operationContext(),
kNss,
allChunkDiff.query,
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
index 9f2f1ddf8d0..a111b9bf592 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include <boost/optional/optional_io.hpp>
#include "mongo/db/s/shard_server_catalog_cache_loader.h"
@@ -203,7 +201,7 @@ CollectionType ShardServerCatalogCacheLoaderTest::makeCollectionType(
std::pair<CollectionType, vector<ChunkType>>
ShardServerCatalogCacheLoaderTest::setUpChunkLoaderWithFiveChunks() {
- ChunkVersion collectionVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ ChunkVersion collectionVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
@@ -371,7 +369,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindNewEpoch)
// Then refresh again and find that the collection has been dropped and recreated.
- ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), Timestamp(2, 0));
+ ChunkVersion collVersionWithNewEpoch({OID::gen(), Timestamp(2, 0)}, {1, 0});
CollectionType collectionTypeWithNewEpoch = makeCollectionType(collVersionWithNewEpoch);
vector<ChunkType> chunksWithNewEpoch = makeFiveChunks(collVersionWithNewEpoch);
_remoteLoaderMock->setCollectionRefreshReturnValue(collectionTypeWithNewEpoch);
@@ -398,7 +396,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindMixedChun
// Then refresh again and retrieve chunks from the config server that have mixed epochs, like
// as if the chunks read yielded around a drop and recreate of the collection.
- ChunkVersion collVersionWithNewEpoch(1, 0, OID::gen(), Timestamp(2, 0));
+ ChunkVersion collVersionWithNewEpoch({OID::gen(), Timestamp(2, 0)}, {1, 0});
CollectionType collectionTypeWithNewEpoch = makeCollectionType(collVersionWithNewEpoch);
vector<ChunkType> chunksWithNewEpoch = makeFiveChunks(collVersionWithNewEpoch);
vector<ChunkType> mixedChunks;
@@ -441,7 +439,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindMixedChun
}
TEST_F(ShardServerCatalogCacheLoaderTest, TimeseriesFieldsAreProperlyPropagatedOnSSCCL) {
- ChunkVersion collectionVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ ChunkVersion collectionVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
@@ -483,7 +481,7 @@ TEST_F(ShardServerCatalogCacheLoaderTest, TimeseriesFieldsAreProperlyPropagatedO
}
void ShardServerCatalogCacheLoaderTest::refreshCollectionEpochOnRemoteLoader() {
- ChunkVersion collectionVersion(1, 2, OID::gen(), Timestamp(1, 1));
+ ChunkVersion collectionVersion({OID::gen(), Timestamp(1, 1)}, {1, 2});
CollectionType collectionType = makeCollectionType(collectionVersion);
vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
_remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
diff --git a/src/mongo/db/s/sharding_ddl_util_test.cpp b/src/mongo/db/s/sharding_ddl_util_test.cpp
index fd4e3905980..2ff3925c53e 100644
--- a/src/mongo/db/s/sharding_ddl_util_test.cpp
+++ b/src/mongo/db/s/sharding_ddl_util_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/logical_session_cache_noop.h"
#include "mongo/db/namespace_string.h"
@@ -47,7 +44,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
@@ -119,7 +115,7 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
const int nChunks = 10;
std::vector<ChunkType> chunks;
for (int i = 0; i < nChunks; i++) {
- ChunkVersion chunkVersion(1, i, fromEpoch, collTimestamp);
+ ChunkVersion chunkVersion({fromEpoch, collTimestamp}, {1, uint32_t(i)});
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(collUUID);
@@ -138,7 +134,7 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
const auto toEpoch = OID::gen();
const auto toUUID = UUID::gen();
for (int i = 0; i < nChunks; i++) {
- ChunkVersion chunkVersion(1, i, toEpoch, Timestamp(2));
+ ChunkVersion chunkVersion({toEpoch, Timestamp(2)}, {1, uint32_t(i)});
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(toUUID);
@@ -215,7 +211,7 @@ TEST_F(ShardingDDLUtilTest, RenamePreconditionsAreMet) {
opCtx, false /* sourceIsSharded */, kToNss, false /* dropTarget */);
// Initialize a chunk
- ChunkVersion chunkVersion(1, 1, OID::gen(), Timestamp(2, 1));
+ ChunkVersion chunkVersion({OID::gen(), Timestamp(2, 1)}, {1, 1});
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
@@ -256,7 +252,7 @@ TEST_F(ShardingDDLUtilTest, RenamePreconditionsTargetCollectionExists) {
auto opCtx = operationContext();
// Initialize a chunk
- ChunkVersion chunkVersion(1, 1, OID::gen(), Timestamp(2, 1));
+ ChunkVersion chunkVersion({OID::gen(), Timestamp(2, 1)}, {1, 1});
ChunkType chunk;
chunk.setName(OID::gen());
chunk.setCollectionUUID(UUID::gen());
diff --git a/src/mongo/db/s/sharding_write_router_bm.cpp b/src/mongo/db/s/sharding_write_router_bm.cpp
index 7a47c6eed21..6d20ad82215 100644
--- a/src/mongo/db/s/sharding_write_router_bm.cpp
+++ b/src/mongo/db/s/sharding_write_router_bm.cpp
@@ -103,7 +103,7 @@ std::pair<std::vector<mongo::ChunkType>, mongo::ChunkManager> createChunks(
for (uint32_t i = 0; i < nChunks; ++i) {
chunks.emplace_back(collIdentifier,
getRangeForChunk(i, nChunks),
- ChunkVersion{i + 1, 0, collEpoch, collTimestamp},
+ ChunkVersion({collEpoch, collTimestamp}, {i + 1, 0}),
pessimalShardSelector(i, nShards, nChunks));
}
diff --git a/src/mongo/s/append_raw_responses_test.cpp b/src/mongo/s/append_raw_responses_test.cpp
index 528e8ba4876..1bcfb9c8bc8 100644
--- a/src/mongo/s/append_raw_responses_test.cpp
+++ b/src/mongo/s/append_raw_responses_test.cpp
@@ -27,10 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include "mongo/unittest/unittest.h"
-
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -39,6 +35,7 @@
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/cluster_commands_helpers.h"
#include "mongo/s/sharding_router_test_fixture.h"
+#include "mongo/unittest/unittest.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
@@ -200,7 +197,7 @@ protected:
Timestamp timestamp{1, 0};
return StaleConfigInfo(
NamespaceString("Foo.Bar"),
- ChunkVersion(1, 0, epoch, timestamp),
+ ChunkVersion({epoch, timestamp}, {1, 0}),
boost::none,
ShardId{"dummy"});
}(),
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 8ce3d377491..7ed4cc739cc 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -27,13 +27,8 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/s/catalog/type_chunk.h"
-#include <cstring>
-
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
@@ -46,7 +41,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
const NamespaceString ChunkType::ConfigNS("config.chunks");
@@ -296,7 +290,7 @@ StatusWith<ChunkType> ChunkType::parseFromConfigBSON(const BSONObj& source,
if (versionElem.type() == bsonTimestamp || versionElem.type() == Date) {
auto chunkLastmod = Timestamp(versionElem._numberLong());
chunk._version =
- ChunkVersion(chunkLastmod.getSecs(), chunkLastmod.getInc(), epoch, timestamp);
+ ChunkVersion({epoch, timestamp}, {chunkLastmod.getSecs(), chunkLastmod.getInc()});
} else {
return {ErrorCodes::BadValue,
str::stream() << "The field " << ChunkType::lastmod() << " cannot be parsed."};
@@ -381,7 +375,7 @@ StatusWith<ChunkType> ChunkType::parseFromShardBSON(const BSONObj& source,
if (lastmodElem.type() == bsonTimestamp || lastmodElem.type() == Date) {
auto chunkLastmod = Timestamp(lastmodElem._numberLong());
chunk._version =
- ChunkVersion(chunkLastmod.getSecs(), chunkLastmod.getInc(), epoch, timestamp);
+ ChunkVersion({epoch, timestamp}, {chunkLastmod.getSecs(), chunkLastmod.getInc()});
} else {
return {ErrorCodes::NoSuchKey,
str::stream() << "Expected field " << ChunkType::lastmod() << " not found."};
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index bc8d012f290..18c199b69ea 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -47,7 +47,7 @@ TEST(ChunkType, MissingConfigRequiredFields) {
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(1, 1);
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj objModNS =
BSON(ChunkType::name(OID::gen())
@@ -81,7 +81,7 @@ TEST(ChunkType, MissingConfigRequiredFields) {
TEST(ChunkType, MissingShardRequiredFields) {
const OID epoch = OID::gen();
const Timestamp timestamp(1, 1);
- ChunkVersion chunkVersion(1, 2, epoch, timestamp);
+ ChunkVersion chunkVersion({epoch, timestamp}, {1, 2});
const auto lastmod = Timestamp(chunkVersion.toLong());
BSONObj objModMin =
@@ -109,15 +109,16 @@ TEST(ChunkType, MissingShardRequiredFields) {
}
TEST(ChunkType, ToFromShardBSON) {
- const OID epoch = OID::gen();
- const Timestamp timestamp(1, 1);
- ChunkVersion chunkVersion(1, 2, epoch, timestamp);
+ const OID collEpoch = OID::gen();
+ const Timestamp collTimestamp(1, 1);
+
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
auto lastmod = Timestamp(chunkVersion.toLong());
BSONObj obj = BSON(ChunkType::minShardID(kMin)
<< ChunkType::max(kMax) << ChunkType::shard(kShard.toString()) << "lastmod"
<< lastmod);
- ChunkType shardChunk = assertGet(ChunkType::parseFromShardBSON(obj, epoch, timestamp));
+ ChunkType shardChunk = assertGet(ChunkType::parseFromShardBSON(obj, collEpoch, collTimestamp));
ASSERT_BSONOBJ_EQ(obj, shardChunk.toShardBSON());
@@ -132,7 +133,7 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(1);
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj obj = BSON(
ChunkType::name(OID::gen())
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 10 << "b" << 10))
@@ -149,7 +150,7 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(1);
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 10))
@@ -166,7 +167,7 @@ TEST(ChunkType, MinToMaxNotAscending) {
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(1);
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 20))
@@ -182,7 +183,7 @@ TEST(ChunkType, ToFromConfigBSON) {
const auto collTimestamp = Timestamp(1);
const auto chunkID = OID::gen();
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj obj = BSON(ChunkType::name(chunkID)
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001")
@@ -217,7 +218,7 @@ TEST(ChunkType, BothNsAndUUID) {
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(1);
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj objModNS =
BSON(ChunkType::name(OID::gen())
@@ -235,7 +236,7 @@ TEST(ChunkType, UUIDPresentAndNsMissing) {
const auto collEpoch = OID::gen();
const auto collTimestamp = Timestamp(1);
- ChunkVersion chunkVersion(1, 2, collEpoch, collTimestamp);
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
BSONObj objModNS = BSON(
ChunkType::name(OID::gen())
@@ -249,7 +250,10 @@ TEST(ChunkType, UUIDPresentAndNsMissing) {
}
TEST(ChunkType, ParseFromNetworkRequest) {
- ChunkVersion chunkVersion(1, 2, OID::gen(), Timestamp(1, 0));
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = Timestamp(1, 0);
+
+ ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
auto chunk = assertGet(ChunkType::parseFromNetworkRequest(
BSON(ChunkType::name(OID::gen())
diff --git a/src/mongo/s/catalog_cache_refresh_test.cpp b/src/mongo/s/catalog_cache_refresh_test.cpp
index 8df10d20a43..4b44f5693de 100644
--- a/src/mongo/s/catalog_cache_refresh_test.cpp
+++ b/src/mongo/s/catalog_cache_refresh_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/concurrency/locker_noop.h"
#include "mongo/db/pipeline/aggregation_request_helper.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -42,7 +39,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
-
namespace mongo {
namespace {
@@ -115,7 +111,7 @@ TEST_F(CatalogCacheRefreshTest, FullLoad) {
expectGetDatabase();
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(reshardingUUID,
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
@@ -328,7 +324,7 @@ TEST_F(CatalogCacheRefreshTest, ChunksBSONCorrupted) {
const auto chunk1 =
ChunkType(coll.getUuid(),
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
- ChunkVersion(1, 0, epoch, Timestamp(1, 1)),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
{"0"});
return std::vector<BSONObj>{/* collection */
coll.toBSON(),
@@ -359,7 +355,7 @@ TEST_F(CatalogCacheRefreshTest, FullLoadMissingChunkWithLowestVersion) {
expectGetDatabase();
const auto incompleteChunks = [&]() {
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
// concurrently) and has the lowest version.
@@ -415,7 +411,7 @@ TEST_F(CatalogCacheRefreshTest, FullLoadMissingChunkWithHighestVersion) {
expectGetDatabase();
const auto incompleteChunks = [&]() {
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
// concurrently) and has the highest version.
@@ -473,7 +469,7 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadMissingChunkWithLowestVersion) {
auto future = scheduleRoutingInfoIncrementalRefresh(kNss);
const auto incompleteChunks = [&]() {
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
// concurrently) and has the lowest version.
@@ -531,7 +527,7 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadMissingChunkWithHighestVersion) {
auto future = scheduleRoutingInfoIncrementalRefresh(kNss);
const auto incompleteChunks = [&]() {
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
// Chunk from (MinKey, -100) is missing (as if someone is dropping the collection
// concurrently) and has the highest version.
@@ -621,7 +617,7 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
// recreated collection.
ChunkType chunk3(coll.getUuid(),
{BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
- ChunkVersion(5, 2, newEpoch, newTimestamp),
+ ChunkVersion({newEpoch, newTimestamp}, {5, 2}),
{"1"});
chunk3.setName(OID::gen());
@@ -631,7 +627,7 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
});
// On the second retry attempt, return the correct set of chunks from the recreated collection
- ChunkVersion newVersion(5, 0, newEpoch, newTimestamp);
+ ChunkVersion newVersion({newEpoch, newTimestamp}, {5, 0});
onFindCommand([&](const RemoteCommandRequest& request) {
const auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
const auto aggRequest = unittest::assertGet(
@@ -676,9 +672,9 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
ASSERT(cm.isSharded());
ASSERT_EQ(3, cm.numChunks());
ASSERT_EQ(newVersion, cm.getVersion());
- ASSERT_EQ(ChunkVersion(5, 1, newVersion.epoch(), newVersion.getTimestamp()),
+ ASSERT_EQ(ChunkVersion({newVersion.epoch(), newVersion.getTimestamp()}, {5, 1}),
cm.getVersion({"0"}));
- ASSERT_EQ(ChunkVersion(5, 2, newVersion.epoch(), newVersion.getTimestamp()),
+ ASSERT_EQ(ChunkVersion({newVersion.epoch(), newVersion.getTimestamp()}, {5, 2}),
cm.getVersion({"1"}));
}
@@ -693,7 +689,7 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterCollectionEpochChange) {
auto future = scheduleRoutingInfoIncrementalRefresh(kNss);
ChunkVersion oldVersion = initialRoutingInfo.getVersion();
- ChunkVersion newVersion(1, 0, OID::gen(), Timestamp(2));
+ ChunkVersion newVersion({OID::gen(), Timestamp(2)}, {1, 0});
const UUID uuid = initialRoutingInfo.getUUID();
// Return collection with a different epoch and a set of chunks, which represent a split
@@ -736,9 +732,9 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterCollectionEpochChange) {
ASSERT(cm.isSharded());
ASSERT_EQ(2, cm.numChunks());
ASSERT_EQ(newVersion, cm.getVersion());
- ASSERT_EQ(ChunkVersion(1, 0, newVersion.epoch(), newVersion.getTimestamp()),
+ ASSERT_EQ(ChunkVersion({newVersion.epoch(), newVersion.getTimestamp()}, {1, 0}),
cm.getVersion({"0"}));
- ASSERT_EQ(ChunkVersion(1, 1, newVersion.epoch(), newVersion.getTimestamp()),
+ ASSERT_EQ(ChunkVersion({newVersion.epoch(), newVersion.getTimestamp()}, {1, 1}),
cm.getVersion({"1"}));
}
@@ -798,7 +794,8 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterSplit) {
ASSERT_EQ(2, cm.numChunks());
ASSERT_EQ(version, cm.getVersion());
ASSERT_EQ(version, cm.getVersion({"0"}));
- ASSERT_EQ(ChunkVersion(0, 0, version.epoch(), version.getTimestamp()), cm.getVersion({"1"}));
+ ASSERT_EQ(ChunkVersion({version.epoch(), version.getTimestamp()}, {0, 0}),
+ cm.getVersion({"1"}));
}
TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterMoveWithReshardingFieldsAdded) {
@@ -877,7 +874,8 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterMoveLastChunkWithReshardingF
ASSERT(cm.isSharded());
ASSERT_EQ(1, cm.numChunks());
ASSERT_EQ(version, cm.getVersion());
- ASSERT_EQ(ChunkVersion(0, 0, version.epoch(), version.getTimestamp()), cm.getVersion({"0"}));
+ ASSERT_EQ(ChunkVersion({version.epoch(), version.getTimestamp()}, {0, 0}),
+ cm.getVersion({"0"}));
ASSERT_EQ(version, cm.getVersion({"1"}));
ASSERT(boost::none == cm.getReshardingFields());
}
diff --git a/src/mongo/s/catalog_cache_test.cpp b/src/mongo/s/catalog_cache_test.cpp
index bb22d6c0915..b41aafde12c 100644
--- a/src/mongo/s/catalog_cache_test.cpp
+++ b/src/mongo/s/catalog_cache_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <boost/optional/optional_io.hpp>
#include "mongo/s/catalog/type_database_gen.h"
@@ -264,7 +261,7 @@ TEST_F(CatalogCacheTest, OnStaleDatabaseVersionNoVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithSameVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
- const auto cachedCollVersion = ChunkVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ const auto cachedCollVersion = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
loadCollection(cachedCollVersion);
@@ -275,7 +272,7 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithSameVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithNoVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
- const auto cachedCollVersion = ChunkVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ const auto cachedCollVersion = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
loadCollection(cachedCollVersion);
@@ -288,9 +285,9 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithNoVersion) {
TEST_F(CatalogCacheTest, OnStaleShardVersionWithGraterVersion) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
- const auto cachedCollVersion = ChunkVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ const auto cachedCollVersion = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
const auto wantedCollVersion =
- ChunkVersion(2, 0, cachedCollVersion.epoch(), cachedCollVersion.getTimestamp());
+ ChunkVersion({cachedCollVersion.epoch(), cachedCollVersion.getTimestamp()}, {2, 0});
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
loadCollection(cachedCollVersion);
@@ -304,7 +301,7 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithGraterVersion) {
TEST_F(CatalogCacheTest, TimeseriesFieldsAreProperlyPropagatedOnCC) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
const auto epoch = OID::gen();
- const auto version = ChunkVersion(1, 0, epoch, Timestamp(42));
+ const auto version = ChunkVersion({epoch, Timestamp(42)}, {1, 0});
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
@@ -360,7 +357,7 @@ TEST_F(CatalogCacheTest, TimeseriesFieldsAreProperlyPropagatedOnCC) {
TEST_F(CatalogCacheTest, LookupCollectionWithInvalidOptions) {
const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1));
const auto epoch = OID::gen();
- const auto version = ChunkVersion(1, 0, epoch, Timestamp(42));
+ const auto version = ChunkVersion({epoch, Timestamp(42)}, {1, 0});
loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)});
diff --git a/src/mongo/s/catalog_cache_test_fixture.cpp b/src/mongo/s/catalog_cache_test_fixture.cpp
index 6e66a30d6b2..b83657c246e 100644
--- a/src/mongo/s/catalog_cache_test_fixture.cpp
+++ b/src/mongo/s/catalog_cache_test_fixture.cpp
@@ -27,14 +27,8 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/s/catalog_cache_test_fixture.h"
-#include <memory>
-#include <set>
-#include <vector>
-
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/client.h"
@@ -130,7 +124,7 @@ ChunkManager CatalogCacheTestFixture::makeChunkManager(
bool unique,
const std::vector<BSONObj>& splitPoints,
boost::optional<ReshardingFields> reshardingFields) {
- ChunkVersion version(1, 0, OID::gen(), Timestamp(42) /* timestamp */);
+ ChunkVersion version({OID::gen(), Timestamp(42)}, {1, 0});
DatabaseType db(nss.db().toString(), {"0"}, DatabaseVersion(UUID::gen(), Timestamp()));
@@ -270,7 +264,7 @@ ChunkManager CatalogCacheTestFixture::loadRoutingTableWithTwoChunksAndTwoShardsI
CollectionType collType(
nss, epoch, timestamp, Date_t::now(), uuid, shardKeyPattern.toBSON());
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
ChunkType chunk1(
uuid, {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)}, version, {"0"});
diff --git a/src/mongo/s/chunk_manager_query_test.cpp b/src/mongo/s/chunk_manager_query_test.cpp
index c7a95b8020c..936175610f7 100644
--- a/src/mongo/s/chunk_manager_query_test.cpp
+++ b/src/mongo/s/chunk_manager_query_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include <set>
#include "mongo/db/catalog/catalog_test_fixture.h"
@@ -42,7 +39,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
-
namespace mongo {
namespace {
@@ -506,7 +502,7 @@ TEST_F(ChunkManagerQueryTest, SimpleCollationNumbersMultiShard) {
TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
const auto uuid = UUID::gen();
const auto epoch = OID::gen();
- ChunkVersion version(1, 0, epoch, Timestamp(1, 1));
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunk0(uuid, {BSON("x" << MINKEY), BSON("x" << 0)}, version, ShardId("0"));
chunk0.setName(OID::gen());
diff --git a/src/mongo/s/chunk_manager_refresh_bm.cpp b/src/mongo/s/chunk_manager_refresh_bm.cpp
index 3c7f3adb6b3..12253be4ab2 100644
--- a/src/mongo/s/chunk_manager_refresh_bm.cpp
+++ b/src/mongo/s/chunk_manager_refresh_bm.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include <benchmark/benchmark.h>
#include "mongo/base/init.h"
@@ -77,7 +75,7 @@ CollectionMetadata makeChunkManagerWithShardSelector(int nShards,
for (uint32_t i = 0; i < nChunks; ++i) {
chunks.emplace_back(collUuid,
getRangeForChunk(i, nChunks),
- ChunkVersion{i + 1, 0, collEpoch, Timestamp(1, 0)},
+ ChunkVersion({collEpoch, Timestamp(1, 0)}, {i + 1, 0}),
selectShard(i, nShards, nChunks));
}
@@ -169,7 +167,7 @@ auto BM_FullBuildOfChunkManager(benchmark::State& state, ShardSelectorFn selectS
for (uint32_t i = 0; i < nChunks; ++i) {
chunks.emplace_back(collUuid,
getRangeForChunk(i, nChunks),
- ChunkVersion{i + 1, 0, collEpoch, Timestamp(1, 0)},
+ ChunkVersion({collEpoch, Timestamp(1, 0)}, {i + 1, 0}),
selectShard(i, nShards, nChunks));
}
diff --git a/src/mongo/s/chunk_map_test.cpp b/src/mongo/s/chunk_map_test.cpp
index 6514fc00745..88378ff53e1 100644
--- a/src/mongo/s/chunk_map_test.cpp
+++ b/src/mongo/s/chunk_map_test.cpp
@@ -27,13 +27,10 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/s/chunk_manager.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
-
namespace {
const NamespaceString kNss("TestDB", "TestColl");
@@ -58,7 +55,7 @@ private:
TEST_F(ChunkMapTest, TestAddChunk) {
const OID epoch = OID::gen();
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
auto chunk = std::make_shared<ChunkInfo>(
ChunkType{uuid(),
@@ -75,7 +72,7 @@ TEST_F(ChunkMapTest, TestAddChunk) {
TEST_F(ChunkMapTest, TestEnumerateAllChunks) {
const OID epoch = OID::gen();
ChunkMap chunkMap{epoch, Timestamp(1, 1)};
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
auto newChunkMap = chunkMap.createMerged(
{std::make_shared<ChunkInfo>(
@@ -110,7 +107,7 @@ TEST_F(ChunkMapTest, TestEnumerateAllChunks) {
TEST_F(ChunkMapTest, TestIntersectingChunk) {
const OID epoch = OID::gen();
ChunkMap chunkMap{epoch, Timestamp(1, 1)};
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
auto newChunkMap = chunkMap.createMerged(
{std::make_shared<ChunkInfo>(
@@ -140,7 +137,7 @@ TEST_F(ChunkMapTest, TestIntersectingChunk) {
TEST_F(ChunkMapTest, TestEnumerateOverlappingChunks) {
const OID epoch = OID::gen();
ChunkMap chunkMap{epoch, Timestamp(1, 1)};
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
auto newChunkMap = chunkMap.createMerged(
{std::make_shared<ChunkInfo>(
diff --git a/src/mongo/s/chunk_test.cpp b/src/mongo/s/chunk_test.cpp
index 2902c0e41dd..d1c595c05e8 100644
--- a/src/mongo/s/chunk_test.cpp
+++ b/src/mongo/s/chunk_test.cpp
@@ -27,14 +27,11 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/db/namespace_string.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/chunk.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/shard_id.h"
-
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -48,7 +45,7 @@ const KeyPattern kShardKeyPattern(BSON("a" << 1));
TEST(ChunkTest, HasMovedSincePinnedTimestamp) {
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunkType(uuid,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
@@ -65,7 +62,7 @@ TEST(ChunkTest, HasMovedSincePinnedTimestamp) {
TEST(ChunkTest, HasMovedAndReturnedSincePinnedTimestamp) {
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunkType(uuid,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
@@ -83,7 +80,7 @@ TEST(ChunkTest, HasMovedAndReturnedSincePinnedTimestamp) {
TEST(ChunkTest, HasNotMovedSincePinnedTimestamp) {
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunkType(uuid,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
@@ -101,7 +98,7 @@ TEST(ChunkTest, HasNotMovedSincePinnedTimestamp) {
TEST(ChunkTest, HasNoHistoryValidForPinnedTimestamp_OneEntry) {
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunkType(uuid,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
@@ -117,7 +114,7 @@ TEST(ChunkTest, HasNoHistoryValidForPinnedTimestamp_OneEntry) {
TEST(ChunkTest, HasNoHistoryValidForPinnedTimestamp_MoreThanOneEntry) {
const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
- ChunkVersion version{1, 0, epoch, Timestamp(1, 1)};
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0});
ChunkType chunkType(uuid,
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
diff --git a/src/mongo/s/chunk_version.h b/src/mongo/s/chunk_version.h
index 68f33c6b018..2a7911bfefe 100644
--- a/src/mongo/s/chunk_version.h
+++ b/src/mongo/s/chunk_version.h
@@ -124,10 +124,6 @@ public:
ChunkVersion() : ChunkVersion({OID(), Timestamp()}, {0, 0}) {}
- // TODO: Do not add any new usages of this constructor. Use the one above instead.
- ChunkVersion(uint32_t major, uint32_t minor, OID epoch, Timestamp timestamp)
- : CollectionGeneration(epoch, timestamp), CollectionPlacement(major, minor) {}
-
/**
* Indicates that the collection is not sharded.
*/
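With the legacy positional constructor deleted here, every call site touched by this patch moves to the generation-first form. A before/after sketch of the mechanical rewrite (both shapes are taken from the hunks in this patch; `epoch` and `timestamp` stand for whatever values the call site already had):

    // before: positional (major, minor, epoch, timestamp), forwarded to the two bases
    // ChunkVersion version(1, 0, epoch, timestamp);
    //
    // after: the CollectionGeneration half (epoch + timestamp) and the
    // CollectionPlacement half (major, minor) are passed as explicit groups
    ChunkVersion version({epoch, timestamp}, {1, 0});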
diff --git a/src/mongo/s/chunk_version_test.cpp b/src/mongo/s/chunk_version_test.cpp
index f735bfd7ef0..7693ff55c85 100644
--- a/src/mongo/s/chunk_version_test.cpp
+++ b/src/mongo/s/chunk_version_test.cpp
@@ -40,13 +40,15 @@ TEST(ChunkVersionTest, EqualityOperators) {
OID epoch = OID::gen();
Timestamp timestamp = Timestamp(1);
- ASSERT_EQ(ChunkVersion(3, 1, epoch, Timestamp(1, 1)),
- ChunkVersion(3, 1, epoch, Timestamp(1, 1)));
- ASSERT_EQ(ChunkVersion(3, 1, OID(), timestamp), ChunkVersion(3, 1, OID(), timestamp));
-
- ASSERT_NE(ChunkVersion(3, 1, epoch, timestamp), ChunkVersion(3, 1, OID(), Timestamp(1, 1)));
- ASSERT_NE(ChunkVersion(3, 1, OID(), Timestamp(1, 1)), ChunkVersion(3, 1, epoch, timestamp));
- ASSERT_NE(ChunkVersion(4, 2, epoch, timestamp), ChunkVersion(4, 1, epoch, timestamp));
+ ASSERT_EQ(ChunkVersion({epoch, Timestamp(1, 1)}, {3, 1}),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {3, 1}));
+ ASSERT_EQ(ChunkVersion({OID(), timestamp}, {3, 1}), ChunkVersion({OID(), timestamp}, {3, 1}));
+
+ ASSERT_NE(ChunkVersion({epoch, timestamp}, {3, 1}),
+ ChunkVersion({OID(), Timestamp(1, 1)}, {3, 1}));
+ ASSERT_NE(ChunkVersion({OID(), Timestamp(1, 1)}, {3, 1}),
+ ChunkVersion({epoch, timestamp}, {3, 1}));
+ ASSERT_NE(ChunkVersion({epoch, timestamp}, {4, 2}), ChunkVersion({epoch, timestamp}, {4, 1}));
}
TEST(ChunkVersionTest, OlderThan) {
@@ -54,19 +56,23 @@ TEST(ChunkVersionTest, OlderThan) {
Timestamp timestamp(1);
Timestamp newerTimestamp(2);
- ASSERT(ChunkVersion(3, 1, epoch, timestamp).isOlderThan(ChunkVersion(4, 1, epoch, timestamp)));
- ASSERT(!ChunkVersion(4, 1, epoch, timestamp).isOlderThan(ChunkVersion(3, 1, epoch, timestamp)));
+ ASSERT(ChunkVersion({epoch, timestamp}, {3, 1})
+ .isOlderThan(ChunkVersion({epoch, timestamp}, {4, 1})));
+ ASSERT(!ChunkVersion({epoch, timestamp}, {4, 1})
+ .isOlderThan(ChunkVersion({epoch, timestamp}, {3, 1})));
- ASSERT(ChunkVersion(3, 1, epoch, timestamp).isOlderThan(ChunkVersion(3, 2, epoch, timestamp)));
- ASSERT(!ChunkVersion(3, 2, epoch, timestamp).isOlderThan(ChunkVersion(3, 1, epoch, timestamp)));
+ ASSERT(ChunkVersion({epoch, timestamp}, {3, 1})
+ .isOlderThan(ChunkVersion({epoch, timestamp}, {3, 2})));
+ ASSERT(!ChunkVersion({epoch, timestamp}, {3, 2})
+ .isOlderThan(ChunkVersion({epoch, timestamp}, {3, 1})));
- ASSERT(ChunkVersion(3, 1, epoch, timestamp)
- .isOlderThan(ChunkVersion(3, 1, OID::gen(), newerTimestamp)));
- ASSERT(!ChunkVersion(3, 1, epoch, newerTimestamp)
- .isOlderThan(ChunkVersion(3, 1, OID::gen(), timestamp)));
+ ASSERT(ChunkVersion({epoch, timestamp}, {3, 1})
+ .isOlderThan(ChunkVersion({OID::gen(), newerTimestamp}, {3, 1})));
+ ASSERT(!ChunkVersion({epoch, newerTimestamp}, {3, 1})
+ .isOlderThan(ChunkVersion({OID::gen(), timestamp}, {3, 1})));
- ASSERT(!ChunkVersion::UNSHARDED().isOlderThan(ChunkVersion(3, 1, epoch, timestamp)));
- ASSERT(!ChunkVersion(3, 1, epoch, timestamp).isOlderThan(ChunkVersion::UNSHARDED()));
+ ASSERT(!ChunkVersion::UNSHARDED().isOlderThan(ChunkVersion({epoch, timestamp}, {3, 1})));
+ ASSERT(!ChunkVersion({epoch, timestamp}, {3, 1}).isOlderThan(ChunkVersion::UNSHARDED()));
}
TEST(ChunkVersionTest, CreateWithLargeValues) {
@@ -74,7 +80,7 @@ TEST(ChunkVersionTest, CreateWithLargeValues) {
const uint32_t minorVersion = std::numeric_limits<uint32_t>::max();
const auto epoch = OID::gen();
- ChunkVersion version(majorVersion, minorVersion, epoch, Timestamp(1, 1));
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {majorVersion, minorVersion});
ASSERT_EQ(majorVersion, version.majorVersion());
ASSERT_EQ(minorVersion, version.minorVersion());
ASSERT_EQ(epoch, version.epoch());
@@ -86,7 +92,7 @@ TEST(ChunkVersionTest, ThrowsErrorIfOverflowIsAttemptedForMajorVersion) {
const uint32_t minorVersion = 0;
const auto epoch = OID::gen();
- ChunkVersion version(majorVersion, minorVersion, epoch, Timestamp(1, 1));
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {majorVersion, minorVersion});
ASSERT_EQ(majorVersion, version.majorVersion());
ASSERT_EQ(minorVersion, version.minorVersion());
ASSERT_EQ(epoch, version.epoch());
@@ -99,7 +105,7 @@ TEST(ChunkVersionTest, ThrowsErrorIfOverflowIsAttemptedForMinorVersion) {
const uint32_t minorVersion = std::numeric_limits<uint32_t>::max();
const auto epoch = OID::gen();
- ChunkVersion version(majorVersion, minorVersion, epoch, Timestamp(1, 1));
+ ChunkVersion version({epoch, Timestamp(1, 1)}, {majorVersion, minorVersion});
ASSERT_EQ(majorVersion, version.majorVersion());
ASSERT_EQ(minorVersion, version.minorVersion());
ASSERT_EQ(epoch, version.epoch());
diff --git a/src/mongo/s/comparable_chunk_version_test.cpp b/src/mongo/s/comparable_chunk_version_test.cpp
index a5d47981709..63f6ca4a59c 100644
--- a/src/mongo/s/comparable_chunk_version_test.cpp
+++ b/src/mongo/s/comparable_chunk_version_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/s/chunk_manager.h"
#include "mongo/unittest/unittest.h"
@@ -38,15 +36,15 @@ namespace {
TEST(ComparableChunkVersionTest, VersionsEqual) {
const auto epoch = OID::gen();
const Timestamp timestamp(1, 1);
- const ChunkVersion v1(1, 0, epoch, timestamp);
- const ChunkVersion v2(1, 0, epoch, timestamp);
+ const ChunkVersion v1({epoch, timestamp}, {1, 0});
+ const ChunkVersion v2({epoch, timestamp}, {1, 0});
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(v1);
const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(v2);
ASSERT(version1 == version2);
}
TEST(ComparableChunkVersionTest, VersionsEqualAfterCopy) {
- const ChunkVersion chunkVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
const auto version2 = version1;
ASSERT(version1 == version2);
@@ -54,8 +52,8 @@ TEST(ComparableChunkVersionTest, VersionsEqualAfterCopy) {
TEST(ComparableChunkVersionTest, CompareDifferentTimestamps) {
- const ChunkVersion v1(2, 0, OID::gen(), Timestamp(1));
- const ChunkVersion v2(1, 0, OID::gen(), Timestamp(2));
+ const ChunkVersion v1({OID::gen(), Timestamp(1)}, {2, 0});
+ const ChunkVersion v2({OID::gen(), Timestamp(2)}, {1, 0});
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(v1);
const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(v2);
ASSERT(version2 != version1);
@@ -65,9 +63,9 @@ TEST(ComparableChunkVersionTest, CompareDifferentTimestamps) {
TEST(ComparableChunkVersionTest, CompareDifferentVersionsTimestampsIgnoreSequenceNumber) {
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(
- ChunkVersion(2, 0, OID::gen(), Timestamp(2)));
+ ChunkVersion({OID::gen(), Timestamp(2)}, {2, 0}));
const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(
- ChunkVersion(2, 0, OID::gen(), Timestamp(1)));
+ ChunkVersion({OID::gen(), Timestamp(1)}, {2, 0}));
ASSERT(version1 != version2);
ASSERT(version1 > version2);
ASSERT_FALSE(version1 < version2);
@@ -76,9 +74,9 @@ TEST(ComparableChunkVersionTest, CompareDifferentVersionsTimestampsIgnoreSequenc
TEST(ComparableChunkVersionTest, VersionGreaterSameTimestamps) {
const auto epoch = OID::gen();
const Timestamp timestamp(1, 1);
- const ChunkVersion v1(1, 0, epoch, timestamp);
- const ChunkVersion v2(1, 2, epoch, timestamp);
- const ChunkVersion v3(2, 0, epoch, timestamp);
+ const ChunkVersion v1({epoch, timestamp}, {1, 0});
+ const ChunkVersion v2({epoch, timestamp}, {1, 2});
+ const ChunkVersion v3({epoch, timestamp}, {2, 0});
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(v1);
const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(v2);
const auto version3 = ComparableChunkVersion::makeComparableChunkVersion(v3);
@@ -93,9 +91,9 @@ TEST(ComparableChunkVersionTest, VersionGreaterSameTimestamps) {
TEST(ComparableChunkVersionTest, VersionLessSameTimestamps) {
const auto epoch = OID::gen();
const Timestamp timestamp(1, 1);
- const ChunkVersion v1(1, 0, epoch, timestamp);
- const ChunkVersion v2(1, 2, epoch, timestamp);
- const ChunkVersion v3(2, 0, epoch, timestamp);
+ const ChunkVersion v1({epoch, timestamp}, {1, 0});
+ const ChunkVersion v2({epoch, timestamp}, {1, 2});
+ const ChunkVersion v3({epoch, timestamp}, {2, 0});
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(v1);
const auto version2 = ComparableChunkVersion::makeComparableChunkVersion(v2);
const auto version3 = ComparableChunkVersion::makeComparableChunkVersion(v3);
@@ -115,7 +113,7 @@ TEST(ComparableChunkVersionTest, DefaultConstructedVersionsAreEqual) {
}
TEST(ComparableChunkVersionTest, DefaultConstructedVersionIsAlwaysLessThanWithChunksVersion) {
- const ChunkVersion chunkVersion(1, 0, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0});
const ComparableChunkVersion defaultVersion{};
const auto withChunksVersion = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
ASSERT(defaultVersion != withChunksVersion);
@@ -124,7 +122,7 @@ TEST(ComparableChunkVersionTest, DefaultConstructedVersionIsAlwaysLessThanWithCh
}
TEST(ComparableChunkVersionTest, DefaultConstructedVersionIsAlwaysLessThanNoChunksVersion) {
- const ChunkVersion chunkVersion(0, 0, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {0, 0});
const ComparableChunkVersion defaultVersion{};
const auto noChunksVersion = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
ASSERT(defaultVersion != noChunksVersion);
@@ -143,8 +141,8 @@ TEST(ComparableChunkVersionTest, DefaultConstructedVersionIsAlwaysLessThanUnshar
TEST(ComparableChunkVersionTest, TwoNoChunksVersionsAreTheSame) {
const auto oid = OID::gen();
- const ChunkVersion v1(0, 0, oid, Timestamp(1, 1));
- const ChunkVersion v2(0, 0, oid, Timestamp(1, 1));
+ const ChunkVersion v1({oid, Timestamp(1, 1)}, {0, 0});
+ const ChunkVersion v2({oid, Timestamp(1, 1)}, {0, 0});
const auto noChunksVersion1 = ComparableChunkVersion::makeComparableChunkVersion(v1);
const auto noChunksVersion2 = ComparableChunkVersion::makeComparableChunkVersion(v2);
ASSERT(noChunksVersion1 == noChunksVersion2);
@@ -155,9 +153,9 @@ TEST(ComparableChunkVersionTest, TwoNoChunksVersionsAreTheSame) {
TEST(ComparableChunkVersionTest, NoChunksComparedBySequenceNum) {
const auto oid = OID::gen();
const Timestamp timestamp(1);
- const ChunkVersion v1(1, 0, oid, timestamp);
- const ChunkVersion v2(0, 0, oid, timestamp);
- const ChunkVersion v3(2, 0, oid, timestamp);
+ const ChunkVersion v1({oid, timestamp}, {1, 0});
+ const ChunkVersion v2({oid, timestamp}, {0, 0});
+ const ChunkVersion v3({oid, timestamp}, {2, 0});
const auto version1 = ComparableChunkVersion::makeComparableChunkVersion(v1);
const auto noChunksVersion2 = ComparableChunkVersion::makeComparableChunkVersion(v2);
const auto version3 = ComparableChunkVersion::makeComparableChunkVersion(v3);
@@ -168,7 +166,7 @@ TEST(ComparableChunkVersionTest, NoChunksComparedBySequenceNum) {
}
TEST(ComparableChunkVersionTest, NoChunksGreaterThanUnshardedBySequenceNum) {
- const ChunkVersion chunkVersion(0, 0, OID::gen(), Timestamp(1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1)}, {0, 0});
const auto unsharded =
ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion::UNSHARDED());
const auto noChunkSV = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
@@ -177,7 +175,7 @@ TEST(ComparableChunkVersionTest, NoChunksGreaterThanUnshardedBySequenceNum) {
}
TEST(ComparableChunkVersionTest, UnshardedGreaterThanNoChunksBySequenceNum) {
- const ChunkVersion chunkVersion(0, 0, OID::gen(), Timestamp(1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1)}, {0, 0});
const auto noChunkSV = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
const auto unsharded =
ComparableChunkVersion::makeComparableChunkVersion(ChunkVersion::UNSHARDED());
@@ -186,7 +184,7 @@ TEST(ComparableChunkVersionTest, UnshardedGreaterThanNoChunksBySequenceNum) {
}
TEST(ComparableChunkVersionTest, NoChunksGreaterThanDefault) {
- const ChunkVersion chunkVersion(0, 0, OID::gen(), Timestamp(1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1)}, {0, 0});
const auto noChunkSV = ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
const ComparableChunkVersion defaultVersion{};
ASSERT(noChunkSV != defaultVersion);
@@ -194,7 +192,7 @@ TEST(ComparableChunkVersionTest, NoChunksGreaterThanDefault) {
}
TEST(ComparableChunkVersionTest, CompareForcedRefreshVersionVersusValidChunkVersion) {
- const ChunkVersion chunkVersion(100, 0, OID::gen(), Timestamp(1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1)}, {100, 0});
const ComparableChunkVersion defaultVersionBeforeForce;
const auto versionBeforeForce =
ComparableChunkVersion::makeComparableChunkVersion(chunkVersion);
diff --git a/src/mongo/s/query/sharded_agg_test_fixture.h b/src/mongo/s/query/sharded_agg_test_fixture.h
index d5c02d84b3c..f36ae36eabb 100644
--- a/src/mongo/s/query/sharded_agg_test_fixture.h
+++ b/src/mongo/s/query/sharded_agg_test_fixture.h
@@ -80,7 +80,7 @@ public:
const OID epoch,
const Timestamp timestamp,
std::vector<std::pair<ChunkRange, ShardId>> chunkInfos) {
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version({epoch, timestamp}, {1, 0});
std::vector<ChunkType> chunks;
for (auto&& pair : chunkInfos) {
chunks.emplace_back(uuid, pair.first, version, pair.second);
diff --git a/src/mongo/s/request_types/move_chunk_request_test.cpp b/src/mongo/s/request_types/move_chunk_request_test.cpp
index e8020086d35..688d117b5c2 100644
--- a/src/mongo/s/request_types/move_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/move_chunk_request_test.cpp
@@ -49,7 +49,7 @@ const int kMaxChunkSizeBytes = 1024;
const bool kWaitForDelete = true;
TEST(MoveChunkRequest, Roundtrip) {
- const ChunkVersion chunkVersion(3, 1, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {3, 1});
BSONObjBuilder builder;
MoveChunkRequest::appendAsCommand(
@@ -81,7 +81,7 @@ TEST(MoveChunkRequest, Roundtrip) {
}
TEST(MoveChunkRequest, EqualityOperatorSameValue) {
- const ChunkVersion chunkVersion(3, 1, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {3, 1});
BSONObjBuilder builder;
MoveChunkRequest::appendAsCommand(
@@ -106,7 +106,7 @@ TEST(MoveChunkRequest, EqualityOperatorSameValue) {
}
TEST(MoveChunkRequest, EqualityOperatorDifferentValues) {
- const ChunkVersion chunkVersion(3, 1, OID::gen(), Timestamp(1, 1));
+ const ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {3, 1});
BSONObjBuilder builder1;
MoveChunkRequest::appendAsCommand(
diff --git a/src/mongo/s/routing_table_history_test.cpp b/src/mongo/s/routing_table_history_test.cpp
index 9651911ee64..7c8973a7237 100644
--- a/src/mongo/s/routing_table_history_test.cpp
+++ b/src/mongo/s/routing_table_history_test.cpp
@@ -154,7 +154,7 @@ public:
const UUID uuid = UUID::gen();
const OID epoch = OID::gen();
const Timestamp timestamp(1);
- ChunkVersion version{1, 0, epoch, timestamp};
+ ChunkVersion version({epoch, timestamp}, {1, 0});
auto initChunk =
ChunkType{uuid,
@@ -332,7 +332,7 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
const UUID uuid = UUID::gen();
const OID epoch = OID::gen();
const Timestamp timestamp(1);
- ChunkVersion version{1, 0, epoch, timestamp};
+ ChunkVersion version({epoch, timestamp}, {1, 0});
auto chunkAll =
ChunkType{uuid,
@@ -356,35 +356,35 @@ TEST_F(RoutingTableHistoryTest, TestSplits) {
std::vector<ChunkType> chunks1 = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard}};
auto rt1 =
rt.makeUpdated(boost::none /* timeseriesFields */, boost::none, boost::none, true, chunks1);
- auto v1 = ChunkVersion{2, 2, epoch, timestamp};
+ auto v1 = ChunkVersion({epoch, timestamp}, {2, 2});
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
std::vector<ChunkType> chunks2 = {
ChunkType{uuid,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << -1)},
- ChunkVersion{3, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << -1), BSON("a" << 0)},
- ChunkVersion{3, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 2}),
kThisShard}};
auto rt2 = rt1.makeUpdated(
boost::none /* timeseriesFields */, boost::none, boost::none, true, chunks2);
- auto v2 = ChunkVersion{3, 2, epoch, timestamp};
+ auto v2 = ChunkVersion({epoch, timestamp}, {3, 2});
ASSERT_EQ(v2, rt2.getVersion(kThisShard));
}
@@ -396,7 +396,7 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
std::vector<ChunkType> initialChunks = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()},
- ChunkVersion{1, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {1, 0}),
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -416,16 +416,16 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) {
std::vector<ChunkType> changedChunks = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard}};
auto rt1 = rt.makeUpdated(
boost::none /* timeseriesFields */, boost::none, boost::none, true, changedChunks);
- auto v1 = ChunkVersion{2, 2, epoch, timestamp};
+ auto v1 = ChunkVersion({epoch, timestamp}, {2, 2});
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
@@ -451,7 +451,7 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
std::vector<ChunkType> initialChunks = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()},
- ChunkVersion{1, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {1, 0}),
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -471,20 +471,20 @@ TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) {
std::vector<ChunkType> changedChunks = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()},
- ChunkVersion{1, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {1, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard}};
auto rt1 = rt.makeUpdated(
boost::none /* timeseriesFields */, boost::none, boost::none, true, changedChunks);
- auto v1 = ChunkVersion{2, 2, epoch, timestamp};
+ auto v1 = ChunkVersion({epoch, timestamp}, {2, 2});
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
}
@@ -497,11 +497,11 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
std::vector<ChunkType> initialChunks = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -521,21 +521,21 @@ TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) {
std::vector<ChunkType> changedChunks = {
ChunkType{uuid,
ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()},
- ChunkVersion{3, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{3, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 1}),
kThisShard}};
auto rt1 = rt.makeUpdated(
boost::none /* timeseriesFields */, boost::none, boost::none, true, changedChunks);
- auto v1 = ChunkVersion{3, 1, epoch, timestamp};
+ auto v1 = ChunkVersion({epoch, timestamp}, {3, 1});
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
auto chunk1 = rt1.findIntersectingChunk(BSON("a" << 0));
- ASSERT_EQ(chunk1->getLastmod(), ChunkVersion(3, 0, epoch, timestamp));
+ ASSERT_EQ(chunk1->getLastmod(), ChunkVersion({epoch, timestamp}, {3, 0}));
ASSERT_EQ(chunk1->getMin().woCompare(BSON("a" << 0)), 0);
ASSERT_EQ(chunk1->getMax().woCompare(getShardKeyPattern().globalMax()), 0);
}
@@ -548,15 +548,15 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunks) {
std::vector<ChunkType> initialChunks = {
ChunkType{uuid,
ChunkRange{BSON("a" << 0), BSON("a" << 10)},
- ChunkVersion{2, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 10), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -572,21 +572,21 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunks) {
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 3);
- ASSERT_EQ(rt.getVersion(), ChunkVersion(2, 2, epoch, timestamp));
+ ASSERT_EQ(rt.getVersion(), ChunkVersion({epoch, timestamp}, {2, 2}));
std::vector<ChunkType> changedChunks = {
ChunkType{uuid,
ChunkRange{BSON("a" << 10), getShardKeyPattern().globalMax()},
- ChunkVersion{3, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 10)},
- ChunkVersion{3, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 1}),
kThisShard}};
auto rt1 = rt.makeUpdated(
boost::none /* timeseriesFields */, boost::none, boost::none, true, changedChunks);
- auto v1 = ChunkVersion{3, 1, epoch, timestamp};
+ auto v1 = ChunkVersion({epoch, timestamp}, {3, 1});
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
}
@@ -599,15 +599,15 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) {
std::vector<ChunkType> initialChunks = {
ChunkType{uuid,
ChunkRange{BSON("a" << -10), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << -500)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << -500), BSON("a" << -10)},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard}};
auto rt = RoutingTableHistory::makeNew(kNss,
@@ -623,26 +623,26 @@ TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) {
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 3);
- ASSERT_EQ(rt.getVersion(), ChunkVersion(2, 2, epoch, timestamp));
+ ASSERT_EQ(rt.getVersion(), ChunkVersion({epoch, timestamp}, {2, 2}));
std::vector<ChunkType> changedChunks = {
ChunkType{uuid,
ChunkRange{BSON("a" << -500), BSON("a" << -10)},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << -10)},
- ChunkVersion{3, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 1}),
kThisShard}};
auto rt1 = rt.makeUpdated(
boost::none /* timeseriesFields */, boost::none, boost::none, true, changedChunks);
- auto v1 = ChunkVersion{3, 1, epoch, timestamp};
+ auto v1 = ChunkVersion({epoch, timestamp}, {3, 1});
ASSERT_EQ(v1, rt1.getVersion(kThisShard));
ASSERT_EQ(rt1.numChunks(), 2);
auto chunk1 = rt1.findIntersectingChunk(BSON("a" << -500));
- ASSERT_EQ(chunk1->getLastmod(), ChunkVersion(3, 1, epoch, timestamp));
+ ASSERT_EQ(chunk1->getLastmod(), ChunkVersion({epoch, timestamp}, {3, 1}));
ASSERT_EQ(chunk1->getMin().woCompare(getShardKeyPattern().globalMin()), 0);
ASSERT_EQ(chunk1->getMax().woCompare(BSON("a" << -10)), 0);
}
@@ -655,27 +655,27 @@ TEST_F(RoutingTableHistoryTest, TestFlatten) {
std::vector<ChunkType> initialChunks = {
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 10)},
- ChunkVersion{2, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 10), BSON("a" << 20)},
- ChunkVersion{2, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 1}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 20), getShardKeyPattern().globalMax()},
- ChunkVersion{2, 2, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {2, 2}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()},
- ChunkVersion{3, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {3, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 10)},
- ChunkVersion{4, 0, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {4, 0}),
kThisShard},
ChunkType{uuid,
ChunkRange{BSON("a" << 10), getShardKeyPattern().globalMax()},
- ChunkVersion{4, 1, epoch, timestamp},
+ ChunkVersion({epoch, timestamp}, {4, 1}),
kThisShard},
};
@@ -692,10 +692,10 @@ TEST_F(RoutingTableHistoryTest, TestFlatten) {
true,
initialChunks);
ASSERT_EQ(rt.numChunks(), 2);
- ASSERT_EQ(rt.getVersion(), ChunkVersion(4, 1, epoch, timestamp));
+ ASSERT_EQ(rt.getVersion(), ChunkVersion({epoch, timestamp}, {4, 1}));
auto chunk1 = rt.findIntersectingChunk(BSON("a" << 0));
- ASSERT_EQ(chunk1->getLastmod(), ChunkVersion(4, 0, epoch, timestamp));
+ ASSERT_EQ(chunk1->getLastmod(), ChunkVersion({epoch, timestamp}, {4, 0}));
ASSERT_EQ(chunk1->getMin().woCompare(getShardKeyPattern().globalMin()), 0);
ASSERT_EQ(chunk1->getMax().woCompare(BSON("a" << 10)), 0);
}
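A minimal sketch of the two ChunkVersion spellings being swapped throughout the routing_table_history_test.cpp hunks above (not part of the patch; the header paths and the {epoch, timestamp} / {major, minor} grouping are inferred from the before/after lines themselves):

// Illustrative only; mirrors the replaced lines above.
#include "mongo/bson/oid.h"
#include "mongo/bson/timestamp.h"
#include "mongo/s/chunk_version.h"

namespace mongo {
inline ChunkVersion exampleChunkVersion() {
    const OID epoch = OID::gen();
    const Timestamp timestamp(1, 1);

    // Old spelling removed by this commit: major/minor first, then epoch/timestamp.
    //   ChunkVersion{2, 2, epoch, timestamp};

    // New spelling used throughout: the {epoch, timestamp} pair comes first,
    // followed by the {major, minor} pair.
    return ChunkVersion({epoch, timestamp}, {2, 2});
}
}  // namespace mongo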
diff --git a/src/mongo/s/stale_shard_version_helpers_test.cpp b/src/mongo/s/stale_shard_version_helpers_test.cpp
index 89a7c0d9d11..0acedd12eae 100644
--- a/src/mongo/s/stale_shard_version_helpers_test.cpp
+++ b/src/mongo/s/stale_shard_version_helpers_test.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/logv2/log.h"
#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/s/stale_shard_version_helpers.h"
@@ -38,7 +35,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
-
namespace mongo {
namespace {
@@ -98,8 +94,8 @@ TEST_F(AsyncShardVersionRetry, LimitedStaleErrorsShouldReturnCorrectValue) {
service(), nss(), catalogCache, desc(), getExecutor(), token, [&](OperationContext*) {
if (++tries < 5) {
uassert(StaleConfigInfo(nss(),
- ChunkVersion(5, 23, OID::gen(), {}),
- ChunkVersion(6, 99, OID::gen(), {}),
+ ChunkVersion({OID::gen(), Timestamp(1, 0)}, {5, 23}),
+ ChunkVersion({OID::gen(), Timestamp(1, 0)}, {6, 99}),
ShardId("sB")),
"testX",
false);
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index a0ec8867628..aba9c8367c2 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
@@ -94,8 +92,8 @@ BSONObj expectInsertsReturnStaleVersionErrorsBase(const NamespaceString& nss,
staleResponse.addToErrDetails(
write_ops::WriteError(i,
Status(StaleConfigInfo(nss,
- ChunkVersion(1, 0, epoch, timestamp),
- ChunkVersion(2, 0, epoch, timestamp),
+ ChunkVersion({epoch, timestamp}, {1, 0}),
+ ChunkVersion({epoch, timestamp}, {2, 0}),
ShardId(kShardName1)),
"Stale error")));
++i;
@@ -335,7 +333,7 @@ public:
MockNSTargeter singleShardNSTargeter{
nss,
{MockRange(ShardEndpoint(kShardName1,
- ChunkVersion(100, 200, OID::gen(), Timestamp(1, 1)),
+ ChunkVersion({OID::gen(), Timestamp(1, 1)}, {100, 200}),
boost::none),
BSON("x" << MINKEY),
BSON("x" << MAXKEY))}};
@@ -406,19 +404,19 @@ TEST_F(BatchWriteExecTest, SingleUpdateTargetsShardWithLet) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
- return std::vector{
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ return std::vector{ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -493,18 +491,20 @@ TEST_F(BatchWriteExecTest, SingleDeleteTargetsShardWithLet) {
std::vector<ShardEndpoint> targetDelete(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{ShardEndpoint(
- kShardName2, ChunkVersion(101, 200, epoch, Timestamp(1, 1)), boost::none)};
+ kShardName2, ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
- {MockRange(ShardEndpoint(
- kShardName1, ChunkVersion(100, 200, epoch, Timestamp(1, 1)), boost::none),
+ {MockRange(ShardEndpoint(kShardName1,
+ ChunkVersion({epoch, Timestamp(1, 1)}, {100, 200}),
+ boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
- MockRange(ShardEndpoint(
- kShardName2, ChunkVersion(101, 200, epoch, Timestamp(1, 1)), boost::none),
+ MockRange(ShardEndpoint(kShardName2,
+ ChunkVersion({epoch, Timestamp(1, 1)}, {101, 200}),
+ boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -685,19 +685,21 @@ TEST_F(BatchWriteExecTest, StaleShardVersionReturnedFromBatchWithSingleMultiWrit
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -726,13 +728,13 @@ TEST_F(BatchWriteExecTest, StaleShardVersionReturnedFromBatchWithSingleMultiWrit
BatchedCommandResponse response;
response.setStatus(Status::OK());
response.setNModified(0);
- response.addToErrDetails(
- write_ops::WriteError(0,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 0,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
return response.toBSON();
});
@@ -783,19 +785,21 @@ TEST_F(BatchWriteExecTest,
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("sk" << MINKEY),
BSON("sk" << 10)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("sk" << 10),
BSON("sk" << MAXKEY))});
@@ -824,20 +828,20 @@ TEST_F(BatchWriteExecTest,
BatchedCommandResponse response;
response.setStatus(Status::OK());
response.setNModified(0);
- response.addToErrDetails(
- write_ops::WriteError(0,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
- response.addToErrDetails(
- write_ops::WriteError(1,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 0,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 1,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
return response.toBSON();
});
@@ -887,19 +891,21 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("sk" << MINKEY),
BSON("sk" << 10)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("sk" << 10),
BSON("sk" << MAXKEY))});
@@ -918,13 +924,13 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
BatchedCommandResponse response;
response.setStatus(Status::OK());
response.setNModified(0);
- response.addToErrDetails(
- write_ops::WriteError(1,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 1,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
return response.toBSON();
});
@@ -934,13 +940,13 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1Firs) {
BatchedCommandResponse response;
response.setStatus(Status::OK());
response.setNModified(0);
- response.addToErrDetails(
- write_ops::WriteError(0,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 0,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
return response.toBSON();
});
@@ -1001,19 +1007,21 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("sk" << MINKEY),
BSON("sk" << 10)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("sk" << 10),
BSON("sk" << MAXKEY))});
@@ -1032,13 +1040,13 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
BatchedCommandResponse response;
response.setStatus(Status::OK());
response.setNModified(0);
- response.addToErrDetails(
- write_ops::WriteError(1,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 1,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
return response.toBSON();
});
@@ -1048,13 +1056,13 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromMultiWriteWithShard1FirstOK
BatchedCommandResponse response;
response.setStatus(Status::OK());
response.setNModified(0);
- response.addToErrDetails(
- write_ops::WriteError(1,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 1,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
return response.toBSON();
});
@@ -1112,12 +1120,12 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
if (targetAll) {
return std::vector{
ShardEndpoint(
- kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
ShardEndpoint(
- kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
} else {
return std::vector{ShardEndpoint(
- kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
}
@@ -1127,11 +1135,11 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("sk" << MINKEY),
BSON("sk" << 10)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("sk" << 10),
BSON("sk" << MAXKEY))});
@@ -1151,13 +1159,13 @@ TEST_F(BatchWriteExecTest, RetryableErrorReturnedFromWriteWithShard1SSVShard2OK)
response.setStatus(Status::OK());
response.setNModified(0);
response.setN(0);
- response.addToErrDetails(
- write_ops::WriteError(0,
- Status(StaleConfigInfo(nss,
- ChunkVersion(101, 200, epoch, timestamp),
- ChunkVersion(105, 200, epoch, timestamp),
- ShardId(kShardName2)),
- "Stale error")));
+ response.addToErrDetails(write_ops::WriteError(
+ 0,
+ Status(StaleConfigInfo(nss,
+ ChunkVersion({epoch, timestamp}, {101, 200}),
+ ChunkVersion({epoch, timestamp}, {105, 200}),
+ ShardId(kShardName2)),
+ "Stale error")));
// This simulates a migration of the last chunk on shard 1 to shard 2, which means that
// future rounds on the batchExecutor should only target shard 2
@@ -1874,19 +1882,21 @@ TEST_F(BatchWriteExecTargeterErrorTest, TargetedFailedAndErrorResponse) {
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -2010,19 +2020,21 @@ TEST_F(BatchWriteExecTransactionTargeterErrorTest, TargetedFailedAndErrorRespons
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
@@ -2154,19 +2166,21 @@ TEST_F(BatchWriteExecTransactionMultiShardTest, TargetedSucceededAndErrorRespons
std::vector<ShardEndpoint> targetUpdate(OperationContext* opCtx,
const BatchItemRef& itemRef) const override {
return std::vector{
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none)};
+ ShardEndpoint(
+ kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
+ ShardEndpoint(
+ kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none)};
}
};
MultiShardTargeter multiShardNSTargeter(
nss,
{MockRange(
- ShardEndpoint(kShardName1, ChunkVersion(100, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName1, ChunkVersion({epoch, timestamp}, {100, 200}), boost::none),
BSON("x" << MINKEY),
BSON("x" << 0)),
MockRange(
- ShardEndpoint(kShardName2, ChunkVersion(101, 200, epoch, timestamp), boost::none),
+ ShardEndpoint(kShardName2, ChunkVersion({epoch, timestamp}, {101, 200}), boost::none),
BSON("x" << 0),
BSON("x" << MAXKEY))});
diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp
index baa0c786e2a..0ba795e44a5 100644
--- a/src/mongo/s/write_ops/batched_command_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_request_test.cpp
@@ -27,10 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
-#include <memory>
-
#include "mongo/bson/json.h"
#include "mongo/db/ops/write_ops_parsers_test_helpers.h"
#include "mongo/s/write_ops/batched_command_request.h"
@@ -76,7 +72,7 @@ TEST(BatchedCommandRequest, InsertWithShardVersion) {
ASSERT_EQ("TestDB.test", insertRequest.getInsertRequest().getNamespace().ns());
ASSERT(insertRequest.hasShardVersion());
- ASSERT_EQ(ChunkVersion(1, 2, epoch, timestamp).toString(),
+ ASSERT_EQ(ChunkVersion({epoch, timestamp}, {1, 2}).toString(),
insertRequest.getShardVersion().toString());
}
}
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index f17637ade04..4d7acf32c22 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -67,45 +67,12 @@ TEST(BatchedCommandResponseTest, Basic) {
ASSERT_BSONOBJ_EQ(origResponseObj, genResponseObj);
}
-// TODO (SERVER-64449): Get rid of this entire test case
-TEST(BatchedCommandResponseTest, StaleErrorAsStaleShardVersionCompatibility) {
+TEST(BatchedCommandResponseTest, StaleConfigInfo) {
OID epoch = OID::gen();
StaleConfigInfo staleInfo(NamespaceString("TestDB.TestColl"),
- ChunkVersion(1, 0, epoch, Timestamp(100, 0)),
- ChunkVersion(2, 0, epoch, Timestamp(100, 0)),
- ShardId("TestShard"));
- BSONObjBuilder builder;
- staleInfo.serialize(&builder);
-
- BSONArray writeErrorsArray(
- BSON_ARRAY(BSON("index" << 0 << "code" << ErrorCodes::OBSOLETE_StaleShardVersion << "errmsg"
- << "OBSOLETE_StaleShardVersion error"
- << "errInfo" << builder.obj())
- << BSON("index" << 1 << "code" << ErrorCodes::InvalidNamespace << "errmsg"
- << "index 1 failed too")));
-
- BSONObj origResponseObj =
- BSON("n" << 0 << "opTime" << mongo::Timestamp(1ULL) << "writeErrors" << writeErrorsArray
- << "retriedStmtIds" << BSON_ARRAY(1 << 3) << "ok" << 1.0);
-
- std::string errMsg;
- BatchedCommandResponse response;
- ASSERT_TRUE(response.parseBSON(origResponseObj, &errMsg));
- ASSERT_EQ(0, response.getErrDetailsAt(0).getIndex());
- ASSERT_EQ(ErrorCodes::StaleConfig, response.getErrDetailsAt(0).getStatus().code());
- auto extraInfo = response.getErrDetailsAt(0).getStatus().extraInfo<StaleConfigInfo>();
- ASSERT_EQ(staleInfo.getVersionReceived(), extraInfo->getVersionReceived());
- ASSERT_EQ(*staleInfo.getVersionWanted(), *extraInfo->getVersionWanted());
- ASSERT_EQ(staleInfo.getShardId(), extraInfo->getShardId());
-}
-
-TEST(BatchedCommandResponseTest, StaleErrorAsStaleConfigCompatibility) {
- OID epoch = OID::gen();
-
- StaleConfigInfo staleInfo(NamespaceString("TestDB.TestColl"),
- ChunkVersion(1, 0, epoch, Timestamp(100, 0)),
- ChunkVersion(2, 0, epoch, Timestamp(100, 0)),
+ ChunkVersion({epoch, Timestamp(100, 0)}, {1, 0}),
+ ChunkVersion({epoch, Timestamp(100, 0)}, {2, 0}),
ShardId("TestShard"));
BSONObjBuilder builder(BSON("index" << 0 << "code" << ErrorCodes::StaleConfig << "errmsg"
<< "StaleConfig error"));
@@ -189,7 +156,7 @@ TEST(BatchedCommandResponseTest, TooManyBigErrors) {
}
TEST(BatchedCommandResponseTest, CompatibilityFromWriteErrorToBatchCommandResponse) {
- ChunkVersion versionReceived(1, 0, OID::gen(), Timestamp(2, 0));
+ ChunkVersion versionReceived({OID::gen(), Timestamp(2, 0)}, {1, 0});
write_ops::UpdateCommandReply reply;
reply.getWriteCommandReplyBase().setN(1);
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index 884ffc906c3..2d179b6593f 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -119,11 +119,11 @@ TEST_F(WriteOpTest, TargetSingle) {
// Multi-write targeting test where our query goes to one shard
TEST_F(WriteOpTest, TargetMultiOneShard) {
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion(10, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
ShardEndpoint endpointC(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -154,11 +154,11 @@ TEST_F(WriteOpTest, TargetMultiOneShard) {
// Multi-write targeting test where our write goes to more than one shard
TEST_F(WriteOpTest, TargetMultiAllShards) {
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion(10, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
ShardEndpoint endpointC(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -196,9 +196,9 @@ TEST_F(WriteOpTest, TargetMultiAllShards) {
TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion(10, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -228,8 +228,8 @@ TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
write_ops::WriteError retryableError(
0,
{StaleConfigInfo(kNss,
- ChunkVersion(10, 0, OID(), Timestamp(1, 1)),
- ChunkVersion(11, 0, OID(), Timestamp(1, 1)),
+ ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}),
+ ChunkVersion({OID(), Timestamp(1, 1)}, {11, 0}),
ShardId("shardA")),
"simulate ssv error for test"});
writeOp.noteWriteError(*targeted[0], retryableError);
@@ -346,11 +346,11 @@ private:
TEST_F(WriteOpTransactionTest, TargetMultiDoesNotTargetAllShards) {
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion(10, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
ShardEndpoint endpointC(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -386,9 +386,9 @@ TEST_F(WriteOpTransactionTest, TargetMultiDoesNotTargetAllShards) {
TEST_F(WriteOpTransactionTest, TargetMultiAllShardsAndErrorSingleChildOp) {
ShardEndpoint endpointA(
- ShardId("shardA"), ChunkVersion(10, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardA"), ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}), boost::none);
ShardEndpoint endpointB(
- ShardId("shardB"), ChunkVersion(20, 0, OID(), Timestamp(1, 1)), boost::none);
+ ShardId("shardB"), ChunkVersion({OID(), Timestamp(1, 1)}, {20, 0}), boost::none);
BatchedCommandRequest request([&] {
write_ops::DeleteCommandRequest deleteOp(kNss);
@@ -422,8 +422,8 @@ TEST_F(WriteOpTransactionTest, TargetMultiAllShardsAndErrorSingleChildOp) {
write_ops::WriteError retryableError(
0,
{StaleConfigInfo(kNss,
- ChunkVersion(10, 0, OID(), Timestamp(1, 1)),
- ChunkVersion(11, 0, OID(), Timestamp(1, 1)),
+ ChunkVersion({OID(), Timestamp(1, 1)}, {10, 0}),
+ ChunkVersion({OID(), Timestamp(1, 1)}, {11, 0}),
ShardId("shardA")),
"simulate ssv error for test"});
writeOp.noteWriteError(*targeted[0], retryableError);