Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/commands/test_commands.h | 1
-rw-r--r--  src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp | 12
-rw-r--r--  src/mongo/db/pipeline/sharded_union_test.cpp | 18
-rw-r--r--  src/mongo/db/s/balancer/balance_stats_test.cpp | 5
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp | 6
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp | 12
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp | 2
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.cpp | 25
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy.h | 5
-rw-r--r--  src/mongo/db/s/balancer/balancer_policy_test.cpp | 3
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp | 13
-rw-r--r--  src/mongo/db/s/balancer/migration_manager_test.cpp | 43
-rw-r--r--  src/mongo/db/s/balancer/migration_test_fixture.cpp | 31
-rw-r--r--  src/mongo/db/s/balancer/migration_test_fixture.h | 9
-rw-r--r--  src/mongo/db/s/balancer/scoped_migration_request.cpp | 17
-rw-r--r--  src/mongo/db/s/balancer/scoped_migration_request_test.cpp | 16
-rw-r--r--  src/mongo/db/s/balancer/type_migration.cpp | 14
-rw-r--r--  src/mongo/db/s/balancer/type_migration.h | 4
-rw-r--r--  src/mongo/db/s/balancer/type_migration_test.cpp | 5
-rw-r--r--  src/mongo/db/s/chunk_splitter.cpp | 2
-rw-r--r--  src/mongo/db/s/collection_metadata_filtering_test.cpp | 11
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp | 6
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime_test.cpp | 15
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.cpp | 29
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.h | 4
-rw-r--r--  src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp | 1
-rw-r--r--  src/mongo/db/s/config/configsvr_move_chunk_command.cpp | 8
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp | 1
-rw-r--r--  src/mongo/db/s/config/initial_split_policy_test.cpp | 7
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.h | 5
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp | 32
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 186
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp | 28
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp | 21
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp | 97
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp | 24
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp | 48
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp | 14
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp | 35
-rw-r--r--  src/mongo/db/s/metadata_manager_test.cpp | 20
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp | 2
-rw-r--r--  src/mongo/db/s/migration_util_test.cpp | 9
-rw-r--r--  src/mongo/db/s/op_observer_sharding_test.cpp | 5
-rw-r--r--  src/mongo/db/s/range_deletion_util_test.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service.cpp | 24
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp | 19
-rw-r--r--  src/mongo/db/s/resharding/resharding_data_replication_test.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp | 6
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_recipient_service_test.cpp | 2
-rw-r--r--  src/mongo/db/s/resharding_destined_recipient_test.cpp | 15
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp | 8
-rw-r--r--  src/mongo/db/s/sharding_ddl_util_test.cpp | 4
56 files changed, 438 insertions, 509 deletions
diff --git a/src/mongo/db/commands/test_commands.h b/src/mongo/db/commands/test_commands.h
index c3b9759d3d6..4903cc4a8cc 100644
--- a/src/mongo/db/commands/test_commands.h
+++ b/src/mongo/db/commands/test_commands.h
@@ -45,7 +45,6 @@ namespace mongo {
class TestingDurableHistoryPin : public DurableHistoryPin {
public:
std::string getName() override;
-
boost::optional<Timestamp> calculatePin(OperationContext* opCtx) override;
};
} // namespace mongo
diff --git a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
index 1a79b972817..932645f78d1 100644
--- a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
+++ b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
@@ -223,17 +223,13 @@ TEST_F(DispatchShardPipelineTest, WrappedDispatchDoesRetryOnStaleConfigError) {
ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
- ChunkType chunk1(kTestAggregateNss,
- {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
- version,
- {"0"});
+ ChunkType chunk1(
+ uuid, {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)}, version, {"0"});
chunk1.setName(OID::gen());
version.incMinor();
- ChunkType chunk2(kTestAggregateNss,
- {BSON("_id" << 0), shardKeyPattern.getKeyPattern().globalMax()},
- version,
- {"1"});
+ ChunkType chunk2(
+ uuid, {BSON("_id" << 0), shardKeyPattern.getKeyPattern().globalMax()}, version, {"1"});
chunk2.setName(OID::gen());
version.incMinor();
expectCollectionAndChunksAggregation(
diff --git a/src/mongo/db/pipeline/sharded_union_test.cpp b/src/mongo/db/pipeline/sharded_union_test.cpp
index 7f1ecf1b870..6c4756abd0c 100644
--- a/src/mongo/db/pipeline/sharded_union_test.cpp
+++ b/src/mongo/db/pipeline/sharded_union_test.cpp
@@ -133,7 +133,7 @@ TEST_F(ShardedUnionTest, ForwardsMaxTimeMSToRemotes) {
TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
// Sharded by {_id: 1}, [MinKey, 0) on shard "0", [0, MaxKey) on shard "1".
setupNShards(2);
- loadRoutingTableWithTwoChunksAndTwoShards(kTestAggregateNss);
+ const auto cm = loadRoutingTableWithTwoChunksAndTwoShards(kTestAggregateNss);
auto pipeline = Pipeline::create(
{DocumentSourceMatch::create(fromjson("{_id: 'unionResult'}"), expCtx())}, expCtx());
@@ -168,14 +168,14 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
- ChunkType chunk1(kTestAggregateNss,
+ ChunkType chunk1(*cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
version,
{"0"});
chunk1.setName(OID::gen());
version.incMinor();
- ChunkType chunk2(kTestAggregateNss,
+ ChunkType chunk2(*cm.getUUID(),
{BSON("_id" << 0), shardKeyPattern.getKeyPattern().globalMax()},
version,
{"1"});
@@ -197,7 +197,7 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequiresIt) {
// Sharded by {_id: 1}, [MinKey, 0) on shard "0", [0, MaxKey) on shard "1".
auto shards = setupNShards(2);
- loadRoutingTableWithTwoChunksAndTwoShards(kTestAggregateNss);
+ const auto cm = loadRoutingTableWithTwoChunksAndTwoShards(kTestAggregateNss);
auto&& parser = AccumulationStatement::getParser("$sum", boost::none);
auto accumulatorArg = BSON("" << 1);
@@ -244,7 +244,7 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
- ChunkType chunk1(kTestAggregateNss,
+ ChunkType chunk1(*cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)},
version,
{shards[0].getName()});
@@ -252,11 +252,11 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
version.incMinor();
ChunkType chunk2(
- kTestAggregateNss, {BSON("_id" << 0), BSON("_id" << 10)}, version, {shards[1].getName()});
+ *cm.getUUID(), {BSON("_id" << 0), BSON("_id" << 10)}, version, {shards[1].getName()});
chunk2.setName(OID::gen());
version.incMinor();
- ChunkType chunk3(kTestAggregateNss,
+ ChunkType chunk3(*cm.getUUID(),
{BSON("_id" << 10), shardKeyPattern.getKeyPattern().globalMax()},
version,
{shards[0].getName()});
@@ -283,7 +283,7 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNotRequire) {
// Sharded by {_id: 1}, [MinKey, 0) on shard "0", [0, MaxKey) on shard "1".
auto shards = setupNShards(2);
- loadRoutingTableWithTwoChunksAndTwoShards(kTestAggregateNss);
+ const auto cm = loadRoutingTableWithTwoChunksAndTwoShards(kTestAggregateNss);
auto&& parser = AccumulationStatement::getParser("$sum", boost::none);
auto accumulatorArg = BSON("" << 1);
@@ -330,7 +330,7 @@ TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNo
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
ChunkType chunk1(
- kTestAggregateNss,
+ *cm.getUUID(),
{shardKeyPattern.getKeyPattern().globalMin(), shardKeyPattern.getKeyPattern().globalMax()},
version,
{shards[0].getName()});
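
Note on the pattern above: every ChunkType constructed in these tests now takes the collection UUID where it previously took the namespace. The constructor shape implied by these call sites is sketched below; the parameter names are assumptions inferred from usage, not the upstream declaration.

    ChunkType(const UUID& collectionUUID,  // previously: const NamespaceString& nss
              ChunkRange range,
              ChunkVersion version,
              ShardId shardId);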
diff --git a/src/mongo/db/s/balancer/balance_stats_test.cpp b/src/mongo/db/s/balancer/balance_stats_test.cpp
index 21d1f7c13f8..ed1a430126f 100644
--- a/src/mongo/db/s/balancer/balance_stats_test.cpp
+++ b/src/mongo/db/s/balancer/balance_stats_test.cpp
@@ -43,7 +43,7 @@ class BalanceStatsTest : public mongo::unittest::Test {
public:
ChunkType makeChunk(const BSONObj& minKey, const BSONObj& maxKey, const ShardId& shard) {
_nextVersion.incMinor();
- return ChunkType(_nss, ChunkRange(minKey, maxKey), _nextVersion, shard);
+ return ChunkType(_uuid, ChunkRange(minKey, maxKey), _nextVersion, shard);
}
ShardType makeShard(const std::string& name, std::vector<std::string> tags = {}) {
@@ -53,7 +53,7 @@ public:
ChunkManager makeRoutingInfo(const KeyPattern& shardKeyPattern,
const std::vector<ChunkType>& chunks) {
auto routingTableHistory = RoutingTableHistory::makeNew(_nss,
- boost::none, // UUID
+ _uuid, // UUID
shardKeyPattern,
{}, // collator
false, // unique
@@ -73,6 +73,7 @@ public:
private:
const NamespaceString _nss{"foo.bar"};
+ const UUID _uuid = UUID::gen();
const OID _epoch{OID::gen()};
const ShardId _shardPrimary{"dummyShardPrimary"};
const DatabaseVersion _dbVersion{UUID::gen(), Timestamp()};
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index e048d6205c1..711fcea4397 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -660,6 +660,7 @@ Status Balancer::_splitChunksIfNeeded(OperationContext* opCtx) {
int Balancer::_moveChunks(OperationContext* opCtx,
const BalancerChunkSelectionPolicy::MigrateInfoVector& candidateChunks) {
auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();
+ auto catalogClient = Grid::get(opCtx)->catalogClient();
// If the balancer was disabled since we started this round, don't start new chunk moves
if (_stopOrPauseRequested() || !balancerConfig->shouldBalance()) {
@@ -707,8 +708,11 @@ int Balancer::_moveChunks(OperationContext* opCtx,
"migrateInfo"_attr = redact(requestIt->toString()),
"error"_attr = redact(status));
+ const CollectionType collection = catalogClient->getCollection(
+ opCtx, requestIt->uuid, repl::ReadConcernLevel::kLocalReadConcern);
+
ShardingCatalogManager::get(opCtx)->splitOrMarkJumbo(
- opCtx, requestIt->nss, requestIt->minKey);
+ opCtx, collection.getNss(), requestIt->minKey);
continue;
}
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 62d044a30ef..1c8d241778b 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -76,7 +76,7 @@ StatusWith<DistributionStatus> createCollectionDistributionStatus(
chunkMgr.forEachChunk([&](const auto& chunkEntry) {
ChunkType chunk;
- chunk.setNS(nss);
+ chunk.setCollectionUUID(*chunkMgr.getUUID());
chunk.setMin(chunkEntry.getMin());
chunk.setMax(chunkEntry.getMax());
chunk.setJumbo(chunkEntry.isJumbo());
@@ -442,7 +442,10 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* op
const auto& shardStats = shardStatsStatus.getValue();
- const auto& nss = chunk.getNS();
+ const CollectionType collection = Grid::get(opCtx)->catalogClient()->getCollection(
+ opCtx, chunk.getCollectionUUID(), repl::ReadConcernLevel::kLocalReadConcern);
+
+ const auto& nss = collection.getNss();
auto routingInfoStatus =
Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss);
@@ -470,7 +473,10 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* opCt
return shardStatsStatus.getStatus();
}
- const auto& nss = chunk.getNS();
+ const CollectionType collection = Grid::get(opCtx)->catalogClient()->getCollection(
+ opCtx, chunk.getCollectionUUID(), repl::ReadConcernLevel::kLocalReadConcern);
+ const auto& nss = collection.getNss();
+
auto shardStats = std::move(shardStatsStatus.getValue());
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
index 9866790f137..1b2e9f33dcc 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
@@ -222,7 +222,7 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
expectGetStatsCommands(2);
future.default_timed_get();
- removeAllChunks(kNamespace);
+ removeAllChunks(kNamespace, collUUID);
};
assertErrorWhenMoveChunk({{kKeyPattern.globalMin(), BSON(kPattern << -5)},
diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp
index a8db6188d18..73f503233b2 100644
--- a/src/mongo/db/s/balancer/balancer_policy.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy.cpp
@@ -39,6 +39,7 @@
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
+#include "mongo/s/grid.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/str.h"
@@ -667,13 +668,13 @@ string ZoneRange::toString() const {
MigrateInfo::MigrateInfo(const ShardId& a_to,
const ChunkType& a_chunk,
const MoveChunkRequest::ForceJumbo a_forceJumbo,
- MigrationReason a_reason) {
+ MigrationReason a_reason)
+ : uuid(a_chunk.getCollectionUUID()) {
invariant(a_chunk.validate());
invariant(a_to.isValid());
to = a_to;
- nss = a_chunk.getNS();
from = a_chunk.getShard();
minKey = a_chunk.getMin();
maxKey = a_chunk.getMax();
@@ -686,7 +687,7 @@ std::string MigrateInfo::getName() const {
// Generates a unique name for a MigrateInfo based on the collection UUID and the lower bound of
// the chunk being moved.
StringBuilder buf;
- buf << nss.ns() << "-";
+ buf << uuid << "-";
BSONObjIterator i(minKey);
while (i.more()) {
@@ -697,14 +698,28 @@ std::string MigrateInfo::getName() const {
return buf.str();
}
-BSONObj MigrateInfo::getMigrationTypeQuery() const {
+StatusWith<NamespaceString> MigrateInfo::getNss(OperationContext* opCtx) const {
+ auto grid = Grid::get(opCtx);
+ invariant(grid != nullptr);
+ auto catalogClient = grid->catalogClient();
+ invariant(catalogClient != nullptr);
+ try {
+ const CollectionType collection =
+ catalogClient->getCollection(opCtx, uuid, repl::ReadConcernLevel::kLocalReadConcern);
+ return collection.getNss();
+ } catch (DBException const& e) {
+ return StatusWith<NamespaceString>(e.code(), e.reason());
+ }
+}
+
+BSONObj MigrateInfo::getMigrationTypeQuery(NamespaceString const& nss) const {
// Generates a query object for a single MigrationType based on the namespace and the lower
// bound of the chunk being moved.
return BSON(MigrationType::ns(nss.ns()) << MigrationType::min(minKey));
}
string MigrateInfo::toString() const {
- return str::stream() << nss.ns() << ": [" << minKey << ", " << maxKey << "), from " << from
+ return str::stream() << uuid << ": [" << minKey << ", " << maxKey << "), from " << from
<< ", to " << to;
}
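
With the namespace gone from MigrateInfo, callers recover it through the config catalog, as MigrateInfo::getNss() above does. A minimal standalone sketch of that lookup pattern, assuming an initialized Grid and a valid OperationContext (error mapping mirrors the hunk above; the helper name is hypothetical):

    // Resolve a collection UUID back to its namespace via the catalog client.
    StatusWith<NamespaceString> resolveNamespace(OperationContext* opCtx, const UUID& uuid) {
        auto catalogClient = Grid::get(opCtx)->catalogClient();
        try {
            const CollectionType collection = catalogClient->getCollection(
                opCtx, uuid, repl::ReadConcernLevel::kLocalReadConcern);
            return collection.getNss();
        } catch (const DBException& e) {
            // e.g. NamespaceNotFound if the collection was dropped mid-round.
            return e.toStatus();
        }
    }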
diff --git a/src/mongo/db/s/balancer/balancer_policy.h b/src/mongo/db/s/balancer/balancer_policy.h
index 6196127e40e..b4c14ef223f 100644
--- a/src/mongo/db/s/balancer/balancer_policy.h
+++ b/src/mongo/db/s/balancer/balancer_policy.h
@@ -62,12 +62,13 @@ struct MigrateInfo {
MigrationReason a_reason);
std::string getName() const;
+ StatusWith<NamespaceString> getNss(OperationContext* opCtx) const;
- BSONObj getMigrationTypeQuery() const;
+ BSONObj getMigrationTypeQuery(NamespaceString const& nss) const;
std::string toString() const;
- NamespaceString nss;
+ UUID uuid;
ShardId to;
ShardId from;
BSONObj minKey;
diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp
index f5b45711037..43b340f2906 100644
--- a/src/mongo/db/s/balancer/balancer_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp
@@ -78,6 +78,7 @@ std::pair<ShardStatisticsVector, ShardToChunksMap> generateCluster(
int64_t currentChunk = 0;
ChunkVersion chunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
+ const UUID uuid = UUID::gen();
const KeyPattern shardKeyPattern(BSON("x" << 1));
@@ -91,7 +92,7 @@ std::pair<ShardStatisticsVector, ShardToChunksMap> generateCluster(
for (size_t i = 0; i < numChunks; i++, currentChunk++) {
ChunkType chunk;
- chunk.setNS(kNamespace);
+ chunk.setCollectionUUID(uuid);
chunk.setMin(currentChunk == 0 ? shardKeyPattern.globalMin()
: BSON("x" << currentChunk));
chunk.setMax(currentChunk == totalNumChunks - 1 ? shardKeyPattern.globalMax()
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index eaa55ecf332..b2c34f7f555 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -168,6 +168,10 @@ Status MigrationManager::executeManualMigration(
_waitForRecovery();
ScopedMigrationRequestsMap scopedMigrationRequests;
+ const auto nssStatus = migrateInfo.getNss(opCtx);
+ if (!nssStatus.isOK()) {
+ return nssStatus.getStatus();
+ }
RemoteCommandResponse remoteCommandResponse = _schedule(opCtx,
migrateInfo,
@@ -178,7 +182,7 @@ Status MigrationManager::executeManualMigration(
->get();
auto swCM = Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
- opCtx, migrateInfo.nss);
+ opCtx, nssStatus.getValue());
if (!swCM.isOK()) {
return swCM.getStatus();
}
@@ -344,7 +348,7 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
while (!migrateInfos.empty()) {
auto migrationType = std::move(migrateInfos.front());
- const auto migrationInfo = migrationType.toMigrateInfo();
+ const auto migrationInfo = migrationType.toMigrateInfo(opCtx);
auto waitForDelete = migrationType.getWaitForDelete();
migrateInfos.pop_front();
@@ -439,7 +443,10 @@ std::shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule
const MigrationSecondaryThrottleOptions& secondaryThrottle,
bool waitForDelete,
ScopedMigrationRequestsMap* scopedMigrationRequests) {
- const NamespaceString& nss = migrateInfo.nss;
+
+ const CollectionType collection = Grid::get(opCtx)->catalogClient()->getCollection(
+ opCtx, migrateInfo.uuid, repl::ReadConcernLevel::kLocalReadConcern);
+ const NamespaceString& nss = collection.getNss();
// Ensure we are not stopped in order to avoid doing the extra work
{
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index 8435bc562aa..94f232b6df0 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -71,18 +71,18 @@ protected:
* Sets up mock network to expect a moveChunk command and returns a fixed BSON response or a
* "returnStatus".
*/
- void expectMoveChunkCommand(const ChunkType& chunk,
+ void expectMoveChunkCommand(const NamespaceString& nss,
+ const ChunkType& chunk,
const ShardId& toShardId,
const BSONObj& response) {
- onCommand([&chunk, &toShardId, &response](const RemoteCommandRequest& request) {
+ onCommand([&nss, &chunk, &toShardId, &response](const RemoteCommandRequest& request) {
NamespaceString nss(request.cmdObj.firstElement().valueStringData());
- ASSERT_EQ(chunk.getNS(), nss);
const StatusWith<MoveChunkRequest> moveChunkRequestWithStatus =
MoveChunkRequest::createFromCommand(nss, request.cmdObj);
ASSERT_OK(moveChunkRequestWithStatus.getStatus());
- ASSERT_EQ(chunk.getNS(), moveChunkRequestWithStatus.getValue().getNss());
+ ASSERT_EQ(nss, moveChunkRequestWithStatus.getValue().getNss());
ASSERT_BSONOBJ_EQ(chunk.getMin(), moveChunkRequestWithStatus.getValue().getMinKey());
ASSERT_BSONOBJ_EQ(chunk.getMax(), moveChunkRequestWithStatus.getValue().getMaxKey());
ASSERT_EQ(chunk.getShard(), moveChunkRequestWithStatus.getValue().getFromShardId());
@@ -93,12 +93,13 @@ protected:
});
}
- void expectMoveChunkCommand(const ChunkType& chunk,
+ void expectMoveChunkCommand(const NamespaceString& nss,
+ const ChunkType& chunk,
const ShardId& toShardId,
const Status& returnStatus) {
BSONObjBuilder resultBuilder;
CommandHelpers::appendCommandStatusNoThrow(resultBuilder, returnStatus);
- expectMoveChunkCommand(chunk, toShardId, resultBuilder.obj());
+ expectMoveChunkCommand(nss, chunk, toShardId, resultBuilder.obj());
}
std::unique_ptr<MigrationManager> _migrationManager;
@@ -156,8 +157,8 @@ TEST_F(MigrationManagerTest, OneCollectionTwoMigrations) {
});
// Expect two moveChunk commands.
- expectMoveChunkCommand(chunk1, kShardId1, Status::OK());
- expectMoveChunkCommand(chunk2, kShardId3, Status::OK());
+ expectMoveChunkCommand(collName, chunk1, kShardId1, Status::OK());
+ expectMoveChunkCommand(collName, chunk2, kShardId3, Status::OK());
// Run the MigrationManager code.
future.default_timed_get();
@@ -232,10 +233,10 @@ TEST_F(MigrationManagerTest, TwoCollectionsTwoMigrationsEach) {
});
// Expect four moveChunk commands.
- expectMoveChunkCommand(chunk1coll1, kShardId1, Status::OK());
- expectMoveChunkCommand(chunk2coll1, kShardId3, Status::OK());
- expectMoveChunkCommand(chunk1coll2, kShardId1, Status::OK());
- expectMoveChunkCommand(chunk2coll2, kShardId3, Status::OK());
+ expectMoveChunkCommand(collName1, chunk1coll1, kShardId1, Status::OK());
+ expectMoveChunkCommand(collName1, chunk2coll1, kShardId3, Status::OK());
+ expectMoveChunkCommand(collName2, chunk1coll2, kShardId1, Status::OK());
+ expectMoveChunkCommand(collName2, chunk2coll2, kShardId3, Status::OK());
// Run the MigrationManager code.
future.default_timed_get();
@@ -296,7 +297,7 @@ TEST_F(MigrationManagerTest, SourceShardNotFound) {
});
// Expect only one moveChunk command to be called.
- expectMoveChunkCommand(chunk1, kShardId1, Status::OK());
+ expectMoveChunkCommand(collName, chunk1, kShardId1, Status::OK());
// Run the MigrationManager code.
future.default_timed_get();
@@ -343,7 +344,7 @@ TEST_F(MigrationManagerTest, JumboChunkResponseBackwardsCompatibility) {
});
// Expect only one moveChunk command to be called.
- expectMoveChunkCommand(chunk1, kShardId1, BSON("ok" << 0 << "chunkTooBig" << true));
+ expectMoveChunkCommand(collName, chunk1, kShardId1, BSON("ok" << 0 << "chunkTooBig" << true));
// Run the MigrationManager code.
future.default_timed_get();
@@ -490,7 +491,7 @@ TEST_F(MigrationManagerTest, RestartMigrationManager) {
});
// Expect only one moveChunk command to be called.
- expectMoveChunkCommand(chunk1, kShardId1, Status::OK());
+ expectMoveChunkCommand(collName, chunk1, kShardId1, Status::OK());
// Run the MigrationManager code.
future.default_timed_get();
@@ -523,8 +524,8 @@ TEST_F(MigrationManagerTest, MigrationRecovery) {
_migrationManager->interruptAndDisableMigrations();
_migrationManager->drainActiveMigrations();
- setUpMigration(chunk1, kShardId1.toString());
- setUpMigration(chunk2, kShardId3.toString());
+ setUpMigration(collName, chunk1, kShardId1.toString());
+ setUpMigration(collName, chunk2, kShardId3.toString());
// Mimic all config distlocks being released on config server stepup to primary.
DistLockManager::get(operationContext())->unlockAll(operationContext());
@@ -544,8 +545,8 @@ TEST_F(MigrationManagerTest, MigrationRecovery) {
});
// Expect two moveChunk commands.
- expectMoveChunkCommand(chunk1, kShardId1, Status::OK());
- expectMoveChunkCommand(chunk2, kShardId3, Status::OK());
+ expectMoveChunkCommand(collName, chunk1, kShardId1, Status::OK());
+ expectMoveChunkCommand(collName, chunk2, kShardId3, Status::OK());
// Run the MigrationManager code.
future.default_timed_get();
@@ -577,7 +578,7 @@ TEST_F(MigrationManagerTest, FailMigrationRecovery) {
_migrationManager->interruptAndDisableMigrations();
_migrationManager->drainActiveMigrations();
- setUpMigration(chunk1, kShardId1.toString());
+ setUpMigration(collName, chunk1, kShardId1.toString());
// Set up a fake active migration document that will fail MigrationType parsing -- missing
// field.
@@ -663,6 +664,7 @@ TEST_F(MigrationManagerTest, RemoteCallErrorConversionToOperationFailed) {
// Expect a moveChunk command that will fail with a retriable error.
expectMoveChunkCommand(
+ collName,
chunk1,
kShardId1,
Status(ErrorCodes::NotPrimaryOrSecondary,
@@ -670,6 +672,7 @@ TEST_F(MigrationManagerTest, RemoteCallErrorConversionToOperationFailed) {
// Expect a moveChunk command that will fail with a replset monitor updating error.
expectMoveChunkCommand(
+ collName,
chunk2,
kShardId3,
Status(ErrorCodes::NetworkInterfaceExceededTimeLimit,
diff --git a/src/mongo/db/s/balancer/migration_test_fixture.cpp b/src/mongo/db/s/balancer/migration_test_fixture.cpp
index 7744c36d0a7..6a913f0c45d 100644
--- a/src/mongo/db/s/balancer/migration_test_fixture.cpp
+++ b/src/mongo/db/s/balancer/migration_test_fixture.cpp
@@ -73,11 +73,7 @@ ChunkType MigrationTestFixture::setUpChunk(const NamespaceString& collName,
const ShardId& shardId,
const ChunkVersion& version) {
ChunkType chunk;
- // The ns is not present in 5.0 chunks but some testing utils rely on it
- chunk.setNS(collName);
-
- if (version.getTimestamp())
- chunk.setCollectionUUID(collUUID);
+ chunk.setCollectionUUID(collUUID);
chunk.setMin(chunkMin);
chunk.setMax(chunkMax);
@@ -105,26 +101,27 @@ void MigrationTestFixture::setUpTags(const NamespaceString& collName,
}
}
-void MigrationTestFixture::removeAllDocs(const NamespaceString& configNS,
- const NamespaceString& collName) {
+void MigrationTestFixture::removeAllTags(const NamespaceString& collName) {
const auto query = BSON("ns" << collName.ns());
ASSERT_OK(catalogClient()->removeConfigDocuments(
- operationContext(), configNS, query, kMajorityWriteConcern));
- auto findStatus = findOneOnConfigCollection(operationContext(), configNS, query);
+ operationContext(), TagsType::ConfigNS, query, kMajorityWriteConcern));
+ auto findStatus = findOneOnConfigCollection(operationContext(), TagsType::ConfigNS, query);
ASSERT_EQ(ErrorCodes::NoMatchingDocument, findStatus);
}
-void MigrationTestFixture::removeAllTags(const NamespaceString& collName) {
- removeAllDocs(TagsType::ConfigNS, collName);
-}
-
-void MigrationTestFixture::removeAllChunks(const NamespaceString& collName) {
- removeAllDocs(ChunkType::ConfigNS, collName);
+void MigrationTestFixture::removeAllChunks(const NamespaceString& collName, const UUID& uuid) {
+ const auto query = BSON("uuid" << uuid);
+ ASSERT_OK(catalogClient()->removeConfigDocuments(
+ operationContext(), ChunkType::ConfigNS, query, kMajorityWriteConcern));
+ auto findStatus = findOneOnConfigCollection(operationContext(), ChunkType::ConfigNS, query);
+ ASSERT_EQ(ErrorCodes::NoMatchingDocument, findStatus);
}
-void MigrationTestFixture::setUpMigration(const ChunkType& chunk, const ShardId& toShard) {
+void MigrationTestFixture::setUpMigration(const NamespaceString& ns,
+ const ChunkType& chunk,
+ const ShardId& toShard) {
BSONObjBuilder builder;
- builder.append(MigrationType::ns(), chunk.getNS().ns());
+ builder.append(MigrationType::ns(), ns.ns());
builder.append(MigrationType::min(), chunk.getMin());
builder.append(MigrationType::max(), chunk.getMax());
builder.append(MigrationType::toShard(), toShard.toString());
diff --git a/src/mongo/db/s/balancer/migration_test_fixture.h b/src/mongo/db/s/balancer/migration_test_fixture.h
index f74b55be93d..5583728faeb 100644
--- a/src/mongo/db/s/balancer/migration_test_fixture.h
+++ b/src/mongo/db/s/balancer/migration_test_fixture.h
@@ -97,11 +97,6 @@ protected:
void setUpTags(const NamespaceString& collName, const StringMap<ChunkRange>& tagChunkRanges);
/**
- * Removes all document in the given config collection for the collection.
- */
- void removeAllDocs(const NamespaceString& configNS, const NamespaceString& collName);
-
- /**
* Removes all documents in config.tags for the given collection.
*/
void removeAllTags(const NamespaceString& collName);
@@ -109,12 +104,12 @@ protected:
/**
* Removes all documents in config.chunks for the given collection.
*/
- void removeAllChunks(const NamespaceString& collName);
+ void removeAllChunks(const NamespaceString& collName, const UUID& uuid);
/**
* Inserts a document into the config.migrations collection as an active migration.
*/
- void setUpMigration(const ChunkType& chunk, const ShardId& toShard);
+ void setUpMigration(const NamespaceString& ns, const ChunkType& chunk, const ShardId& toShard);
/**
* Asserts that config.migrations is empty and config.locks contains no locked documents other
diff --git a/src/mongo/db/s/balancer/scoped_migration_request.cpp b/src/mongo/db/s/balancer/scoped_migration_request.cpp
index 9a7cdff8fd5..5cc3fed4581 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request.cpp
@@ -98,8 +98,14 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
OperationContext* opCtx, const MigrateInfo& migrateInfo, bool waitForDelete) {
auto const grid = Grid::get(opCtx);
+ const auto nssStatus = migrateInfo.getNss(opCtx);
+ if (!nssStatus.isOK()) {
+ return nssStatus.getStatus();
+ }
+ const auto nss = nssStatus.getValue();
+
// Try to write a unique migration document to config.migrations.
- const MigrationType migrationType(migrateInfo, waitForDelete);
+ const MigrationType migrationType(nss, migrateInfo, waitForDelete);
for (int retry = 0; retry < kDuplicateKeyErrorMaxRetries; ++retry) {
Status result = grid->catalogClient()->insertConfigDocument(
@@ -115,7 +121,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
MigrationType::ConfigNS,
- migrateInfo.getMigrationTypeQuery(),
+ migrateInfo.getMigrationTypeQuery(nss),
BSONObj(),
boost::none);
if (!statusWithMigrationQueryResult.isOK()) {
@@ -141,7 +147,8 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
<< redact(activeMigrationBSON.toString()) << "'.");
}
- MigrateInfo activeMigrateInfo = statusWithActiveMigration.getValue().toMigrateInfo();
+ MigrateInfo activeMigrateInfo =
+ statusWithActiveMigration.getValue().toMigrateInfo(opCtx);
if (activeMigrateInfo.to != migrateInfo.to ||
activeMigrateInfo.from != migrateInfo.from) {
LOGV2(
@@ -163,7 +170,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
// As long as there isn't a DuplicateKey error, the document may have been written, and it's
// safe (won't delete another migration's document) and necessary to try to clean up the
// document via the destructor.
- ScopedMigrationRequest scopedMigrationRequest(opCtx, migrateInfo.nss, migrateInfo.minKey);
+ ScopedMigrationRequest scopedMigrationRequest(opCtx, nss, migrateInfo.minKey);
// If there was a write error, let the object go out of scope and clean up in the
// destructor.
@@ -178,7 +185,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
str::stream() << "Failed to insert the config.migrations document after max "
<< "number of retries. Chunk '"
<< ChunkRange(migrateInfo.minKey, migrateInfo.maxKey).toString()
- << "' in collection '" << migrateInfo.nss.ns()
+ << "' in collection '" << nss
<< "' was being moved (somewhere) by another operation.");
}
diff --git a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
index 3731321ad2d..db4ddcf77f5 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
@@ -63,22 +63,26 @@ public:
*/
ScopedMigrationRequest makeScopedMigrationRequest(const MigrateInfo& migrateInfo);
+ MigrateInfo makeMigrateInfo();
+
private:
void setUp() override;
};
void ScopedMigrationRequestTest::setUp() {
setUpAndInitializeConfigDb();
+ setupShards({ShardType{"shard", "shard:12"}});
}
void ScopedMigrationRequestTest::checkMigrationsCollectionForDocument(
const MigrateInfo& migrateInfo, unsigned long expectedNumberOfDocuments) {
+ auto const query = BSON(MigrationType::ns(kNs) << MigrationType::min(migrateInfo.minKey));
auto response = shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
MigrationType::ConfigNS,
- migrateInfo.getMigrationTypeQuery(),
+ query,
BSONObj(),
boost::none);
Shard::QueryResponse queryResponse = unittest::assertGet(response);
@@ -96,11 +100,12 @@ ScopedMigrationRequest ScopedMigrationRequestTest::makeScopedMigrationRequest(
return scopedMigrationRequest;
}
-MigrateInfo makeMigrateInfo() {
+MigrateInfo ScopedMigrationRequestTest::makeMigrateInfo() {
+ const auto collUuid = UUID::gen();
const ChunkVersion kChunkVersion{1, 2, OID::gen(), boost::none /* timestamp */};
BSONObjBuilder chunkBuilder;
- chunkBuilder.append(ChunkType::ns(), kNs);
+ collUuid.appendToBuilder(&chunkBuilder, ChunkType::collectionUUID.name());
chunkBuilder.append(ChunkType::min(), kMin);
chunkBuilder.append(ChunkType::max(), kMax);
kChunkVersion.appendLegacyWithField(&chunkBuilder, ChunkType::lastmod());
@@ -109,6 +114,9 @@ MigrateInfo makeMigrateInfo() {
ChunkType chunkType = assertGet(ChunkType::parseFromConfigBSONCommand(chunkBuilder.obj()));
ASSERT_OK(chunkType.validate());
+ // Register the sharded collection and its chunk in the config metadata
+ setupCollection(NamespaceString(kNs), KeyPattern(BSON("_id" << 1)), {chunkType});
+
return MigrateInfo(kToShard,
chunkType,
MoveChunkRequest::ForceJumbo::kDoNotForce,
@@ -169,7 +177,7 @@ TEST_F(ScopedMigrationRequestTest, CreateScopedMigrationRequestOnRecovery) {
// still removes the document corresponding to the MigrationRequest.
{
ScopedMigrationRequest scopedMigrationRequest = ScopedMigrationRequest::createForRecovery(
- operationContext(), migrateInfo.nss, migrateInfo.minKey);
+ operationContext(), NamespaceString(kNs), migrateInfo.minKey);
checkMigrationsCollectionForDocument(migrateInfo, 1);
}
diff --git a/src/mongo/db/s/balancer/type_migration.cpp b/src/mongo/db/s/balancer/type_migration.cpp
index 16085881e8b..1367926056c 100644
--- a/src/mongo/db/s/balancer/type_migration.cpp
+++ b/src/mongo/db/s/balancer/type_migration.cpp
@@ -34,6 +34,7 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/s/catalog/type_chunk.h"
+#include "mongo/s/grid.h"
namespace mongo {
namespace {
@@ -54,8 +55,10 @@ const BSONField<std::string> MigrationType::forceJumbo("forceJumbo");
MigrationType::MigrationType() = default;
-MigrationType::MigrationType(MigrateInfo info, bool waitForDelete)
- : _nss(info.nss),
+MigrationType::MigrationType(const NamespaceString& nss,
+ const MigrateInfo& info,
+ bool waitForDelete)
+ : _nss(nss),
_min(info.minKey),
_max(info.maxKey),
_fromShard(info.from),
@@ -153,10 +156,13 @@ BSONObj MigrationType::toBSON() const {
return builder.obj();
}
-MigrateInfo MigrationType::toMigrateInfo() const {
+MigrateInfo MigrationType::toMigrateInfo(OperationContext* opCtx) const {
+ const CollectionType collection = Grid::get(opCtx)->catalogClient()->getCollection(
+ opCtx, _nss, repl::ReadConcernLevel::kLocalReadConcern);
+
ChunkType chunk;
- chunk.setNS(_nss);
chunk.setShard(_fromShard);
+ chunk.setCollectionUUID(collection.getUuid());
chunk.setMin(_min);
chunk.setMax(_max);
chunk.setVersion(_chunkVersion);
diff --git a/src/mongo/db/s/balancer/type_migration.h b/src/mongo/db/s/balancer/type_migration.h
index 9f383244f8d..24ada030c3a 100644
--- a/src/mongo/db/s/balancer/type_migration.h
+++ b/src/mongo/db/s/balancer/type_migration.h
@@ -60,7 +60,7 @@ public:
* The Balancer encapsulates migration information in MigrateInfo objects, so this facilitates
* conversion to a config.migrations entry format.
*/
- explicit MigrationType(MigrateInfo info, bool waitForDelete);
+ MigrationType(const NamespaceString& nss, const MigrateInfo& info, bool waitForDelete);
/**
* Constructs a new MigrationType object from BSON. Expects all fields to be present, and errors
@@ -76,7 +76,7 @@ public:
/**
* Helper function for the Balancer that uses MigrateInfo objects to schedule migrations.
*/
- MigrateInfo toMigrateInfo() const;
+ MigrateInfo toMigrateInfo(OperationContext* opCtx) const;
const NamespaceString& getNss() const {
return _nss;
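
The conversion helpers now thread extra context in both directions: the namespace must be supplied when persisting a migration, and an OperationContext is needed on recovery so the collection UUID can be re-derived from the catalog. A hedged usage sketch, with variable names assumed from the surrounding call sites:

    // Persisting: the caller resolves the namespace first, then builds the document.
    const MigrationType migrationType(nss, migrateInfo, /*waitForDelete=*/true);

    // Recovering: toMigrateInfo() looks the UUID up again, hence the opCtx parameter.
    const MigrateInfo recovered = migrationType.toMigrateInfo(opCtx);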
diff --git a/src/mongo/db/s/balancer/type_migration_test.cpp b/src/mongo/db/s/balancer/type_migration_test.cpp
index ec1d52a8a38..b8af0eaa339 100644
--- a/src/mongo/db/s/balancer/type_migration_test.cpp
+++ b/src/mongo/db/s/balancer/type_migration_test.cpp
@@ -48,13 +48,14 @@ const ShardId kToShard("shard0001");
const bool kWaitForDelete{true};
TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
+ const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
const auto collTimestamp = boost::none;
const ChunkVersion version(1, 2, collEpoch, collTimestamp);
BSONObjBuilder chunkBuilder;
chunkBuilder.append(ChunkType::name(), OID::gen());
- chunkBuilder.append(ChunkType::ns(), kNs);
+ collUuid.appendToBuilder(&chunkBuilder, ChunkType::collectionUUID.name());
chunkBuilder.append(ChunkType::min(), kMin);
chunkBuilder.append(ChunkType::max(), kMax);
version.appendLegacyWithField(&chunkBuilder, ChunkType::lastmod());
@@ -68,7 +69,7 @@ TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
chunkType,
MoveChunkRequest::ForceJumbo::kDoNotForce,
MigrateInfo::chunksImbalance);
- MigrationType migrationType(migrateInfo, kWaitForDelete);
+ MigrationType migrationType(NamespaceString(kNs), migrateInfo, kWaitForDelete);
BSONObjBuilder builder;
builder.append(MigrationType::ns(), kNs);
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index 01b84977aa3..3f653a714ec 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -121,7 +121,7 @@ void moveChunk(OperationContext* opCtx, const NamespaceString& nss, const BSONOb
const auto suggestedChunk = cm.findIntersectingChunkWithSimpleCollation(minKey);
ChunkType chunkToMove;
- chunkToMove.setNS(nss);
+ chunkToMove.setCollectionUUID(*cm.getUUID());
chunkToMove.setShard(suggestedChunk.getShardId());
chunkToMove.setMin(suggestedChunk.getMin());
chunkToMove.setMax(suggestedChunk.getMax());
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index 3bd22a6cded..231808a7b5c 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -62,12 +62,13 @@ protected:
*/
void prepareTestData(
boost::optional<TypeCollectionTimeseriesFields> timeseriesFields = boost::none) {
+ const UUID uuid = UUID::gen();
const OID epoch = OID::gen();
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
auto rt = RoutingTableHistory::makeNew(
kNss,
- UUID::gen(),
+ uuid,
shardKeyPattern.getKeyPattern(),
nullptr,
false,
@@ -80,7 +81,7 @@ protected:
[&] {
ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
- ChunkType chunk1(kNss,
+ ChunkType chunk1(uuid,
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
version,
{"0"});
@@ -88,17 +89,17 @@ protected:
ChunkHistory(Timestamp(25, 0), ShardId("1"))});
version.incMinor();
- ChunkType chunk2(kNss, {BSON("_id" << -100), BSON("_id" << 0)}, version, {"1"});
+ ChunkType chunk2(uuid, {BSON("_id" << -100), BSON("_id" << 0)}, version, {"1"});
chunk2.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("1")),
ChunkHistory(Timestamp(25, 0), ShardId("0"))});
version.incMinor();
- ChunkType chunk3(kNss, {BSON("_id" << 0), BSON("_id" << 100)}, version, {"0"});
+ ChunkType chunk3(uuid, {BSON("_id" << 0), BSON("_id" << 100)}, version, {"0"});
chunk3.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("0")),
ChunkHistory(Timestamp(25, 0), ShardId("1"))});
version.incMinor();
- ChunkType chunk4(kNss,
+ ChunkType chunk4(uuid,
{BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
version,
{"1"});
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 1b7c31a3e8d..7d01b0fa39f 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -67,12 +67,12 @@ CollectionMetadata makeCollectionMetadataImpl(
if (SimpleBSONObjComparator::kInstance.evaluate(nextMinKey < myNextChunk.first)) {
// Need to add a chunk to the other shard from nextMinKey to myNextChunk.first.
allChunks.emplace_back(
- kNss, ChunkRange{nextMinKey, myNextChunk.first}, version, kOtherShard);
+ uuid, ChunkRange{nextMinKey, myNextChunk.first}, version, kOtherShard);
allChunks.back().setHistory({ChunkHistory(kRouting, kOtherShard)});
version.incMajor();
}
allChunks.emplace_back(
- kNss, ChunkRange{myNextChunk.first, myNextChunk.second}, version, kThisShard);
+ uuid, ChunkRange{myNextChunk.first, myNextChunk.second}, version, kThisShard);
allChunks.back().setHistory({ChunkHistory(kRouting, kThisShard)});
version.incMajor();
nextMinKey = myNextChunk.second;
@@ -80,7 +80,7 @@ CollectionMetadata makeCollectionMetadataImpl(
if (SimpleBSONObjComparator::kInstance.evaluate(nextMinKey < shardKeyPattern.globalMax())) {
allChunks.emplace_back(
- kNss, ChunkRange{nextMinKey, shardKeyPattern.globalMax()}, version, kOtherShard);
+ uuid, ChunkRange{nextMinKey, shardKeyPattern.globalMax()}, version, kOtherShard);
allChunks.back().setHistory({ChunkHistory(kRouting, kOtherShard)});
}
diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp
index bf7de89a625..54bb235b299 100644
--- a/src/mongo/db/s/collection_sharding_runtime_test.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp
@@ -57,7 +57,7 @@ protected:
UUID uuid = UUID::gen()) {
const OID epoch = OID::gen();
auto range = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY));
- auto chunk = ChunkType(kTestNss,
+ auto chunk = ChunkType(uuid,
std::move(range),
ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
ShardId("other"));
@@ -275,14 +275,15 @@ public:
}
std::vector<ChunkType> createChunks(const OID& epoch,
+ const UUID& uuid,
boost::optional<Timestamp> timestamp = boost::none) {
auto range1 = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << 5));
ChunkType chunk1(
- kNss, range1, ChunkVersion(1, 0, epoch, timestamp), kShardList[0].getName());
+ uuid, range1, ChunkVersion(1, 0, epoch, timestamp), kShardList[0].getName());
auto range2 = ChunkRange(BSON(kShardKey << 5), BSON(kShardKey << MAXKEY));
ChunkType chunk2(
- kNss, range2, ChunkVersion(1, 1, epoch, timestamp), kShardList[0].getName());
+ uuid, range2, ChunkVersion(1, 1, epoch, timestamp), kShardList[0].getName());
return {chunk1, chunk2};
}
@@ -302,10 +303,10 @@ TEST_F(CollectionShardingRuntimeTestWithMockedLoader,
const Timestamp timestamp(42);
const auto coll = createCollection(epoch);
- const auto chunks = createChunks(epoch);
+ const auto chunks = createChunks(epoch, coll.getUuid());
const auto timestampedColl = createCollection(epoch, timestamp);
- const auto timestampedChunks = createChunks(epoch, timestamp);
+ const auto timestampedChunks = createChunks(epoch, timestampedColl.getUuid(), timestamp);
auto checkForceFilteringMetadataRefresh = [&](const auto& coll, const auto& chunks) {
auto opCtx = operationContext();
@@ -340,11 +341,11 @@ TEST_F(CollectionShardingRuntimeTestWithMockedLoader,
const Timestamp timestamp(42);
const auto coll = createCollection(epoch);
- const auto chunks = createChunks(epoch);
+ const auto chunks = createChunks(epoch, coll.getUuid());
const auto collVersion = chunks.back().getVersion();
const auto timestampedColl = createCollection(epoch, timestamp);
- const auto timestampedChunks = createChunks(epoch, timestamp);
+ const auto timestampedChunks = createChunks(epoch, timestampedColl.getUuid(), timestamp);
const auto timestampedCollVersion = timestampedChunks.back().getVersion();
auto opCtx = operationContext();
diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp
index ffd9c9b4baf..f6b6566ef99 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.cpp
+++ b/src/mongo/db/s/config/config_server_test_fixture.cpp
@@ -326,19 +326,11 @@ void ConfigServerTestFixture::setupCollection(const NamespaceString& nss,
setupDatabase(nss.db().toString(), ShardId(shard.getName()), true /* sharded */);
}
- const auto collUUID = [&]() {
- const auto& chunk = chunks.front();
- if (chunk.getVersion().getTimestamp()) {
- return chunk.getCollectionUUID();
- } else {
- return UUID::gen();
- }
- }();
CollectionType coll(nss,
chunks[0].getVersion().epoch(),
chunks[0].getVersion().getTimestamp(),
Date_t::now(),
- collUUID);
+ chunks[0].getCollectionUUID());
coll.setTimestamp(chunks.front().getVersion().getTimestamp());
coll.setKeyPattern(shardKey);
ASSERT_OK(
@@ -352,13 +344,12 @@ void ConfigServerTestFixture::setupCollection(const NamespaceString& nss,
StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(
OperationContext* opCtx,
- const NamespaceStringOrUUID& nssOrUuid,
+ const UUID& uuid,
const BSONObj& minKey,
const OID& collEpoch,
const boost::optional<Timestamp>& collTimestamp) {
- const auto query = nssOrUuid.uuid()
- ? BSON(ChunkType::collectionUUID() << *nssOrUuid.uuid() << ChunkType::min(minKey))
- : BSON(ChunkType::ns(nssOrUuid.nss()->ns()) << ChunkType::min(minKey));
+
+ const auto query = BSON(ChunkType::collectionUUID() << uuid << ChunkType::min(minKey));
auto doc = findOneOnConfigCollection(opCtx, ChunkType::ConfigNS, query);
if (!doc.isOK())
return doc.getStatus();
@@ -387,12 +378,12 @@ StatusWith<ChunkVersion> ConfigServerTestFixture::getCollectionVersion(Operation
const CollectionType coll(collectionDoc.getValue());
- auto chunkDoc = findOneOnConfigCollection(
- opCtx,
- ChunkType::ConfigNS,
- coll.getTimestamp() ? BSON(ChunkType::collectionUUID << coll.getUuid())
- : BSON(ChunkType::ns << coll.getNss().ns()) /* query */,
- BSON(ChunkType::lastmod << -1) /* sort */);
+ invariant(coll.getTimestamp());
+ auto chunkDoc =
+ findOneOnConfigCollection(opCtx,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::collectionUUID << coll.getUuid()) /* query */,
+ BSON(ChunkType::lastmod << -1) /* sort */);
if (!chunkDoc.isOK())
return chunkDoc.getStatus();
diff --git a/src/mongo/db/s/config/config_server_test_fixture.h b/src/mongo/db/s/config/config_server_test_fixture.h
index 91037615543..624cf39ab5d 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.h
+++ b/src/mongo/db/s/config/config_server_test_fixture.h
@@ -107,11 +107,11 @@ protected:
const std::vector<ChunkType>& chunks);
/**
- * Retrieves the chunk document <nssOrUuid, minKey> from the config server.
+ * Retrieves the chunk document <uuid, minKey> from the config server.
* This is the recommended way to get a chunk document.
*/
StatusWith<ChunkType> getChunkDoc(OperationContext* opCtx,
- const NamespaceStringOrUUID& nssOrUuid,
+ const UUID& uuid,
const BSONObj& minKey,
const OID& collEpoch,
const boost::optional<Timestamp>& collTimestamp);
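
As the comment above notes, <uuid, minKey> is now the recommended lookup key for chunk documents. A hedged usage sketch from a test body, assuming 'coll' is the CollectionType previously registered via setupCollection() with shard key {x: 1}:

    auto swChunk = getChunkDoc(operationContext(),
                               coll.getUuid(),
                               BSON("x" << MINKEY),  // min bound of the target chunk
                               coll.getEpoch(),
                               coll.getTimestamp());
    ASSERT_OK(swChunk.getStatus());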
diff --git a/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp b/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp
index 6403ec22c8a..a4c782094f1 100644
--- a/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp
+++ b/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp
@@ -58,7 +58,6 @@ public:
opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
ShardingCatalogManager::get(opCtx)->ensureChunkVersionIsGreaterThan(
opCtx,
- request().getNss(),
request().getCollectionUUID(),
request().getMinKey(),
request().getMaxKey(),
diff --git a/src/mongo/db/s/config/configsvr_move_chunk_command.cpp b/src/mongo/db/s/config/configsvr_move_chunk_command.cpp
index b055f49bedd..3fed8170825 100644
--- a/src/mongo/db/s/config/configsvr_move_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_chunk_command.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/s/balancer/balancer.h"
+#include "mongo/s/grid.h"
#include "mongo/s/request_types/balance_chunk_request_type.h"
#include "mongo/util/str.h"
@@ -93,6 +94,13 @@ public:
auto request = uassertStatusOK(BalanceChunkRequest::parseFromConfigCommand(cmdObj));
+ // Pre-v5.1 compatibility: requests from older versions identify the collection by
+ // namespace, so resolve the UUID from the catalog before proceeding.
+ if (request.getNss()) {
+ const auto collection = Grid::get(opCtx)->catalogClient()->getCollection(
+ opCtx, *request.getNss(), repl::ReadConcernLevel::kLocalReadConcern);
+ request.setCollectionUUID(collection.getUuid());
+ }
+
if (request.hasToShardId()) {
uassertStatusOK(Balancer::get(opCtx)->moveSingleChunk(opCtx,
request.getChunk(),
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 35fd27b5f4d..03f9446e352 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -80,7 +80,6 @@ void appendChunk(const SplitPolicyParams& params,
ChunkVersion(
version->majorVersion(), version->minorVersion(), version->epoch(), creationTimestamp),
shardId);
-
auto& chunk = chunks->back();
chunk.setHistory({ChunkHistory(creationTimestamp, shardId)});
version->incMinor();
diff --git a/src/mongo/db/s/config/initial_split_policy_test.cpp b/src/mongo/db/s/config/initial_split_policy_test.cpp
index 3e354efb060..3fcdfba1282 100644
--- a/src/mongo/db/s/config/initial_split_policy_test.cpp
+++ b/src/mongo/db/s/config/initial_split_policy_test.cpp
@@ -207,7 +207,7 @@ public:
for (unsigned long i = 0; i < chunkRanges.size(); ++i) {
ChunkVersion version(1, 0, OID::gen(), boost::none /* timestamp */);
- ChunkType chunk(_nss, chunkRanges[i], version, shardIds[i]);
+ ChunkType chunk(_uuid, chunkRanges[i], version, shardIds[i]);
chunk.setHistory({ChunkHistory(timeStamp, shardIds[i])});
chunks.push_back(chunk);
}
@@ -230,6 +230,10 @@ public:
return _nss;
}
+ const UUID& uuid() {
+ return _uuid;
+ }
+
const ShardKeyPattern& shardKeyPattern() {
return _shardKeyPattern;
}
@@ -248,6 +252,7 @@ public:
private:
const NamespaceString _nss{"test.foo"};
+ const UUID _uuid{UUID::gen()};
const ShardKeyPattern _shardKeyPattern = ShardKeyPattern(BSON("x"
<< "hashed"));
const std::string _shardName = "testShard";
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index 391db3a5c8b..78b703ff813 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -300,8 +300,7 @@ public:
* that doesn't pass these extra fields.
*/
void ensureChunkVersionIsGreaterThan(OperationContext* opCtx,
- const boost::optional<NamespaceString>& nss,
- const boost::optional<UUID>& collUUID,
+ const UUID& collUUID,
const BSONObj& minKey,
const BSONObj& maxKey,
const ChunkVersion& version);
@@ -572,7 +571,7 @@ private:
* Retrieve the full chunk description from the config.
*/
StatusWith<ChunkType> _findChunkOnConfig(OperationContext* opCtx,
- const NamespaceStringOrUUID& nsOrUUID,
+ const UUID& uuid,
const OID& epoch,
const boost::optional<Timestamp>& timestamp,
const BSONObj& key);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp
index 752a72aa2d7..82ddf586f76 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp
@@ -73,14 +73,14 @@ class ShardingCatalogManagerBumpCollectionVersionAndChangeMetadataTest
}
protected:
- ChunkType generateChunkType(const NamespaceString& nss,
+ ChunkType generateChunkType(const UUID& uuid,
const ChunkVersion& chunkVersion,
const ShardId& shardId,
const BSONObj& minKey,
const BSONObj& maxKey) {
ChunkType chunkType;
chunkType.setName(OID::gen());
- chunkType.setNS(nss);
+ chunkType.setCollectionUUID(uuid);
chunkType.setVersion(chunkVersion);
chunkType.setShard(shardId);
chunkType.setMin(minKey);
@@ -113,19 +113,20 @@ protected:
TEST_F(ShardingCatalogManagerBumpCollectionVersionAndChangeMetadataTest,
BumpsOnlyMinorVersionOfNewestChunk) {
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
+ const auto collUUID = UUID::gen();
- const auto shard0Chunk0 = generateChunkType(kNss,
+ const auto shard0Chunk0 = generateChunkType(collUUID,
ChunkVersion(10, 1, collEpoch, collTimestamp),
kShard0.getName(),
BSON("a" << 1),
BSON("a" << 10));
- const auto shard0Chunk1 = generateChunkType(kNss,
+ const auto shard0Chunk1 = generateChunkType(collUUID,
ChunkVersion(11, 2, collEpoch, collTimestamp),
kShard0.getName(),
BSON("a" << 11),
BSON("a" << 20));
- const auto shard1Chunk0 = generateChunkType(kNss,
+ const auto shard1Chunk0 = generateChunkType(collUUID,
ChunkVersion(8, 1, collEpoch, collTimestamp),
kShard1.getName(),
BSON("a" << 21),
@@ -148,9 +149,10 @@ TEST_F(ShardingCatalogManagerBumpCollectionVersionAndChangeMetadataTest,
TEST_F(ShardingCatalogManagerBumpCollectionVersionAndChangeMetadataTest, NoChunks) {
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
+ const auto collUUID = UUID::gen();
- const auto shard0Chunk0 = generateChunkType(kNss,
+ const auto shard0Chunk0 = generateChunkType(collUUID,
ChunkVersion(10, 1, collEpoch, collTimestamp),
kShard0.getName(),
BSON("a" << 1),
@@ -172,14 +174,15 @@ TEST_F(ShardingCatalogManagerBumpCollectionVersionAndChangeMetadataTest, NoChunk
TEST_F(ShardingCatalogManagerBumpCollectionVersionAndChangeMetadataTest,
SucceedsInThePresenceOfTransientTransactionErrors) {
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
+ const auto collUUID = UUID::gen();
- const auto shard0Chunk0 = generateChunkType(kNss,
+ const auto shard0Chunk0 = generateChunkType(collUUID,
ChunkVersion(10, 1, collEpoch, collTimestamp),
kShard0.getName(),
BSON("a" << 1),
BSON("a" << 10));
- const auto shard1Chunk0 = generateChunkType(kNss,
+ const auto shard1Chunk0 = generateChunkType(collUUID,
ChunkVersion(11, 2, collEpoch, collTimestamp),
kShard1.getName(),
BSON("a" << 11),
@@ -233,14 +236,15 @@ TEST_F(ShardingCatalogManagerBumpCollectionVersionAndChangeMetadataTest,
TEST_F(ShardingCatalogManagerBumpCollectionVersionAndChangeMetadataTest,
StopsRetryingOnPermanentServerErrors) {
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
+ const auto collUUID = UUID::gen();
- const auto shard0Chunk0 = generateChunkType(kNss,
+ const auto shard0Chunk0 = generateChunkType(collUUID,
ChunkVersion(10, 1, collEpoch, collTimestamp),
kShard0.getName(),
BSON("a" << 1),
BSON("a" << 10));
- const auto shard1Chunk0 = generateChunkType(kNss,
+ const auto shard1Chunk0 = generateChunkType(collUUID,
ChunkVersion(11, 2, collEpoch, collTimestamp),
kShard1.getName(),
BSON("a" << 11),
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 2e6afa416e1..45b1212452d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -139,13 +139,11 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
BSONArrayBuilder preCond;
const bool collHasTimestamp = (bool)collVersion.getTimestamp();
+ invariant(collHasTimestamp);
for (const auto& chunk : chunksToMerge) {
- BSONObj query = BSON(ChunkType::min(chunk.getMin()) << ChunkType::max(chunk.getMax()));
- if (collHasTimestamp) {
- query = query.addFields(BSON(ChunkType::collectionUUID() << chunk.getCollectionUUID()));
- } else {
- query = query.addFields(BSON(ChunkType::ns(chunk.getNS().ns())));
- }
+ BSONObj query = BSON(ChunkType::min(chunk.getMin())
+ << ChunkType::max(chunk.getMax()) << ChunkType::collectionUUID()
+ << chunk.getCollectionUUID());
const auto collectionIdentityMatchCondition = collHasTimestamp
? BSON(ChunkType::collectionUUID()
@@ -167,7 +165,7 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
* Check that the chunk still exists and return its metadata.
*/
StatusWith<ChunkType> getCurrentChunk(OperationContext* opCtx,
- const NamespaceStringOrUUID& nsOrUUID,
+ const UUID& uuid,
const OID& epoch,
const boost::optional<Timestamp>& timestamp,
const ChunkType& requestedChunk) {
@@ -177,14 +175,9 @@ StatusWith<ChunkType> getCurrentChunk(OperationContext* opCtx,
requestedChunk.isVersionSet() && requestedChunk.getVersion().isSet() &&
requestedChunk.getVersion().epoch().isSet());
- BSONObj chunkQuery = BSON(ChunkType::min() << requestedChunk.getMin() << ChunkType::max()
- << requestedChunk.getMax());
-
- if (nsOrUUID.uuid()) {
- chunkQuery = chunkQuery.addFields(BSON(ChunkType::collectionUUID << *nsOrUUID.uuid()));
- } else {
- chunkQuery = chunkQuery.addFields(BSON(ChunkType::ns() << nsOrUUID.nss()->ns()));
- }
+ BSONObj chunkQuery =
+ BSON(ChunkType::min() << requestedChunk.getMin() << ChunkType::max()
+ << requestedChunk.getMax() << ChunkType::collectionUUID << uuid);
// Must use local read concern because we're going to perform subsequent writes.
auto findResponseWith =
@@ -264,7 +257,7 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
* Returns a chunk different from the one being migrated or 'none' if one doesn't exist.
*/
boost::optional<ChunkType> getControlChunkForMigrate(OperationContext* opCtx,
- const NamespaceStringOrUUID& nsOrUUID,
+ const UUID& uuid,
const OID& epoch,
const boost::optional<Timestamp>& timestamp,
const ChunkType& migratedChunk,
@@ -272,11 +265,7 @@ boost::optional<ChunkType> getControlChunkForMigrate(OperationContext* opCtx,
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
BSONObjBuilder queryBuilder;
- if (nsOrUUID.uuid()) {
- queryBuilder << ChunkType::collectionUUID << *nsOrUUID.uuid();
- } else {
- queryBuilder << ChunkType::ns(nsOrUUID.nss()->ns());
- }
+ queryBuilder << ChunkType::collectionUUID << uuid;
queryBuilder << ChunkType::shard(fromShard.toString());
queryBuilder << ChunkType::min(BSON("$ne" << migratedChunk.getMin()));
@@ -338,8 +327,8 @@ StatusWith<ChunkVersion> getCollectionVersion(OperationContext* opCtx, const Nam
}
const CollectionType coll(findCollResponse.getValue().docs[0]);
- const auto chunksQuery = coll.getTimestamp() ? BSON(ChunkType::collectionUUID << coll.getUuid())
- : BSON(ChunkType::ns(coll.getNss().ns()));
+ invariant(coll.getTimestamp());
+ const auto chunksQuery = BSON(ChunkType::collectionUUID << coll.getUuid());
return getMaxChunkVersionFromQueryResponse(
coll,
Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
@@ -356,10 +345,9 @@ ChunkVersion getShardVersion(OperationContext* opCtx,
const CollectionType& coll,
const ShardId& fromShard,
const ChunkVersion& collectionVersion) {
- const auto chunksQuery = coll.getTimestamp()
- ? BSON(ChunkType::collectionUUID << coll.getUuid()
- << ChunkType::shard(fromShard.toString()))
- : BSON(ChunkType::ns(coll.getNss().ns()) << ChunkType::shard(fromShard.toString()));
+ invariant(coll.getTimestamp());
+ const auto chunksQuery =
+ BSON(ChunkType::collectionUUID << coll.getUuid() << ChunkType::shard(fromShard.toString()));
auto swDonorShardVersion = getMaxChunkVersionFromQueryResponse(
coll,
@@ -386,14 +374,6 @@ ChunkVersion getShardVersion(OperationContext* opCtx,
return swDonorShardVersion.getValue();
}
-NamespaceStringOrUUID getNsOrUUIDForChunkTargeting(const CollectionType& coll) {
- if (coll.getTimestamp()) {
- return {coll.getNss().db().toString(), coll.getUuid()};
- } else {
- return {coll.getNss()};
- }
-}
-
void bumpCollectionMinorVersion(OperationContext* opCtx,
const NamespaceString& nss,
TxnNumber txnNumber) {
@@ -410,16 +390,15 @@ void bumpCollectionMinorVersion(OperationContext* opCtx,
uassert(
ErrorCodes::NamespaceNotFound, "Collection does not exist", !findCollResponse.docs.empty());
const CollectionType coll(findCollResponse.docs[0]);
- const auto nsOrUUID = getNsOrUUIDForChunkTargeting(coll);
// Find the newest chunk
+ invariant(coll.getTimestamp());
const auto findChunkResponse = uassertStatusOK(configShard->exhaustiveFindOnConfig(
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
ChunkType::ConfigNS,
- nsOrUUID.uuid() ? BSON(ChunkType::collectionUUID << *nsOrUUID.uuid())
- : BSON(ChunkType::ns() << nsOrUUID.nss()->toString()) /* query */,
+ BSON(ChunkType::collectionUUID << coll.getUuid()) /* query */,
BSON(ChunkType::lastmod << -1) /* sort */,
1 /* limit */));
@@ -477,14 +456,10 @@ std::vector<ShardId> getShardsOwningChunksForCollection(OperationContext* opCtx,
uassert(
ErrorCodes::NamespaceNotFound, "Collection does not exist", !findCollResponse.docs.empty());
const CollectionType coll(findCollResponse.docs[0]);
- const auto nsOrUUID = getNsOrUUIDForChunkTargeting(coll);
+ invariant(coll.getTimestamp());
DistinctCommandRequest distinctCmd(ChunkType::ConfigNS, ChunkType::shard.name());
- if (nsOrUUID.uuid()) {
- distinctCmd.setQuery(BSON(ChunkType::collectionUUID << *(nsOrUUID.uuid())));
- } else {
- distinctCmd.setQuery(BSON(ChunkType::ns(nsOrUUID.nss()->ns())));
- }
+ distinctCmd.setQuery(BSON(ChunkType::collectionUUID << coll.getUuid()));
const auto distinctResult = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
opCtx,
@@ -556,9 +531,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
}
// Find the chunk history.
- const auto collNsOrUUID = getNsOrUUIDForChunkTargeting(coll);
const auto origChunk = _findChunkOnConfig(
- opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), range.getMin());
+ opCtx, coll.getUuid(), coll.getEpoch(), coll.getTimestamp(), range.getMin());
if (!origChunk.isOK()) {
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
return origChunk.getStatus();
@@ -651,14 +625,10 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
BSONObjBuilder b;
b.append("ns", ChunkType::ConfigNS.ns());
- BSONObj query =
- BSON(ChunkType::min() << range.getMin() << ChunkType::max() << range.getMax());
- if (origChunk.getValue().getVersion().getTimestamp()) {
- query = query.addFields(
- BSON(ChunkType::collectionUUID << origChunk.getValue().getCollectionUUID()));
- } else {
- query = query.addFields(BSON(ChunkType::ns(nss.ns())));
- }
+ invariant(origChunk.getValue().getVersion().getTimestamp());
+ BSONObj query = BSON(ChunkType::min() << range.getMin() << ChunkType::max()
+ << range.getMax() << ChunkType::collectionUUID
+ << origChunk.getValue().getCollectionUUID());
b.append("q", BSON("query" << query << "orderby" << BSON(ChunkType::lastmod() << -1)));
@@ -677,7 +647,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
opCtx,
updates.arr(),
preCond.arr(),
- collNsOrUUID,
+ coll.getUuid(),
nss,
currentMaxVersion,
WriteConcernOptions(),
@@ -780,9 +750,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMerge(
}
// Check if the chunk(s) have already been merged. If so, return success.
- const auto collNsOrUUID = getNsOrUUIDForChunkTargeting(coll);
auto minChunkOnDisk = uassertStatusOK(_findChunkOnConfig(
- opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), chunkBoundaries.front()));
+ opCtx, coll.getUuid(), coll.getEpoch(), coll.getTimestamp(), chunkBoundaries.front()));
if (minChunkOnDisk.getMax().woCompare(chunkBoundaries.back()) == 0) {
BSONObjBuilder response;
collVersion.appendWithField(&response, kCollectionVersionField);
@@ -803,7 +772,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMerge(
for (size_t i = 1; i < chunkBoundaries.size(); ++i) {
// Read the original chunk from disk to lookup that chunk's '_id' field.
auto currentChunk = uassertStatusOK(_findChunkOnConfig(
- opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), chunkBoundaries[i - 1]));
+ opCtx, coll.getUuid(), coll.getEpoch(), coll.getTimestamp(), chunkBoundaries[i - 1]));
// Ensure the chunk boundaries are strictly increasing
if (chunkBoundaries[i].woCompare(currentChunk.getMin()) <= 0) {
@@ -829,7 +798,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMerge(
opCtx,
updates,
preCond,
- collNsOrUUID,
+ coll.getUuid(),
nss,
mergeVersion,
WriteConcernOptions(),
@@ -909,11 +878,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
}
const auto shardChunksInRangeQuery = [&]() {
BSONObjBuilder queryBuilder;
- if (coll.getTimestamp()) {
- queryBuilder << ChunkType::collectionUUID << coll.getUuid();
- } else {
- queryBuilder << ChunkType::ns(coll.getNss().ns());
- }
+ invariant(coll.getTimestamp());
+ queryBuilder << ChunkType::collectionUUID << coll.getUuid();
queryBuilder << ChunkType::shard(shardId.toString());
queryBuilder << ChunkType::min(BSON("$gte" << chunkRange.getMin()));
queryBuilder << ChunkType::min(BSON("$lt" << chunkRange.getMax()));
@@ -993,7 +959,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
opCtx,
updates,
preCond,
- getNsOrUUIDForChunkTargeting(coll),
+ coll.getUuid(),
nss,
mergeVersion,
WriteConcernOptions(),
@@ -1083,9 +1049,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
"Collection is undergoing changes and chunks cannot be moved",
coll.getAllowMigrations());
- const auto findChunkQuery = coll.getTimestamp()
- ? BSON(ChunkType::collectionUUID() << coll.getUuid())
- : BSON(ChunkType::ns(coll.getNss().ns()));
+ invariant(coll.getTimestamp());
+ const auto findChunkQuery = BSON(ChunkType::collectionUUID() << coll.getUuid());
auto findResponse = uassertStatusOK(
configShard->exhaustiveFindOnConfig(opCtx,
@@ -1126,9 +1091,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
}
// Check if chunk still exists and which shard owns it
- const auto collNsOrUUID = getNsOrUUIDForChunkTargeting(coll);
auto swCurrentChunk =
- getCurrentChunk(opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), migratedChunk);
+ getCurrentChunk(opCtx, coll.getUuid(), coll.getEpoch(), coll.getTimestamp(), migratedChunk);
if (!swCurrentChunk.isOK()) {
return swCurrentChunk.getStatus();
@@ -1167,11 +1131,11 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
}
auto controlChunk = getControlChunkForMigrate(
- opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), migratedChunk, fromShard);
+ opCtx, coll.getUuid(), coll.getEpoch(), coll.getTimestamp(), migratedChunk, fromShard);
// Find the chunk history.
const auto origChunk = uassertStatusOK(_findChunkOnConfig(
- opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), migratedChunk.getMin()));
+ opCtx, coll.getUuid(), coll.getEpoch(), coll.getTimestamp(), migratedChunk.getMin()));
// Generate the new versions of migratedChunk and controlChunk. Migrating chunk's minor version
// will be 0.
@@ -1226,7 +1190,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
if (controlChunk) {
// Find the chunk history.
auto origControlChunk = uassertStatusOK(_findChunkOnConfig(
- opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), controlChunk->getMin()));
+ opCtx, coll.getUuid(), coll.getEpoch(), coll.getTimestamp(), controlChunk->getMin()));
newControlChunk = std::move(origControlChunk);
// Setting control chunk's minor version to 1 on the donor shard.
@@ -1272,20 +1236,13 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
StatusWith<ChunkType> ShardingCatalogManager::_findChunkOnConfig(
OperationContext* opCtx,
- const NamespaceStringOrUUID& nsOrUUID,
+ const UUID& uuid,
const OID& epoch,
const boost::optional<Timestamp>& timestamp,
const BSONObj& key) {
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- const auto query = [&]() {
- if (nsOrUUID.uuid()) {
- return BSON(ChunkType::collectionUUID << *(nsOrUUID.uuid()) << ChunkType::min(key));
- } else {
- return BSON(ChunkType::ns(nsOrUUID.nss()->ns()) << ChunkType::min(key));
- }
- }();
-
+ const auto query = BSON(ChunkType::collectionUUID << uuid << ChunkType::min(key));
auto findResponse =
configShard->exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -1302,7 +1259,7 @@ StatusWith<ChunkType> ShardingCatalogManager::_findChunkOnConfig(
const auto origChunks = std::move(findResponse.getValue().docs);
if (origChunks.size() != 1) {
return {ErrorCodes::IncompatibleShardingMetadata,
- str::stream() << "Tried to find the chunk for namespace/uuid" << nsOrUUID.toString()
+                str::stream() << "Tried to find the chunk for uuid " << uuid.toString()
<< " and min key " << key.toString() << ", but found no chunks"};
}
@@ -1340,14 +1297,10 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
!findCollResponse.docs.empty());
const CollectionType coll(findCollResponse.docs[0]);
+ invariant(coll.getTimestamp());
BSONObj targetChunkQuery =
- BSON(ChunkType::min(chunk.getMin()) << ChunkType::max(chunk.getMax()));
- if (coll.getTimestamp()) {
- targetChunkQuery =
- targetChunkQuery.addFields(BSON(ChunkType::collectionUUID << coll.getUuid()));
- } else {
- targetChunkQuery = targetChunkQuery.addFields(BSON(ChunkType::ns(coll.getNss().ns())));
- }
+ BSON(ChunkType::min(chunk.getMin())
+ << ChunkType::max(chunk.getMax()) << ChunkType::collectionUUID << coll.getUuid());
auto targetChunkResult = uassertStatusOK(
configShard->exhaustiveFindOnConfig(opCtx,
@@ -1371,9 +1324,8 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
return;
}
- const auto allChunksQuery = coll.getTimestamp()
- ? BSON(ChunkType::collectionUUID << coll.getUuid())
- : BSON(ChunkType::ns(coll.getNss().ns()));
+ invariant(coll.getTimestamp());
+ const auto allChunksQuery = BSON(ChunkType::collectionUUID << coll.getUuid());
// Must use local read concern because we will perform subsequent writes.
auto findResponse = uassertStatusOK(
@@ -1415,13 +1367,10 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
currentCollectionVersion.getTimestamp());
- BSONObj chunkQuery(BSON(ChunkType::min(chunk.getMin()) << ChunkType::max(chunk.getMax())));
- if (coll.getTimestamp()) {
- chunkQuery = chunkQuery.addFields(BSON(ChunkType::collectionUUID << coll.getUuid()));
- } else {
- chunkQuery = chunkQuery.addFields(
- BSON(ChunkType::ns(coll.getNss().ns()) << ChunkType::epoch(collectionEpoch)));
- }
+ invariant(coll.getTimestamp());
+ BSONObj chunkQuery(BSON(ChunkType::min(chunk.getMin())
+ << ChunkType::max(chunk.getMax()) << ChunkType::collectionUUID
+ << coll.getUuid()));
BSONObjBuilder updateBuilder;
updateBuilder.append("$unset", BSON(ChunkType::jumbo() << ""));
@@ -1446,13 +1395,11 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
didUpdate);
}
-void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(
- OperationContext* opCtx,
- const boost::optional<NamespaceString>& nss,
- const boost::optional<UUID>& collUuid,
- const BSONObj& minKey,
- const BSONObj& maxKey,
- const ChunkVersion& version) {
+void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* opCtx,
+ const UUID& collUuid,
+ const BSONObj& minKey,
+ const BSONObj& maxKey,
+ const ChunkVersion& version) {
auto earlyReturnBeforeDoingWriteGuard = makeGuard([&] {
// Ensure waiting for writeConcern of the data read.
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
@@ -1485,14 +1432,13 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(
}
coll = CollectionType(findCollResponse.docs[0]);
- dassert(!collUuid || *collUuid == coll.getUuid());
+ dassert(collUuid == coll.getUuid());
}
- const auto requestedChunkQuery = coll.getTimestamp()
- ? BSON(ChunkType::min(minKey)
- << ChunkType::max(maxKey) << ChunkType::collectionUUID() << *collUuid)
- : BSON(ChunkType::min(minKey) << ChunkType::max(maxKey) << ChunkType::ns(coll.getNss().ns())
- << ChunkType::epoch(version.epoch()));
+ invariant(coll.getTimestamp());
+ const auto requestedChunkQuery =
+ BSON(ChunkType::min(minKey)
+ << ChunkType::max(maxKey) << ChunkType::collectionUUID() << collUuid);
// Get the chunk matching the requested chunk.
ChunkType matchingChunk;
@@ -1547,7 +1493,7 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(
// Get the chunk with the current collectionVersion for this epoch.
ChunkType highestChunk;
{
- const auto query = coll.getTimestamp() ? BSON(ChunkType::collectionUUID() << *collUuid)
+ const auto query = coll.getTimestamp() ? BSON(ChunkType::collectionUUID() << collUuid)
: BSON(ChunkType::epoch(version.epoch()));
const auto highestChunksVector =
uassertStatusOK(configShard->exhaustiveFindOnConfig(
@@ -1692,14 +1638,10 @@ void ShardingCatalogManager::splitOrMarkJumbo(OperationContext* opCtx,
!findCollResponse.docs.empty());
const CollectionType coll(findCollResponse.docs[0]);
- const auto chunkQuery = [&]() {
- if (coll.getTimestamp()) {
- return BSON(ChunkType::collectionUUID()
- << coll.getUuid() << ChunkType::min(chunk.getMin()));
- } else {
- return BSON(ChunkType::ns(nss.ns()) << ChunkType::min(chunk.getMin()));
- }
- }();
+ invariant(coll.getTimestamp());
+ const auto chunkQuery = BSON(ChunkType::collectionUUID()
+ << coll.getUuid() << ChunkType::min(chunk.getMin()));
+
auto status = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
opCtx,
ChunkType::ConfigNS,
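Every hunk in this file reduces to the same transformation: the ns-or-UUID branch is deleted, an invariant asserts that the collection carries a timestamp (i.e. it already uses the newer, timestamped metadata format), and the config.chunks query is keyed by the collection UUID alone. A minimal sketch of the resulting pattern, with identifiers taken from the hunks above:

    const CollectionType coll(findCollResponse.docs[0]);
    invariant(coll.getTimestamp());  // ns-keyed chunk documents are no longer supported here
    const BSONObj chunkQuery = BSON(ChunkType::collectionUUID()
                                    << coll.getUuid() << ChunkType::min(chunk.getMin())
                                    << ChunkType::max(chunk.getMax()));

One consequence worth noting: helpers such as getCurrentChunk, getControlChunkForMigrate and _findChunkOnConfig now take a plain const UUID& instead of NamespaceStringOrUUID, so callers can no longer fall back to namespace-based targeting by construction.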
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
index 6018367ac9f..168c53f9b24 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
@@ -68,10 +68,9 @@ protected:
void makeCollection(const NamespaceString& nss,
const UUID& collUuid,
const OID& epoch,
- const boost::optional<Timestamp>& timestamp) {
+ const Timestamp& timestamp) {
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(collUuid);
chunk.setVersion({12, 7, epoch, timestamp});
chunk.setShard(_shardName);
@@ -81,7 +80,6 @@ protected:
ChunkType otherChunk;
otherChunk.setName(OID::gen());
- otherChunk.setNS(nss);
otherChunk.setCollectionUUID(collUuid);
otherChunk.setVersion({14, 7, epoch, timestamp});
otherChunk.setShard(_shardName);
@@ -100,21 +98,18 @@ TEST_F(ClearJumboFlagTest, ClearJumboShouldBumpVersion) {
auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- makeCollection(nss, collUuid, collEpoch, collTimestamp);
+ makeCollection(nss, collUuid, collEpoch, *collTimestamp);
ShardingCatalogManager::get(operationContext())
->clearJumboFlag(operationContext(), nss, collEpoch, jumboChunk());
- const auto nssOrUuid =
- collTimestamp ? NamespaceStringOrUUID(nss.db().toString(), collUuid) : nss;
-
+ invariant(collTimestamp);
auto chunkDoc = uassertStatusOK(getChunkDoc(
- operationContext(), nssOrUuid, jumboChunk().getMin(), collEpoch, collTimestamp));
+ operationContext(), collUuid, jumboChunk().getMin(), collEpoch, collTimestamp));
ASSERT_FALSE(chunkDoc.getJumbo());
ASSERT_EQ(ChunkVersion(15, 0, collEpoch, collTimestamp), chunkDoc.getVersion());
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -122,21 +117,18 @@ TEST_F(ClearJumboFlagTest, ClearJumboShouldNotBumpVersionIfChunkNotJumbo) {
auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- makeCollection(nss, collUuid, collEpoch, collTimestamp);
+ makeCollection(nss, collUuid, collEpoch, *collTimestamp);
ShardingCatalogManager::get(operationContext())
->clearJumboFlag(operationContext(), nss, collEpoch, nonJumboChunk());
- const auto nssOrUuid =
- collTimestamp ? NamespaceStringOrUUID(nss.db().toString(), collUuid) : nss;
-
+ invariant(collTimestamp);
auto chunkDoc = uassertStatusOK(getChunkDoc(
- operationContext(), nssOrUuid, nonJumboChunk().getMin(), collEpoch, collTimestamp));
+ operationContext(), collUuid, nonJumboChunk().getMin(), collEpoch, collTimestamp));
ASSERT_FALSE(chunkDoc.getJumbo());
ASSERT_EQ(ChunkVersion(14, 7, collEpoch, collTimestamp), chunkDoc.getVersion());
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -144,7 +136,7 @@ TEST_F(ClearJumboFlagTest, AssertsOnEpochMismatch) {
auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
const auto collUuid = UUID::gen();
const auto collEpoch = OID::gen();
- makeCollection(nss, collUuid, collEpoch, collTimestamp);
+ makeCollection(nss, collUuid, collEpoch, *collTimestamp);
ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext())
->clearJumboFlag(operationContext(), nss, OID::gen(), jumboChunk()),
@@ -152,7 +144,6 @@ TEST_F(ClearJumboFlagTest, AssertsOnEpochMismatch) {
ErrorCodes::StaleEpoch);
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -160,7 +151,7 @@ TEST_F(ClearJumboFlagTest, AssertsIfChunkCantBeFound) {
auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
const auto collEpoch = OID::gen();
const auto collUuid = UUID::gen();
- makeCollection(nss, collUuid, collEpoch, collTimestamp);
+ makeCollection(nss, collUuid, collEpoch, *collTimestamp);
ChunkRange imaginaryChunk(BSON("x" << 0), BSON("x" << 10));
ASSERT_THROWS(ShardingCatalogManager::get(operationContext())
@@ -168,7 +159,6 @@ TEST_F(ClearJumboFlagTest, AssertsIfChunkCantBeFound) {
AssertionException);
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 84d951d1cd2..591917bfad4 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -134,21 +134,14 @@ boost::optional<UUID> checkCollectionOptions(OperationContext* opCtx,
}
void triggerFireAndForgetShardRefreshes(OperationContext* opCtx, const CollectionType& coll) {
+ invariant(coll.getTimestamp());
const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
const auto allShards = uassertStatusOK(Grid::get(opCtx)->catalogClient()->getAllShards(
opCtx, repl::ReadConcernLevel::kLocalReadConcern))
.value;
-
for (const auto& shardEntry : allShards) {
- const auto query = [&]() {
- if (coll.getTimestamp()) {
- return BSON(ChunkType::collectionUUID << coll.getUuid()
- << ChunkType::shard(shardEntry.getName()));
- } else {
- return BSON(ChunkType::ns(coll.getNss().ns())
- << ChunkType::shard(shardEntry.getName()));
- }
- }();
+ const auto query = BSON(ChunkType::collectionUUID
+ << coll.getUuid() << ChunkType::shard(shardEntry.getName()));
const auto chunk = uassertStatusOK(shardRegistry->getConfigShard()->exhaustiveFindOnConfig(
opCtx,
@@ -359,13 +352,7 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx,
// to the newly-generated objectid, (ii) their bounds for each new field in the refined
// key to MinKey (except for the global max chunk where the max bounds are set to
// MaxKey), and unsetting (iii) their jumbo field.
- const auto chunksQuery = [&]() {
- if (collType.getTimestamp()) {
- return BSON(ChunkType::collectionUUID << collType.getUuid());
- } else {
- return BSON(ChunkType::ns(collType.getNss().ns()));
- }
- }();
+ const auto chunksQuery = BSON(ChunkType::collectionUUID << collType.getUuid());
writeToConfigDocumentInTxn(
opCtx,
ChunkType::ConfigNS,
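In this file the same collapse happens twice, differing only by a shard qualifier: triggerFireAndForgetShardRefreshes targets one shard's chunks, while refineCollectionShardKey targets the whole collection. Side by side (expressions as they appear in the hunks above):

    // Per-shard targeting (triggerFireAndForgetShardRefreshes):
    const auto query = BSON(ChunkType::collectionUUID
                            << coll.getUuid() << ChunkType::shard(shardEntry.getName()));

    // Whole-collection targeting (refineCollectionShardKey):
    const auto chunksQuery = BSON(ChunkType::collectionUUID << collType.getUuid());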
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
index 7b0369bf347..d6df635f790 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
@@ -50,8 +50,9 @@ const NamespaceString kNamespace("TestDB.TestColl");
const KeyPattern kKeyPattern(BSON("x" << 1));
TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
+ const auto collUUID = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
ShardType shard0;
shard0.setName("shard0");
@@ -68,7 +69,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
ChunkVersion origVersion(12, 7, collEpoch, collTimestamp);
migratedChunk.setName(OID::gen());
- migratedChunk.setNS(kNamespace);
+ migratedChunk.setCollectionUUID(collUUID);
migratedChunk.setVersion(origVersion);
migratedChunk.setShard(shard0.getName());
migratedChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
@@ -78,7 +79,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
origVersion.incMinor();
controlChunk.setName(OID::gen());
- controlChunk.setNS(kNamespace);
+ controlChunk.setCollectionUUID(collUUID);
controlChunk.setVersion(origVersion);
controlChunk.setShard(shard0.getName());
controlChunk.setHistory({ChunkHistory(Timestamp(50, 0), shard0.getName())});
@@ -134,8 +135,9 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
}
TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
+ const auto collUUID = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
ShardType shard0;
shard0.setName("shard0");
@@ -152,7 +154,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
ChunkType chunk0;
chunk0.setName(OID::gen());
- chunk0.setNS(kNamespace);
+ chunk0.setCollectionUUID(collUUID);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
chunk0.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
@@ -169,7 +171,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- chunk0.getNS(),
+ kNamespace,
chunk0,
origVersion.epoch(),
ShardId(shard0.getName()),
@@ -198,8 +200,9 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
}
TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
+ const auto collUUID = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
ShardType shard0;
shard0.setName("shard0");
@@ -216,7 +219,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
ChunkType chunk0;
chunk0.setName(OID::gen());
- chunk0.setNS(kNamespace);
+ chunk0.setCollectionUUID(collUUID);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
chunk0.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
@@ -234,7 +237,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- chunk0.getNS(),
+ kNamespace,
chunk0,
origVersion.epoch(),
ShardId(shard0.getName()),
@@ -260,6 +263,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
}
TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
+ const auto collUUID = UUID::gen();
ShardType shard0;
shard0.setName("shard0");
@@ -273,11 +277,11 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
int origMajorVersion = 15;
auto const origVersion =
- ChunkVersion(origMajorVersion, 4, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion(origMajorVersion, 4, OID::gen(), Timestamp(42) /* timestamp */);
ChunkType chunk0;
chunk0.setName(OID::gen());
- chunk0.setNS(kNamespace);
+ chunk0.setCollectionUUID(collUUID);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
chunk0.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
@@ -295,7 +299,7 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- chunk0.getNS(),
+ kNamespace,
chunk0,
origVersion.epoch(),
ShardId(shard0.getName()),
@@ -306,6 +310,8 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
}
TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
+ const auto collUUID = UUID::gen();
+
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -318,11 +324,11 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
int origMajorVersion = 12;
auto const origVersion =
- ChunkVersion(origMajorVersion, 7, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion(origMajorVersion, 7, OID::gen(), Timestamp(42) /* timestamp */);
ChunkType chunk0;
chunk0.setName(OID::gen());
- chunk0.setNS(kNamespace);
+ chunk0.setCollectionUUID(collUUID);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
@@ -334,7 +340,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
ChunkType chunk1;
chunk1.setName(OID::gen());
- chunk1.setNS(kNamespace);
+ chunk1.setCollectionUUID(collUUID);
chunk1.setVersion(origVersion);
chunk1.setShard(shard0.getName());
@@ -348,7 +354,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- chunk0.getNS(),
+ kNamespace,
chunk0,
OID::gen(),
ShardId(shard0.getName()),
@@ -359,6 +365,8 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
}
TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
+ const auto collUUID = UUID::gen();
+
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -371,13 +379,13 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
int origMajorVersion = 12;
auto const origVersion =
- ChunkVersion(origMajorVersion, 7, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion(origMajorVersion, 7, OID::gen(), Timestamp(42) /* timestamp */);
auto const otherVersion =
- ChunkVersion(origMajorVersion, 7, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion(origMajorVersion, 7, OID::gen(), Timestamp(42) /* timestamp */);
ChunkType chunk0;
chunk0.setName(OID::gen());
- chunk0.setNS(kNamespace);
+ chunk0.setCollectionUUID(collUUID);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
@@ -389,7 +397,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
ChunkType chunk1;
chunk1.setName(OID::gen());
- chunk1.setNS(kNamespace);
+ chunk1.setCollectionUUID(collUUID);
chunk1.setVersion(otherVersion);
chunk1.setShard(shard0.getName());
@@ -404,7 +412,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- chunk0.getNS(),
+ kNamespace,
chunk0,
origVersion.epoch(),
ShardId(shard0.getName()),
@@ -415,6 +423,8 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
}
TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
+ const auto collUUID = UUID::gen();
+
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -427,11 +437,11 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
int origMajorVersion = 12;
auto const origVersion =
- ChunkVersion(origMajorVersion, 7, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion(origMajorVersion, 7, OID::gen(), Timestamp(42) /* timestamp */);
ChunkType chunk0;
chunk0.setName(OID::gen());
- chunk0.setNS(kNamespace);
+ chunk0.setCollectionUUID(collUUID);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
@@ -443,7 +453,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
ChunkType chunk1;
chunk1.setName(OID::gen());
- chunk1.setNS(kNamespace);
+ chunk1.setCollectionUUID(collUUID);
chunk1.setVersion(origVersion);
chunk1.setShard(shard0.getName());
@@ -457,7 +467,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- chunk0.getNS(),
+ kNamespace,
chunk0,
origVersion.epoch(),
ShardId(shard0.getName()),
@@ -468,8 +478,9 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
}
TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) {
+ const auto collUUID = UUID::gen();
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
ShardType shard0;
shard0.setName("shard0");
@@ -486,7 +497,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
ChunkType chunk0;
chunk0.setName(OID::gen());
- chunk0.setNS(kNamespace);
+ chunk0.setCollectionUUID(collUUID);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
chunk0.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
@@ -499,7 +510,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
ChunkType chunk1;
chunk1.setName(OID::gen());
- chunk1.setNS(kNamespace);
+ chunk1.setCollectionUUID(collUUID);
chunk1.setVersion(origVersion);
chunk1.setShard(shard1.getName());
@@ -515,7 +526,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
Timestamp validAfter{101, 0};
StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
->commitChunkMigration(operationContext(),
- chunk0.getNS(),
+ kNamespace,
chunk0,
origVersion.epoch(),
ShardId(shard0.getName()),
@@ -550,6 +561,8 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
}
TEST_F(CommitChunkMigrate, RejectMissingChunkVersion) {
+ const auto collUUID = UUID::gen();
+
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -560,12 +573,12 @@ TEST_F(CommitChunkMigrate, RejectMissingChunkVersion) {
setupShards({shard0, shard1});
- ChunkVersion origVersion(12, 7, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion origVersion(12, 7, OID::gen(), Timestamp(42) /* timestamp */);
// Create migrate chunk with no chunk version set.
ChunkType migratedChunk;
migratedChunk.setName(OID::gen());
- migratedChunk.setNS(kNamespace);
+ migratedChunk.setCollectionUUID(collUUID);
migratedChunk.setShard(shard0.getName());
migratedChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
migratedChunk.setMin(BSON("a" << 1));
@@ -573,7 +586,7 @@ TEST_F(CommitChunkMigrate, RejectMissingChunkVersion) {
ChunkType currentChunk;
currentChunk.setName(OID::gen());
- currentChunk.setNS(kNamespace);
+ currentChunk.setCollectionUUID(collUUID);
currentChunk.setVersion(origVersion);
currentChunk.setShard(shard0.getName());
currentChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
@@ -596,6 +609,8 @@ TEST_F(CommitChunkMigrate, RejectMissingChunkVersion) {
}
TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) {
+ const auto collUUID = UUID::gen();
+
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -607,22 +622,22 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) {
setupShards({shard0, shard1});
auto epoch = OID::gen();
- ChunkVersion origVersion(12, 7, epoch, boost::none /* timestamp */);
+ ChunkVersion origVersion(12, 7, epoch, Timestamp(42) /* timestamp */);
ChunkType migratedChunk;
migratedChunk.setName(OID::gen());
- migratedChunk.setNS(kNamespace);
+ migratedChunk.setCollectionUUID(collUUID);
migratedChunk.setVersion(origVersion);
migratedChunk.setShard(shard0.getName());
migratedChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
migratedChunk.setMin(BSON("a" << 1));
migratedChunk.setMax(BSON("a" << 10));
- ChunkVersion currentChunkVersion(14, 7, epoch, boost::none /* timestamp */);
+ ChunkVersion currentChunkVersion(14, 7, epoch, Timestamp(42) /* timestamp */);
ChunkType currentChunk;
currentChunk.setName(OID::gen());
- currentChunk.setNS(kNamespace);
+ currentChunk.setCollectionUUID(collUUID);
currentChunk.setVersion(currentChunkVersion);
currentChunk.setShard(shard0.getName());
currentChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
@@ -646,6 +661,8 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) {
}
TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) {
+ const auto collUUID = UUID::gen();
+
ShardType shard0;
shard0.setName("shard0");
shard0.setHost("shard0:12");
@@ -656,22 +673,22 @@ TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) {
setupShards({shard0, shard1});
- ChunkVersion origVersion(12, 7, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion origVersion(12, 7, OID::gen(), Timestamp(42) /* timestamp */);
ChunkType migratedChunk;
migratedChunk.setName(OID::gen());
- migratedChunk.setNS(kNamespace);
+ migratedChunk.setCollectionUUID(collUUID);
migratedChunk.setVersion(origVersion);
migratedChunk.setShard(shard0.getName());
migratedChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
migratedChunk.setMin(BSON("a" << 1));
migratedChunk.setMax(BSON("a" << 10));
- ChunkVersion currentChunkVersion(12, 7, OID::gen(), boost::none /* timestamp */);
+ ChunkVersion currentChunkVersion(12, 7, OID::gen(), Timestamp(42) /* timestamp */);
ChunkType currentChunk;
currentChunk.setName(OID::gen());
- currentChunk.setNS(kNamespace);
+ currentChunk.setCollectionUUID(collUUID);
currentChunk.setVersion(currentChunkVersion);
currentChunk.setShard(shard0.getName());
currentChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
index 6b618e36140..4d511518aca 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
@@ -59,7 +59,6 @@ ChunkType generateChunkType(const NamespaceString& nss,
ChunkType chunkType;
chunkType.setName(OID::gen());
chunkType.setCollectionUUID(collUuid);
- chunkType.setNS(nss);
chunkType.setVersion(chunkVersion);
chunkType.setShard(shardId);
chunkType.setMin(minKey);
@@ -86,7 +85,7 @@ void assertChunkVersionWasBumpedTo(const ChunkType& chunkTypeBefore,
// None of the chunk's other fields should have been changed.
ASSERT_EQ(chunkTypeBefore.getName(), chunkTypeAfter.getName());
- ASSERT_EQ(chunkTypeBefore.getNS(), chunkTypeAfter.getNS());
+ ASSERT_EQ(chunkTypeBefore.getCollectionUUID(), chunkTypeAfter.getCollectionUUID());
ASSERT_BSONOBJ_EQ(chunkTypeBefore.getMin(), chunkTypeAfter.getMin());
ASSERT_BSONOBJ_EQ(chunkTypeBefore.getMax(), chunkTypeAfter.getMax());
ASSERT(chunkTypeBefore.getHistory() == chunkTypeAfter.getHistory());
@@ -103,8 +102,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoCollectionFoundReturnsSuccess) {
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
- boost::none,
- boost::none,
+ _collUuid,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
requestedChunkType.getVersion());
@@ -112,7 +110,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoCollectionFoundReturnsSuccess) {
TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundReturnsSuccess) {
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
const auto requestedChunkType = generateChunkType(_nss,
_collUuid,
@@ -128,8 +126,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundRetu
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
- boost::none,
- boost::none,
+ _collUuid,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
requestedChunkType.getVersion());
@@ -157,7 +154,6 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundRetu
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
- _nss,
_collUuid,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
@@ -171,7 +167,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundRetu
TEST_F(EnsureChunkVersionIsGreaterThanTest,
IfChunkMatchingRequestedChunkFoundBumpsChunkVersionAndReturnsSuccess) {
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
const auto requestedChunkType = generateChunkType(_nss,
_collUuid,
@@ -191,8 +187,7 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
- boost::none,
- boost::none,
+ _collUuid,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
requestedChunkType.getVersion());
@@ -227,7 +222,6 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest,
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
- _nss,
_collUuid,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
@@ -244,7 +238,7 @@ TEST_F(
EnsureChunkVersionIsGreaterThanTest,
IfChunkMatchingRequestedChunkFoundAndHasHigherChunkVersionReturnsSuccessWithoutBumpingChunkVersion) {
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
const auto requestedChunkType = generateChunkType(_nss,
_collUuid,
@@ -259,8 +253,7 @@ TEST_F(
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
- boost::none,
- boost::none,
+ _collUuid,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
requestedChunkType.getVersion());
@@ -289,7 +282,6 @@ TEST_F(
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
- _nss,
_collUuid,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
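With the optional namespace parameter gone, every call to ensureChunkVersionIsGreaterThan in these tests collapses to the same five-argument form. A representative call site, assembled from the hunks above:

    ShardingCatalogManager::get(operationContext())
        ->ensureChunkVersionIsGreaterThan(operationContext(),
                                          _collUuid,
                                          requestedChunkType.getMin(),
                                          requestedChunkType.getMax(),
                                          requestedChunkType.getVersion());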
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index c666e9ccc23..5dea8c35f53 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -65,7 +65,6 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
const auto collUuid = UUID::gen();
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(_nss1);
chunk.setCollectionUUID(collUuid);
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -146,7 +145,6 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
const auto collUuid = UUID::gen();
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(_nss1);
chunk.setCollectionUUID(collUuid);
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -220,10 +218,8 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
const auto collUuid = UUID::gen();
ChunkType chunk, otherChunk;
chunk.setName(OID::gen());
- chunk.setNS(_nss1);
chunk.setCollectionUUID(collUuid);
otherChunk.setName(OID::gen());
- otherChunk.setNS(_nss1);
otherChunk.setCollectionUUID(collUuid);
auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
@@ -301,7 +297,6 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
ShardId shardId(_shardName);
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(_nss1);
chunk.setCollectionUUID(collUuid);
auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
@@ -377,7 +372,6 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
const auto collUuidAtRequest = UUID::gen();
const boost::optional<Timestamp> collTimestamp(42);
ChunkType chunk;
- chunk.setNS(_nss1);
chunk.setCollectionUUID(UUID::gen());
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -415,10 +409,9 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
TEST_F(MergeChunkTest, NonMatchingUUIDsOfChunkAndRequestErrors) {
const auto collEpoch = OID::gen();
const boost::optional<Timestamp> collTimestamp(42);
- ChunkType chunk;
- chunk.setNS(_nss1);
const auto collUuid = UUID::gen();
const auto requestUuid = UUID::gen();
+ ChunkType chunk;
chunk.setCollectionUUID(collUuid);
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -469,7 +462,6 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
mergedChunk.setMin(chunkMin);
mergedChunk.setMax(chunkMax);
mergedChunk.setName(OID::gen());
- mergedChunk.setNS(_nss1);
mergedChunk.setCollectionUUID(collUuid);
mergedChunk.setShard(_shardId);
@@ -510,7 +502,6 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
const auto collUuid = UUID::gen();
ChunkType chunk1;
chunk1.setName(OID::gen());
- chunk1.setNS(_nss1);
chunk1.setCollectionUUID(collUuid);
@@ -587,7 +578,6 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceedWithLegacyMethod
const auto collUuid = UUID::gen();
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(collUuid);
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -633,8 +623,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceedWithLegacyMethod
ASSERT_EQ(expectedShardVersion, shardVersion);
- const auto query = collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
- : BSON(ChunkType::ns(nss.ns()));
+ const auto query = BSON(ChunkType::collectionUUID() << collUuid);
auto findResponse = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -663,7 +652,6 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceedWithLegacyMethod
ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -673,7 +661,6 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceedWithLegacyMethod)
const auto collUuid = UUID::gen();
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(collUuid);
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -712,8 +699,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceedWithLegacyMethod)
->commitChunkMerge(
operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
- const auto query = collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
- : BSON(ChunkType::ns(nss.ns()));
+ const auto query = BSON(ChunkType::collectionUUID() << collUuid);
auto findResponse = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -745,7 +731,6 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceedWithLegacyMethod)
ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -756,10 +741,8 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersionWithLegacyMethod) {
const auto collUuid = UUID::gen();
ChunkType chunk, otherChunk;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(collUuid);
otherChunk.setName(OID::gen());
- otherChunk.setNS(nss);
otherChunk.setCollectionUUID(collUuid);
auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
@@ -799,8 +782,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersionWithLegacyMethod) {
->commitChunkMerge(
operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
- const auto query = collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
- : BSON(ChunkType::ns(nss.ns()));
+ const auto query = BSON(ChunkType::collectionUUID() << collUuid);
auto findResponse = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -832,7 +814,6 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersionWithLegacyMethod) {
ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -843,7 +824,6 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAloneWithLegacyMethod) {
const auto collUuid = UUID::gen();
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(collUuid);
auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
@@ -880,8 +860,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAloneWithLegacyMethod) {
ShardingCatalogManager::get(operationContext())
->commitChunkMerge(
operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
- const auto query = collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
- : BSON(ChunkType::ns(nss.ns()));
+ const auto query = BSON(ChunkType::collectionUUID() << collUuid);
auto findResponse = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -915,7 +894,6 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAloneWithLegacyMethod) {
ASSERT_BSONOBJ_EQ(otherChunk.getMax(), foundOtherChunk.getMax());
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -924,7 +902,6 @@ TEST_F(MergeChunkTest, NonExistingNamespaceWithLegacyMethod) {
const auto collEpoch = OID::gen();
ChunkType chunk;
- chunk.setNS(nss);
chunk.setCollectionUUID(UUID::gen());
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -960,7 +937,6 @@ TEST_F(MergeChunkTest, NonExistingNamespaceWithLegacyMethod) {
ASSERT_NOT_OK(mergeStatus);
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -969,7 +945,6 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrorsWithLegacyMethod)
const auto collEpoch = OID::gen();
ChunkType chunk;
- chunk.setNS(nss);
chunk.setCollectionUUID(UUID::gen());
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -1002,7 +977,6 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrorsWithLegacyMethod)
ASSERT_EQ(ErrorCodes::StaleEpoch, mergeStatus);
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -1013,7 +987,6 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceedsWithLegacyMethod) {
const auto collUuid = UUID::gen();
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(collUuid);
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -1052,8 +1025,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceedsWithLegacyMethod) {
operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
// Verify that no change to config.chunks happened.
- const auto query = collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
- : BSON(ChunkType::ns(nss.ns()));
+ const auto query = BSON(ChunkType::collectionUUID() << collUuid);
auto findResponse = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -1074,7 +1046,6 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceedsWithLegacyMethod) {
ASSERT_BSONOBJ_EQ(mergedChunk.toConfigBSON(), foundChunk.toConfigBSON());
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -1090,7 +1061,6 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFailsWithLegacyMethod) {
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(UUID::gen());
chunk.setShard(_shardId);
@@ -1126,7 +1096,6 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFailsWithLegacyMethod) {
operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -1137,7 +1106,6 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceedWithLegacyMetho
const auto collUuid = UUID::gen();
ChunkType chunk1;
chunk1.setName(OID::gen());
- chunk1.setNS(nss);
chunk1.setCollectionUUID(collUuid);
@@ -1176,8 +1144,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceedWithLegacyMetho
->commitChunkMerge(
operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
- const auto query = collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
- : BSON(ChunkType::ns(nss.ns()));
+ const auto query = BSON(ChunkType::collectionUUID() << collUuid);
auto findResponse = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -1209,7 +1176,6 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceedWithLegacyMetho
ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
index ac1de7140e0..397eca70a61 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
@@ -199,15 +199,16 @@ TEST_F(RemoveShardTest, RemoveShardStillDrainingChunksRemaining) {
shard2.setState(ShardType::ShardState::kShardAware);
auto epoch = OID::gen();
- ChunkType chunk1(NamespaceString("testDB.testColl"),
+ const auto uuid = UUID::gen();
+ ChunkType chunk1(uuid,
ChunkRange(BSON("_id" << 0), BSON("_id" << 20)),
ChunkVersion(1, 1, epoch, boost::none /* timestamp */),
shard1.getName());
- ChunkType chunk2(NamespaceString("testDB.testColl"),
+ ChunkType chunk2(uuid,
ChunkRange(BSON("_id" << 21), BSON("_id" << 50)),
ChunkVersion(1, 2, epoch, boost::none /* timestamp */),
shard1.getName());
- ChunkType chunk3(NamespaceString("testDB.testColl"),
+ ChunkType chunk3(uuid,
ChunkRange(BSON("_id" << 51), BSON("_id" << 1000)),
ChunkVersion(1, 3, epoch, boost::none /* timestamp */),
shard1.getName());
@@ -284,15 +285,16 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
shard2.setState(ShardType::ShardState::kShardAware);
auto epoch = OID::gen();
- ChunkType chunk1(NamespaceString("testDB.testColl"),
+ auto uuid = UUID::gen();
+ ChunkType chunk1(uuid,
ChunkRange(BSON("_id" << 0), BSON("_id" << 20)),
ChunkVersion(1, 1, epoch, boost::none /* timestamp */),
shard1.getName());
- ChunkType chunk2(NamespaceString("testDB.testColl"),
+ ChunkType chunk2(uuid,
ChunkRange(BSON("_id" << 21), BSON("_id" << 50)),
ChunkVersion(1, 2, epoch, boost::none /* timestamp */),
shard1.getName());
- ChunkType chunk3(NamespaceString("testDB.testColl"),
+ ChunkType chunk3(uuid,
ChunkRange(BSON("_id" << 51), BSON("_id" << 1000)),
ChunkVersion(1, 3, epoch, boost::none /* timestamp */),
shard1.getName());
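Unlike the other test files in this change, these remove-shard tests keep boost::none for the ChunkVersion timestamp; only the chunk's owning-collection key moves from a NamespaceString to a UUID, via the ChunkType constructor. Sketch of the updated construction, as used in both tests above:

    const auto uuid = UUID::gen();
    ChunkType chunk1(uuid,
                     ChunkRange(BSON("_id" << 0), BSON("_id" << 20)),
                     ChunkVersion(1, 1, epoch, boost::none /* timestamp */),
                     shard1.getName());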
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index e735c1291e4..525011fd0ee 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -63,7 +63,6 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(collUuid);
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -106,7 +105,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
// First chunkDoc should have range [chunkMin, chunkSplitPoint]
auto chunkDocStatus =
- getChunkDoc(operationContext(), nssOrUuid, chunkMin, collEpoch, collTimestamp);
+ getChunkDoc(operationContext(), collUuid, chunkMin, collEpoch, collTimestamp);
ASSERT_OK(chunkDocStatus.getStatus());
auto chunkDoc = chunkDocStatus.getValue();
@@ -121,7 +120,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
// Second chunkDoc should have range [chunkSplitPoint, chunkMax]
auto otherChunkDocStatus =
- getChunkDoc(operationContext(), nssOrUuid, chunkSplitPoint, collEpoch, collTimestamp);
+ getChunkDoc(operationContext(), collUuid, chunkSplitPoint, collEpoch, collTimestamp);
ASSERT_OK(otherChunkDocStatus.getStatus());
auto otherChunkDoc = otherChunkDocStatus.getValue();
@@ -138,7 +137,6 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ASSERT(chunkDoc.getHistory() == otherChunkDoc.getHistory());
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -149,7 +147,6 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(collUuid);
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -182,7 +179,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
// First chunkDoc should have range [chunkMin, chunkSplitPoint]
auto chunkDocStatus =
- getChunkDoc(operationContext(), nssOrUuid, chunkMin, collEpoch, collTimestamp);
+ getChunkDoc(operationContext(), collUuid, chunkMin, collEpoch, collTimestamp);
ASSERT_OK(chunkDocStatus.getStatus());
auto chunkDoc = chunkDocStatus.getValue();
@@ -197,7 +194,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
// Second chunkDoc should have range [chunkSplitPoint, chunkSplitPoint2]
auto midChunkDocStatus =
- getChunkDoc(operationContext(), nssOrUuid, chunkSplitPoint, collEpoch, collTimestamp);
+ getChunkDoc(operationContext(), collUuid, chunkSplitPoint, collEpoch, collTimestamp);
ASSERT_OK(midChunkDocStatus.getStatus());
auto midChunkDoc = midChunkDocStatus.getValue();
@@ -212,7 +209,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
// Third chunkDoc should have range [chunkSplitPoint2, chunkMax]
auto lastChunkDocStatus =
- getChunkDoc(operationContext(), nssOrUuid, chunkSplitPoint2, collEpoch, collTimestamp);
+ getChunkDoc(operationContext(), collUuid, chunkSplitPoint2, collEpoch, collTimestamp);
ASSERT_OK(lastChunkDocStatus.getStatus());
auto lastChunkDoc = lastChunkDocStatus.getValue();
@@ -230,7 +227,6 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
ASSERT(midChunkDoc.getHistory() == lastChunkDoc.getHistory());
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -241,10 +237,8 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
ChunkType chunk, chunk2;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(collUuid);
chunk2.setName(OID::gen());
- chunk2.setNS(nss);
chunk2.setCollectionUUID(collUuid);
// set up first chunk
@@ -283,7 +277,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
// First chunkDoc should have range [chunkMin, chunkSplitPoint]
auto chunkDocStatus =
- getChunkDoc(operationContext(), nssOrUuid, chunkMin, collEpoch, collTimestamp);
+ getChunkDoc(operationContext(), collUuid, chunkMin, collEpoch, collTimestamp);
ASSERT_OK(chunkDocStatus.getStatus());
auto chunkDoc = chunkDocStatus.getValue();
@@ -295,7 +289,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
// Second chunkDoc should have range [chunkSplitPoint, chunkMax]
auto otherChunkDocStatus =
- getChunkDoc(operationContext(), nssOrUuid, chunkSplitPoint, collEpoch, collTimestamp);
+ getChunkDoc(operationContext(), collUuid, chunkSplitPoint, collEpoch, collTimestamp);
ASSERT_OK(otherChunkDocStatus.getStatus());
auto otherChunkDoc = otherChunkDocStatus.getValue();
@@ -306,7 +300,6 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
ASSERT_EQ(competingVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion());
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -316,7 +309,6 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(UUID::gen());
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -344,7 +336,6 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
ASSERT_EQ(ErrorCodes::BadValue, splitStatus);
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -353,7 +344,6 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
const auto collEpoch = OID::gen();
ChunkType chunk;
- chunk.setNS(nss);
chunk.setCollectionUUID(UUID::gen());
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -379,7 +369,6 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
ASSERT_NOT_OK(splitStatus);
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -388,7 +377,6 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
const auto collEpoch = OID::gen();
ChunkType chunk;
- chunk.setNS(nss);
chunk.setCollectionUUID(UUID::gen());
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -414,7 +402,6 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
ASSERT_EQ(ErrorCodes::StaleEpoch, splitStatus);
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -424,7 +411,6 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(UUID::gen());
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -450,7 +436,6 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
ASSERT_EQ(ErrorCodes::InvalidOptions, splitStatus);
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -459,7 +444,6 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
const auto collEpoch = OID::gen();
ChunkType chunk;
- chunk.setNS(nss);
chunk.setCollectionUUID(UUID::gen());
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -485,7 +469,6 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
ASSERT_EQ(ErrorCodes::InvalidOptions, splitStatus);
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -495,7 +478,6 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(nss);
chunk.setCollectionUUID(UUID::gen());
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -521,7 +503,6 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
ASSERT_EQ(ErrorCodes::InvalidOptions, splitStatus);
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
@@ -530,7 +511,6 @@ TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
const auto collEpoch = OID::gen();
ChunkType chunk;
- chunk.setNS(nss);
chunk.setCollectionUUID(UUID::gen());
auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
@@ -559,7 +539,6 @@ TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
"shard0000"));
};
- test(_nss1, boost::none /* timestamp */);
test(_nss2, Timestamp(42));
}
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 3002cafe317..2b2b4c846b7 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -91,7 +91,7 @@ protected:
boost::none /* chunkSizeBytes */,
true,
{ChunkType{
- kNss, range, ChunkVersion(1, 0, epoch, boost::none /* timestamp */), kOtherShard}});
+ uuid, range, ChunkVersion(1, 0, epoch, boost::none /* timestamp */), kOtherShard}});
return CollectionMetadata(ChunkManager(kThisShard,
DatabaseVersion(UUID::gen(), Timestamp()),
@@ -126,16 +126,21 @@ protected:
if (SimpleBSONObjComparator::kInstance.evaluate(chunkToSplit.getMin() < minKey)) {
chunkVersion.incMajor();
- splitChunks.emplace_back(
- kNss, ChunkRange(chunkToSplit.getMin(), minKey), chunkVersion, kOtherShard);
+ splitChunks.emplace_back(*collMetadata.getUUID(),
+ ChunkRange(chunkToSplit.getMin(), minKey),
+ chunkVersion,
+ kOtherShard);
}
chunkVersion.incMajor();
- splitChunks.emplace_back(kNss, ChunkRange(minKey, maxKey), chunkVersion, kThisShard);
+ splitChunks.emplace_back(
+ *collMetadata.getUUID(), ChunkRange(minKey, maxKey), chunkVersion, kThisShard);
chunkVersion.incMajor();
- splitChunks.emplace_back(
- kNss, ChunkRange(maxKey, chunkToSplit.getMax()), chunkVersion, kOtherShard);
+ splitChunks.emplace_back(*collMetadata.getUUID(),
+ ChunkRange(maxKey, chunkToSplit.getMax()),
+ chunkVersion,
+ kOtherShard);
auto rt = cm->getRoutingTableHistory_ForTest().makeUpdated(
boost::none, boost::none, true, splitChunks);
@@ -166,7 +171,8 @@ protected:
boost::none,
boost::none,
true,
- {ChunkType(kNss, ChunkRange(minKey, maxKey), chunkVersion, kOtherShard)});
+ {ChunkType(
+ *metadata.getUUID(), ChunkRange(minKey, maxKey), chunkVersion, kOtherShard)});
return CollectionMetadata(ChunkManager(cm->dbPrimary(),
cm->dbVersion(),
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
index 72c4000e4eb..c6591e51e52 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
@@ -175,7 +175,7 @@ protected:
boost::none /* reshardingFields */,
boost::none /* chunkSizeBytes */,
true,
- {ChunkType{kNss,
+ {ChunkType{uuid,
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
ShardId("dummyShardId")}});
diff --git a/src/mongo/db/s/migration_util_test.cpp b/src/mongo/db/s/migration_util_test.cpp
index 3c50b5eab20..ccb1ae3bbec 100644
--- a/src/mongo/db/s/migration_util_test.cpp
+++ b/src/mongo/db/s/migration_util_test.cpp
@@ -448,22 +448,23 @@ public:
}
std::vector<ChunkType> makeChangedChunks(ChunkVersion startingVersion) {
- ChunkType chunk1(kNss,
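+ // A single generated collection UUID identifies all chunks in the returned diff.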
+ const auto uuid = UUID::gen();
+ ChunkType chunk1(uuid,
{kShardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
startingVersion,
{"0"});
chunk1.setName(OID::gen());
startingVersion.incMinor();
- ChunkType chunk2(kNss, {BSON("_id" << -100), BSON("_id" << 0)}, startingVersion, {"1"});
+ ChunkType chunk2(uuid, {BSON("_id" << -100), BSON("_id" << 0)}, startingVersion, {"1"});
chunk2.setName(OID::gen());
startingVersion.incMinor();
- ChunkType chunk3(kNss, {BSON("_id" << 0), BSON("_id" << 100)}, startingVersion, {"0"});
+ ChunkType chunk3(uuid, {BSON("_id" << 0), BSON("_id" << 100)}, startingVersion, {"0"});
chunk3.setName(OID::gen());
startingVersion.incMinor();
- ChunkType chunk4(kNss,
+ ChunkType chunk4(uuid,
{BSON("_id" << 100), kShardKeyPattern.getKeyPattern().globalMax()},
startingVersion,
{"1"});
diff --git a/src/mongo/db/s/op_observer_sharding_test.cpp b/src/mongo/db/s/op_observer_sharding_test.cpp
index 076f4d8ee82..b0e6de71d6c 100644
--- a/src/mongo/db/s/op_observer_sharding_test.cpp
+++ b/src/mongo/db/s/op_observer_sharding_test.cpp
@@ -62,14 +62,15 @@ protected:
* DeleteState::documentKey.
*/
static CollectionMetadata makeAMetadata(BSONObj const& keyPattern) {
+ const UUID uuid = UUID::gen();
const OID epoch = OID::gen();
auto range = ChunkRange(BSON("key" << MINKEY), BSON("key" << MAXKEY));
- auto chunk = ChunkType(kTestNss,
+ auto chunk = ChunkType(uuid,
std::move(range),
ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
ShardId("other"));
auto rt = RoutingTableHistory::makeNew(kTestNss,
- UUID::gen(),
+ uuid,
KeyPattern(keyPattern),
nullptr,
false,
diff --git a/src/mongo/db/s/range_deletion_util_test.cpp b/src/mongo/db/s/range_deletion_util_test.cpp
index 8a42ae17b8c..fc5b0d61787 100644
--- a/src/mongo/db/s/range_deletion_util_test.cpp
+++ b/src/mongo/db/s/range_deletion_util_test.cpp
@@ -110,7 +110,7 @@ public:
boost::none,
boost::none /* chunkSizeBytes */,
true,
- {ChunkType{kNss,
+ {ChunkType{uuid,
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
ShardId("dummyShardId")}});
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index 0dfb762acd1..661e0da7c12 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -511,9 +511,9 @@ void removeChunkAndTagsDocs(OperationContext* opCtx,
opCtx,
TagsType::ConfigNS,
BatchedCommandRequest::buildDeleteOp(TagsType::ConfigNS,
- BSON(ChunkType::ns(ns.ns())), // query
- true, // multi
- hint // hint
+ BSON(TagsType::ns(ns.ns())), // query
+ true, // multi
+ hint // hint
),
txnNumber);
}
@@ -821,25 +821,17 @@ ReshardingCoordinatorExternalStateImpl::calculateParticipantShardsAndChunks(
if (const auto& chunks = coordinatorDoc.getPresetReshardedChunks()) {
auto version = calculateChunkVersionForInitialChunks(opCtx);
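+ // Chunks are identified by the collection UUID, so the initial chunk version always carries a timestamp.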
+ invariant(version.getTimestamp());
// Use the provided shardIds from presetReshardedChunks to construct the
// recipient list.
for (const auto& reshardedChunk : *chunks) {
recipientShardIds.emplace(reshardedChunk.getRecipientShardId());
- if (version.getTimestamp()) {
- initialChunks.emplace_back(
- coordinatorDoc.getReshardingUUID(),
- ChunkRange{reshardedChunk.getMin(), reshardedChunk.getMax()},
- version,
- reshardedChunk.getRecipientShardId());
- } else {
- initialChunks.emplace_back(
- coordinatorDoc.getTempReshardingNss(),
- ChunkRange{reshardedChunk.getMin(), reshardedChunk.getMax()},
- version,
- reshardedChunk.getRecipientShardId());
- }
+ initialChunks.emplace_back(coordinatorDoc.getReshardingUUID(),
+ ChunkRange{reshardedChunk.getMin(), reshardedChunk.getMax()},
+ version,
+ reshardedChunk.getRecipientShardId());
version.incMinor();
}
} else {
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
index 7af6c8db7c1..9221c00df1a 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
@@ -87,20 +87,13 @@ class ExternalStateForTest : public ReshardingCoordinatorExternalState {
// Use the provided shardIds from presetReshardedChunks to construct the
// recipient list.
if (const auto& chunks = coordinatorDoc.getPresetReshardedChunks()) {
+ invariant(version.getTimestamp());
for (const auto& reshardedChunk : *chunks) {
- if (version.getTimestamp()) {
- initialChunks.emplace_back(
- coordinatorDoc.getReshardingUUID(),
- ChunkRange{reshardedChunk.getMin(), reshardedChunk.getMax()},
- version,
- reshardedChunk.getRecipientShardId());
- } else {
- initialChunks.emplace_back(
- coordinatorDoc.getTempReshardingNss(),
- ChunkRange{reshardedChunk.getMin(), reshardedChunk.getMax()},
- version,
- reshardedChunk.getRecipientShardId());
- }
+ initialChunks.emplace_back(
+ coordinatorDoc.getReshardingUUID(),
+ ChunkRange{reshardedChunk.getMin(), reshardedChunk.getMax()},
+ version,
+ reshardedChunk.getRecipientShardId());
version.incMinor();
}
}
diff --git a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
index 7f2515c7584..6cf1d5005d0 100644
--- a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
@@ -74,7 +74,7 @@ public:
std::unique_ptr<CollatorInterface> defaultCollator) {
const OID epoch = OID::gen();
std::vector<ChunkType> chunks = {ChunkType{
- _sourceNss,
+ _sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
ChunkVersion(100, 0, epoch, boost::none /* timestamp */),
_myDonorId}};
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h
index 858cdc4643c..c6202d8a8d7 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.h
@@ -109,7 +109,7 @@ protected:
const ShardId& shardThatChunkExistsOn) {
auto range = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << MAXKEY));
auto chunk = ChunkType(
- nss, std::move(range), ChunkVersion(1, 0, epoch, boost::none), shardThatChunkExistsOn);
+ uuid, std::move(range), ChunkVersion(1, 0, epoch, boost::none), shardThatChunkExistsOn);
ChunkManager cm(kThisShard.getShardId(),
DatabaseVersion(uuid, timestamp),
makeStandaloneRoutingTableHistory(
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
index b503567685b..036d84d5698 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
@@ -167,18 +167,18 @@ public:
const OID epoch = OID::gen();
std::vector<ChunkType> chunks = {
ChunkType{
- kCrudNs,
+ kCrudUUID,
ChunkRange{BSON(kOriginalShardKey << MINKEY),
BSON(kOriginalShardKey << -std::numeric_limits<double>::infinity())},
ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
_sourceId.getShardId()},
ChunkType{
- kCrudNs,
+ kCrudUUID,
ChunkRange{BSON(kOriginalShardKey << -std::numeric_limits<double>::infinity()),
BSON(kOriginalShardKey << 0)},
ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
kOtherShardId},
- ChunkType{kCrudNs,
+ ChunkType{kCrudUUID,
ChunkRange{BSON(kOriginalShardKey << 0), BSON(kOriginalShardKey << MAXKEY)},
ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
_sourceId.getShardId()}};
diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
index c21596f18e5..9691a947954 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
@@ -287,7 +287,7 @@ private:
ChunkManager makeChunkManagerForSourceCollection() {
const OID epoch = OID::gen();
std::vector<ChunkType> chunks = {ChunkType{
- _sourceNss,
+ _sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
ChunkVersion(100, 0, epoch, boost::none /* timestamp */),
_myDonorId}};
diff --git a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
index a88783eda55..2e81a7be471 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
@@ -238,17 +238,17 @@ private:
const OID epoch = OID::gen();
std::vector<ChunkType> chunks = {
ChunkType{
- _sourceNss,
+ _sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY),
BSON(_currentShardKey << -std::numeric_limits<double>::infinity())},
ChunkVersion(100, 0, epoch, boost::none /* timestamp */),
_myDonorId},
- ChunkType{_sourceNss,
+ ChunkType{_sourceUUID,
ChunkRange{BSON(_currentShardKey << -std::numeric_limits<double>::infinity()),
BSON(_currentShardKey << 0)},
ChunkVersion(100, 1, epoch, boost::none /* timestamp */),
_otherDonorId},
- ChunkType{_sourceNss,
+ ChunkType{_sourceUUID,
ChunkRange{BSON(_currentShardKey << 0), BSON(_currentShardKey << MAXKEY)},
ChunkVersion(100, 2, epoch, boost::none /* timestamp */),
_myDonorId}};
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp
index 074a5f74aaa..ad04e9623dd 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp
@@ -163,7 +163,7 @@ public:
ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
- ChunkType chunk(tempNss,
+ ChunkType chunk(uuid,
{skey.getKeyPattern().globalMin(), skey.getKeyPattern().globalMax()},
version,
{"0"});
@@ -188,7 +188,7 @@ public:
ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
- ChunkType chunk(origNss,
+ ChunkType chunk(uuid,
{skey.getKeyPattern().globalMin(), skey.getKeyPattern().globalMax()},
version,
{"0"});
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index 5640ea3bac9..a2ee7a5521b 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -78,7 +78,7 @@ public:
const OID epoch = OID::gen();
std::vector<ChunkType> chunks = {ChunkType{
- nss,
+ _sourceUUID,
ChunkRange{BSON(_currentShardKey << MINKEY), BSON(_currentShardKey << MAXKEY)},
ChunkVersion(100, 0, epoch, boost::none /* timestamp */),
_someDonorId}};
diff --git a/src/mongo/db/s/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding_destined_recipient_test.cpp
index 8943f73ffb9..98d534ca694 100644
--- a/src/mongo/db/s/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding_destined_recipient_test.cpp
@@ -155,15 +155,17 @@ public:
}
protected:
- std::vector<ChunkType> createChunks(const OID& epoch, const std::string& shardKey) {
+ std::vector<ChunkType> createChunks(const OID& epoch,
+ const UUID& uuid,
+ const std::string& shardKey) {
auto range1 = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << 5));
- ChunkType chunk1(kNss,
+ ChunkType chunk1(uuid,
range1,
ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
kShardList[0].getName());
auto range2 = ChunkRange(BSON(shardKey << 5), BSON(shardKey << MAXKEY));
- ChunkType chunk2(kNss,
+ ChunkType chunk2(uuid,
range2,
ChunkVersion(1, 0, epoch, boost::none /* timestamp */),
kShardList[1].getName());
@@ -221,9 +223,12 @@ protected:
_mockCatalogCacheLoader->setDatabaseRefreshReturnValue(
DatabaseType(kNss.db().toString(), kShardList[0].getName(), true, env.dbVersion));
_mockCatalogCacheLoader->setCollectionRefreshValues(
- kNss, coll, createChunks(env.version.epoch(), kShardKey), reshardingFields);
+ kNss,
+ coll,
+ createChunks(env.version.epoch(), env.sourceUuid, kShardKey),
+ reshardingFields);
_mockCatalogCacheLoader->setCollectionRefreshValues(
- env.tempNss, coll, createChunks(env.version.epoch(), "y"), boost::none);
+ env.tempNss, coll, createChunks(env.version.epoch(), env.sourceUuid, "y"), boost::none);
forceDatabaseRefresh(opCtx, kNss.db());
forceShardFilteringMetadataRefresh(opCtx, kNss);
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
index 09ed06890cf..d3d7913e58e 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
@@ -116,6 +116,7 @@ vector<ChunkType> ShardServerCatalogCacheLoaderTest::makeFiveChunks(
const ChunkVersion& collectionVersion) {
ChunkVersion collVersion(collectionVersion);
vector<ChunkType> chunks;
+ const UUID uuid = UUID::gen();
BSONObj mins[] = {
BSON("a" << MINKEY), BSON("a" << 10), BSON("a" << 50), BSON("a" << 100), BSON("a" << 200)};
@@ -126,7 +127,7 @@ vector<ChunkType> ShardServerCatalogCacheLoaderTest::makeFiveChunks(
collVersion.incMajor();
ChunkType chunk;
- chunk.setNS(kNss);
+ chunk.setCollectionUUID(uuid);
chunk.setMin(mins[i]);
chunk.setMax(maxs[i]);
chunk.setShard(kShardId);
@@ -142,6 +143,7 @@ vector<ChunkType> ShardServerCatalogCacheLoaderTest::makeThreeUpdatedChunksDiff(
const ChunkVersion& collectionVersion) {
ChunkVersion collVersion(collectionVersion);
vector<ChunkType> chunks;
+ const UUID uuid = UUID::gen();
// The diff query is for GTE a known version, so prepend the previous newest chunk, which is
// unmodified by this change and so should be found. Note: it is important for testing that the
@@ -149,7 +151,7 @@ vector<ChunkType> ShardServerCatalogCacheLoaderTest::makeThreeUpdatedChunksDiff(
// dependent on a race between persistence and retrieving data because it combines enqueued and
// persisted results without applying modifications.
ChunkType oldChunk;
- oldChunk.setNS(kNss);
+ oldChunk.setCollectionUUID(uuid);
oldChunk.setMin(BSON("a" << 200));
oldChunk.setMax(BSON("a" << MAXKEY));
oldChunk.setShard(kShardId);
@@ -165,7 +167,7 @@ vector<ChunkType> ShardServerCatalogCacheLoaderTest::makeThreeUpdatedChunksDiff(
collVersion.incMinor();
ChunkType chunk;
- chunk.setNS(kNss);
+ chunk.setCollectionUUID(uuid);
chunk.setMin(mins[i]);
chunk.setMax(maxs[i]);
chunk.setShard(kShardId);
diff --git a/src/mongo/db/s/sharding_ddl_util_test.cpp b/src/mongo/db/s/sharding_ddl_util_test.cpp
index 9b9fdc950ca..9421a389193 100644
--- a/src/mongo/db/s/sharding_ddl_util_test.cpp
+++ b/src/mongo/db/s/sharding_ddl_util_test.cpp
@@ -220,7 +220,7 @@ TEST_F(ShardingDDLUtilTest, ShardedRenamePreconditionsAreMet) {
ChunkVersion chunkVersion(1, 1, OID::gen(), boost::none);
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(kToNss);
+ chunk.setCollectionUUID(UUID::gen());
chunk.setVersion(chunkVersion);
chunk.setShard(shard0.getName());
chunk.setHistory({ChunkHistory(Timestamp(1, 1), shard0.getName())});
@@ -240,7 +240,7 @@ TEST_F(ShardingDDLUtilTest, ShardedRenamePreconditionsTargetCollectionExists) {
ChunkVersion chunkVersion(1, 1, OID::gen(), boost::none);
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(kToNss);
+ chunk.setCollectionUUID(UUID::gen());
chunk.setVersion(chunkVersion);
chunk.setShard(shard0.getName());
chunk.setHistory({ChunkHistory(Timestamp(1, 1), shard0.getName())});