Diffstat (limited to 'src/mongo/db/s/resharding')
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service.cpp        48
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp   21
-rw-r--r--  src/mongo/db/s/resharding/resharding_coordinator_test.cpp           77
3 files changed, 70 insertions, 76 deletions
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
index 18e8cc3b8aa..93d573c8f75 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp
@@ -542,35 +542,23 @@ void updateChunkAndTagsDocsForTempNss(OperationContext* opCtx,
OID newCollectionEpoch,
boost::optional<Timestamp> newCollectionTimestamp,
TxnNumber txnNumber) {
- // Update all chunk documents that currently have 'ns' as the temporary collection namespace
- // such that 'ns' is now the original collection namespace and 'lastmodEpoch' is
- // newCollectionEpoch.
- const auto chunksQuery = [&]() {
- if (newCollectionTimestamp) {
- return BSON(ChunkType::collectionUUID() << coordinatorDoc.getReshardingUUID());
- } else {
- return BSON(ChunkType::ns(coordinatorDoc.getTempReshardingNss().ns()));
- }
- }();
- const auto chunksUpdate = [&]() {
- if (newCollectionTimestamp) {
- return BSON("$set" << BSON(ChunkType::epoch << newCollectionEpoch
- << ChunkType::timestamp
- << *newCollectionTimestamp));
- } else {
- return BSON("$set" << BSON(ChunkType::ns << coordinatorDoc.getSourceNss().ns()
- << ChunkType::epoch << newCollectionEpoch));
- }
- }();
- auto chunksRequest = BatchedCommandRequest::buildUpdateOp(ChunkType::ConfigNS,
- chunksQuery, // query
- chunksUpdate, // update
- false, // upsert
- true // multi
- );
-
- auto chunksRes = ShardingCatalogManager::get(opCtx)->writeToConfigDocumentInTxn(
- opCtx, ChunkType::ConfigNS, chunksRequest, txnNumber);
+ // If the collection entry has a timestamp, the metadata has already been upgraded to the 5.0
+ // format, in which case chunks are indexed by UUID and do not contain epochs. Therefore, the
+ // update to config.collections alone is sufficient.
+ if (!newCollectionTimestamp) {
+ auto chunksRequest = BatchedCommandRequest::buildUpdateOp(
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns(coordinatorDoc.getTempReshardingNss().ns())), // query
+ BSON("$set" << BSON(ChunkType::ns << coordinatorDoc.getSourceNss().ns()
+ << ChunkType::epoch
+ << newCollectionEpoch)), // update
+ false, // upsert
+ true // multi
+ );
+
+ auto chunksRes = ShardingCatalogManager::get(opCtx)->writeToConfigDocumentInTxn(
+ opCtx, ChunkType::ConfigNS, chunksRequest, txnNumber);
+ }
auto tagsRequest = BatchedCommandRequest::buildUpdateOp(
TagsType::ConfigNS,
@@ -609,12 +597,12 @@ CollectionType createTempReshardingCollectionType(
const BSONObj& collation) {
CollectionType collType(coordinatorDoc.getTempReshardingNss(),
chunkVersion.epoch(),
+ chunkVersion.getTimestamp(),
opCtx->getServiceContext()->getPreciseClockSource()->now(),
coordinatorDoc.getReshardingUUID());
collType.setKeyPattern(coordinatorDoc.getReshardingKey());
collType.setDefaultCollation(collation);
collType.setUnique(false);
- collType.setTimestamp(chunkVersion.getTimestamp());
TypeCollectionReshardingFields tempEntryReshardingFields(coordinatorDoc.getReshardingUUID());
tempEntryReshardingFields.setState(coordinatorDoc.getState());
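
Note on the hunks above: the per-chunk rewrite is now skipped whenever the collection entry carries a timestamp, because in the 5.0 metadata format config.chunks documents are keyed by collection UUID and carry neither a namespace nor an epoch. The following is a minimal sketch of that branching, reusing the ChunkType/BSON helpers the file already uses; the free function and its name are illustrative, not part of the patch.

    // Illustrative sketch only: returns the per-chunk update to apply when renaming
    // the temporary resharding namespace back to the source namespace, or boost::none
    // when no chunk rewrite is needed (5.0 UUID-keyed metadata).
    boost::optional<BSONObj> chunksUpdateForRename(
        const ReshardingCoordinatorDocument& doc,
        const OID& newCollectionEpoch,
        const boost::optional<Timestamp>& newCollectionTimestamp) {
        if (newCollectionTimestamp) {
            // Chunks are addressed by collection UUID and have no 'ns' or 'lastmodEpoch'
            // fields, so updating config.collections alone is sufficient.
            return boost::none;
        }
        // Pre-5.0 format: move 'ns' from the temporary namespace to the original one
        // and stamp the new epoch on every chunk document (applied as a multi-update
        // by the caller, as in the hunk above).
        return BSON("$set" << BSON(ChunkType::ns << doc.getSourceNss().ns()
                                                 << ChunkType::epoch << newCollectionEpoch));
    }
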
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
index 5bb026932fd..785ddd3305d 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
@@ -367,8 +367,11 @@ public:
shardKey = _oldShardKey.toBSON();
}
- CollectionType collType(
- coordinatorDoc.getSourceNss(), std::move(epoch), lastUpdated, std::move(uuid));
+ CollectionType collType(coordinatorDoc.getSourceNss(),
+ std::move(epoch),
+ Timestamp(1, 2),
+ lastUpdated,
+ std::move(uuid));
collType.setKeyPattern(shardKey);
collType.setUnique(false);
if (reshardingFields)
@@ -424,7 +427,7 @@ public:
}
}
- std::vector<ChunkType> makeChunks(const NamespaceString& nss,
+ std::vector<ChunkType> makeChunks(const UUID& uuid,
OID epoch,
const ShardKeyPattern& shardKey,
std::vector<OID> ids) {
@@ -433,21 +436,21 @@ public:
// Create two chunks, one on each shard with the given namespace and epoch
ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
- ChunkType chunk1(nss, chunkRanges[0], version, ShardId("shard0000"));
+ ChunkType chunk1(uuid, chunkRanges[0], version, ShardId("shard0000"));
chunk1.setName(ids[0]);
version.incMinor();
- ChunkType chunk2(nss, chunkRanges[1], version, ShardId("shard0001"));
+ ChunkType chunk2(uuid, chunkRanges[1], version, ShardId("shard0001"));
chunk2.setName(ids[1]);
return std::vector<ChunkType>{chunk1, chunk2};
}
// Returns the chunk for the donor shard.
- ChunkType makeAndInsertChunksForDonorShard(const NamespaceString& nss,
+ ChunkType makeAndInsertChunksForDonorShard(const UUID& uuid,
OID epoch,
const ShardKeyPattern& shardKey,
std::vector<OID> ids) {
- auto chunks = makeChunks(nss, epoch, shardKey, ids);
+ auto chunks = makeChunks(uuid, epoch, shardKey, ids);
// Only the chunk corresponding to shard0000 is stored as a donor in the coordinator state
// document constructed.
@@ -503,10 +506,10 @@ TEST_F(ReshardingCoordinatorServiceTest, ReshardingCoordinatorSuccessfullyTransi
auto doc = insertStateAndCatalogEntries(CoordinatorStateEnum::kUnused, _originalEpoch);
auto opCtx = operationContext();
auto donorChunk = makeAndInsertChunksForDonorShard(
- _originalNss, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
+ _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
auto initialChunks =
- makeChunks(_tempNss, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+ makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
std::vector<ReshardedChunk> presetReshardedChunks;
for (const auto& chunk : initialChunks) {
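
The service-test changes above follow the same shift: the fixture helpers now take the collection UUID instead of the namespace, so the generated ChunkType documents are keyed the way the 5.0 catalog expects. A hedged sketch of the resulting test-body pattern is shown below; member names such as _originalUUID, _reshardingUUID, _tempEpoch, _oldShardKey and _newShardKey are the fixture fields visible in the diff.

    // Illustrative test-body pattern, not part of the patch: build UUID-keyed chunks
    // for the source and temporary collections and keep the donor chunk around so its
    // version can be checked after the state transition.
    auto donorChunk = makeAndInsertChunksForDonorShard(
        _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
    auto initialChunks =
        makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
    // Each ChunkType now carries only the collection UUID; the namespace appears in the
    // coordinator document and in config.collections, not in config.chunks.
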
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
index 58de1d6d76b..5a0beb4bf24 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
@@ -112,8 +112,11 @@ protected:
shardKey = _oldShardKey.toBSON();
}
- CollectionType collType(
- coordinatorDoc.getSourceNss(), std::move(epoch), lastUpdated, std::move(uuid));
+ CollectionType collType(coordinatorDoc.getSourceNss(),
+ std::move(epoch),
+ Timestamp(1, 2),
+ lastUpdated,
+ std::move(uuid));
collType.setKeyPattern(shardKey);
collType.setUnique(false);
if (reshardingFields)
@@ -131,11 +134,11 @@ protected:
}
// Returns the chunk for the donor shard.
- ChunkType makeAndInsertChunksForDonorShard(const NamespaceString& nss,
+ ChunkType makeAndInsertChunksForDonorShard(const UUID& uuid,
OID epoch,
const ShardKeyPattern& shardKey,
std::vector<OID> ids) {
- auto chunks = makeChunks(nss, epoch, shardKey, ids);
+ auto chunks = makeChunks(uuid, epoch, shardKey, ids);
// Only the chunk corresponding to shard0000 is stored as a donor in the coordinator state
// document constructed.
@@ -145,11 +148,11 @@ protected:
}
// Returns the chunk for the recipient shard.
- ChunkType makeAndInsertChunksForRecipientShard(const NamespaceString& nss,
+ ChunkType makeAndInsertChunksForRecipientShard(const UUID& uuid,
OID epoch,
const ShardKeyPattern& shardKey,
std::vector<OID> ids) {
- auto chunks = makeChunks(nss, epoch, shardKey, ids);
+ auto chunks = makeChunks(uuid, epoch, shardKey, ids);
// Only the chunk corresponding to shard0001 is stored as a recipient in the coordinator
// state document constructed.
@@ -158,7 +161,7 @@ protected:
return recipientChunk;
}
- std::vector<ChunkType> makeChunks(const NamespaceString& nss,
+ std::vector<ChunkType> makeChunks(const UUID& uuid,
OID epoch,
const ShardKeyPattern& shardKey,
std::vector<OID> ids) {
@@ -166,11 +169,11 @@ protected:
_newShardKey.isShardKey(shardKey.toBSON()) ? _newChunkRanges : _oldChunkRanges;
// Create two chunks, one on each shard with the given namespace and epoch
- ChunkVersion version(1, 0, epoch, boost::none /* timestamp */);
- ChunkType chunk1(nss, chunkRanges[0], version, ShardId("shard0000"));
+ ChunkVersion version(1, 0, epoch, Timestamp(1, 2));
+ ChunkType chunk1(uuid, chunkRanges[0], version, ShardId("shard0000"));
chunk1.setName(ids[0]);
version.incMinor();
- ChunkType chunk2(nss, chunkRanges[1], version, ShardId("shard0001"));
+ ChunkType chunk2(uuid, chunkRanges[1], version, ShardId("shard0001"));
chunk2.setName(ids[1]);
return std::vector<ChunkType>{chunk1, chunk2};
@@ -213,10 +216,7 @@ protected:
client.insert(CollectionType::ConfigNS.ns(), originalNssCatalogEntry.toBSON());
auto tempNssCatalogEntry = createTempReshardingCollectionType(
- opCtx,
- coordinatorDoc,
- ChunkVersion(1, 1, OID::gen(), boost::none /* timestamp */),
- BSONObj());
+ opCtx, coordinatorDoc, ChunkVersion(1, 1, OID::gen(), Timestamp(1, 2)), BSONObj());
client.insert(CollectionType::ConfigNS.ns(), tempNssCatalogEntry.toBSON());
return coordinatorDoc;
@@ -420,14 +420,13 @@ protected:
void readChunkCatalogEntriesAndAssertMatchExpected(
OperationContext* opCtx,
+ const UUID& uuid,
std::vector<ChunkType> expectedChunks,
const OID& collEpoch,
const boost::optional<Timestamp>& collTimestamp) {
- auto nss = expectedChunks[0].getNS();
-
DBDirectClient client(opCtx);
std::vector<ChunkType> foundChunks;
- auto cursor = client.query(ChunkType::ConfigNS, Query(BSON("ns" << nss.ns())));
+ auto cursor = client.query(ChunkType::ConfigNS, Query(BSON("uuid" << uuid)));
while (cursor->more()) {
auto d = uassertStatusOK(
ChunkType::fromConfigBSON(cursor->nextSafe().getOwned(), collEpoch, collTimestamp));
@@ -441,7 +440,9 @@ protected:
return c.getName() == id;
});
ASSERT(onDiskIt != foundChunks.end());
- ASSERT_EQUALS(onDiskIt->toConfigBSON().woCompare(it->toConfigBSON()), 0);
+ auto expectedBSON = it->toConfigBSON().removeField(ChunkType::collectionUUID());
+ auto onDiskBSON = onDiskIt->toConfigBSON().removeField(ChunkType::collectionUUID());
+ ASSERT_BSONOBJ_EQ(expectedBSON, onDiskBSON);
}
}
@@ -503,11 +504,11 @@ protected:
// collection should have been removed.
boost::optional<CollectionType> expectedTempCollType = boost::none;
if (expectedCoordinatorDoc.getState() < CoordinatorStateEnum::kCommitting) {
- expectedTempCollType = createTempReshardingCollectionType(
- opCtx,
- expectedCoordinatorDoc,
- ChunkVersion(1, 1, OID::gen(), boost::none /* timestamp */),
- BSONObj());
+ expectedTempCollType =
+ createTempReshardingCollectionType(opCtx,
+ expectedCoordinatorDoc,
+ ChunkVersion(1, 1, OID::gen(), Timestamp(1, 2)),
+ BSONObj());
// It's necessary to add the userCanceled field because the call into
// createTempReshardingCollectionType assumes that the collection entry is
@@ -592,7 +593,7 @@ protected:
// Check that chunks and tags entries have been correctly created
readChunkCatalogEntriesAndAssertMatchExpected(
- opCtx, initialChunks, _originalEpoch, _originalTimestamp);
+ opCtx, _reshardingUUID, initialChunks, _originalEpoch, _originalTimestamp);
readTagCatalogEntriesAndAssertMatchExpected(opCtx, newZones);
}
@@ -630,7 +631,7 @@ protected:
// Check that chunks and tags entries previously under the temporary namespace have been
// correctly updated to the original namespace
readChunkCatalogEntriesAndAssertMatchExpected(
- opCtx, expectedChunks, _finalEpoch, _finalTimestamp);
+ opCtx, _reshardingUUID, expectedChunks, _finalEpoch, _finalTimestamp);
readTagCatalogEntriesAndAssertMatchExpected(opCtx, expectedZones);
}
@@ -663,12 +664,14 @@ protected:
insertStateAndCatalogEntries(CoordinatorStateEnum::kPreparingToDonate, _originalEpoch);
auto initialChunksIds = std::vector{OID::gen(), OID::gen()};
- auto tempNssChunks = makeChunks(_tempNss, _tempEpoch, _newShardKey, initialChunksIds);
+ auto tempNssChunks =
+ makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, initialChunksIds);
auto recipientChunk = tempNssChunks[1];
insertChunkAndZoneEntries(tempNssChunks, makeZones(_tempNss, _newShardKey));
insertChunkAndZoneEntries(
- makeChunks(_originalNss, OID::gen(), _oldShardKey, std::vector{OID::gen(), OID::gen()}),
+ makeChunks(
+ _originalUUID, OID::gen(), _oldShardKey, std::vector{OID::gen(), OID::gen()}),
makeZones(_originalNss, _oldShardKey));
// Persist the updates on disk
@@ -737,11 +740,11 @@ TEST_F(ReshardingCoordinatorPersistenceTest, WriteInitialInfoSucceeds) {
// Ensure the chunks for the original namespace exist since they will be bumped as a product of
// the state transition to kPreparingToDonate.
auto donorChunk = makeAndInsertChunksForDonorShard(
- _originalNss, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
+ _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
auto collectionVersion = donorChunk.getVersion();
auto initialChunks =
- makeChunks(_tempNss, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+ makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
auto newZones = makeZones(_tempNss, _newShardKey);
std::vector<BSONObj> zonesBSON;
@@ -785,11 +788,11 @@ TEST_F(ReshardingCoordinatorPersistenceTest, BasicStateTransitionSucceeds) {
// Ensure the chunks for the original and temporary namespaces exist since they will be bumped
// as a product of the state transition to kBlockingWrites.
auto donorChunk = makeAndInsertChunksForDonorShard(
- _originalNss, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
+ _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
auto donorCollectionVersion = donorChunk.getVersion();
auto recipientChunk = makeAndInsertChunksForRecipientShard(
- _tempNss, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+ _reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
auto recipientCollectionVersion = donorChunk.getVersion();
// Persist the updates on disk
@@ -810,11 +813,11 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionWithFetchTimestampSu
// Ensure the chunks for the original and temporary namespaces exist since they will be bumped
// as a product of the state transition to kCloning.
auto donorChunk = makeAndInsertChunksForDonorShard(
- _originalNss, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
+ _originalUUID, _originalEpoch, _oldShardKey, std::vector{OID::gen(), OID::gen()});
auto donorCollectionVersion = donorChunk.getVersion();
auto recipientChunk = makeAndInsertChunksForRecipientShard(
- _tempNss, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+ _reshardingUUID, _tempEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
auto recipientCollectionVersion = recipientChunk.getVersion();
// Persist the updates on disk
@@ -841,12 +844,12 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTranstionToDecisionPersistedSu
CoordinatorStateEnum::kBlockingWrites, _originalEpoch, fetchTimestamp);
auto initialChunksIds = std::vector{OID::gen(), OID::gen()};
- auto tempNssChunks = makeChunks(_tempNss, _tempEpoch, _newShardKey, initialChunksIds);
+ auto tempNssChunks = makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, initialChunksIds);
auto recipientChunk = tempNssChunks[1];
insertChunkAndZoneEntries(tempNssChunks, makeZones(_tempNss, _newShardKey));
insertChunkAndZoneEntries(
- makeChunks(_originalNss, OID::gen(), _oldShardKey, std::vector{OID::gen(), OID::gen()}),
+ makeChunks(_originalUUID, OID::gen(), _oldShardKey, std::vector{OID::gen(), OID::gen()}),
makeZones(_originalNss, _oldShardKey));
// Persist the updates on disk
@@ -855,7 +858,7 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTranstionToDecisionPersistedSu
// The new epoch to use for the resharded collection to indicate that the collection is a
// new incarnation of the namespace
- auto updatedChunks = makeChunks(_originalNss, _finalEpoch, _newShardKey, initialChunksIds);
+ auto updatedChunks = makeChunks(_originalUUID, _finalEpoch, _newShardKey, initialChunksIds);
auto updatedZones = makeZones(_originalNss, _newShardKey);
writeDecisionPersistedStateExpectSuccess(
@@ -881,7 +884,7 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionToDoneSucceeds) {
// Ensure the chunks for the original namespace exist since they will be bumped as a product of
// the state transition to kDone.
auto finalChunk = makeAndInsertChunksForRecipientShard(
- _originalNss, _finalEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
+ _reshardingUUID, _finalEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()});
auto collectionVersion = finalChunk.getVersion();
removeCoordinatorDocAndReshardingFieldsExpectSuccess(operationContext(), coordinatorDoc);
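
For completeness, the assertion helper changed earlier in this file now looks chunks up by collection UUID and strips the collectionUUID field before comparing, since expected fixtures and on-disk documents may not both carry it. A hedged sketch of that comparison loop follows; the calls (client.query, ChunkType::fromConfigBSON, toConfigBSON, removeField, ASSERT_BSONOBJ_EQ) are the ones used in the diff, and the variables correspond to the helper's parameters.

    // Illustrative comparison only: fetch the chunks for one collection UUID and check
    // each against its expected counterpart, ignoring the collectionUUID field itself.
    // (std::find_if requires <algorithm>.)
    DBDirectClient client(opCtx);
    auto cursor = client.query(ChunkType::ConfigNS, Query(BSON("uuid" << uuid)));
    while (cursor->more()) {
        auto onDisk = uassertStatusOK(
            ChunkType::fromConfigBSON(cursor->nextSafe().getOwned(), collEpoch, collTimestamp));
        auto it = std::find_if(expectedChunks.begin(),
                               expectedChunks.end(),
                               [&](const ChunkType& c) { return c.getName() == onDisk.getName(); });
        ASSERT(it != expectedChunks.end());
        ASSERT_BSONOBJ_EQ(it->toConfigBSON().removeField(ChunkType::collectionUUID()),
                          onDisk.toConfigBSON().removeField(ChunkType::collectionUUID()));
    }
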