author      Sergi Mateo Bellido <sergi.mateo-bellido@mongodb.com>  2021-06-11 07:23:56 +0000
committer   Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-06-15 10:37:20 +0000
commit      5268977d520f944bdf55e1bd4299ff7429bbe8ff (patch)
tree        7cd037851a2848deac9d30e3e5b61606205cd576
parent      64e16e323f6534a22e0b51670f18f78da678681e (diff)
download    mongo-5268977d520f944bdf55e1bd4299ff7429bbe8ff.tar.gz
SERVER-57316 Changing a few functions that rely on the presence of epochs/timestamps on config.chunks
The following functions do not rely on the presence of epochs on config.chunks if there is a timestamp on config.collections:
- ensureChunkVersionIsGreaterThan cmd.
- clearJumboFlag cmd.
- commitChunkMerge/commitChunksMerge.
- commitChunkSplit.
- commitChunkMigration.
- getMaxChunkVersionFromQueryResponse.

(cherry picked from commit 108840e0fdd428435b3fbbc1919ddb05311321c7)
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.cpp  25
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.h  15
-rw-r--r--  src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp  7
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.h  6
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp  370
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp  123
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp  137
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp  1150
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp  837
-rw-r--r--  src/mongo/db/s/migration_util.cpp  11
-rw-r--r--  src/mongo/s/request_types/ensure_chunk_version_is_greater_than.idl  8
11 files changed, 1506 insertions, 1183 deletions
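
The common thread across these changes is that chunk-targeting queries now branch on whether the collection's entry in config.collections carries a timestamp: when it does, chunks are matched by collection UUID; otherwise they are matched by namespace (plus epoch where a precondition requires it). A minimal sketch of that branch, using the internal CollectionType/ChunkType BSON helpers exactly as they appear in the diff below (the free-standing helper name is hypothetical; in the patch the expression is written inline at each call site):

    // Sketch only: how the patched functions build their chunk-targeting query.
    // 'coll' is a CollectionType document read from config.collections.
    BSONObj makeChunkTargetingQuery(const CollectionType& coll) {
        return coll.getTimestamp()
            ? BSON(ChunkType::collectionUUID() << coll.getUuid())  // 5.0 metadata: match by UUID
            : BSON(ChunkType::ns(coll.getNss().ns()));             // legacy metadata: match by namespace
    }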
diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp
index f4c7a461e9b..9b42faeff2c 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.cpp
+++ b/src/mongo/db/s/config/config_server_test_fixture.cpp
@@ -333,7 +333,11 @@ void ConfigServerTestFixture::setupCollection(const NamespaceString& nss,
return UUID::gen();
}
}();
- CollectionType coll(nss, chunks[0].getVersion().epoch(), Date_t::now(), collUUID);
+ CollectionType coll(nss,
+ chunks[0].getVersion().epoch(),
+ chunks[0].getVersion().getTimestamp(),
+ Date_t::now(),
+ collUUID);
coll.setTimestamp(chunks.front().getVersion().getTimestamp());
coll.setKeyPattern(shardKey);
ASSERT_OK(
@@ -347,11 +351,26 @@ void ConfigServerTestFixture::setupCollection(const NamespaceString& nss,
StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(
OperationContext* opCtx,
+ const NamespaceStringOrUUID& nssOrUuid,
const BSONObj& minKey,
const OID& collEpoch,
const boost::optional<Timestamp>& collTimestamp) {
- auto doc =
- findOneOnConfigCollection(opCtx, ChunkType::ConfigNS, BSON(ChunkType::min() << minKey));
+ const auto query = nssOrUuid.uuid()
+ ? BSON(ChunkType::collectionUUID() << *nssOrUuid.uuid() << ChunkType::min(minKey))
+ : BSON(ChunkType::ns(nssOrUuid.nss()->ns()) << ChunkType::min(minKey));
+ auto doc = findOneOnConfigCollection(opCtx, ChunkType::ConfigNS, query);
+ if (!doc.isOK())
+ return doc.getStatus();
+
+ return ChunkType::fromConfigBSON(doc.getValue(), collEpoch, collTimestamp);
+}
+
+StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(
+ OperationContext* opCtx,
+ const BSONObj& minKey,
+ const OID& collEpoch,
+ const boost::optional<Timestamp>& collTimestamp) {
+ auto doc = findOneOnConfigCollection(opCtx, ChunkType::ConfigNS, BSON(ChunkType::min(minKey)));
if (!doc.isOK())
return doc.getStatus();
diff --git a/src/mongo/db/s/config/config_server_test_fixture.h b/src/mongo/db/s/config/config_server_test_fixture.h
index 0e1ee919469..764488a46e6 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.h
+++ b/src/mongo/db/s/config/config_server_test_fixture.h
@@ -106,7 +106,20 @@ protected:
const std::vector<ChunkType>& chunks);
/**
- * Retrieves the chunk document from the config server.
+ * Retrieves the chunk document <nssOrUuid, minKey> from the config server.
+ * This is the recommended way to get a chunk document.
+ */
+ StatusWith<ChunkType> getChunkDoc(OperationContext* opCtx,
+ const NamespaceStringOrUUID& nssOrUuid,
+ const BSONObj& minKey,
+ const OID& collEpoch,
+ const boost::optional<Timestamp>& collTimestamp);
+
+ /**
+ * Retrieves the chunk document <minKey> from the config server.
+ * This function assumes that there is just one chunk document associated with minKey. This can
+ * lead to problems in scenarios where two or more collections are split in the same way.
*/
StatusWith<ChunkType> getChunkDoc(OperationContext* opCtx,
const BSONObj& minKey,
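
To make the difference between the two fixture overloads concrete, here is a hedged usage sketch (variable names are illustrative, mirroring the tests later in this diff): the new overload scopes the lookup to a single collection via namespace or UUID, while the legacy overload matches on minKey alone and becomes ambiguous as soon as two collections in the fixture share the same split points.

    // New overload: unambiguous, scoped to one collection.
    auto chunkDoc = uassertStatusOK(
        getChunkDoc(operationContext(), nssOrUuid, minKey, collEpoch, collTimestamp));

    // Legacy overload: matches on minKey only; avoid it when several collections
    // in the test are split at the same boundaries.
    auto legacyChunkDoc = uassertStatusOK(
        getChunkDoc(operationContext(), minKey, collEpoch, collTimestamp));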
diff --git a/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp b/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp
index 0a5db266b0d..6403ec22c8a 100644
--- a/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp
+++ b/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp
@@ -57,7 +57,12 @@ public:
"writeConcern",
opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);
ShardingCatalogManager::get(opCtx)->ensureChunkVersionIsGreaterThan(
- opCtx, request().getMinKey(), request().getMaxKey(), request().getVersion());
+ opCtx,
+ request().getNss(),
+ request().getCollectionUUID(),
+ request().getMinKey(),
+ request().getMaxKey(),
+ request().getVersion());
}
private:
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index d47f1facb63..74920116362 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -290,8 +290,14 @@ public:
/**
* If a chunk matching 'requestedChunk' exists, bumps the chunk's version to one greater than
* the current collection version.
+ *
+ * 'nss' and 'collUUID' were added to the ConfigsvrEnsureChunkVersionIsGreaterThanCommand
+ * in 5.0. They are optional in 5.0 because the request may come from a previous version (4.4)
+ * that doesn't pass these extra fields.
*/
void ensureChunkVersionIsGreaterThan(OperationContext* opCtx,
+ const boost::optional<NamespaceString>& nss,
+ const boost::optional<UUID>& collUUID,
const BSONObj& minKey,
const BSONObj& maxKey,
const ChunkVersion& version);
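
Because the two new parameters exist only for mixed-version compatibility, the expected call shape on a 5.0 config server mirrors the command change earlier in this diff; a request coming from a 4.4 node simply yields boost::none for both fields. A hedged sketch of the call site:

    // Sketch of the 5.0 call site; both optionals are boost::none when the
    // request originates from a 4.4 node that does not send nss/collectionUUID.
    ShardingCatalogManager::get(opCtx)->ensureChunkVersionIsGreaterThan(
        opCtx,
        request().getNss(),
        request().getCollectionUUID(),
        request().getMinKey(),
        request().getMaxKey(),
        request().getVersion());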
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index bdadd67c1c3..a20777362d8 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -138,20 +138,26 @@ BSONArray buildMergeChunksTransactionPrecond(const std::vector<ChunkType>& chunk
const ChunkVersion& collVersion) {
BSONArrayBuilder preCond;
+ const bool collHasTimestamp = (bool)collVersion.getTimestamp();
for (const auto& chunk : chunksToMerge) {
BSONObj query = BSON(ChunkType::min(chunk.getMin()) << ChunkType::max(chunk.getMax()));
- if (collVersion.getTimestamp()) {
- query = query.addFields(BSON(ChunkType::collectionUUID << chunk.getCollectionUUID()));
+ if (collHasTimestamp) {
+ query = query.addFields(BSON(ChunkType::collectionUUID() << chunk.getCollectionUUID()));
} else {
query = query.addFields(BSON(ChunkType::ns(chunk.getNS().ns())));
}
+ const auto collectionIdentityMatchCondition = collHasTimestamp
+ ? BSON(ChunkType::collectionUUID()
+ << chunk.getCollectionUUID() << ChunkType::shard(chunk.getShard().toString()))
+ : BSON(ChunkType::epoch(collVersion.epoch())
+ << ChunkType::shard(chunk.getShard().toString()));
+
BSONObjBuilder b;
b.append("ns", ChunkType::ConfigNS.ns());
b.append("q", BSON("query" << query << "orderby" << BSON(ChunkType::lastmod() << -1)));
- b.append("res",
- BSON(ChunkType::epoch(collVersion.epoch())
- << ChunkType::shard(chunk.getShard().toString())));
+ b.append("res", collectionIdentityMatchCondition);
+
preCond.append(b.obj());
}
return preCond.arr();
@@ -292,7 +298,7 @@ boost::optional<ChunkType> getControlChunkForMigrate(OperationContext* opCtx,
// Helper function to find collection version and shard version.
StatusWith<ChunkVersion> getMaxChunkVersionFromQueryResponse(
- const NamespaceString& nss, const StatusWith<Shard::QueryResponse>& queryResponse) {
+ const CollectionType& coll, const StatusWith<Shard::QueryResponse>& queryResponse) {
if (!queryResponse.isOK()) {
return queryResponse.getStatus();
@@ -301,11 +307,14 @@ StatusWith<ChunkVersion> getMaxChunkVersionFromQueryResponse(
const auto& chunksVector = queryResponse.getValue().docs;
if (chunksVector.empty()) {
return {ErrorCodes::Error(50577),
- str::stream() << "Collection '" << nss.ns()
+ str::stream() << "Collection '" << coll.getNss().ns()
<< "' no longer either exists, is sharded, or has chunks"};
}
- return ChunkVersion::parseLegacyWithField(chunksVector.front(), ChunkType::lastmod());
+ const auto chunk = uassertStatusOK(
+ ChunkType::fromConfigBSON(chunksVector.front(), coll.getEpoch(), coll.getTimestamp()));
+
+ return chunk.getVersion();
}
// Helper function to get the collection version for nss. Always uses kLocalReadConcern.
@@ -329,16 +338,10 @@ StatusWith<ChunkVersion> getCollectionVersion(OperationContext* opCtx, const Nam
}
const CollectionType coll(findCollResponse.getValue().docs[0]);
- const auto chunksQuery = [&]() {
- if (coll.getTimestamp()) {
- return BSON(ChunkType::collectionUUID << coll.getUuid());
- } else {
- return BSON(ChunkType::ns(coll.getNss().ns()));
- }
- }();
-
+ const auto chunksQuery = coll.getTimestamp() ? BSON(ChunkType::collectionUUID << coll.getUuid())
+ : BSON(ChunkType::ns(coll.getNss().ns()));
return getMaxChunkVersionFromQueryResponse(
- nss,
+ coll,
Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -359,19 +362,13 @@ BSONObj getShardAndCollectionVersion(OperationContext* opCtx,
auto collectionVersion = uassertStatusOKWithContext(
std::move(swCollectionVersion), "Couldn't retrieve collection version from config server");
- const auto chunksQuery = [&]() {
- if (coll.getTimestamp()) {
- return BSON(ChunkType::collectionUUID
- << coll.getUuid() << ChunkType::shard()
- << fromShard); // Query all chunks for this namespace and shard.
- } else {
- return BSON(ChunkType::ns()
- << coll.getNss().ns() << ChunkType::shard()
- << fromShard); // Query all chunks for this namespace and shard.
- }
- }();
+ const auto chunksQuery = coll.getTimestamp()
+ ? BSON(ChunkType::collectionUUID << coll.getUuid()
+ << ChunkType::shard(fromShard.toString()))
+ : BSON(ChunkType::ns(coll.getNss().ns()) << ChunkType::shard(fromShard.toString()));
+
auto swDonorShardVersion = getMaxChunkVersionFromQueryResponse(
- coll.getNss(),
+ coll,
Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -551,15 +548,6 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
auto collVersion = swCollVersion.getValue();
- // Return an error if collection epoch does not match epoch of request.
- if (collVersion.epoch() != requestEpoch) {
- return {ErrorCodes::StaleEpoch,
- str::stream() << "splitChunk cannot split chunk " << range.toString()
- << ". Epoch of collection '" << nss.ns() << "' has changed."
- << " Current epoch: " << collVersion.epoch()
- << ", cmd epoch: " << requestEpoch};
- }
-
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto findCollResponse = uassertStatusOK(
configShard->exhaustiveFindOnConfig(opCtx,
@@ -574,10 +562,19 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
!findCollResponse.docs.empty());
const CollectionType coll(findCollResponse.docs[0]);
+ // Return an error if collection epoch does not match epoch of request.
+ if (coll.getEpoch() != requestEpoch) {
+ return {ErrorCodes::StaleEpoch,
+ str::stream() << "splitChunk cannot split chunk " << range.toString()
+ << ". Epoch of collection '" << nss.ns() << "' has changed."
+ << " Current epoch: " << coll.getEpoch()
+ << ", cmd epoch: " << requestEpoch};
+ }
+
// Find the chunk history.
const auto collNsOrUUID = getNsOrUUIDForChunkTargeting(coll);
const auto origChunk = _findChunkOnConfig(
- opCtx, collNsOrUUID, collVersion.epoch(), collVersion.getTimestamp(), range.getMin());
+ opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), range.getMin());
if (!origChunk.isOK()) {
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
return origChunk.getStatus();
@@ -680,11 +677,14 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
}
b.append("q", BSON("query" << query << "orderby" << BSON(ChunkType::lastmod() << -1)));
- {
- BSONObjBuilder bb(b.subobjStart("res"));
- bb.append(ChunkType::epoch(), requestEpoch);
- bb.append(ChunkType::shard(), shardName);
- }
+
+ const auto resultMustMatch = origChunk.getValue().getVersion().getTimestamp()
+ ? BSON(ChunkType::collectionUUID()
+ << origChunk.getValue().getCollectionUUID() << ChunkType::shard(shardName))
+ : BSON(ChunkType::epoch(requestEpoch) << ChunkType::shard(shardName));
+
+ b.append("res", resultMustMatch);
+
preCond.append(b.obj());
}
@@ -774,16 +774,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMerge(
return swCollVersion.getStatus().withContext(str::stream()
<< "mergeChunk cannot merge chunks.");
}
-
auto collVersion = swCollVersion.getValue();
- // Return an error if epoch of chunk does not match epoch of request
- if (collVersion.epoch() != requestEpoch) {
- return {ErrorCodes::StaleEpoch,
- str::stream() << "Epoch of chunk does not match epoch of request. Chunk epoch: "
- << collVersion.epoch() << ", request epoch: " << requestEpoch};
- }
-
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto findCollResponse = uassertStatusOK(
configShard->exhaustiveFindOnConfig(opCtx,
@@ -798,13 +790,17 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMerge(
!findCollResponse.docs.empty());
const CollectionType coll(findCollResponse.docs[0]);
+ if (coll.getEpoch() != requestEpoch) {
+ return {ErrorCodes::StaleEpoch,
+ str::stream()
+ << "Epoch of collection does not match epoch of request. Collection epoch: "
+ << coll.getEpoch() << ", request epoch: " << requestEpoch};
+ }
+
// Check if the chunk(s) have already been merged. If so, return success.
const auto collNsOrUUID = getNsOrUUIDForChunkTargeting(coll);
- auto minChunkOnDisk = uassertStatusOK(_findChunkOnConfig(opCtx,
- collNsOrUUID,
- collVersion.epoch(),
- collVersion.getTimestamp(),
- chunkBoundaries.front()));
+ auto minChunkOnDisk = uassertStatusOK(_findChunkOnConfig(
+ opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), chunkBoundaries.front()));
if (minChunkOnDisk.getMax().woCompare(chunkBoundaries.back()) == 0) {
auto replyWithVersions = getShardAndCollectionVersion(opCtx, coll, ShardId(shardName));
// Makes sure that the last thing we read in getCurrentChunk and
@@ -821,11 +817,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMerge(
// Do not use the first chunk boundary as a max bound while building chunks
for (size_t i = 1; i < chunkBoundaries.size(); ++i) {
// Read the original chunk from disk to lookup that chunk's '_id' field.
- auto currentChunk = uassertStatusOK(_findChunkOnConfig(opCtx,
- collNsOrUUID,
- collVersion.epoch(),
- collVersion.getTimestamp(),
- chunkBoundaries[i - 1]));
+ auto currentChunk = uassertStatusOK(_findChunkOnConfig(
+ opCtx, collNsOrUUID, coll.getEpoch(), coll.getTimestamp(), chunkBoundaries[i - 1]));
// Ensure the chunk boundaries are strictly increasing
if (chunkBoundaries[i].woCompare(currentChunk.getMin()) <= 0) {
@@ -1033,7 +1026,6 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
return getShardAndCollectionVersion(opCtx, coll, shardId);
}
-
StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
OperationContext* opCtx,
const NamespaceString& nss,
@@ -1097,13 +1089,10 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
"Collection is undergoing changes and chunks cannot be moved",
coll.getAllowMigrations());
- const auto findChunkQuery = [&]() {
- if (coll.getTimestamp()) {
- return BSON(ChunkType::collectionUUID() << coll.getUuid());
- } else {
- return BSON(ChunkType::ns() << coll.getNss().ns());
- }
- }();
+ const auto findChunkQuery = coll.getTimestamp()
+ ? BSON(ChunkType::collectionUUID() << coll.getUuid())
+ : BSON(ChunkType::ns(coll.getNss().ns()));
+
auto findResponse = uassertStatusOK(
configShard->exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -1373,14 +1362,11 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
return;
}
+ const auto allChunksQuery = coll.getTimestamp()
+ ? BSON(ChunkType::collectionUUID << coll.getUuid())
+ : BSON(ChunkType::ns(coll.getNss().ns()));
+
// Must use local read concern because we will perform subsequent writes.
- const auto allChunksQuery = [&]() {
- if (coll.getTimestamp()) {
- return BSON(ChunkType::collectionUUID << coll.getUuid());
- } else {
- return BSON(ChunkType::ns(coll.getNss().ns()));
- }
- }();
auto findResponse = uassertStatusOK(
configShard->exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
@@ -1420,12 +1406,12 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
currentCollectionVersion.getTimestamp());
- BSONObj chunkQuery(BSON(ChunkType::epoch(collectionEpoch)
- << ChunkType::min(chunk.getMin()) << ChunkType::max(chunk.getMax())));
+ BSONObj chunkQuery(BSON(ChunkType::min(chunk.getMin()) << ChunkType::max(chunk.getMax())));
if (coll.getTimestamp()) {
chunkQuery = chunkQuery.addFields(BSON(ChunkType::collectionUUID << coll.getUuid()));
} else {
- chunkQuery = chunkQuery.addFields(BSON(ChunkType::ns(coll.getNss().ns())));
+ chunkQuery = chunkQuery.addFields(
+ BSON(ChunkType::ns(coll.getNss().ns()) << ChunkType::epoch(collectionEpoch)));
}
BSONObjBuilder updateBuilder;
@@ -1451,10 +1437,13 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
didUpdate);
}
-void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* opCtx,
- const BSONObj& minKey,
- const BSONObj& maxKey,
- const ChunkVersion& version) {
+void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(
+ OperationContext* opCtx,
+ const boost::optional<NamespaceString>& nss,
+ const boost::optional<UUID>& collUuid,
+ const BSONObj& minKey,
+ const BSONObj& maxKey,
+ const ChunkVersion& version) {
auto earlyReturnBeforeDoingWriteGuard = makeGuard([&] {
// Ensure waiting for writeConcern of the data read.
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
@@ -1465,81 +1454,124 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o
// move chunks on different collections to proceed in parallel.
Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
- const auto requestedChunkQuery =
- BSON(ChunkType::min(minKey) << ChunkType::max(maxKey) << ChunkType::epoch(version.epoch()));
const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- // Get the chunk matching the requested chunk.
- const auto matchingChunksVector =
- uassertStatusOK(
- configShard->exhaustiveFindOnConfig(opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- ChunkType::ConfigNS,
- requestedChunkQuery,
- BSONObj() /* sort */,
- 1 /* limit */))
- .docs;
- if (matchingChunksVector.empty()) {
- // This can happen in a number of cases, such as that the collection has been dropped, its
- // shard key has been refined, the chunk has been split, or the chunk has been merged.
- LOGV2(23884,
- "ensureChunkVersionIsGreaterThan did not find any chunks with minKey {minKey}, "
- "maxKey {maxKey}, and epoch {epoch}. Returning success.",
- "ensureChunkVersionIsGreaterThan did not find any matching chunks; returning success",
- "minKey"_attr = minKey,
- "maxKey"_attr = maxKey,
- "epoch"_attr = version.epoch());
- return;
+ CollectionType coll;
+ {
+ auto findCollResponse = uassertStatusOK(configShard->exhaustiveFindOnConfig(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ CollectionType::ConfigNS,
+ BSON(CollectionType::kEpochFieldName << version.epoch()),
+ {} /* sort */,
+ 1));
+
+ if (findCollResponse.docs.empty()) {
+ LOGV2(5731600,
+ "ensureChunkVersionIsGreaterThan did not find a collection with epoch "
+ "{epoch} epoch; returning success.",
+ "epoch"_attr = version.epoch());
+ return;
+ }
+
+ coll = CollectionType(findCollResponse.docs[0]);
+ dassert(!collUuid || *collUuid == coll.getUuid());
}
- const auto currentChunk = uassertStatusOK(ChunkType::fromConfigBSON(
- matchingChunksVector.front(), version.epoch(), version.getTimestamp()));
+ const auto requestedChunkQuery = coll.getTimestamp()
+ ? BSON(ChunkType::min(minKey)
+ << ChunkType::max(maxKey) << ChunkType::collectionUUID() << *collUuid)
+ : BSON(ChunkType::min(minKey) << ChunkType::max(maxKey) << ChunkType::ns(coll.getNss().ns())
+ << ChunkType::epoch(version.epoch()));
- if (version.isOlderThan(currentChunk.getVersion())) {
- LOGV2(23885,
- "ensureChunkVersionIsGreaterThan found that the chunk with minKey {minKey}, maxKey "
- "{maxKey}, and epoch {epoch} already has a higher version than {version}. Current "
- "chunk is {currentChunk}. Returning success.",
- "ensureChunkVersionIsGreaterThan found that the chunk already has a higher version; "
- "returning success",
- "minKey"_attr = minKey,
- "maxKey"_attr = maxKey,
- "epoch"_attr = version.epoch(),
- "version"_attr = version,
- "currentChunk"_attr = currentChunk.toConfigBSON());
- return;
+ // Get the chunk matching the requested chunk.
+ ChunkType matchingChunk;
+ {
+ const auto matchingChunksVector =
+ uassertStatusOK(configShard->exhaustiveFindOnConfig(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ requestedChunkQuery,
+ BSONObj() /* sort */,
+ 1 /* limit */))
+ .docs;
+ if (matchingChunksVector.empty()) {
+ // This can happen in a number of cases, such as that the collection has been
+ // dropped, its shard key has been refined, the chunk has been split, or the chunk
+ // has been merged.
+ LOGV2(23884,
+ "ensureChunkVersionIsGreaterThan did not find any chunks with minKey {minKey}, "
+ "maxKey {maxKey}, and epoch {epoch}. Returning success.",
+ "ensureChunkVersionIsGreaterThan did not find any matching chunks; returning "
+ "success",
+ "minKey"_attr = minKey,
+ "maxKey"_attr = maxKey,
+ "epoch"_attr = version.epoch());
+ return;
+ }
+
+ matchingChunk = uassertStatusOK(ChunkType::fromConfigBSON(
+ matchingChunksVector.front(), coll.getEpoch(), coll.getTimestamp()));
+
+ if (version.isOlderThan(matchingChunk.getVersion())) {
+ LOGV2(23885,
+ "ensureChunkVersionIsGreaterThan found that the chunk with minKey {minKey}, "
+ "maxKey "
+ "{maxKey}, and epoch {epoch} already has a higher version than {version}. "
+ "Current "
+ "chunk is {currentChunk}. Returning success.",
+ "ensureChunkVersionIsGreaterThan found that the chunk already has a higher "
+ "version; "
+ "returning success",
+ "minKey"_attr = minKey,
+ "maxKey"_attr = maxKey,
+ "epoch"_attr = version.epoch(),
+ "version"_attr = version,
+ "currentChunk"_attr = matchingChunk.toConfigBSON());
+ return;
+ }
}
// Get the chunk with the current collectionVersion for this epoch.
- const auto highestChunksVector =
- uassertStatusOK(
- configShard->exhaustiveFindOnConfig(opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- ChunkType::ConfigNS,
- BSON(ChunkType::epoch(version.epoch())) /* query */,
- BSON(ChunkType::lastmod << -1) /* sort */,
- 1 /* limit */))
- .docs;
- if (highestChunksVector.empty()) {
- LOGV2(23886,
- "ensureChunkVersionIsGreaterThan did not find any chunks with epoch {epoch} when "
- "attempting to find the collectionVersion. The collection must have been dropped "
- "concurrently or had its shard key refined. Returning success.",
- "ensureChunkVersionIsGreaterThan did not find any chunks with a matching epoch when "
- "attempting to find the collectionVersion. The collection must have been dropped "
- "concurrently or had its shard key refined. Returning success.",
- "epoch"_attr = version.epoch());
- return;
+ ChunkType highestChunk;
+ {
+ const auto highestChunksVector =
+ uassertStatusOK(configShard->exhaustiveFindOnConfig(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::epoch(version.epoch())) /* query */,
+ BSON(ChunkType::lastmod << -1) /* sort */,
+ 1 /* limit */))
+ .docs;
+ if (highestChunksVector.empty()) {
+ LOGV2(23886,
+ "ensureChunkVersionIsGreaterThan did not find any chunks with epoch {epoch} "
+ "when "
+ "attempting to find the collectionVersion. The collection must have been "
+ "dropped "
+ "concurrently or had its shard key refined. Returning success.",
+ "ensureChunkVersionIsGreaterThan did not find any chunks with a matching epoch "
+ "when "
+ "attempting to find the collectionVersion. The collection must have been "
+ "dropped "
+ "concurrently or had its shard key refined. Returning success.",
+ "epoch"_attr = version.epoch());
+ return;
+ }
+ highestChunk = uassertStatusOK(ChunkType::fromConfigBSON(
+ highestChunksVector.front(), coll.getEpoch(), coll.getTimestamp()));
}
- const auto highestChunk = uassertStatusOK(ChunkType::fromConfigBSON(
- highestChunksVector.front(), version.epoch(), version.getTimestamp()));
- // Generate a new version for the chunk by incrementing the collectionVersion's major version.
- auto newChunk = currentChunk;
+ // Generate a new version for the chunk by incrementing the collectionVersion's major
+ // version.
+ auto newChunk = matchingChunk;
newChunk.setVersion(ChunkVersion(
- highestChunk.getVersion().majorVersion() + 1, 0, version.epoch(), version.getTimestamp()));
+ highestChunk.getVersion().majorVersion() + 1, 0, coll.getEpoch(), coll.getTimestamp()));
// Update the chunk, if it still exists, to have the bumped version.
earlyReturnBeforeDoingWriteGuard.dismiss();
@@ -1551,27 +1583,29 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o
false /* upsert */,
kNoWaitWriteConcern));
if (didUpdate) {
- LOGV2(
- 23887,
- "ensureChunkVersionIsGreaterThan bumped the version of the chunk with minKey {minKey}, "
- "maxKey {maxKey}, and epoch {epoch}. Chunk is now {newChunk}",
- "ensureChunkVersionIsGreaterThan bumped the the chunk version",
- "minKey"_attr = minKey,
- "maxKey"_attr = maxKey,
- "epoch"_attr = version.epoch(),
- "newChunk"_attr = newChunk.toConfigBSON());
+ LOGV2(23887,
+ "ensureChunkVersionIsGreaterThan bumped the version of the chunk with minKey "
+ "{minKey}, "
+ "maxKey {maxKey}, and epoch {epoch}. Chunk is now {newChunk}",
+ "ensureChunkVersionIsGreaterThan bumped the the chunk version",
+ "minKey"_attr = minKey,
+ "maxKey"_attr = maxKey,
+ "epoch"_attr = version.epoch(),
+ "newChunk"_attr = newChunk.toConfigBSON());
} else {
- LOGV2(
- 23888,
- "ensureChunkVersionIsGreaterThan did not find a chunk matching minKey {minKey}, "
- "maxKey {maxKey}, and epoch {epoch} when trying to bump its version. The collection "
- "must have been dropped concurrently or had its shard key refined. Returning success.",
- "ensureChunkVersionIsGreaterThan did not find a matching chunk when trying to bump its "
- "version. The collection must have been dropped concurrently or had its shard key "
- "refined. Returning success.",
- "minKey"_attr = minKey,
- "maxKey"_attr = maxKey,
- "epoch"_attr = version.epoch());
+ LOGV2(23888,
+ "ensureChunkVersionIsGreaterThan did not find a chunk matching minKey {minKey}, "
+ "maxKey {maxKey}, and epoch {epoch} when trying to bump its version. The "
+ "collection "
+ "must have been dropped concurrently or had its shard key refined. Returning "
+ "success.",
+ "ensureChunkVersionIsGreaterThan did not find a matching chunk when trying to bump "
+ "its "
+ "version. The collection must have been dropped concurrently or had its shard key "
+ "refined. Returning success.",
+ "minKey"_attr = minKey,
+ "maxKey"_attr = maxKey,
+ "epoch"_attr = version.epoch());
}
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
index 68845265eae..6018367ac9f 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp
@@ -48,14 +48,6 @@ const KeyPattern kKeyPattern(BSON("x" << 1));
class ClearJumboFlagTest : public ConfigServerTestFixture {
public:
- const NamespaceString& ns() {
- return _namespace;
- }
-
- const OID& epoch() {
- return _epoch;
- }
-
ChunkRange jumboChunk() {
return ChunkRange(BSON("x" << MINKEY), BSON("x" << 0));
}
@@ -67,76 +59,117 @@ public:
protected:
void setUp() override {
ConfigServerTestFixture::setUp();
-
ShardType shard;
- shard.setName("shard");
+ shard.setName(_shardName);
shard.setHost("shard:12");
-
setupShards({shard});
+ }
+ void makeCollection(const NamespaceString& nss,
+ const UUID& collUuid,
+ const OID& epoch,
+ const boost::optional<Timestamp>& timestamp) {
ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setNS(_namespace);
- chunk.setVersion({12, 7, _epoch, boost::none /* timestamp */});
- chunk.setShard(shard.getName());
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(collUuid);
+ chunk.setVersion({12, 7, epoch, timestamp});
+ chunk.setShard(_shardName);
chunk.setMin(jumboChunk().getMin());
chunk.setMax(jumboChunk().getMax());
chunk.setJumbo(true);
ChunkType otherChunk;
otherChunk.setName(OID::gen());
- otherChunk.setNS(_namespace);
- otherChunk.setVersion({14, 7, _epoch, boost::none /* timestamp */});
- otherChunk.setShard(shard.getName());
+ otherChunk.setNS(nss);
+ otherChunk.setCollectionUUID(collUuid);
+ otherChunk.setVersion({14, 7, epoch, timestamp});
+ otherChunk.setShard(_shardName);
otherChunk.setMin(nonJumboChunk().getMin());
otherChunk.setMax(nonJumboChunk().getMax());
- setupCollection(_namespace, kKeyPattern, {chunk, otherChunk});
+ setupCollection(nss, kKeyPattern, {chunk, otherChunk});
}
-private:
- const NamespaceString _namespace{"TestDB.TestColl"};
- const OID _epoch{OID::gen()};
+ const std::string _shardName = "shard";
+ const NamespaceString _nss1{"TestDB.TestColl1"};
+ const NamespaceString _nss2{"TestDB.TestColl2"};
};
TEST_F(ClearJumboFlagTest, ClearJumboShouldBumpVersion) {
- const auto collEpoch = epoch();
- const auto collTimestamp = boost::none;
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collUuid = UUID::gen();
+ const auto collEpoch = OID::gen();
+ makeCollection(nss, collUuid, collEpoch, collTimestamp);
+
+ ShardingCatalogManager::get(operationContext())
+ ->clearJumboFlag(operationContext(), nss, collEpoch, jumboChunk());
+
+ const auto nssOrUuid =
+ collTimestamp ? NamespaceStringOrUUID(nss.db().toString(), collUuid) : nss;
- ShardingCatalogManager::get(operationContext())
- ->clearJumboFlag(operationContext(), ns(), collEpoch, jumboChunk());
+ auto chunkDoc = uassertStatusOK(getChunkDoc(
+ operationContext(), nssOrUuid, jumboChunk().getMin(), collEpoch, collTimestamp));
+ ASSERT_FALSE(chunkDoc.getJumbo());
+ ASSERT_EQ(ChunkVersion(15, 0, collEpoch, collTimestamp), chunkDoc.getVersion());
+ };
- auto chunkDoc = uassertStatusOK(
- getChunkDoc(operationContext(), jumboChunk().getMin(), collEpoch, collTimestamp));
- ASSERT_FALSE(chunkDoc.getJumbo());
- ASSERT_EQ(ChunkVersion(15, 0, collEpoch, collTimestamp), chunkDoc.getVersion());
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(ClearJumboFlagTest, ClearJumboShouldNotBumpVersionIfChunkNotJumbo) {
- const auto collEpoch = epoch();
- const auto collTimestamp = boost::none;
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collUuid = UUID::gen();
+ const auto collEpoch = OID::gen();
+ makeCollection(nss, collUuid, collEpoch, collTimestamp);
+
+ ShardingCatalogManager::get(operationContext())
+ ->clearJumboFlag(operationContext(), nss, collEpoch, nonJumboChunk());
+
+ const auto nssOrUuid =
+ collTimestamp ? NamespaceStringOrUUID(nss.db().toString(), collUuid) : nss;
- ShardingCatalogManager::get(operationContext())
- ->clearJumboFlag(operationContext(), ns(), collEpoch, nonJumboChunk());
+ auto chunkDoc = uassertStatusOK(getChunkDoc(
+ operationContext(), nssOrUuid, nonJumboChunk().getMin(), collEpoch, collTimestamp));
+ ASSERT_FALSE(chunkDoc.getJumbo());
+ ASSERT_EQ(ChunkVersion(14, 7, collEpoch, collTimestamp), chunkDoc.getVersion());
+ };
- auto chunkDoc = uassertStatusOK(
- getChunkDoc(operationContext(), nonJumboChunk().getMin(), collEpoch, collTimestamp));
- ASSERT_FALSE(chunkDoc.getJumbo());
- ASSERT_EQ(ChunkVersion(14, 7, collEpoch, collTimestamp), chunkDoc.getVersion());
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(ClearJumboFlagTest, AssertsOnEpochMismatch) {
- ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext())
- ->clearJumboFlag(operationContext(), ns(), OID::gen(), jumboChunk()),
- AssertionException,
- ErrorCodes::StaleEpoch);
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collUuid = UUID::gen();
+ const auto collEpoch = OID::gen();
+ makeCollection(nss, collUuid, collEpoch, collTimestamp);
+
+ ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext())
+ ->clearJumboFlag(operationContext(), nss, OID::gen(), jumboChunk()),
+ AssertionException,
+ ErrorCodes::StaleEpoch);
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(ClearJumboFlagTest, AssertsIfChunkCantBeFound) {
- ChunkRange imaginaryChunk(BSON("x" << 0), BSON("x" << 10));
- ASSERT_THROWS(ShardingCatalogManager::get(operationContext())
- ->clearJumboFlag(operationContext(), ns(), OID::gen(), imaginaryChunk),
- AssertionException);
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+ const auto collUuid = UUID::gen();
+ makeCollection(nss, collUuid, collEpoch, collTimestamp);
+
+ ChunkRange imaginaryChunk(BSON("x" << 0), BSON("x" << 10));
+ ASSERT_THROWS(ShardingCatalogManager::get(operationContext())
+ ->clearJumboFlag(operationContext(), nss, OID::gen(), imaginaryChunk),
+ AssertionException);
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
} // namespace
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
index 06280859952..6b618e36140 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
@@ -35,9 +35,6 @@
namespace mongo {
namespace {
-const NamespaceString kNss("TestDB", "TestColl");
-const KeyPattern kKeyPattern(BSON("x" << 1));
-
class EnsureChunkVersionIsGreaterThanTest : public ConfigServerTestFixture {
protected:
std::string _shardName = "shard0000";
@@ -48,15 +45,20 @@ protected:
shard.setHost(_shardName + ":12");
setupShards({shard});
}
+ const NamespaceString _nss{"TestDB", "TestColl"};
+ const UUID _collUuid = UUID::gen();
+ const KeyPattern _keyPattern{BSON("x" << 1)};
};
ChunkType generateChunkType(const NamespaceString& nss,
+ const UUID& collUuid,
const ChunkVersion& chunkVersion,
const ShardId& shardId,
const BSONObj& minKey,
const BSONObj& maxKey) {
ChunkType chunkType;
chunkType.setName(OID::gen());
+ chunkType.setCollectionUUID(collUuid);
chunkType.setNS(nss);
chunkType.setVersion(chunkVersion);
chunkType.setShard(shardId);
@@ -90,9 +92,10 @@ void assertChunkVersionWasBumpedTo(const ChunkType& chunkTypeBefore,
ASSERT(chunkTypeBefore.getHistory() == chunkTypeAfter.getHistory());
}
-TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunksFoundFoundReturnsSuccess) {
+TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoCollectionFoundReturnsSuccess) {
const auto requestedChunkType =
- generateChunkType(kNss,
+ generateChunkType(_nss,
+ _collUuid,
ChunkVersion(10, 2, OID::gen(), boost::none /* timestamp */),
ShardId(_shardName),
BSON("a" << 1),
@@ -100,57 +103,62 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunksFoundFoundReturnsSuccess)
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
+ boost::none,
+ boost::none,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
requestedChunkType.getVersion());
}
-TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingEpochFoundReturnsSuccess) {
- const auto collEpoch1 = OID::gen();
- const auto collTimestamp1 = boost::none;
+TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundReturnsSuccess) {
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = boost::none;
- const auto requestedChunkType =
- generateChunkType(kNss,
- ChunkVersion(10, 2, collEpoch1, collTimestamp1),
- ShardId(_shardName),
- BSON("a" << 1),
- BSON("a" << 10));
- // Epoch is different.
+ const auto requestedChunkType = generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion(10, 2, collEpoch, collTimestamp),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
- const auto collEpoch2 = OID::gen();
- const auto collTimestamp2 = boost::none;
ChunkType existingChunkType = requestedChunkType;
- existingChunkType.setVersion(ChunkVersion(10, 2, collEpoch2, collTimestamp2));
- setupCollection(kNss, kKeyPattern, {existingChunkType});
+ // Min key is different.
+ existingChunkType.setMin(BSON("a" << -1));
+ setupCollection(_nss, _keyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
+ boost::none,
+ boost::none,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
requestedChunkType.getVersion());
assertChunkHasNotChanged(
existingChunkType,
- getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch2, collTimestamp2));
+ getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp));
}
-TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundReturnsSuccess) {
+TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundReturnsSuccess) {
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(kNss,
+ const auto requestedChunkType = generateChunkType(_nss,
+ _collUuid,
ChunkVersion(10, 2, collEpoch, collTimestamp),
ShardId(_shardName),
BSON("a" << 1),
BSON("a" << 10));
ChunkType existingChunkType = requestedChunkType;
- // Min key is different.
- existingChunkType.setMin(BSON("a" << -1));
- setupCollection(kNss, kKeyPattern, {existingChunkType});
+ // Max key is different.
+ existingChunkType.setMax(BSON("a" << 20));
+ setupCollection(_nss, _keyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
+ _nss,
+ _collUuid,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
requestedChunkType.getVersion());
@@ -160,53 +168,67 @@ TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMinKeyFoundRetu
getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp));
}
-TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoChunkWithMatchingMaxKeyFoundReturnsSuccess) {
+TEST_F(EnsureChunkVersionIsGreaterThanTest,
+ IfChunkMatchingRequestedChunkFoundBumpsChunkVersionAndReturnsSuccess) {
const auto collEpoch = OID::gen();
const auto collTimestamp = boost::none;
- const auto requestedChunkType = generateChunkType(kNss,
+ const auto requestedChunkType = generateChunkType(_nss,
+ _collUuid,
ChunkVersion(10, 2, collEpoch, collTimestamp),
ShardId(_shardName),
BSON("a" << 1),
BSON("a" << 10));
- ChunkType existingChunkType = requestedChunkType;
- // Max key is different.
- existingChunkType.setMax(BSON("a" << 20));
- setupCollection(kNss, kKeyPattern, {existingChunkType});
+ const auto existingChunkType = requestedChunkType;
+ const auto highestChunkType = generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion(20, 3, collEpoch, collTimestamp),
+ ShardId("shard0001"),
+ BSON("a" << 11),
+ BSON("a" << 20));
+ setupCollection(_nss, _keyPattern, {existingChunkType, highestChunkType});
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
+ boost::none,
+ boost::none,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
requestedChunkType.getVersion());
- assertChunkHasNotChanged(
+ assertChunkVersionWasBumpedTo(
existingChunkType,
- getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp));
+ getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp),
+ ChunkVersion(
+ highestChunkType.getVersion().majorVersion() + 1, 0, collEpoch, collTimestamp));
}
TEST_F(EnsureChunkVersionIsGreaterThanTest,
- IfChunkMatchingRequestedChunkFoundBumpsChunkVersionAndReturnsSuccess) {
+ IfChunkMatchingRequestedChunkFoundBumpsChunkVersionAndReturnsSuccessNew) {
const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ const auto collTimestamp = Timestamp(42);
- const auto requestedChunkType = generateChunkType(kNss,
+ const auto requestedChunkType = generateChunkType(_nss,
+ _collUuid,
ChunkVersion(10, 2, collEpoch, collTimestamp),
ShardId(_shardName),
BSON("a" << 1),
BSON("a" << 10));
const auto existingChunkType = requestedChunkType;
- const auto highestChunkType = generateChunkType(kNss,
+ const auto highestChunkType = generateChunkType(_nss,
+ _collUuid,
ChunkVersion(20, 3, collEpoch, collTimestamp),
ShardId("shard0001"),
BSON("a" << 11),
BSON("a" << 20));
- setupCollection(kNss, kKeyPattern, {existingChunkType, highestChunkType});
+ setupCollection(_nss, _keyPattern, {existingChunkType, highestChunkType});
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
+ _nss,
+ _collUuid,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
requestedChunkType.getVersion());
@@ -224,7 +246,38 @@ TEST_F(
const auto collEpoch = OID::gen();
const auto collTimestamp = boost::none;
- const auto requestedChunkType = generateChunkType(kNss,
+ const auto requestedChunkType = generateChunkType(_nss,
+ _collUuid,
+ ChunkVersion(10, 2, collEpoch, collTimestamp),
+ ShardId(_shardName),
+ BSON("a" << 1),
+ BSON("a" << 10));
+
+ ChunkType existingChunkType = requestedChunkType;
+ existingChunkType.setVersion(ChunkVersion(11, 1, collEpoch, collTimestamp));
+ setupCollection(_nss, _keyPattern, {existingChunkType});
+
+ ShardingCatalogManager::get(operationContext())
+ ->ensureChunkVersionIsGreaterThan(operationContext(),
+ boost::none,
+ boost::none,
+ requestedChunkType.getMin(),
+ requestedChunkType.getMax(),
+ requestedChunkType.getVersion());
+
+ assertChunkHasNotChanged(
+ existingChunkType,
+ getChunkDoc(operationContext(), existingChunkType.getMin(), collEpoch, collTimestamp));
+}
+
+TEST_F(
+ EnsureChunkVersionIsGreaterThanTest,
+ IfChunkMatchingRequestedChunkFoundAndHasHigherChunkVersionReturnsSuccessWithoutBumpingChunkVersionNew) {
+ const auto collEpoch = OID::gen();
+ const auto collTimestamp = Timestamp(42);
+
+ const auto requestedChunkType = generateChunkType(_nss,
+ _collUuid,
ChunkVersion(10, 2, collEpoch, collTimestamp),
ShardId(_shardName),
BSON("a" << 1),
@@ -232,10 +285,12 @@ TEST_F(
ChunkType existingChunkType = requestedChunkType;
existingChunkType.setVersion(ChunkVersion(11, 1, collEpoch, collTimestamp));
- setupCollection(kNss, kKeyPattern, {existingChunkType});
+ setupCollection(_nss, _keyPattern, {existingChunkType});
ShardingCatalogManager::get(operationContext())
->ensureChunkVersionIsGreaterThan(operationContext(),
+ _nss,
+ _collUuid,
requestedChunkType.getMin(),
requestedChunkType.getMax(),
requestedChunkType.getVersion());
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 04f002fb135..23299c6cb21 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -51,589 +51,643 @@ protected:
shard.setHost(_shardName + ":12");
setupShards({shard});
}
-};
-const NamespaceString kNamespace("TestDB.TestColl");
-const KeyPattern kKeyPattern(BSON("x" << 1));
+ const NamespaceString _nss1{"TestDB.TestColl1"};
+ const NamespaceString _nss2{"TestDB.TestColl2"};
+ const KeyPattern _keyPattern{BSON("x" << 1)};
+};
TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
- ChunkType chunk;
- chunk.setName(OID::gen());
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId("shard0000"));
-
- // Construct chunk to be merged
- auto chunk2(chunk);
- chunk2.setName(OID::gen());
-
- auto chunkMin = BSON("a" << 1);
- auto chunkBound = BSON("a" << 5);
- auto chunkMax = BSON("a" << 10);
- // first chunk boundaries
- chunk.setMin(chunkMin);
- chunk.setMax(chunkBound);
- // second chunk boundaries
- chunk2.setMin(chunkBound);
- chunk2.setMax(chunkMax);
-
- std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
-
- setupCollection(kNamespace, kKeyPattern, {chunk, chunk2});
-
- Timestamp validAfter{100, 0};
-
- auto versions = assertGet(ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- kNamespace,
- origVersion.epoch(),
- chunkBoundaries,
- "shard0000",
- validAfter));
-
- auto collVersion = assertGet(ChunkVersion::parseWithField(versions, "collectionVersion"));
- auto shardVersion = assertGet(ChunkVersion::parseWithField(versions, "shardVersion"));
-
- ASSERT_TRUE(origVersion.isOlderThan(shardVersion));
- ASSERT_EQ(collVersion, shardVersion);
-
- // Check for increment on mergedChunk's minor version
- auto expectedShardVersion = ChunkVersion(origVersion.majorVersion(),
- origVersion.minorVersion() + 1,
- origVersion.epoch(),
- origVersion.getTimestamp());
- ASSERT_EQ(expectedShardVersion, shardVersion);
-
- auto findResponse = uassertStatusOK(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- ChunkType::ConfigNS,
- BSON(ChunkType::ns() << "TestDB.TestColl"),
- BSON(ChunkType::lastmod << -1),
- boost::none));
-
- const auto& chunksVector = findResponse.docs;
-
- // There should be exactly one chunk left in the collection
- ASSERT_EQ(1u, chunksVector.size());
-
- // MergedChunk should have range [chunkMin, chunkMax]
- auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(
- chunksVector.front(), collVersion.epoch(), collVersion.getTimestamp()));
- ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
- ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
-
- // Check that the shard version returned by the merge matches the CSRS one
- ASSERT_EQ(shardVersion, mergedChunk.getVersion());
-
- // Make sure history is there
- ASSERT_EQ(1UL, mergedChunk.getHistory().size());
- ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+
+ const auto collUuid = UUID::gen();
+ ChunkType chunk;
+ chunk.setName(OID::gen());
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(collUuid);
+
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
+
+ // Construct chunk to be merged
+ auto chunk2(chunk);
+ chunk2.setName(OID::gen());
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkMax);
+
+ std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
+
+ setupCollection(nss, _keyPattern, {chunk, chunk2});
+
+ Timestamp validAfter{100, 0};
+
+ auto versions = assertGet(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
+
+ auto collVersion = assertGet(ChunkVersion::parseWithField(versions, "collectionVersion"));
+ auto shardVersion = assertGet(ChunkVersion::parseWithField(versions, "shardVersion"));
+
+ ASSERT_TRUE(origVersion.isOlderThan(shardVersion));
+ ASSERT_EQ(collVersion, shardVersion);
+
+ // Check for increment on mergedChunk's minor version
+ auto expectedShardVersion = ChunkVersion(origVersion.majorVersion(),
+ origVersion.minorVersion() + 1,
+ origVersion.epoch(),
+ origVersion.getTimestamp());
+ ASSERT_EQ(expectedShardVersion, shardVersion);
+
+
+ const auto query = collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
+ : BSON(ChunkType::ns(nss.ns()));
+ auto findResponse = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ query,
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly one chunk left in the collection
+ ASSERT_EQ(1u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(
+ chunksVector.front(), collVersion.epoch(), collVersion.getTimestamp()));
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
+
+ // Check that the shard version returned by the merge matches the CSRS one
+ ASSERT_EQ(shardVersion, mergedChunk.getVersion());
+
+ // Make sure history is there
+ ASSERT_EQ(1UL, mergedChunk.getHistory().size());
+ ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
- const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
-
- ChunkType chunk;
- chunk.setName(OID::gen());
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId("shard0000"));
-
- // Construct chunks to be merged
- auto chunk2(chunk);
- auto chunk3(chunk);
- chunk2.setName(OID::gen());
- chunk3.setName(OID::gen());
-
- auto chunkMin = BSON("a" << 1);
- auto chunkBound = BSON("a" << 5);
- auto chunkBound2 = BSON("a" << 7);
- auto chunkMax = BSON("a" << 10);
- // first chunk boundaries
- chunk.setMin(chunkMin);
- chunk.setMax(chunkBound);
- // second chunk boundaries
- chunk2.setMin(chunkBound);
- chunk2.setMax(chunkBound2);
- // third chunk boundaries
- chunk3.setMin(chunkBound2);
- chunk3.setMax(chunkMax);
-
- // Record chunk boundaries for passing into commitChunkMerge
- std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkBound2, chunkMax};
-
- setupCollection(kNamespace, kKeyPattern, {chunk, chunk2, chunk3});
-
- Timestamp validAfter{100, 0};
-
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- kNamespace,
- origVersion.epoch(),
- chunkBoundaries,
- "shard0000",
- validAfter));
-
- auto findResponse = uassertStatusOK(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- ChunkType::ConfigNS,
- BSON(ChunkType::ns() << "TestDB.TestColl"),
- BSON(ChunkType::lastmod << -1),
- boost::none));
-
- const auto& chunksVector = findResponse.docs;
-
- // There should be exactly one chunk left in the collection
- ASSERT_EQ(1u, chunksVector.size());
-
- // MergedChunk should have range [chunkMin, chunkMax]
- auto mergedChunk =
- uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
- ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
- ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
-
- {
- // Check for increment on mergedChunk's minor version
- ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
- ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
- }
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+ const auto collUuid = UUID::gen();
+ ChunkType chunk;
+ chunk.setName(OID::gen());
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(collUuid);
+
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
- // Make sure history is there
- ASSERT_EQ(1UL, mergedChunk.getHistory().size());
- ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
+ // Construct chunks to be merged
+ auto chunk2(chunk);
+ auto chunk3(chunk);
+ chunk2.setName(OID::gen());
+ chunk3.setName(OID::gen());
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkBound2 = BSON("a" << 7);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkBound2);
+ // third chunk boundaries
+ chunk3.setMin(chunkBound2);
+ chunk3.setMax(chunkMax);
+
+ // Record chunk boundaries for passing into commitChunkMerge
+ std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkBound2, chunkMax};
+
+ setupCollection(nss, _keyPattern, {chunk, chunk2, chunk3});
+
+ Timestamp validAfter{100, 0};
+
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
+
+ const auto query = collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
+ : BSON(ChunkType::ns(nss.ns()));
+ auto findResponse = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ query,
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly one chunk left in the collection
+ ASSERT_EQ(1u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ auto mergedChunk = uassertStatusOK(
+ ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
+
+ {
+ // Check for increment on mergedChunk's minor version
+ ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
+ }
+
+ // Make sure history is there
+ ASSERT_EQ(1UL, mergedChunk.getHistory().size());
+ ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
- const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
-
- ChunkType chunk, otherChunk;
- chunk.setName(OID::gen());
- chunk.setNS(kNamespace);
- otherChunk.setName(OID::gen());
- otherChunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId("shard0000"));
-
- // Construct chunk to be merged
- auto chunk2(chunk);
- chunk2.setName(OID::gen());
-
- auto chunkMin = BSON("a" << 1);
- auto chunkBound = BSON("a" << 5);
- auto chunkMax = BSON("a" << 10);
- // first chunk boundaries
- chunk.setMin(chunkMin);
- chunk.setMax(chunkBound);
- // second chunk boundaries
- chunk2.setMin(chunkBound);
- chunk2.setMax(chunkMax);
-
- // Record chunk boundaries for passing into commitChunkMerge
- std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
-
- // Set up other chunk with competing version
- auto competingVersion = ChunkVersion(2, 1, collEpoch, boost::none /* timestamp */);
- otherChunk.setVersion(competingVersion);
- otherChunk.setShard(ShardId("shard0000"));
- otherChunk.setMin(BSON("a" << 10));
- otherChunk.setMax(BSON("a" << 20));
-
- setupCollection(kNamespace, kKeyPattern, {chunk, chunk2, otherChunk});
-
- Timestamp validAfter{100, 0};
-
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- kNamespace,
- collEpoch,
- chunkBoundaries,
- "shard0000",
- validAfter));
-
- auto findResponse = uassertStatusOK(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- ChunkType::ConfigNS,
- BSON(ChunkType::ns() << "TestDB.TestColl"),
- BSON(ChunkType::lastmod << -1),
- boost::none));
-
- const auto& chunksVector = findResponse.docs;
-
- // There should be exactly two chunks left in the collection: one merged, one competing
- ASSERT_EQ(2u, chunksVector.size());
-
- // MergedChunk should have range [chunkMin, chunkMax]
- auto mergedChunk =
- uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
- ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
- ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
-
- {
- // Check for minor increment on collection version
- ASSERT_EQ(competingVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
- ASSERT_EQ(competingVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
- }
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
- // Make sure history is there
- ASSERT_EQ(1UL, mergedChunk.getHistory().size());
- ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
+ const auto collUuid = UUID::gen();
+ ChunkType chunk, otherChunk;
+ chunk.setName(OID::gen());
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(collUuid);
+ otherChunk.setName(OID::gen());
+ otherChunk.setNS(nss);
+ otherChunk.setCollectionUUID(collUuid);
+
+ auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
+
+ // Construct chunk to be merged
+ auto chunk2(chunk);
+ chunk2.setName(OID::gen());
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkMax);
+
+ // Record chunk boundaries for passing into commitChunkMerge
+ std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
+
+ // Set up other chunk with competing version
+ auto competingVersion = ChunkVersion(2, 1, collEpoch, collTimestamp);
+ otherChunk.setVersion(competingVersion);
+ otherChunk.setShard(ShardId("shard0000"));
+ otherChunk.setMin(BSON("a" << 10));
+ otherChunk.setMax(BSON("a" << 20));
+
+ setupCollection(nss, _keyPattern, {chunk, chunk2, otherChunk});
+
+ Timestamp validAfter{100, 0};
+
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
+
+ const auto query = collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
+ : BSON(ChunkType::ns(nss.ns()));
+ auto findResponse = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ query,
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly two chunks left in the collection: one merged, one competing
+ ASSERT_EQ(2u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ auto mergedChunk = uassertStatusOK(
+ ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
+
+ {
+ // Check for minor increment on collection version
+ ASSERT_EQ(competingVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
+ ASSERT_EQ(competingVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
+ }
+
+ // Make sure history is there
+ ASSERT_EQ(1UL, mergedChunk.getHistory().size());
+ ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
- const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
-
- ChunkType chunk;
- chunk.setName(OID::gen());
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId("shard0000"));
-
- // Construct chunk to be merged
- auto chunk2(chunk);
- chunk2.setName(OID::gen());
-
- auto chunkMin = BSON("a" << 1);
- auto chunkBound = BSON("a" << 5);
- auto chunkMax = BSON("a" << 10);
- // first chunk boundaries
- chunk.setMin(chunkMin);
- chunk.setMax(chunkBound);
- // second chunk boundaries
- chunk2.setMin(chunkBound);
- chunk2.setMax(chunkMax);
-
- // Record chunk boundaries for passing into commitChunkMerge
- std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
-
- // Set up unmerged chunk
- auto otherChunk(chunk);
- otherChunk.setName(OID::gen());
- otherChunk.setMin(BSON("a" << 10));
- otherChunk.setMax(BSON("a" << 20));
-
- setupCollection(kNamespace, kKeyPattern, {chunk, chunk2, otherChunk});
-
- Timestamp validAfter{1};
-
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- kNamespace,
- origVersion.epoch(),
- chunkBoundaries,
- "shard0000",
- validAfter));
-
- auto findResponse = uassertStatusOK(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- ChunkType::ConfigNS,
- BSON(ChunkType::ns() << "TestDB.TestColl"),
- BSON(ChunkType::lastmod << -1),
- boost::none));
-
- const auto& chunksVector = findResponse.docs;
-
- // There should be exactly two chunks left in the collection: one merged, one untouched
- ASSERT_EQ(2u, chunksVector.size());
-
- // MergedChunk should have range [chunkMin, chunkMax]
- auto mergedChunk =
- uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
- ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
- ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
-
- {
- // Check for increment on mergedChunk's minor version
- ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
- ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
- }
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+
+ const auto collUuid = UUID::gen();
+ ChunkType chunk;
+ chunk.setName(OID::gen());
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(collUuid);
- // OtherChunk should have been left alone
- auto foundOtherChunk =
- uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.back(), collEpoch, collTimestamp));
- ASSERT_BSONOBJ_EQ(otherChunk.getMin(), foundOtherChunk.getMin());
- ASSERT_BSONOBJ_EQ(otherChunk.getMax(), foundOtherChunk.getMax());
+ auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
+
+ // Construct chunk to be merged
+ auto chunk2(chunk);
+ chunk2.setName(OID::gen());
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkMax);
+
+ // Record chunk boundaries for passing into commitChunkMerge
+ std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
+
+ // Set up unmerged chunk
+ auto otherChunk(chunk);
+ otherChunk.setName(OID::gen());
+ otherChunk.setMin(BSON("a" << 10));
+ otherChunk.setMax(BSON("a" << 20));
+
+ setupCollection(nss, _keyPattern, {chunk, chunk2, otherChunk});
+
+ Timestamp validAfter{1};
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
+ const auto query = collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
+ : BSON(ChunkType::ns(nss.ns()));
+ auto findResponse = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ query,
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly two chunks left in the collection: one merged, one untouched
+ ASSERT_EQ(2u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ auto mergedChunk = uassertStatusOK(
+ ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
+
+ {
+ // Check for increment on mergedChunk's minor version
+ ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
+ }
+
+ // OtherChunk should have been left alone
+ auto foundOtherChunk = uassertStatusOK(
+ ChunkType::fromConfigBSON(chunksVector.back(), collEpoch, collTimestamp));
+ ASSERT_BSONOBJ_EQ(otherChunk.getMin(), foundOtherChunk.getMin());
+ ASSERT_BSONOBJ_EQ(otherChunk.getMax(), foundOtherChunk.getMax());
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(MergeChunkTest, NonExistingNamespace) {
- ChunkType chunk;
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId("shard0000"));
-
- // Construct chunk to be merged
- auto chunk2(chunk);
-
- auto chunkMin = BSON("a" << 1);
- auto chunkBound = BSON("a" << 5);
- auto chunkMax = BSON("a" << 10);
- // first chunk boundaries
- chunk.setMin(chunkMin);
- chunk.setMax(chunkBound);
- chunk2.setMin(chunkBound);
- chunk2.setMax(chunkMax);
-
- // Record chunk boundaries for passing into commitChunkMerge
- std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
-
- setupCollection(kNamespace, kKeyPattern, {chunk, chunk2});
-
- Timestamp validAfter{1};
-
- auto mergeStatus = ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.NonExistingColl"),
- origVersion.epoch(),
- chunkBoundaries,
- "shard0000",
- validAfter);
- ASSERT_NOT_OK(mergeStatus);
-}
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
-TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
- ChunkType chunk;
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId("shard0000"));
-
- // Construct chunk to be merged
- auto chunk2(chunk);
-
- auto chunkMin = BSON("a" << 1);
- auto chunkBound = BSON("a" << 5);
- auto chunkMax = BSON("a" << 10);
- // first chunk boundaries
- chunk.setMin(chunkMin);
- chunk.setMax(chunkBound);
- chunk2.setMin(chunkBound);
- chunk2.setMax(chunkMax);
-
- // Record chunk baoundaries for passing into commitChunkMerge
- std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
-
- setupCollection(kNamespace, kKeyPattern, {chunk, chunk2});
-
- Timestamp validAfter{1};
-
- auto mergeStatus = ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- kNamespace,
- OID::gen(),
- chunkBoundaries,
- "shard0000",
- validAfter);
- ASSERT_EQ(ErrorCodes::StaleEpoch, mergeStatus);
-}
+ ChunkType chunk;
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(UUID::gen());
-TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
- const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
-
- ChunkType chunk;
- chunk.setName(OID::gen());
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId("shard0000"));
-
- // Construct chunk to be merged
- auto chunk2(chunk);
- chunk2.setName(OID::gen());
-
- auto chunkMin = BSON("a" << 1);
- auto chunkBound = BSON("a" << 5);
- auto chunkMax = BSON("a" << 10);
- // first chunk boundaries
- chunk.setMin(chunkMin);
- chunk.setMax(chunkBound);
- // second chunk boundaries
- chunk2.setMin(chunkBound);
- chunk2.setMax(chunkMax);
-
- std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
-
- ChunkType mergedChunk(chunk);
- auto mergedVersion = chunk.getVersion();
- mergedVersion.incMinor();
- mergedChunk.setVersion(mergedVersion);
- mergedChunk.setMax(chunkMax);
-
- setupCollection(kNamespace, kKeyPattern, {mergedChunk});
-
- Timestamp validAfter{1};
-
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- kNamespace,
- origVersion.epoch(),
- chunkBoundaries,
- "shard0000",
- validAfter));
-
- // Verify that no change to config.chunks happened.
- auto findResponse = uassertStatusOK(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- ChunkType::ConfigNS,
- BSON(ChunkType::ns() << "TestDB.TestColl"),
- BSON(ChunkType::lastmod << -1),
- boost::none));
-
- const auto& chunksVector = findResponse.docs;
-
- // There should be exactly one chunk left in the collection
- ASSERT_EQ(1u, chunksVector.size());
-
- // MergedChunk should have range [chunkMin, chunkMax]
- ChunkType foundChunk =
- uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
- ASSERT_BSONOBJ_EQ(mergedChunk.toConfigBSON(), foundChunk.toConfigBSON());
-}
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
-TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
- const OID epoch = OID::gen();
- const std::vector<BSONObj> chunkBoundaries{
- BSON("a" << 100), BSON("a" << 200), BSON("a" << 30), BSON("a" << 400)};
+ // Construct chunk to be merged
+ auto chunk2(chunk);
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkMax);
+
+ // Record chunk boundaries for passing into commitChunkMerge
+ std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
+
+ setupCollection(nss, _keyPattern, {chunk, chunk2});
+
+ Timestamp validAfter{1};
+
+ auto mergeStatus = ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(operationContext(),
+ NamespaceString("TestDB.NonExistingColl"),
+ collEpoch,
+ chunkBoundaries,
+ "shard0000",
+ validAfter);
+ ASSERT_NOT_OK(mergeStatus);
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
+}
- {
- std::vector<ChunkType> originalChunks;
- ChunkVersion version = ChunkVersion(1, 0, epoch, boost::none /* timestamp */);
+TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
ChunkType chunk;
- chunk.setName(OID::gen());
- chunk.setNS(kNamespace);
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(UUID::gen());
+
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
chunk.setShard(ShardId("shard0000"));
- chunk.setVersion(version);
- chunk.setMin(BSON("a" << 100));
- chunk.setMax(BSON("a" << 200));
- originalChunks.push_back(chunk);
+ // Construct chunk to be merged
+ auto chunk2(chunk);
- version.incMinor();
- chunk.setName(OID::gen());
- chunk.setMin(BSON("a" << 200));
- chunk.setMax(BSON("a" << 300));
- chunk.setVersion(version);
- originalChunks.push_back(chunk);
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkMax);
+
+        // Record chunk boundaries for passing into commitChunkMerge
+ std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- version.incMinor();
+ setupCollection(nss, _keyPattern, {chunk, chunk2});
+
+ Timestamp validAfter{1};
+
+ auto mergeStatus =
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), nss, OID::gen(), chunkBoundaries, "shard0000", validAfter);
+ ASSERT_EQ(ErrorCodes::StaleEpoch, mergeStatus);
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
+}
+
+TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+
+ const auto collUuid = UUID::gen();
+ ChunkType chunk;
chunk.setName(OID::gen());
- chunk.setMin(BSON("a" << 300));
- chunk.setMax(BSON("a" << 400));
- chunk.setVersion(version);
- originalChunks.push_back(chunk);
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(collUuid);
- setupCollection(kNamespace, kKeyPattern, originalChunks);
- }
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
- Timestamp validAfter{1};
+ // Construct chunk to be merged
+ auto chunk2(chunk);
+ chunk2.setName(OID::gen());
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkBound = BSON("a" << 5);
+ auto chunkMax = BSON("a" << 10);
+ // first chunk boundaries
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkBound);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound);
+ chunk2.setMax(chunkMax);
+
+ std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
+
+ ChunkType mergedChunk(chunk);
+ auto mergedVersion = chunk.getVersion();
+ mergedVersion.incMinor();
+ mergedChunk.setVersion(mergedVersion);
+ mergedChunk.setMax(chunkMax);
+
+ setupCollection(nss, _keyPattern, {mergedChunk});
+
+ Timestamp validAfter{1};
+
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
+
+ // Verify that no change to config.chunks happened.
+ const auto query = collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
+ : BSON(ChunkType::ns(nss.ns()));
+ auto findResponse = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ query,
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly one chunk left in the collection
+ ASSERT_EQ(1u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ ChunkType foundChunk = uassertStatusOK(
+ ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
+ ASSERT_BSONOBJ_EQ(mergedChunk.toConfigBSON(), foundChunk.toConfigBSON());
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
+}
- ASSERT_EQ(
- ErrorCodes::InvalidOptions,
- ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(
- operationContext(), kNamespace, epoch, chunkBoundaries, "shard0000", validAfter));
+TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+ const std::vector<BSONObj> chunkBoundaries{
+ BSON("a" << 100), BSON("a" << 200), BSON("a" << 30), BSON("a" << 400)};
+
+ {
+ std::vector<ChunkType> originalChunks;
+ ChunkVersion version = ChunkVersion(1, 0, collEpoch, collTimestamp);
+
+ ChunkType chunk;
+ chunk.setName(OID::gen());
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(UUID::gen());
+
+ chunk.setShard(ShardId("shard0000"));
+
+ chunk.setVersion(version);
+ chunk.setMin(BSON("a" << 100));
+ chunk.setMax(BSON("a" << 200));
+ originalChunks.push_back(chunk);
+
+ version.incMinor();
+ chunk.setName(OID::gen());
+ chunk.setMin(BSON("a" << 200));
+ chunk.setMax(BSON("a" << 300));
+ chunk.setVersion(version);
+ originalChunks.push_back(chunk);
+
+ version.incMinor();
+ chunk.setName(OID::gen());
+ chunk.setMin(BSON("a" << 300));
+ chunk.setMax(BSON("a" << 400));
+ chunk.setVersion(version);
+ originalChunks.push_back(chunk);
+
+ setupCollection(nss, _keyPattern, originalChunks);
+ }
+
+ Timestamp validAfter{1};
+
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
- const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
-
- ChunkType chunk1;
- chunk1.setName(OID::gen());
- chunk1.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
- chunk1.setVersion(origVersion);
- chunk1.setShard(ShardId("shard0000"));
-
- auto chunk2(chunk1);
- auto chunk3(chunk1);
- chunk2.setName(OID::gen());
- chunk3.setName(OID::gen());
-
- auto chunkMin = BSON("a" << kMinBSONKey);
- auto chunkBound1 = BSON("a" << BSON("$maxKey" << 1));
- auto chunkBound2 = BSON("a" << BSON("$mixKey" << 1));
- auto chunkMax = BSON("a" << kMaxBSONKey);
-
- // first chunk boundaries
- chunk1.setMin(chunkMin);
- chunk1.setMax(chunkBound1);
- // second chunk boundaries
- chunk2.setMin(chunkBound1);
- chunk2.setMax(chunkBound2);
- // third chunk boundaries
- chunk3.setMin(chunkBound2);
- chunk3.setMax(chunkMax);
-
- setupCollection(kNamespace, kKeyPattern, {chunk1, chunk2, chunk3});
-
- // Record chunk boundaries for passing into commitChunkMerge
- std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound1, chunkBound2, chunkMax};
- Timestamp validAfter{100, 0};
-
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- kNamespace,
- origVersion.epoch(),
- chunkBoundaries,
- "shard0000",
- validAfter));
-
- auto findResponse = uassertStatusOK(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- ChunkType::ConfigNS,
- BSON(ChunkType::ns() << "TestDB.TestColl"),
- BSON(ChunkType::lastmod << -1),
- boost::none));
-
- const auto& chunksVector = findResponse.docs;
-
- // There should be exactly one chunk left in the collection
- ASSERT_EQ(1u, chunksVector.size());
-
- // MergedChunk should have range [chunkMin, chunkMax]
- auto mergedChunk =
- uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
- ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
- ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
-
- {
- // Check for increment on mergedChunk's minor version
- ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
- ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
- }
-
- // Make sure history is there
- ASSERT_EQ(1UL, mergedChunk.getHistory().size());
- ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+
+ const auto collUuid = UUID::gen();
+ ChunkType chunk1;
+ chunk1.setName(OID::gen());
+ chunk1.setNS(nss);
+ chunk1.setCollectionUUID(collUuid);
+
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk1.setVersion(origVersion);
+ chunk1.setShard(ShardId("shard0000"));
+
+ auto chunk2(chunk1);
+ auto chunk3(chunk1);
+ chunk2.setName(OID::gen());
+ chunk3.setName(OID::gen());
+
+ auto chunkMin = BSON("a" << kMinBSONKey);
+ auto chunkBound1 = BSON("a" << BSON("$maxKey" << 1));
+ auto chunkBound2 = BSON("a" << BSON("$mixKey" << 1));
+ auto chunkMax = BSON("a" << kMaxBSONKey);
+
+ // first chunk boundaries
+ chunk1.setMin(chunkMin);
+ chunk1.setMax(chunkBound1);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound1);
+ chunk2.setMax(chunkBound2);
+ // third chunk boundaries
+ chunk3.setMin(chunkBound2);
+ chunk3.setMax(chunkMax);
+
+ setupCollection(nss, _keyPattern, {chunk1, chunk2, chunk3});
+
+ // Record chunk boundaries for passing into commitChunkMerge
+ std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound1, chunkBound2, chunkMax};
+ Timestamp validAfter{100, 0};
+
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), nss, collEpoch, chunkBoundaries, "shard0000", validAfter));
+
+ const auto query = collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
+ : BSON(ChunkType::ns(nss.ns()));
+ auto findResponse = uassertStatusOK(getConfigShard()->exhaustiveFindOnConfig(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ query,
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly one chunk left in the collection
+ ASSERT_EQ(1u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ auto mergedChunk = uassertStatusOK(
+ ChunkType::fromConfigBSON(chunksVector.front(), collEpoch, collTimestamp));
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
+
+ {
+ // Check for increment on mergedChunk's minor version
+ ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
+ }
+
+ // Make sure history is there
+ ASSERT_EQ(1UL, mergedChunk.getHistory().size());
+ ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
} // namespace
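Each merge test above re-reads config.chunks with the same two-way filter after the commit: by collection UUID when the collection has a timestamp, by namespace otherwise. A minimal sketch of that filter, assuming only the ChunkType field helpers already used in the tests:

// Sketch: the config.chunks filter the merge tests build after commitChunkMerge.
// ChunkType::collectionUUID() and ChunkType::ns() are the field helpers used in
// the tests above; collTimestamp, collUuid and nss are the per-test inputs.
BSONObj makeChunkQuery(const boost::optional<Timestamp>& collTimestamp,
                       const UUID& collUuid,
                       const NamespaceString& nss) {
    return collTimestamp ? BSON(ChunkType::collectionUUID() << collUuid)
                         : BSON(ChunkType::ns(nss.ns()));
}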
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index 25d713cc536..e735c1291e4 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -39,9 +39,6 @@ namespace mongo {
namespace {
using unittest::assertGet;
-const NamespaceString kNamespace("TestDB", "TestColl");
-const KeyPattern kKeyPattern(BSON("a" << 1));
-
class SplitChunkTest : public ConfigServerTestFixture {
protected:
std::string _shardName = "shard0000";
@@ -52,426 +49,518 @@ protected:
shard.setHost(_shardName + ":12");
setupShards({shard});
}
+
+ const NamespaceString _nss1{"TestDB", "TestColl1"};
+ const NamespaceString _nss2{"TestDB", "TestColl2"};
+ const KeyPattern _keyPattern{BSON("a" << 1)};
};
TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
- const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
-
- ChunkType chunk;
- chunk.setName(OID::gen());
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId(_shardName));
-
- auto chunkMin = BSON("a" << 1);
- auto chunkMax = BSON("a" << 10);
- chunk.setMin(chunkMin);
- chunk.setMax(chunkMax);
- chunk.setHistory({ChunkHistory(Timestamp(100, 0), ShardId(_shardName)),
- ChunkHistory(Timestamp(90, 0), ShardId("shardY"))});
-
- auto chunkSplitPoint = BSON("a" << 5);
- std::vector<BSONObj> splitPoints{chunkSplitPoint};
-
- setupCollection(kNamespace, kKeyPattern, {chunk});
-
- auto versions = assertGet(ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- kNamespace,
- collEpoch,
- ChunkRange(chunkMin, chunkMax),
- splitPoints,
- "shard0000"));
- auto collVersion = assertGet(ChunkVersion::parseWithField(versions, "collectionVersion"));
- auto shardVersion = assertGet(ChunkVersion::parseWithField(versions, "shardVersion"));
-
- ASSERT_TRUE(origVersion.isOlderThan(shardVersion));
- ASSERT_EQ(collVersion, shardVersion);
-
- // Check for increment on mergedChunk's minor version
- auto expectedShardVersion = ChunkVersion(
- origVersion.majorVersion(), origVersion.minorVersion() + 2, collEpoch, collTimestamp);
- ASSERT_EQ(expectedShardVersion, shardVersion);
- ASSERT_EQ(shardVersion, collVersion);
-
- // First chunkDoc should have range [chunkMin, chunkSplitPoint]
- auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp);
- ASSERT_OK(chunkDocStatus.getStatus());
-
- auto chunkDoc = chunkDocStatus.getValue();
- ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
-
- // Check for increment on first chunkDoc's minor version
- ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
- ASSERT_EQ(origVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
-
- // Make sure the history is there
- ASSERT_EQ(2UL, chunkDoc.getHistory().size());
-
- // Second chunkDoc should have range [chunkSplitPoint, chunkMax]
- auto otherChunkDocStatus =
- getChunkDoc(operationContext(), chunkSplitPoint, collEpoch, collTimestamp);
- ASSERT_OK(otherChunkDocStatus.getStatus());
-
- auto otherChunkDoc = otherChunkDocStatus.getValue();
- ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax());
-
- // Check for increment on second chunkDoc's minor version
- ASSERT_EQ(origVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion());
- ASSERT_EQ(origVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion());
-
- // Make sure the history is there
- ASSERT_EQ(2UL, otherChunkDoc.getHistory().size());
-
- // Both chunks should have the same history
- ASSERT(chunkDoc.getHistory() == otherChunkDoc.getHistory());
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+ const auto collUuid = UUID::gen();
+
+ ChunkType chunk;
+ chunk.setName(OID::gen());
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(collUuid);
+
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId(_shardName));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+ chunk.setHistory({ChunkHistory(Timestamp(100, 0), ShardId(_shardName)),
+ ChunkHistory(Timestamp(90, 0), ShardId("shardY"))});
+
+ auto chunkSplitPoint = BSON("a" << 5);
+ std::vector<BSONObj> splitPoints{chunkSplitPoint};
+
+ setupCollection(nss, _keyPattern, {chunk});
+
+ auto versions = assertGet(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ nss,
+ collEpoch,
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000"));
+ auto collVersion = assertGet(ChunkVersion::parseWithField(versions, "collectionVersion"));
+ auto shardVersion = assertGet(ChunkVersion::parseWithField(versions, "shardVersion"));
+
+ ASSERT_TRUE(origVersion.isOlderThan(shardVersion));
+ ASSERT_EQ(collVersion, shardVersion);
+
+        // Check for increment on the shard version's minor version
+ auto expectedShardVersion = ChunkVersion(
+ origVersion.majorVersion(), origVersion.minorVersion() + 2, collEpoch, collTimestamp);
+ ASSERT_EQ(expectedShardVersion, shardVersion);
+ ASSERT_EQ(shardVersion, collVersion);
+
+ const auto nssOrUuid =
+ collTimestamp ? NamespaceStringOrUUID(nss.db().toString(), collUuid) : nss;
+
+ // First chunkDoc should have range [chunkMin, chunkSplitPoint]
+ auto chunkDocStatus =
+ getChunkDoc(operationContext(), nssOrUuid, chunkMin, collEpoch, collTimestamp);
+ ASSERT_OK(chunkDocStatus.getStatus());
+
+ auto chunkDoc = chunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+
+ // Check for increment on first chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
+
+ // Make sure the history is there
+ ASSERT_EQ(2UL, chunkDoc.getHistory().size());
+
+ // Second chunkDoc should have range [chunkSplitPoint, chunkMax]
+ auto otherChunkDocStatus =
+ getChunkDoc(operationContext(), nssOrUuid, chunkSplitPoint, collEpoch, collTimestamp);
+ ASSERT_OK(otherChunkDocStatus.getStatus());
+
+ auto otherChunkDoc = otherChunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax());
+
+ // Check for increment on second chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion());
+
+ // Make sure the history is there
+ ASSERT_EQ(2UL, otherChunkDoc.getHistory().size());
+
+ // Both chunks should have the same history
+ ASSERT(chunkDoc.getHistory() == otherChunkDoc.getHistory());
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
- const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+ const auto collUuid = UUID::gen();
+
+ ChunkType chunk;
+ chunk.setName(OID::gen());
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(collUuid);
- ChunkType chunk;
- chunk.setName(OID::gen());
- chunk.setNS(kNamespace);
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId(_shardName));
- auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId(_shardName));
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+ chunk.setHistory({ChunkHistory(Timestamp(100, 0), ShardId(_shardName)),
+ ChunkHistory(Timestamp(90, 0), ShardId("shardY"))});
- auto chunkMin = BSON("a" << 1);
- auto chunkMax = BSON("a" << 10);
- chunk.setMin(chunkMin);
- chunk.setMax(chunkMax);
- chunk.setHistory({ChunkHistory(Timestamp(100, 0), ShardId(_shardName)),
- ChunkHistory(Timestamp(90, 0), ShardId("shardY"))});
+ auto chunkSplitPoint = BSON("a" << 5);
+ auto chunkSplitPoint2 = BSON("a" << 7);
+ std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2};
- auto chunkSplitPoint = BSON("a" << 5);
- auto chunkSplitPoint2 = BSON("a" << 7);
- std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2};
+ setupCollection(nss, _keyPattern, {chunk});
+
+ ASSERT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ nss,
+ collEpoch,
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000"));
- setupCollection(kNamespace, kKeyPattern, {chunk});
+ const auto nssOrUuid =
+ collTimestamp ? NamespaceStringOrUUID(nss.db().toString(), collUuid) : nss;
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- kNamespace,
- collEpoch,
- ChunkRange(chunkMin, chunkMax),
- splitPoints,
- "shard0000"));
+ // First chunkDoc should have range [chunkMin, chunkSplitPoint]
+ auto chunkDocStatus =
+ getChunkDoc(operationContext(), nssOrUuid, chunkMin, collEpoch, collTimestamp);
+ ASSERT_OK(chunkDocStatus.getStatus());
- // First chunkDoc should have range [chunkMin, chunkSplitPoint]
- auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp);
- ASSERT_OK(chunkDocStatus.getStatus());
+ auto chunkDoc = chunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
- auto chunkDoc = chunkDocStatus.getValue();
- ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+ // Check for increment on first chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
- // Check for increment on first chunkDoc's minor version
- ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
- ASSERT_EQ(origVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
+ // Make sure the history is there
+ ASSERT_EQ(2UL, chunkDoc.getHistory().size());
- // Make sure the history is there
- ASSERT_EQ(2UL, chunkDoc.getHistory().size());
+ // Second chunkDoc should have range [chunkSplitPoint, chunkSplitPoint2]
+ auto midChunkDocStatus =
+ getChunkDoc(operationContext(), nssOrUuid, chunkSplitPoint, collEpoch, collTimestamp);
+ ASSERT_OK(midChunkDocStatus.getStatus());
- // Second chunkDoc should have range [chunkSplitPoint, chunkSplitPoint2]
- auto midChunkDocStatus =
- getChunkDoc(operationContext(), chunkSplitPoint, collEpoch, collTimestamp);
- ASSERT_OK(midChunkDocStatus.getStatus());
+ auto midChunkDoc = midChunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint2, midChunkDoc.getMax());
- auto midChunkDoc = midChunkDocStatus.getValue();
- ASSERT_BSONOBJ_EQ(chunkSplitPoint2, midChunkDoc.getMax());
+ // Check for increment on second chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), midChunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 2, midChunkDoc.getVersion().minorVersion());
- // Check for increment on second chunkDoc's minor version
- ASSERT_EQ(origVersion.majorVersion(), midChunkDoc.getVersion().majorVersion());
- ASSERT_EQ(origVersion.minorVersion() + 2, midChunkDoc.getVersion().minorVersion());
+ // Make sure the history is there
+ ASSERT_EQ(2UL, midChunkDoc.getHistory().size());
- // Make sure the history is there
- ASSERT_EQ(2UL, midChunkDoc.getHistory().size());
+ // Third chunkDoc should have range [chunkSplitPoint2, chunkMax]
+ auto lastChunkDocStatus =
+ getChunkDoc(operationContext(), nssOrUuid, chunkSplitPoint2, collEpoch, collTimestamp);
+ ASSERT_OK(lastChunkDocStatus.getStatus());
- // Third chunkDoc should have range [chunkSplitPoint2, chunkMax]
- auto lastChunkDocStatus =
- getChunkDoc(operationContext(), chunkSplitPoint2, collEpoch, collTimestamp);
- ASSERT_OK(lastChunkDocStatus.getStatus());
+ auto lastChunkDoc = lastChunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkMax, lastChunkDoc.getMax());
- auto lastChunkDoc = lastChunkDocStatus.getValue();
- ASSERT_BSONOBJ_EQ(chunkMax, lastChunkDoc.getMax());
+ // Check for increment on third chunkDoc's minor version
+ ASSERT_EQ(origVersion.majorVersion(), lastChunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 3, lastChunkDoc.getVersion().minorVersion());
- // Check for increment on third chunkDoc's minor version
- ASSERT_EQ(origVersion.majorVersion(), lastChunkDoc.getVersion().majorVersion());
- ASSERT_EQ(origVersion.minorVersion() + 3, lastChunkDoc.getVersion().minorVersion());
+ // Make sure the history is there
+ ASSERT_EQ(2UL, lastChunkDoc.getHistory().size());
- // Make sure the history is there
- ASSERT_EQ(2UL, lastChunkDoc.getHistory().size());
+ // Both chunks should have the same history
+ ASSERT(chunkDoc.getHistory() == midChunkDoc.getHistory());
+ ASSERT(midChunkDoc.getHistory() == lastChunkDoc.getHistory());
+ };
- // Both chunks should have the same history
- ASSERT(chunkDoc.getHistory() == midChunkDoc.getHistory());
- ASSERT(midChunkDoc.getHistory() == lastChunkDoc.getHistory());
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
- const auto collEpoch = OID::gen();
- const auto collTimestamp = boost::none;
-
- ChunkType chunk, chunk2;
- chunk.setName(OID::gen());
- chunk.setNS(kNamespace);
- chunk2.setName(OID::gen());
- chunk2.setNS(kNamespace);
-
- // set up first chunk
- auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId(_shardName));
-
- auto chunkMin = BSON("a" << 1);
- auto chunkMax = BSON("a" << 10);
- chunk.setMin(chunkMin);
- chunk.setMax(chunkMax);
-
- std::vector<BSONObj> splitPoints;
- auto chunkSplitPoint = BSON("a" << 5);
- splitPoints.push_back(chunkSplitPoint);
-
- // set up second chunk (chunk2)
- auto competingVersion = ChunkVersion(2, 1, collEpoch, collTimestamp);
- chunk2.setVersion(competingVersion);
- chunk2.setShard(ShardId(_shardName));
- chunk2.setMin(BSON("a" << 10));
- chunk2.setMax(BSON("a" << 20));
-
- setupCollection(kNamespace, kKeyPattern, {chunk, chunk2});
-
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- kNamespace,
- collEpoch,
- ChunkRange(chunkMin, chunkMax),
- splitPoints,
- "shard0000"));
-
- // First chunkDoc should have range [chunkMin, chunkSplitPoint]
- auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin, collEpoch, collTimestamp);
- ASSERT_OK(chunkDocStatus.getStatus());
-
- auto chunkDoc = chunkDocStatus.getValue();
- ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
-
- // Check for increment based on the competing chunk version
- ASSERT_EQ(competingVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
- ASSERT_EQ(competingVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
-
- // Second chunkDoc should have range [chunkSplitPoint, chunkMax]
- auto otherChunkDocStatus =
- getChunkDoc(operationContext(), chunkSplitPoint, collEpoch, collTimestamp);
- ASSERT_OK(otherChunkDocStatus.getStatus());
-
- auto otherChunkDoc = otherChunkDocStatus.getValue();
- ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax());
-
- // Check for increment based on the competing chunk version
- ASSERT_EQ(competingVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion());
- ASSERT_EQ(competingVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion());
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+ const auto collUuid = UUID::gen();
+
+ ChunkType chunk, chunk2;
+ chunk.setName(OID::gen());
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(collUuid);
+ chunk2.setName(OID::gen());
+ chunk2.setNS(nss);
+ chunk2.setCollectionUUID(collUuid);
+
+ // set up first chunk
+ auto origVersion = ChunkVersion(1, 2, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId(_shardName));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+
+ std::vector<BSONObj> splitPoints;
+ auto chunkSplitPoint = BSON("a" << 5);
+ splitPoints.push_back(chunkSplitPoint);
+
+ // set up second chunk (chunk2)
+ auto competingVersion = ChunkVersion(2, 1, collEpoch, collTimestamp);
+ chunk2.setVersion(competingVersion);
+ chunk2.setShard(ShardId(_shardName));
+ chunk2.setMin(BSON("a" << 10));
+ chunk2.setMax(BSON("a" << 20));
+
+ setupCollection(nss, _keyPattern, {chunk, chunk2});
+
+ ASSERT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ nss,
+ collEpoch,
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000"));
+
+ const auto nssOrUuid =
+ collTimestamp ? NamespaceStringOrUUID(nss.db().toString(), collUuid) : nss;
+
+ // First chunkDoc should have range [chunkMin, chunkSplitPoint]
+ auto chunkDocStatus =
+ getChunkDoc(operationContext(), nssOrUuid, chunkMin, collEpoch, collTimestamp);
+ ASSERT_OK(chunkDocStatus.getStatus());
+
+ auto chunkDoc = chunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
+
+ // Check for increment based on the competing chunk version
+ ASSERT_EQ(competingVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(competingVersion.minorVersion() + 1, chunkDoc.getVersion().minorVersion());
+
+ // Second chunkDoc should have range [chunkSplitPoint, chunkMax]
+ auto otherChunkDocStatus =
+ getChunkDoc(operationContext(), nssOrUuid, chunkSplitPoint, collEpoch, collTimestamp);
+ ASSERT_OK(otherChunkDocStatus.getStatus());
+
+ auto otherChunkDoc = otherChunkDocStatus.getValue();
+ ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax());
+
+ // Check for increment based on the competing chunk version
+ ASSERT_EQ(competingVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion());
+ ASSERT_EQ(competingVersion.minorVersion() + 2, otherChunkDoc.getVersion().minorVersion());
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(SplitChunkTest, PreConditionFailErrors) {
- ChunkType chunk;
- chunk.setName(OID::gen());
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId(_shardName));
-
- auto chunkMin = BSON("a" << 1);
- auto chunkMax = BSON("a" << 10);
- chunk.setMin(chunkMin);
- chunk.setMax(chunkMax);
-
- std::vector<BSONObj> splitPoints;
- auto chunkSplitPoint = BSON("a" << 5);
- splitPoints.push_back(chunkSplitPoint);
-
- setupCollection(kNamespace, kKeyPattern, {chunk});
-
- auto splitStatus = ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- kNamespace,
- origVersion.epoch(),
- ChunkRange(chunkMin, BSON("a" << 7)),
- splitPoints,
- "shard0000");
- ASSERT_EQ(ErrorCodes::BadValue, splitStatus);
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+
+ ChunkType chunk;
+ chunk.setName(OID::gen());
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(UUID::gen());
+
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId(_shardName));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+
+ std::vector<BSONObj> splitPoints;
+ auto chunkSplitPoint = BSON("a" << 5);
+ splitPoints.push_back(chunkSplitPoint);
+
+ setupCollection(nss, _keyPattern, {chunk});
+
+ auto splitStatus = ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ nss,
+ collEpoch,
+ ChunkRange(chunkMin, BSON("a" << 7)),
+ splitPoints,
+ "shard0000");
+ ASSERT_EQ(ErrorCodes::BadValue, splitStatus);
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
- ChunkType chunk;
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId(_shardName));
-
- auto chunkMin = BSON("a" << 1);
- auto chunkMax = BSON("a" << 10);
- chunk.setMin(chunkMin);
- chunk.setMax(chunkMax);
-
- std::vector<BSONObj> splitPoints{BSON("a" << 5)};
-
- setupCollection(kNamespace, kKeyPattern, {chunk});
-
- auto splitStatus = ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.NonExistingColl"),
- origVersion.epoch(),
- ChunkRange(chunkMin, chunkMax),
- splitPoints,
- "shard0000");
- ASSERT_NOT_OK(splitStatus);
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+
+ ChunkType chunk;
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(UUID::gen());
+
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId(_shardName));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+
+ std::vector<BSONObj> splitPoints{BSON("a" << 5)};
+
+ setupCollection(nss, _keyPattern, {chunk});
+
+ auto splitStatus = ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ NamespaceString("TestDB.NonExistingColl"),
+ collEpoch,
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000");
+ ASSERT_NOT_OK(splitStatus);
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
- ChunkType chunk;
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId(_shardName));
-
- auto chunkMin = BSON("a" << 1);
- auto chunkMax = BSON("a" << 10);
- chunk.setMin(chunkMin);
- chunk.setMax(chunkMax);
-
- std::vector<BSONObj> splitPoints{BSON("a" << 5)};
-
- setupCollection(kNamespace, kKeyPattern, {chunk});
-
- auto splitStatus = ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- kNamespace,
- OID::gen(),
- ChunkRange(chunkMin, chunkMax),
- splitPoints,
- "shard0000");
- ASSERT_EQ(ErrorCodes::StaleEpoch, splitStatus);
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+
+ ChunkType chunk;
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(UUID::gen());
+
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId(_shardName));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+
+ std::vector<BSONObj> splitPoints{BSON("a" << 5)};
+
+ setupCollection(nss, _keyPattern, {chunk});
+
+ auto splitStatus = ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ nss,
+ OID::gen(),
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000");
+ ASSERT_EQ(ErrorCodes::StaleEpoch, splitStatus);
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
- ChunkType chunk;
- chunk.setName(OID::gen());
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId(_shardName));
-
- auto chunkMin = BSON("a" << 1);
- auto chunkMax = BSON("a" << 10);
- chunk.setMin(chunkMin);
- chunk.setMax(chunkMax);
-
- std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 4)};
-
- setupCollection(kNamespace, kKeyPattern, {chunk});
-
- auto splitStatus = ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- kNamespace,
- origVersion.epoch(),
- ChunkRange(chunkMin, chunkMax),
- splitPoints,
- "shard0000");
- ASSERT_EQ(ErrorCodes::InvalidOptions, splitStatus);
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+
+ ChunkType chunk;
+ chunk.setName(OID::gen());
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(UUID::gen());
+
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId(_shardName));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+
+ std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 4)};
+
+ setupCollection(nss, _keyPattern, {chunk});
+
+ auto splitStatus = ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ nss,
+ collEpoch,
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000");
+ ASSERT_EQ(ErrorCodes::InvalidOptions, splitStatus);
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
- ChunkType chunk;
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId(_shardName));
-
- auto chunkMin = BSON("a" << 1);
- auto chunkMax = BSON("a" << 10);
- chunk.setMin(chunkMin);
- chunk.setMax(chunkMax);
-
- std::vector<BSONObj> splitPoints{BSON("a" << 0), BSON("a" << 5)};
-
- setupCollection(kNamespace, kKeyPattern, {chunk});
-
- auto splitStatus = ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- kNamespace,
- origVersion.epoch(),
- ChunkRange(chunkMin, chunkMax),
- splitPoints,
- "shard0000");
- ASSERT_EQ(ErrorCodes::InvalidOptions, splitStatus);
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+
+ ChunkType chunk;
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(UUID::gen());
+
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId(_shardName));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+
+ std::vector<BSONObj> splitPoints{BSON("a" << 0), BSON("a" << 5)};
+
+ setupCollection(nss, _keyPattern, {chunk});
+
+ auto splitStatus = ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ nss,
+ collEpoch,
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000");
+ ASSERT_EQ(ErrorCodes::InvalidOptions, splitStatus);
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
- ChunkType chunk;
- chunk.setName(OID::gen());
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId(_shardName));
-
- auto chunkMin = BSON("a" << 1);
- auto chunkMax = BSON("a" << 10);
- chunk.setMin(chunkMin);
- chunk.setMax(chunkMax);
-
- std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 15)};
-
- setupCollection(kNamespace, kKeyPattern, {chunk});
-
- auto splitStatus = ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- kNamespace,
- origVersion.epoch(),
- ChunkRange(chunkMin, chunkMax),
- splitPoints,
- "shard0000");
- ASSERT_EQ(ErrorCodes::InvalidOptions, splitStatus);
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+
+ ChunkType chunk;
+ chunk.setName(OID::gen());
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(UUID::gen());
+
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId(_shardName));
+
+ auto chunkMin = BSON("a" << 1);
+ auto chunkMax = BSON("a" << 10);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+
+ std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 15)};
+
+ setupCollection(nss, _keyPattern, {chunk});
+
+ auto splitStatus = ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ nss,
+ collEpoch,
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000");
+ ASSERT_EQ(ErrorCodes::InvalidOptions, splitStatus);
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
- ChunkType chunk;
- chunk.setNS(kNamespace);
-
- auto origVersion = ChunkVersion(1, 0, OID::gen(), boost::none /* timestamp */);
- chunk.setVersion(origVersion);
- chunk.setShard(ShardId(_shardName));
-
- auto chunkMin = BSON("a" << kMinBSONKey);
- auto chunkMax = BSON("a" << kMaxBSONKey);
- chunk.setMin(chunkMin);
- chunk.setMax(chunkMax);
- setupCollection(kNamespace, kKeyPattern, {chunk});
-
- ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- kNamespace,
- origVersion.epoch(),
- ChunkRange(chunkMin, chunkMax),
- {BSON("a" << BSON("$minKey" << 1))},
- "shard0000"));
- ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- kNamespace,
- origVersion.epoch(),
- ChunkRange(chunkMin, chunkMax),
- {BSON("a" << BSON("$maxKey" << 1))},
- "shard0000"));
+ auto test = [&](const NamespaceString& nss, const boost::optional<Timestamp>& collTimestamp) {
+ const auto collEpoch = OID::gen();
+
+ ChunkType chunk;
+ chunk.setNS(nss);
+ chunk.setCollectionUUID(UUID::gen());
+
+ auto origVersion = ChunkVersion(1, 0, collEpoch, collTimestamp);
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId(_shardName));
+
+ auto chunkMin = BSON("a" << kMinBSONKey);
+ auto chunkMax = BSON("a" << kMaxBSONKey);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+ setupCollection(nss, _keyPattern, {chunk});
+
+ ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ nss,
+ collEpoch,
+ ChunkRange(chunkMin, chunkMax),
+ {BSON("a" << BSON("$minKey" << 1))},
+ "shard0000"));
+ ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ nss,
+ collEpoch,
+ ChunkRange(chunkMin, chunkMax),
+ {BSON("a" << BSON("$maxKey" << 1))},
+ "shard0000"));
+ };
+
+ test(_nss1, boost::none /* timestamp */);
+ test(_nss2, Timestamp(42));
}
} // namespace
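The split tests locate the resulting chunk documents through getChunkDoc with a NamespaceStringOrUUID: when the collection has a timestamp the lookup is addressed by collection UUID, otherwise by the namespace itself. A minimal sketch of that selection, under the same assumptions as the tests above:

// Sketch: how the split tests choose the identifier handed to getChunkDoc().
// With a collection timestamp, chunk documents are looked up through the
// collection UUID; without one, the namespace is used directly.
NamespaceStringOrUUID chunkLookupId(const NamespaceString& nss,
                                    const UUID& collUuid,
                                    const boost::optional<Timestamp>& collTimestamp) {
    return collTimestamp ? NamespaceStringOrUUID(nss.db().toString(), collUuid)
                         : NamespaceStringOrUUID(nss);
}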
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index 189157b0d56..84a29659333 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -820,6 +820,8 @@ void deleteMigrationCoordinatorDocumentLocally(OperationContext* opCtx, const UU
}
void ensureChunkVersionIsGreaterThan(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const UUID& collUUID,
const ChunkRange& range,
const ChunkVersion& preMigrationChunkVersion) {
ConfigsvrEnsureChunkVersionIsGreaterThan ensureChunkVersionIsGreaterThanRequest;
@@ -827,6 +829,8 @@ void ensureChunkVersionIsGreaterThan(OperationContext* opCtx,
ensureChunkVersionIsGreaterThanRequest.setMinKey(range.getMin());
ensureChunkVersionIsGreaterThanRequest.setMaxKey(range.getMax());
ensureChunkVersionIsGreaterThanRequest.setVersion(preMigrationChunkVersion);
+ ensureChunkVersionIsGreaterThanRequest.setNss(nss);
+ ensureChunkVersionIsGreaterThanRequest.setCollectionUUID(collUUID);
const auto ensureChunkVersionIsGreaterThanRequestBSON =
ensureChunkVersionIsGreaterThanRequest.toBSON({});
@@ -956,8 +960,11 @@ void recoverMigrationCoordinations(OperationContext* opCtx, NamespaceString nss)
// The decision is not known. Recover the decision from the config server.
- ensureChunkVersionIsGreaterThan(
- opCtx, doc.getRange(), doc.getPreMigrationChunkVersion());
+ ensureChunkVersionIsGreaterThan(opCtx,
+ doc.getNss(),
+ doc.getCollectionUuid(),
+ doc.getRange(),
+ doc.getPreMigrationChunkVersion());
hangInRefreshFilteringMetadataUntilSuccessInterruptible.pauseWhileSet(opCtx);
diff --git a/src/mongo/s/request_types/ensure_chunk_version_is_greater_than.idl b/src/mongo/s/request_types/ensure_chunk_version_is_greater_than.idl
index fadd9b4427d..1deec9225e7 100644
--- a/src/mongo/s/request_types/ensure_chunk_version_is_greater_than.idl
+++ b/src/mongo/s/request_types/ensure_chunk_version_is_greater_than.idl
@@ -58,3 +58,11 @@ commands:
epoch.
type: ChunkVersion
optional: false
+ collectionUUID:
+ description: The collection's UUID. This field was introduced in 5.0 as mandatory.
+ type: uuid
+ optional: true
+ nss:
+ description: The collection's namespace. This field was introduced in 5.0 as mandatory.
+ type: namespacestring
+ optional: true
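Both new fields are described as mandatory in 5.0 yet declared optional: true; presumably this is the usual compatibility arrangement, where requests produced by older senders during an upgrade may still omit the fields, so the parser cannot require them outright (an assumption, not stated in this diff). A minimal sketch of how a recipient might guard on their presence, using getter names assumed from the setters shown in migration_util.cpp above:

// Sketch only; getNss() and getCollectionUUID() are assumed IDL-generated
// getters matching the setNss()/setCollectionUUID() setters used earlier.
void handle(const ConfigsvrEnsureChunkVersionIsGreaterThan& request) {
    if (request.getNss() && request.getCollectionUUID()) {
        // Fully populated 5.0 request: both the namespace and the collection
        // UUID are available to identify the collection.
    } else {
        // The sender did not populate the new fields; fall back to the
        // previously required minKey/maxKey/version identifiers.
    }
}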