author     Enrico Golfieri <enrico.golfieri@mongodb.com>  2022-10-03 08:24:18 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-10-03 09:08:03 +0000
commit     37d637a8c97bb5eb8b4bc0afad4e10fbe7d50398 (patch)
tree       8b8569531be854658c90b2d1ec73ab1b76876025
parent     643d02696daf385ec5ea0abbaa6ee656eb82d74d (diff)
SERVER-67226 Chunk Operations under sharding_catalog_manager should return ChunkVersion
-rw-r--r--  src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp               6
-rw-r--r--  src/mongo/db/s/config/configsvr_merge_chunks_command.cpp                          5
-rw-r--r--  src/mongo/db/s/config/configsvr_split_chunk_command.cpp                           7
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.h                                 65
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp              92
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp  178
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp            100
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp              42
8 files changed, 252 insertions, 243 deletions
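
At a glance, the change swaps the loosely typed BSONObj that commitChunkSplit, commitChunksMerge, and commitChunkMigration used to return for a small named aggregate, ShardingCatalogManager::ShardAndCollectionVersion, so callers read typed fields instead of re-parsing BSON. A minimal standalone sketch of the idea follows; the two-field ChunkVersion here is a simplified stand-in for the real class (which also carries an epoch and timestamp), not the actual MongoDB type.

    #include <cstdint>
    #include <iostream>

    // Simplified stand-in for mongo::ChunkVersion (illustration only).
    struct ChunkVersion {
        uint32_t majorVersion = 0;
        uint32_t minorVersion = 0;
    };

    // The aggregate this commit introduces: both versions travel together
    // as named, typed fields instead of ad-hoc BSON keys.
    struct ShardAndCollectionVersion {
        ChunkVersion shardVersion;
        ChunkVersion collectionVersion;
    };

    int main() {
        // A commit operation can hand back both versions directly...
        ShardAndCollectionVersion out{{2, 0}, {2, 1}};
        // ...and a caller reads them without a serialize/parse round trip.
        std::cout << "shard " << out.shardVersion.majorVersion << "."
                  << out.shardVersion.minorVersion << ", collection "
                  << out.collectionVersion.majorVersion << "."
                  << out.collectionVersion.minorVersion << "\n";
        return 0;
    }
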
diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
index ec93541b311..b0057770471 100644
--- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
@@ -128,7 +128,7 @@ public:
const NamespaceString nss = ns();
auto migratedChunk = toChunkType(request().getMigratedChunk());
- StatusWith<BSONObj> chunkVersionResponse =
+ StatusWith<ShardingCatalogManager::ShardAndCollectionVersion> response =
ShardingCatalogManager::get(opCtx)->commitChunkMigration(
opCtx,
nss,
@@ -139,9 +139,9 @@ public:
request().getToShard(),
request().getValidAfter());
- auto chunkVersionObj = uassertStatusOK(chunkVersionResponse);
+ auto shardAndCollVers = uassertStatusOK(response);
- return Response{ChunkVersion::parse(chunkVersionObj[ChunkVersion::kChunkVersionField])};
+ return Response{shardAndCollVers.shardVersion};
}
private:
diff --git a/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp b/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp
index 430cb5c25b9..15d02fe8a98 100644
--- a/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp
+++ b/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp
@@ -87,7 +87,7 @@ public:
repl::ReadConcernArgs::get(opCtx) =
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
- const BSONObj shardAndCollVers = uassertStatusOK(
+ const auto shardAndCollVers = uassertStatusOK(
ShardingCatalogManager::get(opCtx)->commitChunksMerge(opCtx,
ns(),
request().getEpoch(),
@@ -96,8 +96,7 @@ public:
request().getChunkRange(),
request().getShard(),
request().getValidAfter()));
- return ConfigSvrMergeResponse{
- ChunkVersion::parse(shardAndCollVers[ChunkVersion::kChunkVersionField])};
+ return ConfigSvrMergeResponse{shardAndCollVers.shardVersion};
}
private:
diff --git a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
index 713f7252356..37ac4d023b6 100644
--- a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
@@ -65,6 +65,9 @@ using std::string;
* writeConcern: <BSONObj>
* }
*/
+
+constexpr StringData kCollectionVersionField = "collectionVersion"_sd;
+
class ConfigSvrSplitChunkCommand : public BasicCommand {
public:
ConfigSvrSplitChunkCommand() : BasicCommand("_configsvrCommitChunkSplit") {}
@@ -129,7 +132,9 @@ public:
parsedRequest.getSplitPoints(),
parsedRequest.getShardName(),
parsedRequest.isFromChunkSplitter()));
- result.appendElements(shardAndCollVers);
+
+ shardAndCollVers.collectionVersion.serialize(kCollectionVersionField, &result);
+ shardAndCollVers.shardVersion.serialize(ChunkVersion::kChunkVersionField, &result);
return true;
}
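
The split command above is the one call site that still has to surface both versions in its BSON reply, which is why kCollectionVersionField moves into this file and each struct field is serialized under an explicit key. A rough standalone sketch of that shape, with a std::map standing in for BSONObjBuilder and "chunkVersion" assumed as the value of ChunkVersion::kChunkVersionField:

    #include <iostream>
    #include <map>
    #include <string>

    struct ChunkVersion {
        unsigned majorVersion = 0;
        unsigned minorVersion = 0;
        // Stand-in for ChunkVersion::serialize(fieldName, builder).
        void serialize(const std::string& field,
                       std::map<std::string, std::string>* out) const {
            (*out)[field] = std::to_string(majorVersion) + "|" + std::to_string(minorVersion);
        }
    };

    struct ShardAndCollectionVersion {
        ChunkVersion shardVersion;
        ChunkVersion collectionVersion;
    };

    int main() {
        ShardAndCollectionVersion vers{{5, 3}, {5, 3}};
        std::map<std::string, std::string> result;  // stand-in for the reply builder
        vers.collectionVersion.serialize("collectionVersion", &result);
        vers.shardVersion.serialize("chunkVersion", &result);  // assumed key name
        for (const auto& [key, value] : result)
            std::cout << key << ": " << value << "\n";
        return 0;
    }
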
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index c1f94dbc46a..8cf63f2da69 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -91,6 +91,11 @@ public:
std::unique_ptr<executor::TaskExecutor> addShardExecutor);
~ShardingCatalogManager();
+ struct ShardAndCollectionVersion {
+ ChunkVersion shardVersion;
+ ChunkVersion collectionVersion;
+ };
+
/**
* Instantiates an instance of the sharding catalog manager and installs it on the specified
* service context. This method is not thread-safe and must be called only once when the service
@@ -242,18 +247,20 @@ public:
* Updates metadata in the config.chunks collection to show the given chunk as split into
* smaller chunks at the specified split points.
*
- * Returns a BSON object with the newly produced chunk versions after the migration:
+ * Returns a ShardAndCollectionVersion object with the newly produced chunk versions after the
+ * split:
* - shardVersion - The new shard version of the source shard
* - collectionVersion - The new collection version after the commit
*/
- StatusWith<BSONObj> commitChunkSplit(OperationContext* opCtx,
- const NamespaceString& nss,
- const OID& requestEpoch,
- const boost::optional<Timestamp>& requestTimestamp,
- const ChunkRange& range,
- const std::vector<BSONObj>& splitPoints,
- const std::string& shardName,
- bool fromChunkSplitter);
+ StatusWith<ShardAndCollectionVersion> commitChunkSplit(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ const OID& requestEpoch,
+ const boost::optional<Timestamp>& requestTimestamp,
+ const ChunkRange& range,
+ const std::vector<BSONObj>& splitPoints,
+ const std::string& shardName,
+ bool fromChunkSplitter);
/**
* Updates metadata in the config.chunks collection so the chunks within the specified key range
@@ -261,36 +268,40 @@ public:
* If 'validAfter' is not set, this means the commit request came from an older server version,
* which is not history-aware.
*
- * Returns a BSON object with the newly produced chunk versions after the migration:
+ * Returns a ShardAndCollectionVersion object with the newly produced chunk versions after the
+ * merge:
* - shardVersion - The new shard version of the source shard
* - collectionVersion - The new collection version after the commit
*/
- StatusWith<BSONObj> commitChunksMerge(OperationContext* opCtx,
- const NamespaceString& nss,
- const boost::optional<OID>& epoch,
- const boost::optional<Timestamp>& timestamp,
- const UUID& requestCollectionUUID,
- const ChunkRange& chunkRange,
- const ShardId& shardId,
- const boost::optional<Timestamp>& validAfter);
+ StatusWith<ShardAndCollectionVersion> commitChunksMerge(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ const boost::optional<OID>& epoch,
+ const boost::optional<Timestamp>& timestamp,
+ const UUID& requestCollectionUUID,
+ const ChunkRange& chunkRange,
+ const ShardId& shardId,
+ const boost::optional<Timestamp>& validAfter);
/**
* Updates metadata in config.chunks collection to show the given chunk in its new shard.
* If 'validAfter' is not set, this means the commit request came from an older server version,
* which is not history-aware.
*
- * Returns a BSON object with the newly produced chunk versions after the migration:
+ * Returns a ShardAndCollectionVersion object with the newly produced chunk versions after the
+ * migration:
* - shardVersion - The new shard version of the source shard
* - collectionVersion - The new collection version after the commit
*/
- StatusWith<BSONObj> commitChunkMigration(OperationContext* opCtx,
- const NamespaceString& nss,
- const ChunkType& migratedChunk,
- const OID& collectionEpoch,
- const Timestamp& collectionTimestamp,
- const ShardId& fromShard,
- const ShardId& toShard,
- const boost::optional<Timestamp>& validAfter);
+ StatusWith<ShardAndCollectionVersion> commitChunkMigration(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ const ChunkType& migratedChunk,
+ const OID& collectionEpoch,
+ const Timestamp& collectionTimestamp,
+ const ShardId& fromShard,
+ const ShardId& toShard,
+ const boost::optional<Timestamp>& validAfter);
/**
* Removes the jumbo flag from the specified chunk.
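
All three public methods keep their StatusWith return convention; only the value type changes. A self-contained sketch of that contract, using a minimal variant-based stand-in for mongo::StatusWith (the real one lives in mongo/base/status_with.h) and mirroring the validAfter guard seen in commitChunksMerge and commitChunkMigration:

    #include <iostream>
    #include <string>
    #include <utility>
    #include <variant>

    // Minimal stand-ins for mongo::Status / mongo::StatusWith (illustration only).
    struct Status {
        int code = 0;
        std::string reason;
    };

    template <typename T>
    class StatusWith {
    public:
        StatusWith(T value) : _state(std::move(value)) {}
        StatusWith(Status status) : _state(std::move(status)) {}
        bool isOK() const { return std::holds_alternative<T>(_state); }
        const T& getValue() const { return std::get<T>(_state); }
        const Status& getStatus() const { return std::get<Status>(_state); }

    private:
        std::variant<T, Status> _state;
    };

    struct ChunkVersion {
        unsigned majorVersion = 0;
        unsigned minorVersion = 0;
    };

    struct ShardAndCollectionVersion {
        ChunkVersion shardVersion;
        ChunkVersion collectionVersion;
    };

    // Sketch of the new signature shape: errors still travel as a Status,
    // success now carries the typed pair instead of a BSONObj.
    StatusWith<ShardAndCollectionVersion> commitSketch(bool hasValidAfter) {
        if (!hasValidAfter)
            return Status{1, "chunk operation requires validAfter timestamp"};
        return ShardAndCollectionVersion{{3, 2}, {3, 2}};
    }

    int main() {
        auto result = commitSketch(false);
        if (!result.isOK())
            std::cout << "error: " << result.getStatus().reason << "\n";
        return 0;
    }
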
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 23083a77590..55381ae4041 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -73,8 +73,6 @@ MONGO_FAIL_POINT_DEFINE(skipExpiringOldChunkHistory);
const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::UNSET, Seconds(0));
-constexpr StringData kCollectionVersionField = "collectionVersion"_sd;
-
/**
* Append min, max and version information from chunk to the buffer for logChange purposes.
*/
@@ -609,15 +607,15 @@ ShardingCatalogManager::_splitChunkInTransaction(OperationContext* opCtx,
sharedBlock->newChunks};
}
-StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
- OperationContext* opCtx,
- const NamespaceString& nss,
- const OID& requestEpoch,
- const boost::optional<Timestamp>& requestTimestamp,
- const ChunkRange& range,
- const std::vector<BSONObj>& splitPoints,
- const std::string& shardName,
- const bool fromChunkSplitter) {
+StatusWith<ShardingCatalogManager::ShardAndCollectionVersion>
+ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const OID& requestEpoch,
+ const boost::optional<Timestamp>& requestTimestamp,
+ const ChunkRange& range,
+ const std::vector<BSONObj>& splitPoints,
+ const std::string& shardName,
+ const bool fromChunkSplitter) {
// Mark opCtx as interruptible to ensure that all reads and writes to the metadata collections
// under the exclusive _kChunkOpLock happen on the same term.
@@ -718,10 +716,8 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
}
}
- BSONObjBuilder response;
- splitChunkResult.currentMaxVersion.serialize(kCollectionVersionField, &response);
- splitChunkResult.currentMaxVersion.serialize(ChunkVersion::kChunkVersionField, &response);
- return response.obj();
+ return ShardAndCollectionVersion{splitChunkResult.currentMaxVersion /*shardVersion*/,
+ splitChunkResult.currentMaxVersion /*collectionVersion*/};
}
void ShardingCatalogManager::_mergeChunksInTransaction(
@@ -829,15 +825,15 @@ void ShardingCatalogManager::_mergeChunksInTransaction(
txn.run(opCtx, updateChunksFn);
}
-StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
- OperationContext* opCtx,
- const NamespaceString& nss,
- const boost::optional<OID>& epoch,
- const boost::optional<Timestamp>& timestamp,
- const UUID& requestCollectionUUID,
- const ChunkRange& chunkRange,
- const ShardId& shardId,
- const boost::optional<Timestamp>& validAfter) {
+StatusWith<ShardingCatalogManager::ShardAndCollectionVersion>
+ShardingCatalogManager::commitChunksMerge(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const boost::optional<OID>& epoch,
+ const boost::optional<Timestamp>& timestamp,
+ const UUID& requestCollectionUUID,
+ const ChunkRange& chunkRange,
+ const ShardId& shardId,
+ const boost::optional<Timestamp>& validAfter) {
if (!validAfter) {
return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"};
}
@@ -907,15 +903,14 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
<< " does not contain a sequence of chunks that exactly fills the range "
<< chunkRange.toString(),
chunk.getRange() == chunkRange);
- BSONObjBuilder response;
- collVersion.serialize(kCollectionVersionField, &response);
+
const auto currentShardVersion = getShardVersion(opCtx, coll, shardId, collVersion);
- currentShardVersion.serialize(ChunkVersion::kChunkVersionField, &response);
+
// Make sure that the last thing we read in getCollectionVersion and getShardVersion is
// majority committed before returning from this command; otherwise the next RoutingInfo
// cache refresh from the shard may not see the newest information.
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
- return response.obj();
+ return ShardAndCollectionVersion{currentShardVersion, collVersion};
}
// 3. Prepare the data for the merge
@@ -972,21 +967,18 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
ShardingLogging::get(opCtx)->logChange(
opCtx, "merge", nss.ns(), logDetail.obj(), WriteConcernOptions());
- BSONObjBuilder response;
- mergeVersion.serialize(kCollectionVersionField, &response);
- mergeVersion.serialize(ChunkVersion::kChunkVersionField, &response);
- return response.obj();
+ return ShardAndCollectionVersion{mergeVersion /*shardVersion*/, mergeVersion /*collVersion*/};
}
-StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
- OperationContext* opCtx,
- const NamespaceString& nss,
- const ChunkType& migratedChunk,
- const OID& collectionEpoch,
- const Timestamp& collectionTimestamp,
- const ShardId& fromShard,
- const ShardId& toShard,
- const boost::optional<Timestamp>& validAfter) {
+StatusWith<ShardingCatalogManager::ShardAndCollectionVersion>
+ShardingCatalogManager::commitChunkMigration(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const ChunkType& migratedChunk,
+ const OID& collectionEpoch,
+ const Timestamp& collectionTimestamp,
+ const ShardId& fromShard,
+ const ShardId& toShard,
+ const boost::optional<Timestamp>& validAfter) {
if (!validAfter) {
return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"};
@@ -1100,16 +1092,13 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
if (currentChunk.getShard() == toShard) {
// The commit was already done successfully
- BSONObjBuilder response;
- currentCollectionVersion.serialize(kCollectionVersionField, &response);
const auto currentShardVersion =
getShardVersion(opCtx, coll, fromShard, currentCollectionVersion);
- currentShardVersion.serialize(ChunkVersion::kChunkVersionField, &response);
// Make sure that the last thing we read in findChunkContainingRange, getShardVersion, and
// getCollectionVersion is majority committed before returning from this command; otherwise
// the next RoutingInfo cache refresh from the shard may not see the newest information.
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
- return response.obj();
+ return ShardAndCollectionVersion{currentShardVersion, currentCollectionVersion};
}
uassert(4914702,
@@ -1232,18 +1221,17 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
_commitChunkMigrationInTransaction(
opCtx, nss, newMigratedChunk, newSplitChunks, newControlChunk);
- BSONObjBuilder response;
+ ShardAndCollectionVersion response;
if (!newControlChunk) {
// We migrated the last chunk from the donor shard.
- newMigratedChunk->getVersion().serialize(kCollectionVersionField, &response);
- const ChunkVersion donorShardVersion(
+ response.collectionVersion = newMigratedChunk->getVersion();
+ response.shardVersion = ChunkVersion(
{currentCollectionVersion.epoch(), currentCollectionVersion.getTimestamp()}, {0, 0});
- donorShardVersion.serialize(ChunkVersion::kChunkVersionField, &response);
} else {
- newControlChunk->getVersion().serialize(kCollectionVersionField, &response);
- newControlChunk->getVersion().serialize(ChunkVersion::kChunkVersionField, &response);
+ response.collectionVersion = newControlChunk->getVersion();
+ response.shardVersion = newControlChunk->getVersion();
}
- return response.obj();
+ return response;
}
StatusWith<ChunkType> ShardingCatalogManager::_findChunkOnConfig(OperationContext* opCtx,
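
One behavioral detail in commitChunkMigration is easier to see with the struct than it was with BSON field writes: when the donor shard gives up its last chunk (no control chunk remains), the donor's shard version drops to {0, 0} while the collection version comes from the migrated chunk; otherwise the surviving control chunk supplies both. A simplified sketch of that branch, with a hypothetical helper name and illustration-only types:

    #include <iostream>
    #include <optional>

    struct ChunkVersion {
        unsigned majorVersion = 0;
        unsigned minorVersion = 0;
    };

    struct ShardAndCollectionVersion {
        ChunkVersion shardVersion;
        ChunkVersion collectionVersion;
    };

    // controlVersion is empty when the migrated chunk was the donor's last one.
    ShardAndCollectionVersion buildMigrationResponse(
        ChunkVersion migratedVersion, std::optional<ChunkVersion> controlVersion) {
        ShardAndCollectionVersion response;
        if (!controlVersion) {
            // Donor shard is now empty: the collection version comes from the
            // migrated chunk, and the donor's shard version resets to {0, 0}.
            response.collectionVersion = migratedVersion;
            response.shardVersion = ChunkVersion{0, 0};
        } else {
            // A control chunk remains on the donor: it carries both versions.
            response.collectionVersion = *controlVersion;
            response.shardVersion = *controlVersion;
        }
        return response;
    }

    int main() {
        auto r = buildMigrationResponse({7, 1}, std::nullopt);
        std::cout << "donor shard version: " << r.shardVersion.majorVersion << "."
                  << r.shardVersion.minorVersion << "\n";
        return 0;
    }
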
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
index 0f99ae11634..2b627a53e3d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
@@ -126,25 +126,25 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
setupCollection(kNamespace, kKeyPattern, {migratedChunk, controlChunk});
Timestamp validAfter{101, 0};
- BSONObj versions = assertGet(ShardingCatalogManager::get(operationContext())
- ->commitChunkMigration(operationContext(),
- kNamespace,
- migratedChunk,
- migratedChunk.getVersion().epoch(),
- collTimestamp,
- ShardId(shard0.getName()),
- ShardId(shard1.getName()),
- validAfter));
+ auto versions = assertGet(ShardingCatalogManager::get(operationContext())
+ ->commitChunkMigration(operationContext(),
+ kNamespace,
+ migratedChunk,
+ migratedChunk.getVersion().epoch(),
+ collTimestamp,
+ ShardId(shard0.getName()),
+ ShardId(shard1.getName()),
+ validAfter));
// Verify the versions returned match expected values.
- auto mver = ChunkVersion::parse(versions["shardVersion"]);
+ auto mver = versions.shardVersion;
ASSERT_EQ(ChunkVersion(
{migratedChunk.getVersion().epoch(), migratedChunk.getVersion().getTimestamp()},
{migratedChunk.getVersion().majorVersion() + 1, 1}),
mver);
// Verify that a collection version is returned
- auto cver = ChunkVersion::parse(versions["collectionVersion"]);
+ auto cver = versions.collectionVersion;
ASSERT_TRUE(mver.isOlderOrEqualThan(cver));
// Verify the chunks ended up in the right shards.
@@ -204,25 +204,26 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
Timestamp validAfter{101, 0};
- StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
- ->commitChunkMigration(operationContext(),
- kNamespace,
- chunk0,
- origVersion.epoch(),
- collTimestamp,
- ShardId(shard0.getName()),
- ShardId(shard1.getName()),
- validAfter);
+ StatusWith<ShardingCatalogManager::ShardAndCollectionVersion> result =
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMigration(operationContext(),
+ kNamespace,
+ chunk0,
+ origVersion.epoch(),
+ collTimestamp,
+ ShardId(shard0.getName()),
+ ShardId(shard1.getName()),
+ validAfter);
- ASSERT_OK(resultBSON.getStatus());
+ ASSERT_OK(result.getStatus());
// Verify the version returned matches expected value.
- BSONObj versions = resultBSON.getValue();
- auto mver = ChunkVersion::parse(versions["shardVersion"]);
+ auto versions = result.getValue();
+ auto mver = versions.shardVersion;
ASSERT_EQ(ChunkVersion({origVersion.epoch(), origVersion.getTimestamp()}, {0, 0}), mver);
// Verify that a collection version is returned
- auto cver = ChunkVersion::parse(versions["collectionVersion"]);
+ auto cver = versions.collectionVersion;
ASSERT_EQ(ChunkVersion({collEpoch, collTimestamp}, {origMajorVersion + 1, 0}), cver);
// Verify the chunk ended up in the right shard.
@@ -270,21 +271,22 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
// Make the time distance between the last history element large enough.
Timestamp validAfter{200, 0};
- StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
- ->commitChunkMigration(operationContext(),
- kNamespace,
- chunk0,
- origVersion.epoch(),
- collTimestamp,
- ShardId(shard0.getName()),
- ShardId(shard1.getName()),
- validAfter);
+ StatusWith<ShardingCatalogManager::ShardAndCollectionVersion> result =
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMigration(operationContext(),
+ kNamespace,
+ chunk0,
+ origVersion.epoch(),
+ collTimestamp,
+ ShardId(shard0.getName()),
+ ShardId(shard1.getName()),
+ validAfter);
- ASSERT_OK(resultBSON.getStatus());
+ ASSERT_OK(result.getStatus());
// Verify the version returned matches expected value.
- BSONObj versions = resultBSON.getValue();
- auto mver = ChunkVersion::parse(versions["shardVersion"]);
+ auto versions = result.getValue();
+ auto mver = versions.shardVersion;
ASSERT_EQ(ChunkVersion({origVersion.epoch(), origVersion.getTimestamp()}, {0, 0}), mver);
// Verify the chunk ended up in the right shard.
@@ -331,17 +333,18 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
// Make the time before the last change to trigger the failure.
Timestamp validAfter{99, 0};
- StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
- ->commitChunkMigration(operationContext(),
- kNamespace,
- chunk0,
- origVersion.epoch(),
- origVersion.getTimestamp(),
- ShardId(shard0.getName()),
- ShardId(shard1.getName()),
- validAfter);
-
- ASSERT_EQ(ErrorCodes::IncompatibleShardingMetadata, resultBSON.getStatus());
+ StatusWith<ShardingCatalogManager::ShardAndCollectionVersion> result =
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMigration(operationContext(),
+ kNamespace,
+ chunk0,
+ origVersion.epoch(),
+ origVersion.getTimestamp(),
+ ShardId(shard0.getName()),
+ ShardId(shard1.getName()),
+ validAfter);
+
+ ASSERT_EQ(ErrorCodes::IncompatibleShardingMetadata, result.getStatus());
}
TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
@@ -386,17 +389,18 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
Timestamp validAfter{1};
- StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
- ->commitChunkMigration(operationContext(),
- kNamespace,
- chunk0,
- OID::gen(),
- Timestamp(52),
- ShardId(shard0.getName()),
- ShardId(shard1.getName()),
- validAfter);
-
- ASSERT_EQ(ErrorCodes::StaleEpoch, resultBSON.getStatus());
+ StatusWith<ShardingCatalogManager::ShardAndCollectionVersion> result =
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMigration(operationContext(),
+ kNamespace,
+ chunk0,
+ OID::gen(),
+ Timestamp(52),
+ ShardId(shard0.getName()),
+ ShardId(shard1.getName()),
+ validAfter);
+
+ ASSERT_EQ(ErrorCodes::StaleEpoch, result.getStatus());
}
TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
@@ -443,17 +447,18 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
Timestamp validAfter{1};
- StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
- ->commitChunkMigration(operationContext(),
- kNamespace,
- chunk0,
- origVersion.epoch(),
- origVersion.getTimestamp(),
- ShardId(shard0.getName()),
- ShardId(shard1.getName()),
- validAfter);
-
- ASSERT_EQ(ErrorCodes::StaleEpoch, resultBSON.getStatus());
+ StatusWith<ShardingCatalogManager::ShardAndCollectionVersion> result =
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMigration(operationContext(),
+ kNamespace,
+ chunk0,
+ origVersion.epoch(),
+ origVersion.getTimestamp(),
+ ShardId(shard0.getName()),
+ ShardId(shard1.getName()),
+ validAfter);
+
+ ASSERT_EQ(ErrorCodes::StaleEpoch, result.getStatus());
}
TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) {
@@ -503,21 +508,22 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
setupCollection(kNamespace, kKeyPattern, {chunk0, chunk1});
Timestamp validAfter{101, 0};
- StatusWith<BSONObj> resultBSON = ShardingCatalogManager::get(operationContext())
- ->commitChunkMigration(operationContext(),
- kNamespace,
- chunk0,
- origVersion.epoch(),
- origVersion.getTimestamp(),
- ShardId(shard0.getName()),
- ShardId(shard1.getName()),
- validAfter);
-
- ASSERT_OK(resultBSON.getStatus());
+ StatusWith<ShardingCatalogManager::ShardAndCollectionVersion> result =
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMigration(operationContext(),
+ kNamespace,
+ chunk0,
+ origVersion.epoch(),
+ origVersion.getTimestamp(),
+ ShardId(shard0.getName()),
+ ShardId(shard1.getName()),
+ validAfter);
+
+ ASSERT_OK(result.getStatus());
// Verify the versions returned match expected values.
- BSONObj versions = resultBSON.getValue();
- auto mver = ChunkVersion::parse(versions["shardVersion"]);
+ auto versions = result.getValue();
+ auto mver = versions.shardVersion;
ASSERT_EQ(ChunkVersion({origVersion.epoch(), origVersion.getTimestamp()}, {0, 0}), mver);
// Verify the chunks ended up in the right shards.
@@ -637,8 +643,8 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) {
ShardId(shard1.getName()),
validAfter);
- ASSERT_NOT_OK(result);
- ASSERT_EQ(result, ErrorCodes::ConflictingOperationInProgress);
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQ(result.getStatus(), ErrorCodes::ConflictingOperationInProgress);
}
TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) {
@@ -689,8 +695,8 @@ TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) {
ShardId(shard1.getName()),
validAfter);
- ASSERT_NOT_OK(result);
- ASSERT_EQ(result, ErrorCodes::StaleEpoch);
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQ(result.getStatus(), ErrorCodes::StaleEpoch);
}
class CommitMoveRangeTest : public CommitChunkMigrate {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 39837c1aadd..2743b329022 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -117,11 +117,11 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
_shardId,
validAfter));
- auto collVersion = ChunkVersion::parse(versions["collectionVersion"]);
- auto shardVersion = ChunkVersion::parse(versions["shardVersion"]);
+ auto collVersion = versions.collectionVersion;
+ auto shardVersion = versions.shardVersion;
- ASSERT_TRUE(origVersion.isOlderThan(shardVersion));
- ASSERT_EQ(collVersion, shardVersion);
+ ASSERT_TRUE(origVersion.isOlderThan(shardVersion));
+ ASSERT_EQ(shardVersion, collVersion);
// Check for increment on mergedChunk's minor version
auto expectedShardVersion =
@@ -196,15 +196,15 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
Timestamp validAfter{100, 0};
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunksMerge(operationContext(),
- _nss1,
- collEpoch,
- collTimestamp,
- collUuid,
- rangeToBeMerged,
- _shardId,
- validAfter));
+ uassertStatusOK(ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(operationContext(),
+ _nss1,
+ collEpoch,
+ collTimestamp,
+ collUuid,
+ rangeToBeMerged,
+ _shardId,
+ validAfter));
const auto query = BSON(ChunkType::collectionUUID() << collUuid);
auto findResponse = uassertStatusOK(
@@ -280,15 +280,15 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
Timestamp validAfter{100, 0};
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunksMerge(operationContext(),
- _nss1,
- collEpoch,
- collTimestamp,
- collUuid,
- rangeToBeMerged,
- _shardId,
- validAfter));
+ uassertStatusOK(ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(operationContext(),
+ _nss1,
+ collEpoch,
+ collTimestamp,
+ collUuid,
+ rangeToBeMerged,
+ _shardId,
+ validAfter));
const auto query = BSON(ChunkType::collectionUUID() << collUuid);
auto findResponse = uassertStatusOK(
@@ -362,15 +362,15 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
setupCollection(_nss1, _keyPattern, {chunk, chunk2, otherChunk});
Timestamp validAfter{1};
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunksMerge(operationContext(),
- _nss1,
- collEpoch,
- collTimestamp,
- collUuid,
- rangeToBeMerged,
- shardId,
- validAfter));
+ uassertStatusOK(ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(operationContext(),
+ _nss1,
+ collEpoch,
+ collTimestamp,
+ collUuid,
+ rangeToBeMerged,
+ shardId,
+ validAfter));
const auto query = BSON(ChunkType::collectionUUID() << collUuid);
auto findResponse = uassertStatusOK(
getConfigShard()->exhaustiveFindOnConfig(operationContext(),
@@ -486,7 +486,7 @@ TEST_F(MergeChunkTest, NonMatchingUUIDsOfChunkAndRequestErrors) {
rangeToBeMerged,
_shardId,
validAfter);
- ASSERT_EQ(ErrorCodes::InvalidUUID, mergeStatus);
+ ASSERT_EQ(ErrorCodes::InvalidUUID, mergeStatus.getStatus());
}
TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
@@ -515,15 +515,15 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
Timestamp validAfter{1};
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunksMerge(operationContext(),
- _nss1,
- collEpoch,
- collTimestamp,
- collUuid,
- rangeToBeMerged,
- _shardId,
- validAfter));
+ uassertStatusOK(ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(operationContext(),
+ _nss1,
+ collEpoch,
+ collTimestamp,
+ collUuid,
+ rangeToBeMerged,
+ _shardId,
+ validAfter));
// Verify that no change to config.chunks happened.
const auto query = BSON(ChunkType::collectionUUID() << collUuid);
@@ -586,15 +586,15 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
ChunkRange rangeToBeMerged(chunk1.getMin(), chunk3.getMax());
Timestamp validAfter{100, 0};
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunksMerge(operationContext(),
- _nss1,
- collEpoch,
- collTimestamp,
- collUuid,
- rangeToBeMerged,
- _shardId,
- validAfter));
+ uassertStatusOK(ShardingCatalogManager::get(operationContext())
+ ->commitChunksMerge(operationContext(),
+ _nss1,
+ collEpoch,
+ collTimestamp,
+ collUuid,
+ rangeToBeMerged,
+ _shardId,
+ validAfter));
const auto query = BSON(ChunkType::collectionUUID() << collUuid);
auto findResponse = uassertStatusOK(
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index 32c5ced57b4..4c963a28ee9 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -105,8 +105,8 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
splitPoints,
"shard0000",
false /* fromChunkSplitter*/));
- auto collVersion = ChunkVersion::parse(versions["collectionVersion"]);
- auto shardVersion = ChunkVersion::parse(versions["shardVersion"]);
+ auto collVersion = versions.collectionVersion;
+ auto shardVersion = versions.shardVersion;
ASSERT_TRUE(origVersion.isOlderThan(shardVersion));
ASSERT_EQ(collVersion, shardVersion);
@@ -181,15 +181,15 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
setupCollection(nss, _keyPattern, {chunk});
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- nss,
- collEpoch,
- collTimestamp,
- ChunkRange(chunkMin, chunkMax),
- splitPoints,
- "shard0000",
- false /* fromChunkSplitter*/));
+ uassertStatusOK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ nss,
+ collEpoch,
+ collTimestamp,
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000",
+ false /* fromChunkSplitter*/));
// First chunkDoc should have range [chunkMin, chunkSplitPoint]
auto chunkDocStatus =
@@ -278,15 +278,15 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
setupCollection(nss, _keyPattern, {chunk, chunk2});
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- nss,
- collEpoch,
- collTimestamp,
- ChunkRange(chunkMin, chunkMax),
- splitPoints,
- "shard0000",
- false /* fromChunkSplitter*/));
+ uassertStatusOK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ nss,
+ collEpoch,
+ collTimestamp,
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000",
+ false /* fromChunkSplitter*/));
// First chunkDoc should have range [chunkMin, chunkSplitPoint]
auto chunkDocStatus =
@@ -420,7 +420,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
splitPoints,
"shard0000",
false /* fromChunkSplitter*/);
- ASSERT_EQ(ErrorCodes::StaleEpoch, splitStatus);
+ ASSERT_EQ(ErrorCodes::StaleEpoch, splitStatus.getStatus());
};
test(_nss2, Timestamp(42));
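
The test updates above all follow the same pattern: unwrap the StatusWith, then assert on struct members directly instead of parsing "shardVersion" and "collectionVersion" back out of a BSONObj. A condensed sketch of that assertion style, with assert() standing in for the ASSERT_* unit-test macros and a simplified ChunkVersion that ignores epoch and timestamp:

    #include <cassert>

    struct ChunkVersion {
        unsigned majorVersion = 0;
        unsigned minorVersion = 0;
        bool operator==(const ChunkVersion& other) const {
            return majorVersion == other.majorVersion && minorVersion == other.minorVersion;
        }
        // Simplified ordering; the real class also compares epoch/timestamp.
        bool isOlderOrEqualThan(const ChunkVersion& other) const {
            return majorVersion < other.majorVersion ||
                (majorVersion == other.majorVersion && minorVersion <= other.minorVersion);
        }
    };

    struct ShardAndCollectionVersion {
        ChunkVersion shardVersion;
        ChunkVersion collectionVersion;
    };

    int main() {
        // Stand-in for the value a commit call would return inside a test.
        ShardAndCollectionVersion versions{{2, 1}, {2, 1}};
        auto mver = versions.shardVersion;  // no ChunkVersion::parse(...) step
        auto cver = versions.collectionVersion;
        assert((mver == ChunkVersion{2, 1}));
        assert(mver.isOlderOrEqualThan(cver));
        return 0;
    }
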