summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPierlauro Sciarelli <pierlauro.sciarelli@mongodb.com>2020-09-03 22:36:25 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-09-04 03:09:15 +0000
commit74756b18f43407e32496b600f2466457c59cb26a (patch)
tree2b04adfd957d3a451c3f285c2efb870f043b1cad
parent1bb2d6e2943d24ed3eef9cc9123fdfdd60e63562 (diff)
downloadmongo-74756b18f43407e32496b600f2466457c59cb26a.tar.gz
SERVER-50288 Return collection version on split and merge commands
-rw-r--r--src/mongo/db/s/config/configsvr_split_chunk_command.cpp2
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager.h32
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp38
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp37
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp30
5 files changed, 86 insertions, 53 deletions
diff --git a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
index 7d51a5d3d44..712fdf5af9d 100644
--- a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
+++ b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp
@@ -111,7 +111,7 @@ public:
auto parsedRequest = uassertStatusOK(SplitChunkRequest::parseFromConfigCommand(cmdObj));
- Status splitChunkResult =
+ auto splitChunkResult =
ShardingCatalogManager::get(opCtx)->commitChunkSplit(opCtx,
parsedRequest.getNamespace(),
parsedRequest.getEpoch(),
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index e39e307d62c..16772155643 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -189,26 +189,34 @@ public:
/**
* Updates metadata in the config.chunks collection to show the given chunk as split into
* smaller chunks at the specified split points.
+ *
+ * Returns a BSON object with the newly produced chunk versions after the split:
+ * - shardVersion - The new shard version of the source shard
+ * - collectionVersion - The new collection version after the commit
*/
- Status commitChunkSplit(OperationContext* opCtx,
- const NamespaceString& nss,
- const OID& requestEpoch,
- const ChunkRange& range,
- const std::vector<BSONObj>& splitPoints,
- const std::string& shardName);
+ StatusWith<BSONObj> commitChunkSplit(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const OID& requestEpoch,
+ const ChunkRange& range,
+ const std::vector<BSONObj>& splitPoints,
+ const std::string& shardName);
/**
* Updates metadata in the config.chunks collection so the chunks with given boundaries are seen
* merged into a single larger chunk.
* If 'validAfter' is not set, this means the commit request came from an older server version,
* which is not history-aware.
+ *
+ * Returns a BSON object with the newly produced chunk versions after the merge:
+ * - shardVersion - The new shard version of the source shard
+ * - collectionVersion - The new collection version after the commit
*/
- Status commitChunkMerge(OperationContext* opCtx,
- const NamespaceString& nss,
- const OID& requestEpoch,
- const std::vector<BSONObj>& chunkBoundaries,
- const std::string& shardName,
- const boost::optional<Timestamp>& validAfter);
+ StatusWith<BSONObj> commitChunkMerge(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const OID& requestEpoch,
+ const std::vector<BSONObj>& chunkBoundaries,
+ const std::string& shardName,
+ const boost::optional<Timestamp>& validAfter);
/**
* Updates metadata in config.chunks collection to show the given chunk in its new shard.
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 169ed795d8d..204d8377764 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -358,12 +358,13 @@ BSONObj getShardAndCollectionVersion(OperationContext* opCtx,
} // namespace
-Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
- const NamespaceString& nss,
- const OID& requestEpoch,
- const ChunkRange& range,
- const std::vector<BSONObj>& splitPoints,
- const std::string& shardName) {
+StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ const OID& requestEpoch,
+ const ChunkRange& range,
+ const std::vector<BSONObj>& splitPoints,
+ const std::string& shardName) {
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations
// TODO(SERVER-25359): Replace with a collection-specific lock map to allow splits/merges/
@@ -557,15 +558,16 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
}
}
- return Status::OK();
+ return getShardAndCollectionVersion(opCtx, nss, ShardId(shardName));
}
-Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
- const NamespaceString& nss,
- const OID& requestEpoch,
- const std::vector<BSONObj>& chunkBoundaries,
- const std::string& shardName,
- const boost::optional<Timestamp>& validAfter) {
+StatusWith<BSONObj> ShardingCatalogManager::commitChunkMerge(
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ const OID& requestEpoch,
+ const std::vector<BSONObj>& chunkBoundaries,
+ const std::string& shardName,
+ const boost::optional<Timestamp>& validAfter) {
// This method must never be called with empty chunks to merge
invariant(!chunkBoundaries.empty());
@@ -608,7 +610,13 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
// Check if the chunk(s) have already been merged. If so, return success.
auto minChunkOnDisk = uassertStatusOK(_findChunkOnConfig(opCtx, nss, chunkBoundaries.front()));
if (minChunkOnDisk.getMax().woCompare(chunkBoundaries.back()) == 0) {
- return Status::OK();
+ auto replyWithVersions = getShardAndCollectionVersion(opCtx, nss, ShardId(shardName));
+ // Make sure that the last thing we read in getCurrentChunk and
+ // getShardAndCollectionVersion gets majority written before we return from this command;
+ // otherwise, the next RoutingInfo cache refresh from the shard may not see the latest
+ // information.
+ repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
+ return replyWithVersions;
}
// Build chunks to be merged
@@ -678,7 +686,7 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
ShardingLogging::get(opCtx)->logChange(
opCtx, "merge", nss.ns(), logDetail.obj(), WriteConcernOptions());
- return Status::OK();
+ return getShardAndCollectionVersion(opCtx, nss, ShardId(shardName));
}
StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 3808f33e337..05478c71aa6 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -38,6 +38,7 @@
namespace mongo {
namespace {
+using unittest::assertGet;
const NamespaceString kNamespace("TestDB.TestColl");
@@ -72,13 +73,24 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
Timestamp validAfter{100, 0};
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- kNamespace,
- origVersion.epoch(),
- chunkBoundaries,
- "shard0000",
- validAfter));
+ auto versions = assertGet(ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(operationContext(),
+ kNamespace,
+ origVersion.epoch(),
+ chunkBoundaries,
+ "shard0000",
+ validAfter));
+
+ auto collVersion = assertGet(ChunkVersion::parseWithField(versions, "collectionVersion"));
+ auto shardVersion = assertGet(ChunkVersion::parseWithField(versions, "shardVersion"));
+
+ ASSERT_GT(shardVersion, origVersion);
+ ASSERT_EQ(collVersion, shardVersion);
+
+ // Check for increment on mergedChunk's minor version
+ auto expectedShardVersion = ChunkVersion(
+ origVersion.majorVersion(), origVersion.minorVersion() + 1, origVersion.epoch());
+ ASSERT_EQ(expectedShardVersion, shardVersion);
auto findResponse = uassertStatusOK(
getConfigShard()->exhaustiveFindOnConfig(operationContext(),
@@ -99,11 +111,8 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
- {
- // Check for increment on mergedChunk's minor version
- ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
- ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
- }
+ // Check that the shard version returned by the merge matches the CSRS one
+ ASSERT_EQ(shardVersion, mergedChunk.getVersion());
// Make sure history is there
ASSERT_EQ(1UL, mergedChunk.getHistory().size());
@@ -371,9 +380,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
chunkBoundaries,
"shard0000",
validAfter);
- // TODO SERVER-50288 Return collection version on split and merge commands
- // Check the returned shard version instead of the error code
- ASSERT_EQ(50577, mergeStatus.code());
+ ASSERT_NOT_OK(mergeStatus);
}
TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index efb4f446a9e..4bb7ad1ec6d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -37,6 +37,7 @@
namespace mongo {
namespace {
+using unittest::assertGet;
const NamespaceString kNamespace("TestDB", "TestColl");
@@ -63,13 +64,24 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
setupChunks({chunk});
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkSplit(operationContext(),
- kNamespace,
- origVersion.epoch(),
- ChunkRange(chunkMin, chunkMax),
- splitPoints,
- "shard0000"));
+ auto versions = assertGet(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ kNamespace,
+ origVersion.epoch(),
+ ChunkRange(chunkMin, chunkMax),
+ splitPoints,
+ "shard0000"));
+ auto collVersion = assertGet(ChunkVersion::parseWithField(versions, "collectionVersion"));
+ auto shardVersion = assertGet(ChunkVersion::parseWithField(versions, "shardVersion"));
+
+ ASSERT_GT(shardVersion, origVersion);
+ ASSERT_EQ(collVersion, shardVersion);
+
+ // Check for an increment of two on the split chunk's minor version (one per resulting chunk)
+ auto expectedShardVersion = ChunkVersion(
+ origVersion.majorVersion(), origVersion.minorVersion() + 2, origVersion.epoch());
+ ASSERT_EQ(expectedShardVersion, shardVersion);
+ ASSERT_EQ(shardVersion, collVersion);
// First chunkDoc should have range [chunkMin, chunkSplitPoint]
auto chunkDocStatus = getChunkDoc(operationContext(), chunkMin);
@@ -296,9 +308,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
ChunkRange(chunkMin, chunkMax),
splitPoints,
"shard0000");
- // TODO SERVER-50288 Return collection version on split and merge commands
- // Check the returned shard version instead of the error code
- ASSERT_EQ(50577, splitStatus.code());
+ ASSERT_NOT_OK(splitStatus);
}
TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {