-rw-r--r--   src/mongo/db/s/merge_chunks_command.cpp             15
-rw-r--r--   src/mongo/s/commands/cluster_merge_chunks_cmd.cpp   11
2 files changed, 17 insertions, 9 deletions
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 9c5b4d3a9c6..a48107135a5 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/dist_lock_manager.h"
+#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/shard_filtering_metadata_refresh.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/vector_clock.h"
@@ -196,13 +197,19 @@ void mergeChunks(OperationContext* opCtx,
<< " to merge chunks in [" << redact(minKey) << ", " << redact(maxKey)
<< ")");
- // We now have the collection distributed lock, refresh metadata to latest version and sanity
- // check
- onShardVersionMismatch(opCtx, nss, boost::none);
+ const bool isVersioned = OperationShardingState::isOperationVersioned(opCtx);
+ if (!isVersioned) {
+ onShardVersionMismatch(opCtx, nss, boost::none);
+ }
const auto metadataBeforeMerge = [&] {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- return CollectionShardingRuntime::get(opCtx, nss)->getCurrentMetadataIfKnown();
+ auto csr = CollectionShardingRuntime::get(opCtx, nss);
+ // If there is a version attached to the OperationContext, validate it
+ if (isVersioned) {
+ csr->checkShardVersionOrThrow(opCtx);
+ }
+ return csr->getCurrentMetadataIfKnown();
}();
uassert(ErrorCodes::StaleEpoch,
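
For readers following the shard-side change: below is a minimal sketch, not part of the commit, of the refresh-versus-validate flow the hunk above introduces. When the router attaches a shard version to the operation, the shard validates it against its cached filtering metadata (throwing StaleConfig on mismatch) instead of forcing a refresh; when no version is attached, it refreshes as before. The helper name getMergeMetadata and its deduced return type are illustrative only; the calls are the ones used in the diff.

    // Sketch only -- lives inside the mongod server tree; helper name is hypothetical.
    auto getMergeMetadata(OperationContext* opCtx, const NamespaceString& nss) {
        const bool isVersioned = OperationShardingState::isOperationVersioned(opCtx);
        if (!isVersioned) {
            // No shard version attached (e.g. a direct command against the shard):
            // refresh so the checks below run against up-to-date metadata.
            onShardVersionMismatch(opCtx, nss, boost::none);
        }

        AutoGetCollection autoColl(opCtx, nss, MODE_IS);
        auto csr = CollectionShardingRuntime::get(opCtx, nss);
        if (isVersioned) {
            // A shard version is attached to the OperationContext: validate it and
            // throw StaleConfig on mismatch so the router can refresh and retry.
            csr->checkShardVersionOrThrow(opCtx);
        }
        return csr->getCurrentMetadataIfKnown();
    }
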
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index 531aa1ab41e..5d826c9c0cf 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -103,10 +103,6 @@ public:
              BSONObjBuilder& result) override {
         const NamespaceString nss(parseNs(dbname, cmdObj));
 
-        const auto cm = uassertStatusOK(
-            Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
-                                                                                         nss));
-
         vector<BSONObj> bounds;
         if (!FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg)) {
             return false;
@@ -135,6 +131,9 @@ public:
             return false;
         }
 
+        auto const cm =
+            Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfo(opCtx, nss);
+
         if (!cm.getShardKeyPattern().isShardKey(minKey) ||
             !cm.getShardKeyPattern().isShardKey(maxKey)) {
             errmsg = str::stream()
@@ -148,6 +147,7 @@ public:
         maxKey = cm.getShardKeyPattern().normalizeShardKey(maxKey);
 
         const auto firstChunk = cm.findIntersectingChunkWithSimpleCollation(minKey);
+        ChunkVersion shardVersion = cm.getVersion(firstChunk.getShardId());
 
         BSONObjBuilder remoteCmdObjB;
         remoteCmdObjB.append(cmdObj[ClusterMergeChunksCommand::nsField()]);
@@ -157,7 +157,8 @@ public:
             Grid::get(opCtx)->shardRegistry()->getConfigServerConnectionString().toString());
         remoteCmdObjB.append(ClusterMergeChunksCommand::shardNameField(),
                              firstChunk.getShardId().toString());
-        remoteCmdObjB.append("epoch", cm.getVersion().epoch());
+        remoteCmdObjB.append("epoch", shardVersion.epoch());
+        shardVersion.appendToCommand(&remoteCmdObjB);
 
         BSONObj remoteResult;
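
On the router side, the net effect of the hunks above is sketched below, again not as part of the commit: the routing information is read from the catalog cache without a forced refresh, the version of the chunk's owning shard is derived from it, and that shard version is attached to the remote command next to the existing "epoch" field. Switching the epoch's source from the collection version to the shard version is behavior-preserving, since every chunk version of a collection carries the same epoch. The builder name cmdBuilder is illustrative; the calls are those used in the diff.

    // Sketch only -- assumes a ChunkManager cm and a normalized shard-key bound minKey,
    // as in the command body above.
    const auto firstChunk = cm.findIntersectingChunkWithSimpleCollation(minKey);
    ChunkVersion shardVersion = cm.getVersion(firstChunk.getShardId());

    BSONObjBuilder cmdBuilder;
    // The pre-existing "epoch" field is still sent, now read off the shard version.
    cmdBuilder.append("epoch", shardVersion.epoch());
    // The full shard version lets the recipient shard validate instead of refreshing.
    shardVersion.appendToCommand(&cmdBuilder);
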