summaryrefslogtreecommitdiff
path: root/src/mongo/db
diff options
context:
space:
mode:
authorAllison Easton <allison.easton@mongodb.com>2022-08-12 08:49:52 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-08-12 09:34:55 +0000
commitc13c8d8997a25915f52afbd2a392c468fc63376a (patch)
treed3caacfccc6701e45344a373560ff3ad090e722a /src/mongo/db
parent412f7c46764ea072c6bd00f6d87197ae54922ab9 (diff)
downloadmongo-c13c8d8997a25915f52afbd2a392c468fc63376a.tar.gz
SERVER-68485 Merge and Split commands should not use the CSR to check the shard version
Diffstat (limited to 'src/mongo/db')
-rw-r--r--src/mongo/db/s/SConscript1
-rw-r--r--src/mongo/db/s/chunk_operation_precondition_checks.cpp180
-rw-r--r--src/mongo/db/s/chunk_operation_precondition_checks.h93
-rw-r--r--src/mongo/db/s/migration_source_manager.cpp106
-rw-r--r--src/mongo/db/s/operation_sharding_state.cpp16
-rw-r--r--src/mongo/db/s/operation_sharding_state.h8
-rw-r--r--src/mongo/db/s/shardsvr_merge_chunks_command.cpp60
-rw-r--r--src/mongo/db/s/shardsvr_split_chunk_command.cpp35
8 files changed, 328 insertions, 171 deletions
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 38b844a09a3..38913ff31ad 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -49,6 +49,7 @@ env.Library(
'active_migrations_registry.cpp',
'auto_split_vector.cpp',
'chunk_move_write_concern_options.cpp',
+ 'chunk_operation_precondition_checks.cpp',
'chunk_split_state_driver.cpp',
'chunk_splitter.cpp',
'collection_critical_section_document.idl',
diff --git a/src/mongo/db/s/chunk_operation_precondition_checks.cpp b/src/mongo/db/s/chunk_operation_precondition_checks.cpp
new file mode 100644
index 00000000000..da8f5d890a3
--- /dev/null
+++ b/src/mongo/db/s/chunk_operation_precondition_checks.cpp
@@ -0,0 +1,180 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+#include "mongo/db/catalog_raii.h"
+#include "mongo/db/s/collection_sharding_runtime.h"
+#include "mongo/db/s/operation_sharding_state.h"
+#include "mongo/db/s/sharding_state.h"
+
+namespace mongo {
+
+CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const OID& expectedEpoch,
+ const boost::optional<Timestamp>& expectedTimestamp) {
+ AutoGetCollection collection(opCtx, nss, MODE_IS);
+
+ const auto shardId = ShardingState::get(opCtx)->shardId();
+ auto* const csr = CollectionShardingRuntime::get(opCtx, nss);
+ const auto csrLock = CollectionShardingRuntime::CSRLock::lockExclusive(opCtx, csr);
+ auto optMetadata = csr->getCurrentMetadataIfKnown();
+
+ uassert(StaleConfigInfo(nss,
+ ChunkVersion::IGNORED() /* receivedVersion */,
+ boost::none /* wantedVersion */,
+ shardId),
+ str::stream() << "Collection " << nss.ns() << " needs to be recovered",
+ optMetadata);
+
+ auto metadata = *optMetadata;
+
+ uassert(StaleConfigInfo(nss,
+ ChunkVersion::IGNORED() /* receivedVersion */,
+ ChunkVersion::UNSHARDED() /* wantedVersion */,
+ shardId),
+ str::stream() << "Collection " << nss.ns() << " is not sharded",
+ metadata.isSharded());
+
+ uassert(ErrorCodes::NamespaceNotFound,
+ "The collection was not found locally even though it is marked as sharded.",
+ collection);
+
+ const auto shardVersion = metadata.getShardVersion();
+
+ uassert(StaleConfigInfo(nss,
+ ChunkVersion::IGNORED() /* receivedVersion */,
+ shardVersion /* wantedVersion */,
+ shardId),
+ str::stream() << "Collection " << nss.ns()
+ << " has changed since operation was sent (sent epoch: " << expectedEpoch
+ << ", current epoch: " << shardVersion.epoch() << ")",
+ expectedEpoch == shardVersion.epoch() &&
+ (!expectedTimestamp || expectedTimestamp == shardVersion.getTimestamp()));
+
+ uassert(StaleConfigInfo(nss,
+ ChunkVersion::IGNORED() /* receivedVersion */,
+ shardVersion /* wantedVersion */,
+ shardId),
+ str::stream() << "Shard does not contain any chunks for collection.",
+ shardVersion.majorVersion() > 0);
+
+ return metadata;
+}
+
+void checkShardKeyPattern(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const CollectionMetadata& metadata,
+ const ChunkRange& chunkRange) {
+ const auto shardId = ShardingState::get(opCtx)->shardId();
+ const auto& keyPattern = metadata.getKeyPattern();
+ const auto shardVersion = metadata.getShardVersion();
+
+ uassert(StaleConfigInfo(nss,
+ ChunkVersion::IGNORED() /* receivedVersion */,
+ shardVersion /* wantedVersion */,
+ shardId),
+ str::stream() << "The range " << chunkRange.toString()
+ << " is not valid for collection " << nss.ns() << " with key pattern "
+ << keyPattern.toString(),
+ metadata.isValidKey(chunkRange.getMin()) && metadata.isValidKey(chunkRange.getMax()));
+}
+
+void checkChunkMatchesRange(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const CollectionMetadata& metadata,
+ const ChunkRange& chunkRange) {
+ const auto shardId = ShardingState::get(opCtx)->shardId();
+ const auto shardVersion = metadata.getShardVersion();
+
+ ChunkType existingChunk;
+ uassert(StaleConfigInfo(nss,
+ ChunkVersion::IGNORED() /* receivedVersion */,
+ shardVersion /* wantedVersion */,
+ shardId),
+ str::stream() << "Range with bounds " << chunkRange.toString()
+ << " is not owned by this shard.",
+ metadata.getNextChunk(chunkRange.getMin(), &existingChunk) &&
+ existingChunk.getMin().woCompare(chunkRange.getMin()) == 0);
+
+ uassert(StaleConfigInfo(nss,
+ ChunkVersion::IGNORED() /* receivedVersion */,
+ shardVersion /* wantedVersion */,
+ shardId),
+ str::stream() << "Chunk bounds " << chunkRange.toString() << " do not exist.",
+ existingChunk.getRange() == chunkRange);
+}
+
+void checkRangeWithinChunk(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const CollectionMetadata& metadata,
+ const ChunkRange& chunkRange) {
+ const auto shardId = ShardingState::get(opCtx)->shardId();
+ const auto shardVersion = metadata.getShardVersion();
+
+ ChunkType existingChunk;
+ uassert(StaleConfigInfo(nss,
+ ChunkVersion::IGNORED() /* receivedVersion */,
+ shardVersion /* wantedVersion */,
+ shardId),
+ str::stream() << "Range with bounds " << chunkRange.toString()
+ << " is not contained within a chunk owned by this shard.",
+ metadata.getNextChunk(chunkRange.getMin(), &existingChunk) &&
+ existingChunk.getRange().covers(chunkRange));
+}
+
+void checkRangeOwnership(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const CollectionMetadata& metadata,
+ const ChunkRange& chunkRange) {
+ const auto shardId = ShardingState::get(opCtx)->shardId();
+ const auto shardVersion = metadata.getShardVersion();
+
+ ChunkType existingChunk;
+ BSONObj minKey = chunkRange.getMin();
+ do {
+ uassert(StaleConfigInfo(nss,
+ ChunkVersion::IGNORED() /* receivedVersion */,
+ shardVersion /* wantedVersion */,
+ shardId),
+ str::stream() << "Range with bounds " << chunkRange.toString()
+ << " is not owned by this shard.",
+ metadata.getNextChunk(minKey, &existingChunk) &&
+ existingChunk.getMin().woCompare(minKey) == 0);
+ minKey = existingChunk.getMax();
+ } while (existingChunk.getMax().woCompare(chunkRange.getMax()) < 0);
+ uassert(
+ StaleConfigInfo(nss,
+ ChunkVersion::IGNORED() /* receivedVersion */,
+ shardVersion /* wantedVersion */,
+ shardId),
+ str::stream() << "Shard does not contain a sequence of chunks that exactly fills the range "
+ << chunkRange.toString(),
+ existingChunk.getMax().woCompare(chunkRange.getMax()) == 0);
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/s/chunk_operation_precondition_checks.h b/src/mongo/db/s/chunk_operation_precondition_checks.h
new file mode 100644
index 00000000000..4720091c1c2
--- /dev/null
+++ b/src/mongo/db/s/chunk_operation_precondition_checks.h
@@ -0,0 +1,93 @@
+/**
+ * Copyright (C) 2022-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+#include "mongo/bson/oid.h"
+#include "mongo/bson/timestamp.h"
+#include "mongo/db/s/collection_metadata.h"
+#include "mongo/s/catalog/type_chunk.h"
+
+namespace mongo {
+/**
+ * These functions should only be used by ddl operations such as split, merge, and split vector that
+ * do not use the shard version protocol and instead perform manual checks.
+ */
+
+/**
+ * Checks that the metadata for the collection is present in the CSR, that the collection is sharded
+ * according to that metadata, and that the expected epoch and timestamp match what is present in
+ * the CSR. Returns the collection metadata.
+ *
+ * Throws StaleShardVersion otherwise.
+ */
+CollectionMetadata checkCollectionIdentity(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const OID& expectedEpoch,
+ const boost::optional<Timestamp>& expectedTimestamp);
+
+/**
+ * Checks that the chunk range matches the shard key pattern in the metadata.
+ *
+ * Throws StaleShardVersion otherwise.
+ */
+void checkShardKeyPattern(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const CollectionMetadata& metadata,
+ const ChunkRange& chunkRange);
+
+/**
+ * Checks that there is exactly one chunk owned by this shard whose bounds equal chunkRange.
+ *
+ * Throws StaleShardVersion otherwise.
+ */
+void checkChunkMatchesRange(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const CollectionMetadata& metadata,
+ const ChunkRange& chunkRange);
+
+/**
+ * Checks that the range is contained within a single chunk. The bounds of the range do not
+ * necessarily have to be the same as the bounds of the chunk.
+ *
+ * Throws StaleShardVersion otherwise.
+ */
+void checkRangeWithinChunk(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const CollectionMetadata& metadata,
+ const ChunkRange& chunkRange);
+
+/**
+ * Checks that there is a series of chunks owned by this shard that make up the range in chunkRange.
+ *
+ * Throws StaleShardVersion otherwise.
+ */
+void checkRangeOwnership(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const CollectionMetadata& metadata,
+ const ChunkRange& chunkRange);
+
+} // namespace mongo
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index dd5c1b73c83..80ef301a712 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/read_concern.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/auto_split_vector.h"
+#include "mongo/db/s/chunk_operation_precondition_checks.h"
#include "mongo/db/s/commit_chunk_migration_gen.h"
#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
#include "mongo/db/s/migration_coordinator.h"
@@ -189,32 +190,13 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
const auto [collectionMetadata, collectionUUID] = [&] {
UninterruptibleLockGuard noInterrupt(_opCtx->lockState());
AutoGetCollection autoColl(_opCtx, nss(), MODE_IS);
- uassert(ErrorCodes::InvalidOptions,
- "cannot move chunks for a collection that doesn't exist",
- autoColl.getCollection());
-
- UUID collectionUUID = autoColl.getCollection()->uuid();
auto* const csr = CollectionShardingRuntime::get(_opCtx, nss());
const auto csrLock = CollectionShardingRuntime::CSRLock::lockExclusive(_opCtx, csr);
- auto optMetadata = csr->getCurrentMetadataIfKnown();
- uassert(StaleConfigInfo(nss(),
- ChunkVersion::IGNORED() /* receivedVersion */,
- boost::none /* wantedVersion */,
- shardId,
- boost::none),
- "The collection's sharding state was cleared by a concurrent operation",
- optMetadata);
+ const auto metadata = checkCollectionIdentity(_opCtx, nss(), _args.getEpoch(), boost::none);
- auto& metadata = *optMetadata;
- uassert(StaleConfigInfo(nss(),
- ChunkVersion::IGNORED() /* receivedVersion */,
- ChunkVersion::UNSHARDED() /* wantedVersion */,
- shardId,
- boost::none),
- "Cannot move chunks for an unsharded collection",
- metadata.isSharded());
+ UUID collectionUUID = autoColl.getCollection()->uuid();
// Atomically (still under the CSR lock held above) check whether migrations are allowed and
// register the MigrationSourceManager on the CSR. This ensures that interruption due to the
@@ -229,29 +211,6 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
return std::make_tuple(std::move(metadata), std::move(collectionUUID));
}();
- const auto collectionVersion = collectionMetadata.getCollVersion();
- const auto shardVersion = collectionMetadata.getShardVersion();
-
- uassert(StaleConfigInfo(nss(),
- ChunkVersion::IGNORED() /* receivedVersion */,
- shardVersion /* wantedVersion */,
- shardId,
- boost::none),
- str::stream() << "cannot move chunk " << _args.toBSON({})
- << " because collection may have been dropped. "
- << "current epoch: " << collectionVersion.epoch()
- << ", cmd epoch: " << _args.getEpoch(),
- _args.getEpoch() == collectionVersion.epoch());
-
- uassert(StaleConfigInfo(nss(),
- ChunkVersion::IGNORED() /* receivedVersion */,
- shardVersion /* wantedVersion */,
- shardId,
- boost::none),
- str::stream() << "cannot move chunk " << _args.toBSON({})
- << " because the shard doesn't contain any chunks",
- shardVersion.majorVersion() > 0);
-
// Compute the max bound in case only `min` is set (moveRange)
if (!_args.getMax().has_value()) {
// TODO SERVER-64926 do not assume min always present
@@ -269,61 +228,12 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
_moveTimingHelper.setMax(max);
}
- const auto& keyPattern = collectionMetadata.getKeyPattern();
- const bool validBounds = [&]() {
- // Return true if provided bounds are respecting the shard key format, false otherwise
- const auto nFields = keyPattern.nFields();
- if (nFields != (*_args.getMin()).nFields() || nFields != (*_args.getMax()).nFields()) {
- return false;
- }
+ checkShardKeyPattern(
+ _opCtx, nss(), collectionMetadata, ChunkRange(*_args.getMin(), *_args.getMax()));
+ checkRangeWithinChunk(
+ _opCtx, nss(), collectionMetadata, ChunkRange(*_args.getMin(), *_args.getMax()));
- BSONObjIterator keyPatternIt(keyPattern), minIt(*_args.getMin()), maxIt(*_args.getMax());
-
- while (keyPatternIt.more()) {
- const auto keyPatternField = keyPatternIt.next().fieldNameStringData();
- const auto minField = minIt.next().fieldNameStringData();
- const auto maxField = maxIt.next().fieldNameStringData();
-
- if (keyPatternField != minField || keyPatternField != maxField) {
- return false;
- }
- }
-
- return true;
- }();
- uassert(StaleConfigInfo(nss(),
- ChunkVersion::IGNORED() /* receivedVersion */,
- shardVersion /* wantedVersion */,
- shardId,
- boost::none),
- str::stream() << "Range bounds do not match the shard key pattern. KeyPattern: "
- << keyPattern.toString() << " - Bounds: "
- << ChunkRange(*_args.getMin(), *_args.getMax()).toString() << ".",
- validBounds);
-
- ChunkType existingChunk;
- uassert(StaleConfigInfo(nss(),
- ChunkVersion::IGNORED() /* receivedVersion */,
- shardVersion /* wantedVersion */,
- shardId,
- boost::none),
- str::stream() << "Range with bounds "
- << ChunkRange(*_args.getMin(), *_args.getMax()).toString()
- << " is not owned by this shard.",
- collectionMetadata.getNextChunk(*_args.getMin(), &existingChunk));
-
- uassert(StaleConfigInfo(nss(),
- ChunkVersion::IGNORED() /* receivedVersion */,
- shardVersion /* wantedVersion */,
- shardId,
- boost::none),
- str::stream() << "Unable to move range with bounds "
- << ChunkRange(*_args.getMin(), *_args.getMax()).toString()
- << " . The closest owned chunk is "
- << ChunkRange(existingChunk.getMin(), existingChunk.getMax()).toString(),
- existingChunk.getRange().covers(ChunkRange(*_args.getMin(), *_args.getMax())));
-
- _collectionEpoch = collectionVersion.epoch();
+ _collectionEpoch = _args.getEpoch();
_collectionUUID = collectionUUID;
_chunkVersion = collectionMetadata.getChunkManager()
diff --git a/src/mongo/db/s/operation_sharding_state.cpp b/src/mongo/db/s/operation_sharding_state.cpp
index 6fd4ce5fc4f..0621a5bd87a 100644
--- a/src/mongo/db/s/operation_sharding_state.cpp
+++ b/src/mongo/db/s/operation_sharding_state.cpp
@@ -87,6 +87,22 @@ void OperationShardingState::setShardRole(OperationContext* opCtx,
}
}
+void OperationShardingState::unsetShardRoleForLegacyDDLOperationsSentWithShardVersionIfNeeded(
+ OperationContext* opCtx, const NamespaceString& nss) {
+ auto& oss = OperationShardingState::get(opCtx);
+
+ auto it = oss._shardVersions.find(nss.ns());
+ if (it != oss._shardVersions.end()) {
+ auto& tracker = it->second;
+ tassert(6848500,
+ "DDL operation should not recursively use the shard role",
+ --tracker.recursion == 0);
+ if (tracker.recursion == 0)
+ oss._shardVersions.erase(it);
+ }
+ return;
+}
+
boost::optional<ShardVersion> OperationShardingState::getShardVersion(const NamespaceString& nss) {
const auto it = _shardVersions.find(nss.ns());
if (it != _shardVersions.end()) {
diff --git a/src/mongo/db/s/operation_sharding_state.h b/src/mongo/db/s/operation_sharding_state.h
index dff0c8df630..695b33f4927 100644
--- a/src/mongo/db/s/operation_sharding_state.h
+++ b/src/mongo/db/s/operation_sharding_state.h
@@ -117,6 +117,14 @@ public:
const boost::optional<DatabaseVersion>& dbVersion);
/**
+ * Used to clear the shard role from the opCtx for ddl operations which are not required to send
+ * the index version (ex. split, merge). These operations will do their own metadata checks
+ * rather than use the collection sharding runtime checks.
+ */
+ static void unsetShardRoleForLegacyDDLOperationsSentWithShardVersionIfNeeded(
+ OperationContext* opCtx, const NamespaceString& nss);
+
+ /**
* Returns the shard version (i.e. maximum chunk version) of a namespace being used by the
* operation. Documents in chunks which did not belong on this shard at this shard version
* will be filtered out.
diff --git a/src/mongo/db/s/shardsvr_merge_chunks_command.cpp b/src/mongo/db/s/shardsvr_merge_chunks_command.cpp
index 4baeca09bb6..7c844d3444f 100644
--- a/src/mongo/db/s/shardsvr_merge_chunks_command.cpp
+++ b/src/mongo/db/s/shardsvr_merge_chunks_command.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/field_parser.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/active_migrations_registry.h"
+#include "mongo/db/s/chunk_operation_precondition_checks.h"
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/shard_filtering_metadata_refresh.h"
@@ -90,58 +91,17 @@ void mergeChunks(OperationContext* opCtx,
uassertStatusOK(ActiveMigrationsRegistry::get(opCtx).registerSplitOrMergeChunk(
opCtx, nss, ChunkRange(minKey, maxKey))));
- auto& oss = OperationShardingState::get(opCtx);
- if (!oss.getShardVersion(nss)) {
- onShardVersionMismatch(opCtx, nss, boost::none);
- }
-
- const auto metadataBeforeMerge = [&] {
- AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- auto csr = CollectionShardingRuntime::get(opCtx, nss);
-
- // If there is a version attached to the OperationContext, validate it
- if (oss.getShardVersion(nss)) {
- csr->checkShardVersionOrThrow(opCtx);
- } else {
- auto optMetadata = csr->getCurrentMetadataIfKnown();
-
- ShardId shardId = ShardingState::get(opCtx)->shardId();
-
- uassert(StaleConfigInfo(nss,
- ChunkVersion::IGNORED() /* receivedVersion */,
- boost::none /* wantedVersion */,
- shardId),
- str::stream() << "Collection " << nss.ns() << " needs to be recovered",
- optMetadata);
- uassert(StaleConfigInfo(nss,
- ChunkVersion::IGNORED() /* receivedVersion */,
- ChunkVersion::UNSHARDED() /* wantedVersion */,
- shardId),
- str::stream() << "Collection " << nss.ns() << " is not sharded",
- optMetadata->isSharded());
- const auto epoch = optMetadata->getShardVersion().epoch();
- uassert(StaleConfigInfo(nss,
- ChunkVersion::IGNORED() /* receivedVersion */,
- optMetadata->getShardVersion() /* wantedVersion */,
- shardId),
- str::stream() << "Could not merge chunks because collection " << nss.ns()
- << " has changed since merge was sent (sent epoch: "
- << expectedEpoch << ", current epoch: " << epoch << ")",
- epoch == expectedEpoch &&
- (!expectedTimestamp ||
- optMetadata->getShardVersion().getTimestamp() == expectedTimestamp));
- }
-
- return *csr->getCurrentMetadataIfKnown();
- }();
-
ChunkRange chunkRange(minKey, maxKey);
- uassert(ErrorCodes::IllegalOperation,
- str::stream() << "could not merge chunks, the range " << chunkRange.toString()
- << " is not valid for collection " << nss.ns() << " with key pattern "
- << metadataBeforeMerge.getKeyPattern().toString(),
- metadataBeforeMerge.isValidKey(minKey) && metadataBeforeMerge.isValidKey(maxKey));
+ // Check that the preconditions for merge chunks are met and throw StaleShardVersion otherwise.
+ const auto metadataBeforeMerge = [&]() {
+ OperationShardingState::unsetShardRoleForLegacyDDLOperationsSentWithShardVersionIfNeeded(
+ opCtx, nss);
+ const auto metadata = checkCollectionIdentity(opCtx, nss, expectedEpoch, expectedTimestamp);
+ checkShardKeyPattern(opCtx, nss, metadata, chunkRange);
+ checkRangeOwnership(opCtx, nss, metadata, chunkRange);
+ return metadata;
+ }();
auto cmdResponse = commitMergeOnConfigServer(
opCtx, nss, expectedEpoch, expectedTimestamp, chunkRange, metadataBeforeMerge);
diff --git a/src/mongo/db/s/shardsvr_split_chunk_command.cpp b/src/mongo/db/s/shardsvr_split_chunk_command.cpp
index 16a79ae12a3..bdcae8af9ad 100644
--- a/src/mongo/db/s/shardsvr_split_chunk_command.cpp
+++ b/src/mongo/db/s/shardsvr_split_chunk_command.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/auth/privilege.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/commands.h"
+#include "mongo/db/s/chunk_operation_precondition_checks.h"
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
@@ -97,29 +98,6 @@ public:
const NamespaceString nss(parseNs({boost::none, dbname}, cmdObj));
- // throw if the provided shard version is too old
- {
- AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- auto csr = CollectionShardingRuntime::get(opCtx, nss);
-
- // pre 5.1 client will send the collection version, which will make
- // checkShardVersionOrThrow fail. TODO remove try-catch in 6.1
- try {
- csr->checkShardVersionOrThrow(opCtx);
- } catch (const ExceptionFor<ErrorCodes::StaleConfig>& e) {
- do {
- if (auto staleInfo = e.extraInfo<StaleConfigInfo>()) {
- if (staleInfo->getVersionWanted() &&
- staleInfo->getVersionWanted()->isOlderThan(
- staleInfo->getVersionReceived())) {
- break;
- }
- }
- throw; // cause a refresh
- } while (false);
- }
- }
-
// Check whether parameters passed to splitChunk are sound
BSONObj keyPatternObj;
{
@@ -178,6 +156,17 @@ public:
return status.isOK() && field;
}();
+ // Check that the preconditions for split chunk are met and throw StaleShardVersion
+ // otherwise.
+ {
+ OperationShardingState::
+ unsetShardRoleForLegacyDDLOperationsSentWithShardVersionIfNeeded(opCtx, nss);
+ const auto metadata = checkCollectionIdentity(
+ opCtx, nss, expectedCollectionEpoch, expectedCollectionTimestamp);
+ checkShardKeyPattern(opCtx, nss, metadata, chunkRange);
+ checkChunkMatchesRange(opCtx, nss, metadata, chunkRange);
+ }
+
auto topChunk = uassertStatusOK(splitChunk(opCtx,
nss,
keyPatternObj,