author    Simon Gratzer <simon.gratzer@mongodb.com>        2021-05-18 20:59:17 +0200
committer Evergreen Agent <no-reply@evergreen.mongodb.com> 2021-06-01 13:00:09 +0000
commit    727cb006bc1723840d5cf98775d56f2b0eaca8a3 (patch)
tree      ea85c5c62fca22d7b41cb987e88a1c9b8e5d23be
parent    94c7512afa2ef51609202d68324624356fa06b5d (diff)
download  mongo-727cb006bc1723840d5cf98775d56f2b0eaca8a3.tar.gz
SERVER-57057 Reduce routing info refreshes on the mergeChunks path (BACKPORT-9358)
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp            13
-rw-r--r--  src/mongo/s/catalog_cache.cpp                      11
-rw-r--r--  src/mongo/s/catalog_cache.h                         7
-rw-r--r--  src/mongo/s/commands/cluster_merge_chunks_cmd.cpp  13
4 files changed, 36 insertions, 8 deletions
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 0853213a910..615b2bdac9d 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/logical_clock.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/collection_sharding_state.h"
+#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/shard_filtering_metadata_refresh.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/logv2/log.h"
@@ -92,11 +93,19 @@ void mergeChunks(OperationContext* opCtx,
// We now have the collection distributed lock, refresh metadata to latest version and sanity
// check
- forceShardFilteringMetadataRefresh(opCtx, nss, true /* forceRefreshFromThisThread */);
+ const bool isVersioned = OperationShardingState::isOperationVersioned(opCtx);
+ if (!isVersioned) {
+ forceShardFilteringMetadataRefresh(opCtx, nss, true /* forceRefreshFromThisThread */);
+ }
const auto collDesc = [&] {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- return CollectionShardingState::get(opCtx, nss)->getCollectionDescription();
+ auto css = CollectionShardingState::get(opCtx, nss);
+ // If there is a version attached to the OperationContext, validate it
+ if (isVersioned) {
+ css->checkShardVersionOrThrow(opCtx);
+ }
+ return css->getCollectionDescription();
}();
uassert(ErrorCodes::StaleEpoch,
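Condensed view of the shard-side change above (a sketch assembled from the hunk, not a verbatim copy of the server file): mergeChunks only forces a filtering metadata refresh for unversioned callers; when the router attaches a shard version, the shard validates it against its cached metadata instead.

    // Skip the forced refresh when the operation already carries a shard version.
    const bool isVersioned = OperationShardingState::isOperationVersioned(opCtx);
    if (!isVersioned) {
        // Unversioned (legacy) caller: keep the old behaviour and refresh.
        forceShardFilteringMetadataRefresh(opCtx, nss, true /* forceRefreshFromThisThread */);
    }

    const auto collDesc = [&] {
        AutoGetCollection autoColl(opCtx, nss, MODE_IS);
        auto css = CollectionShardingState::get(opCtx, nss);
        if (isVersioned) {
            // Versioned caller: a mismatch between the attached version and the cached
            // metadata throws, and the router retries after refreshing its own cache.
            css->checkShardVersionOrThrow(opCtx);
        }
        return css->getCollectionDescription();
    }();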
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 74d481b7b8b..e8ce4063e80 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -339,6 +339,17 @@ StatusWith<CachedCollectionRoutingInfo> CatalogCache::getShardedCollectionRoutin
return routingInfoStatus;
}
+StatusWith<CachedCollectionRoutingInfo> CatalogCache::getShardedCollectionRoutingInfo(
+ OperationContext* opCtx, const NamespaceString& nss) {
+ auto routingInfoStatus = _getCollectionRoutingInfoAt(opCtx, nss, boost::none).statusWithInfo;
+ if (routingInfoStatus.isOK() && !routingInfoStatus.getValue().cm()) {
+ return {ErrorCodes::NamespaceNotSharded,
+ str::stream() << "Collection " << nss.ns() << " is not sharded."};
+ }
+
+ return routingInfoStatus;
+}
+
void CatalogCache::onStaleDatabaseVersion(const StringData dbName,
const DatabaseVersion& databaseVersion) {
stdx::lock_guard<Latch> lg(_mutex);
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index 616fb8312be..78ded1c4016 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -196,6 +196,13 @@ public:
OperationContext* opCtx, const NamespaceString& nss);
/**
+ * Same as getCollectionRoutingInfoWithRefresh above, but in addition returns a
+ * NamespaceNotSharded error if the collection is not sharded.
+ */
+ StatusWith<CachedCollectionRoutingInfo> getShardedCollectionRoutingInfo(
+ OperationContext* opCtx, const NamespaceString& nss);
+
+ /**
* Non-blocking method that marks the current cached database entry as needing refresh if the
* entry's databaseVersion matches 'databaseVersion'.
*
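Assumed usage sketch for the new helper declared above, mirroring the mongos-side call later in this patch: a caller that requires a sharded collection lets the catalog cache produce the error instead of checking cm() itself.

    // Throws NamespaceNotSharded (via uassertStatusOK) if the cached routing
    // information says the collection is not sharded.
    auto routingInfo = uassertStatusOK(
        Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfo(opCtx, nss));
    const auto cm = routingInfo.cm();  // guaranteed to be set here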
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index 26b9435f91a..a9deb4c4a9d 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -103,11 +103,6 @@ public:
BSONObjBuilder& result) override {
const NamespaceString nss(parseNs(dbname, cmdObj));
- auto routingInfo = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
- nss));
- const auto cm = routingInfo.cm();
-
vector<BSONObj> bounds;
if (!FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg)) {
return false;
@@ -136,6 +131,10 @@ public:
return false;
}
+ auto routingInfo = uassertStatusOK(
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfo(opCtx, nss));
+ const auto cm = routingInfo.cm();
+
if (!cm->getShardKeyPattern().isShardKey(minKey) ||
!cm->getShardKeyPattern().isShardKey(maxKey)) {
errmsg = str::stream()
@@ -149,6 +148,7 @@ public:
maxKey = cm->getShardKeyPattern().normalizeShardKey(maxKey);
const auto firstChunk = cm->findIntersectingChunkWithSimpleCollation(minKey);
+ ChunkVersion shardVersion = cm->getVersion(firstChunk.getShardId());
BSONObjBuilder remoteCmdObjB;
remoteCmdObjB.append(cmdObj[ClusterMergeChunksCommand::nsField()]);
@@ -158,7 +158,8 @@ public:
Grid::get(opCtx)->shardRegistry()->getConfigServerConnectionString().toString());
remoteCmdObjB.append(ClusterMergeChunksCommand::shardNameField(),
firstChunk.getShardId().toString());
- remoteCmdObjB.append("epoch", cm->getVersion().epoch());
+ remoteCmdObjB.append("epoch", shardVersion.epoch());
+ shardVersion.appendToCommand(&remoteCmdObjB);
BSONObj remoteResult;
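Router-side summary, as a minimal sketch built only from the calls visible in the hunks above: the shard version of the owning shard is computed from the cached routing table and attached to the remote mergeChunks command, so the recipient shard can validate it rather than force-refresh.

    // Target the shard that owns the first chunk in the range and attach its version.
    const auto firstChunk = cm->findIntersectingChunkWithSimpleCollation(minKey);
    ChunkVersion shardVersion = cm->getVersion(firstChunk.getShardId());

    BSONObjBuilder remoteCmdObjB;
    remoteCmdObjB.append("epoch", shardVersion.epoch());  // epoch now taken from the shard version
    shardVersion.appendToCommand(&remoteCmdObjB);         // attaches the shard version to the command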