summary | refs | log | tree | commit | diff
path: root/src/mongo
diff options
context:
space:
mode:
author	Rui Liu <rui.liu@mongodb.com>	2022-04-07 13:20:41 +0000
committer	Evergreen Agent <no-reply@evergreen.mongodb.com>	2022-04-07 14:18:34 +0000
commit	1d14f3fa16533fe806027ab79e1caec4acd7b5ae (patch)
tree	a85cd18253b85de427511d14adc3528416ee2a95 /src/mongo
parent	3567535c340e69feeadbb0866f8019a6d84e355c (diff)
download	mongo-1d14f3fa16533fe806027ab79e1caec4acd7b5ae.tar.gz
SERVER-65227 Pause migration and calculate shards after pause in collMod coordinator
Diffstat (limited to 'src/mongo')
-rw-r--r--	src/mongo/db/s/collmod_coordinator.cpp	157
-rw-r--r--	src/mongo/db/s/collmod_coordinator.h	21
-rw-r--r--	src/mongo/db/s/collmod_coordinator_document.idl	26
-rw-r--r--	src/mongo/db/s/shardsvr_collmod_participant_command.cpp	12
4 files changed, 108 insertions, 108 deletions
diff --git a/src/mongo/db/s/collmod_coordinator.cpp b/src/mongo/db/s/collmod_coordinator.cpp
index 7b359f0844a..3ad2fe44440 100644
--- a/src/mongo/db/s/collmod_coordinator.cpp
+++ b/src/mongo/db/s/collmod_coordinator.cpp
@@ -71,29 +71,6 @@ bool hasTimeSeriesGranularityUpdate(const CollModRequest& request) {
return request.getTimeseries() && request.getTimeseries()->getGranularity();
}
-CollModCollectionInfo getCollModCollectionInfo(OperationContext* opCtx,
- const NamespaceString& nss) {
- CollModCollectionInfo info;
- info.setTimeSeriesOptions(timeseries::getTimeseriesOptions(opCtx, nss, true));
- info.setNsForTargetting(info.getTimeSeriesOptions() ? nss.makeTimeseriesBucketsNamespace()
- : nss);
- info.setIsSharded(isShardedColl(opCtx, info.getNsForTargetting()));
- if (info.getIsSharded()) {
- const auto chunkManager =
- uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(
- opCtx, info.getNsForTargetting()));
- info.setPrimaryShard(chunkManager.dbPrimary());
- tassert(8423352,
- "Unexpected unsharded collection info found on local catalog cache during collMod",
- chunkManager.isSharded());
- std::set<ShardId> shardIdsSet;
- chunkManager.getAllShardIds(&shardIdsSet);
- std::vector<ShardId> shardIdsVec{shardIdsSet.begin(), shardIdsSet.end()};
- info.setShardsOwningChunks(shardIdsVec);
- }
- return info;
-}
-
} // namespace
CollModCoordinator::CollModCoordinator(ShardingDDLCoordinatorService* service,
@@ -169,6 +146,35 @@ void CollModCoordinator::_performNoopRetryableWriteOnParticipants(
opCtx, shardsAndConfigsvr, getCurrentSession(_doc), executor);
}
+void CollModCoordinator::_saveCollectionInfoOnCoordinatorIfNecessary(OperationContext* opCtx) {
+ if (!_collInfo) {
+ CollectionInfo info;
+ info.timeSeriesOptions = timeseries::getTimeseriesOptions(opCtx, nss(), true);
+ info.nsForTargeting =
+ info.timeSeriesOptions ? nss().makeTimeseriesBucketsNamespace() : nss();
+ info.isSharded = isShardedColl(opCtx, info.nsForTargeting);
+ _collInfo = std::move(info);
+ }
+}
+
+void CollModCoordinator::_saveShardingInfoOnCoordinatorIfNecessary(OperationContext* opCtx) {
+ tassert(
+ 6522700, "Sharding information must be gathered after collection information", _collInfo);
+ if (!_shardingInfo && _collInfo->isSharded) {
+ ShardingInfo info;
+ const auto chunkManager =
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(
+ opCtx, _collInfo->nsForTargeting));
+
+ info.primaryShard = chunkManager.dbPrimary();
+ std::set<ShardId> shardIdsSet;
+ chunkManager.getAllShardIds(&shardIdsSet);
+ std::vector<ShardId> shardIdsVec{shardIdsSet.begin(), shardIdsSet.end()};
+ info.shardsOwningChunks = std::move(shardIdsVec);
+ _shardingInfo = std::move(info);
+ }
+}
+
ExecutorFuture<void> CollModCoordinator::_runImpl(
std::shared_ptr<executor::ScopedTaskExecutor> executor,
const CancellationToken& token) noexcept {
@@ -181,6 +187,28 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
if (_doc.getPhase() > Phase::kUnset) {
_performNoopRetryableWriteOnParticipants(opCtx, **executor);
}
+
+ {
+ AutoGetCollection coll{
+ opCtx, nss(), MODE_IS, AutoGetCollectionViewMode::kViewsPermitted};
+ checkCollectionUUIDMismatch(
+ opCtx, nss(), *coll, _doc.getCollModRequest().getCollectionUUID());
+ }
+
+ _saveCollectionInfoOnCoordinatorIfNecessary(opCtx);
+
+ auto isGranularityUpdate = hasTimeSeriesGranularityUpdate(_doc.getCollModRequest());
+ uassert(6201808,
+ "Cannot use time-series options for a non-timeseries collection",
+ _collInfo->timeSeriesOptions || !isGranularityUpdate);
+ if (isGranularityUpdate) {
+ uassert(ErrorCodes::InvalidOptions,
+ "Invalid transition for timeseries.granularity. Can only transition "
+ "from 'seconds' to 'minutes' or 'minutes' to 'hours'.",
+ timeseries::isValidTimeseriesGranularityTransition(
+ _collInfo->timeSeriesOptions->getGranularity(),
+ *_doc.getCollModRequest().getTimeseries()->getGranularity()));
+ }
})
.then(_executePhase(
Phase::kBlockShards,
@@ -191,51 +219,23 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
_doc = _updateSession(opCtx, _doc);
- {
- AutoGetCollection coll{
- opCtx, nss(), MODE_IS, AutoGetCollectionViewMode::kViewsPermitted};
- checkCollectionUUIDMismatch(
- opCtx, nss(), *coll, _doc.getCollModRequest().getCollectionUUID());
- }
+ _saveCollectionInfoOnCoordinatorIfNecessary(opCtx);
- if (!_doc.getInfo()) {
- _doc.setInfo(getCollModCollectionInfo(opCtx, nss()));
+ if (_collInfo->isSharded) {
+ _doc.setCollUUID(
+ sharding_ddl_util::getCollectionUUID(opCtx, nss(), true /* allowViews */));
+ sharding_ddl_util::stopMigrations(opCtx, nss(), _doc.getCollUUID());
}
+ _saveShardingInfoOnCoordinatorIfNecessary(opCtx);
- auto isGranularityUpdate = hasTimeSeriesGranularityUpdate(_doc.getCollModRequest());
- uassert(6201808,
- "Cannot use time-series options for a non-timeseries collection",
- _doc.getInfo()->getTimeSeriesOptions() || !isGranularityUpdate);
- if (isGranularityUpdate) {
- uassert(ErrorCodes::InvalidOptions,
- "Invalid transition for timeseries.granularity. Can only transition "
- "from 'seconds' to 'minutes' or 'minutes' to 'hours'.",
- timeseries::isValidTimeseriesGranularityTransition(
- _doc.getInfo()->getTimeSeriesOptions()->getGranularity(),
- *_doc.getCollModRequest().getTimeseries()->getGranularity()));
-
- if (_doc.getInfo()->getIsSharded()) {
- tassert(6201805,
- "shardsOwningChunks should be set on state document for sharded "
- "collection",
- _doc.getInfo()->getShardsOwningChunks());
-
- _doc.setCollUUID(sharding_ddl_util::getCollectionUUID(
- opCtx, nss(), true /* allowViews */));
- sharding_ddl_util::stopMigrations(opCtx, nss(), _doc.getCollUUID());
-
- ShardsvrParticipantBlock blockCRUDOperationsRequest(
- _doc.getInfo()->getNsForTargetting());
- const auto cmdObj = CommandHelpers::appendMajorityWriteConcern(
- blockCRUDOperationsRequest.toBSON({}));
- sharding_ddl_util::sendAuthenticatedCommandToShards(
- opCtx,
- nss().db(),
- cmdObj,
- *_doc.getInfo()->getShardsOwningChunks(),
- **executor);
- }
+ if (_collInfo->isSharded &&
+ hasTimeSeriesGranularityUpdate(_doc.getCollModRequest())) {
+ ShardsvrParticipantBlock blockCRUDOperationsRequest(_collInfo->nsForTargeting);
+ const auto cmdObj = CommandHelpers::appendMajorityWriteConcern(
+ blockCRUDOperationsRequest.toBSON({}));
+ sharding_ddl_util::sendAuthenticatedCommandToShards(
+ opCtx, nss().db(), cmdObj, _shardingInfo->shardsOwningChunks, **executor);
}
}))
.then(_executePhase(
@@ -248,12 +248,13 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
getForwardableOpMetadata().setOn(opCtx);
_doc = _updateSession(opCtx, _doc);
- tassert(6201803, "collMod collection information should be set", _doc.getInfo());
- if (_doc.getInfo()->getIsSharded() && _doc.getInfo()->getTimeSeriesOptions() &&
+ _saveCollectionInfoOnCoordinatorIfNecessary(opCtx);
+ _saveShardingInfoOnCoordinatorIfNecessary(opCtx);
+
+ if (_collInfo->isSharded && _collInfo->timeSeriesOptions &&
hasTimeSeriesGranularityUpdate(_doc.getCollModRequest())) {
- ConfigsvrCollMod request(_doc.getInfo()->getNsForTargetting(),
- _doc.getCollModRequest());
+ ConfigsvrCollMod request(_collInfo->nsForTargeting, _doc.getCollModRequest());
const auto cmdObj =
CommandHelpers::appendMajorityWriteConcern(request.toBSON({}));
@@ -274,27 +275,21 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
getForwardableOpMetadata().setOn(opCtx);
_doc = _updateSession(opCtx, _doc);
- tassert(6201804, "collMod collection information should be set", _doc.getInfo());
- if (_doc.getInfo()->getIsSharded()) {
- tassert(
- 6201806,
- "shardsOwningChunks should be set on state document for sharded collection",
- _doc.getInfo()->getShardsOwningChunks());
- tassert(6201807,
- "primaryShard should be set on state document for sharded collection",
- _doc.getInfo()->getPrimaryShard());
+ _saveCollectionInfoOnCoordinatorIfNecessary(opCtx);
+ _saveShardingInfoOnCoordinatorIfNecessary(opCtx);
+ if (_collInfo->isSharded) {
ShardsvrCollModParticipant request(nss(), _doc.getCollModRequest());
- bool needsUnblock = _doc.getInfo()->getTimeSeriesOptions() &&
+ bool needsUnblock = _collInfo->timeSeriesOptions &&
hasTimeSeriesGranularityUpdate(_doc.getCollModRequest());
request.setNeedsUnblock(needsUnblock);
std::vector<AsyncRequestsSender::Response> responses;
- auto shardsOwningChunks = *_doc.getInfo()->getShardsOwningChunks();
+ auto shardsOwningChunks = _shardingInfo->shardsOwningChunks;
auto primaryShardOwningChunk = std::find(shardsOwningChunks.begin(),
shardsOwningChunks.end(),
- _doc.getInfo()->getPrimaryShard());
+ _shardingInfo->primaryShard);
// A view definition will only be present on the primary shard. So we pass an
// addition 'performViewChange' flag only to the primary shard.
if (primaryShardOwningChunk != shardsOwningChunks.end()) {
@@ -304,7 +299,7 @@ ExecutorFuture<void> CollModCoordinator::_runImpl(
opCtx,
nss().db(),
CommandHelpers::appendMajorityWriteConcern(request.toBSON({})),
- {*_doc.getInfo()->getPrimaryShard()},
+ {_shardingInfo->primaryShard},
**executor);
responses.insert(
responses.end(), primaryResponse.begin(), primaryResponse.end());
diff --git a/src/mongo/db/s/collmod_coordinator.h b/src/mongo/db/s/collmod_coordinator.h
index 885551d4853..98243d8bdbb 100644
--- a/src/mongo/db/s/collmod_coordinator.h
+++ b/src/mongo/db/s/collmod_coordinator.h
@@ -59,6 +59,21 @@ public:
}
private:
+ struct CollectionInfo {
+ bool isSharded;
+ boost::optional<TimeseriesOptions> timeSeriesOptions;
+ // The targeting namespace can be different from the original namespace in some cases, like
+ // time-series collections.
+ NamespaceString nsForTargeting;
+ };
+
+ struct ShardingInfo {
+ // The primary shard for the collection, only set if the collection is sharded.
+ ShardId primaryShard;
+ // The shards owning chunks for the collection, only set if the collection is sharded.
+ std::vector<ShardId> shardsOwningChunks;
+ };
+
ShardingDDLCoordinatorMetadata const& metadata() const override {
return _doc.getShardingDDLCoordinatorMetadata();
}
@@ -88,9 +103,15 @@ private:
void _performNoopRetryableWriteOnParticipants(
OperationContext* opCtx, const std::shared_ptr<executor::TaskExecutor>& executor);
+ void _saveCollectionInfoOnCoordinatorIfNecessary(OperationContext* opCtx);
+
+ void _saveShardingInfoOnCoordinatorIfNecessary(OperationContext* opCtx);
+
BSONObj _initialState;
CollModCoordinatorDocument _doc;
boost::optional<BSONObj> _result;
+ boost::optional<CollectionInfo> _collInfo;
+ boost::optional<ShardingInfo> _shardingInfo;
};
} // namespace mongo
diff --git a/src/mongo/db/s/collmod_coordinator_document.idl b/src/mongo/db/s/collmod_coordinator_document.idl
index 2b7fe16579b..1f2bc418e34 100644
--- a/src/mongo/db/s/collmod_coordinator_document.idl
+++ b/src/mongo/db/s/collmod_coordinator_document.idl
@@ -51,28 +51,6 @@ enums:
kUpdateShards: "UpdateShards"
structs:
- CollModCollectionInfo:
- description: "Collection information that may affect collMod processing"
- generate_comparison_operators: false
- strict: false
- fields:
- timeSeriesOptions:
- type: TimeseriesOptions
- optional: true
- nsForTargetting:
- type: namespacestring
- description: "The targeting namespace that may be different from the original namespace in some cases, like time-series collections"
- isSharded:
- type: bool
- primaryShard:
- type: shard_id
- optional: true
- description: "The primary shard for the collection, only set if the collection is sharded"
- shardsOwningChunks:
- type: array<shard_id>
- optional: true
- description: "The shards owning chunks for the collection, only set if the collection is sharded"
-
CollModCoordinatorDocument:
description: "Represents a collMod operation on the coordinator shard."
generate_comparison_operators: false
@@ -90,7 +68,3 @@ structs:
type: uuid
description: "Collection uuid."
optional: true
- info:
- type: CollModCollectionInfo
- description: "Collection information that may affect collMod processing"
- optional: true
diff --git a/src/mongo/db/s/shardsvr_collmod_participant_command.cpp b/src/mongo/db/s/shardsvr_collmod_participant_command.cpp
index 32640aa2b4f..7845a705185 100644
--- a/src/mongo/db/s/shardsvr_collmod_participant_command.cpp
+++ b/src/mongo/db/s/shardsvr_collmod_participant_command.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/commands.h"
+#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/collmod_coordinator.h"
#include "mongo/db/s/database_sharding_state.h"
#include "mongo/db/s/recoverable_critical_section_service.h"
@@ -87,7 +88,16 @@ public:
"collMod unblocking should always be on a time-series collection",
timeseries::getTimeseriesOptions(opCtx, ns(), true));
auto bucketNs = ns().makeTimeseriesBucketsNamespace();
- forceShardFilteringMetadataRefresh(opCtx, bucketNs);
+
+ try {
+ forceShardFilteringMetadataRefresh(opCtx, bucketNs);
+ } catch (const DBException&) {
+ // If the refresh fails, then set the shard version to UNKNOWN and let a future
+ // operation to refresh the metadata.
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+ AutoGetCollection autoColl(opCtx, bucketNs, MODE_IX);
+ CollectionShardingRuntime::get(opCtx, bucketNs)->clearFilteringMetadata(opCtx);
+ }
auto service = RecoverableCriticalSectionService::get(opCtx);
const auto reason = BSON("command"