-rw-r--r--  src/mongo/db/s/cleanup_orphaned_cmd.cpp                                      2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp     6
-rw-r--r--  src/mongo/db/s/set_shard_version_command.cpp                                11
-rw-r--r--  src/mongo/db/s/shard_filtering_metadata_refresh.cpp                         13
-rw-r--r--  src/mongo/db/s/shard_filtering_metadata_refresh.h                            6
-rw-r--r--  src/mongo/s/catalog_cache.cpp                                               44
-rw-r--r--  src/mongo/s/catalog_cache.h                                                168
-rw-r--r--  src/mongo/s/request_types/set_shard_version_request.cpp                     31
-rw-r--r--  src/mongo/s/request_types/set_shard_version_request.h                       21
-rw-r--r--  src/mongo/s/request_types/set_shard_version_request_test.cpp               104
10 files changed, 315 insertions, 91 deletions
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 1177a8dbf71..fdea6235019 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -227,7 +227,7 @@ public:
return false;
}
- forceShardFilteringMetadataRefresh(opCtx, nss);
+ forceShardFilteringMetadataRefresh(opCtx, nss, true /* forceRefreshFromThisThread */);
BSONObj stoppedAtKey;
CleanupResult cleanupResult =
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 59d2511d671..1e4609937af 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -443,7 +443,8 @@ Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const Nam
fassert(28781, ConnectionString::parse(shardEntry.getHost())),
nss,
ChunkVersion::DROPPED(),
- true);
+ true /* isAuthoritative */,
+ true /* forceRefresh */);
auto ssvResult = shard->runCommandWithFixedRetryAttempts(
opCtx,
@@ -570,7 +571,8 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
primaryShard->getConnString(),
nss,
collVersion,
- true);
+ true /* isAuthoritative */,
+ true /* forceRefresh */);
auto ssvResponse =
shard->runCommandWithFixedRetryAttempts(opCtx,
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index 6dca94efffd..967332fabc2 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -133,6 +133,12 @@ public:
LastError::get(client).disable();
const bool authoritative = cmdObj.getBoolField("authoritative");
+ // A flag that specifies whether the set shard version catalog refresh
+ // is allowed to join an in-progress refresh triggered by another
+ // thread, or whether it's required to either a) trigger its own
+ // refresh or b) wait for a refresh to be started after it has entered the
+ // getCollectionRoutingInfoWithRefresh function
+ const bool forceRefresh = cmdObj.getBoolField("forceRefresh");
const bool noConnectionVersioning = cmdObj.getBoolField("noConnectionVersioning");
ShardedConnectionInfo dummyInfo;
@@ -335,7 +341,10 @@ public:
// Step 7
- const auto status = onShardVersionMismatch(opCtx, nss, requestedVersion);
+ // Note: The forceRefresh flag controls whether we make sure to perform our
+ // own refresh or whether we're okay with joining another thread's refresh
+ const auto status = onShardVersionMismatch(
+ opCtx, nss, requestedVersion, forceRefresh /*forceRefreshFromThisThread*/);
{
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
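
The handler above only reads the flag and threads it through; the substantive change is in the catalog cache. As a rough illustration, here is a minimal standalone C++ sketch (not MongoDB code: the function names mirror the diff, but the bodies are stand-ins) of how forceRefresh flows from the setShardVersion handler down to the catalog cache refresh call, and how the defaulted parameter leaves every other call site unchanged:

#include <iostream>
#include <string>

// Stand-in for CatalogCache::getCollectionRoutingInfoWithRefresh().
void getCollectionRoutingInfoWithRefresh(const std::string& nss,
                                         bool forceRefreshFromThisThread) {
    std::cout << "refresh " << nss
              << (forceRefreshFromThisThread ? " (must not merely join an in-progress refresh)"
                                             : " (may join an in-progress refresh)")
              << '\n';
}

// Stand-in for forceShardFilteringMetadataRefresh(); the defaulted parameter is
// what lets the existing call sites in this diff stay unchanged.
void forceShardFilteringMetadataRefresh(const std::string& nss,
                                        bool forceRefreshFromThisThread = false) {
    getCollectionRoutingInfoWithRefresh(nss, forceRefreshFromThisThread);
}

int main() {
    // A setShardVersion command carrying {forceRefresh: true} ends up here:
    forceShardFilteringMetadataRefresh("db.coll", true /* forceRefreshFromThisThread */);

    // Callers that don't need causal consistency keep the old behaviour:
    forceShardFilteringMetadataRefresh("db.coll");
}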
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index 90c776cae58..04288ed16bd 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -49,7 +49,8 @@ namespace mongo {
Status onShardVersionMismatch(OperationContext* opCtx,
const NamespaceString& nss,
- ChunkVersion shardVersionReceived) noexcept {
+ ChunkVersion shardVersionReceived,
+ bool forceRefreshFromThisThread) noexcept {
invariant(!opCtx->lockState()->isLocked());
invariant(!opCtx->getClient()->isInDirectClient());
@@ -89,7 +90,7 @@ Status onShardVersionMismatch(OperationContext* opCtx,
}
try {
- forceShardFilteringMetadataRefresh(opCtx, nss);
+ forceShardFilteringMetadataRefresh(opCtx, nss, forceRefreshFromThisThread);
return Status::OK();
} catch (const DBException& ex) {
log() << "Failed to refresh metadata for collection" << nss << causedBy(redact(ex));
@@ -98,15 +99,17 @@ Status onShardVersionMismatch(OperationContext* opCtx,
}
ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx,
- const NamespaceString& nss) {
+ const NamespaceString& nss,
+ bool forceRefreshFromThisThread) {
invariant(!opCtx->lockState()->isLocked());
invariant(!opCtx->getClient()->isInDirectClient());
auto const shardingState = ShardingState::get(opCtx);
invariant(shardingState->canAcceptShardedCommands());
- const auto routingInfo = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(opCtx, nss));
+ const auto routingInfo =
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(
+ opCtx, nss, forceRefreshFromThisThread));
const auto cm = routingInfo.cm();
if (!cm) {
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.h b/src/mongo/db/s/shard_filtering_metadata_refresh.h
index 378bf1ee6b1..c4ae17afd02 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.h
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.h
@@ -55,7 +55,8 @@ class OperationContext;
*/
Status onShardVersionMismatch(OperationContext* opCtx,
const NamespaceString& nss,
- ChunkVersion shardVersionReceived) noexcept;
+ ChunkVersion shardVersionReceived,
+ bool forceRefreshFromThisThread = false) noexcept;
/**
* Unconditionally causes the shard's filtering metadata to be refreshed from the config server and
@@ -65,7 +66,8 @@ Status onShardVersionMismatch(OperationContext* opCtx,
* called with a lock
*/
ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx,
- const NamespaceString& nss);
+ const NamespaceString& nss,
+ bool forceRefreshFromThisThread = false);
/**
* Should be called when any client request on this shard generates a StaleDbVersion exception.
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 254bd358cd0..d05e6b017fb 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -178,33 +178,47 @@ StatusWith<CachedDatabaseInfo> CatalogCache::getDatabase(OperationContext* opCtx
StatusWith<CachedCollectionRoutingInfo> CatalogCache::getCollectionRoutingInfo(
OperationContext* opCtx, const NamespaceString& nss) {
+ return _getCollectionRoutingInfo(opCtx, nss).statusWithInfo;
+}
+
+CatalogCache::RefreshResult CatalogCache::_getCollectionRoutingInfo(OperationContext* opCtx,
+ const NamespaceString& nss) {
return _getCollectionRoutingInfoAt(opCtx, nss, boost::none);
}
+
StatusWith<CachedCollectionRoutingInfo> CatalogCache::getCollectionRoutingInfoAt(
OperationContext* opCtx, const NamespaceString& nss, Timestamp atClusterTime) {
- return _getCollectionRoutingInfoAt(opCtx, nss, atClusterTime);
+ return _getCollectionRoutingInfoAt(opCtx, nss, atClusterTime).statusWithInfo;
}
-StatusWith<CachedCollectionRoutingInfo> CatalogCache::_getCollectionRoutingInfoAt(
+CatalogCache::RefreshResult CatalogCache::_getCollectionRoutingInfoAt(
OperationContext* opCtx, const NamespaceString& nss, boost::optional<Timestamp> atClusterTime) {
+ // This default value can cause a single unnecessary extra refresh if this thread did perform
+ // the refresh but the refresh failed, or if the database or collection was not found, but
+ // only if the caller is getCollectionRoutingInfoWithRefresh with the parameter
+ // forceRefreshFromThisThread set to true
+ RefreshAction refreshActionTaken(RefreshAction::kDidNotPerformRefresh);
while (true) {
const auto swDbInfo = getDatabase(opCtx, nss.db());
if (!swDbInfo.isOK()) {
- return swDbInfo.getStatus();
+ return {swDbInfo.getStatus(), refreshActionTaken};
}
+
const auto dbInfo = std::move(swDbInfo.getValue());
stdx::unique_lock<stdx::mutex> ul(_mutex);
const auto itDb = _collectionsByDb.find(nss.db());
if (itDb == _collectionsByDb.end()) {
- return {CachedCollectionRoutingInfo(nss, dbInfo, nullptr)};
+ return {CachedCollectionRoutingInfo(nss, dbInfo, nullptr), refreshActionTaken};
}
+
const auto itColl = itDb->second.find(nss.ns());
if (itColl == itDb->second.end()) {
- return {CachedCollectionRoutingInfo(nss, dbInfo, nullptr)};
+ return {CachedCollectionRoutingInfo(nss, dbInfo, nullptr), refreshActionTaken};
}
+
auto& collEntry = itColl->second;
if (collEntry->needsRefresh) {
@@ -213,6 +227,7 @@ StatusWith<CachedCollectionRoutingInfo> CatalogCache::_getCollectionRoutingInfoA
refreshNotification = (collEntry->refreshCompletionNotification =
std::make_shared<Notification<Status>>());
_scheduleCollectionRefresh(ul, collEntry, nss, 1);
+ refreshActionTaken = RefreshAction::kPerformedRefresh;
}
// Wait on the notification outside of the mutex
@@ -236,7 +251,7 @@ StatusWith<CachedCollectionRoutingInfo> CatalogCache::_getCollectionRoutingInfoA
}();
if (!refreshStatus.isOK()) {
- return refreshStatus;
+ return {refreshStatus, refreshActionTaken};
}
// Once the refresh is complete, loop around to get the latest value
@@ -245,7 +260,7 @@ StatusWith<CachedCollectionRoutingInfo> CatalogCache::_getCollectionRoutingInfoA
auto cm = std::make_shared<ChunkManager>(collEntry->routingInfo, atClusterTime);
- return {CachedCollectionRoutingInfo(nss, dbInfo, std::move(cm))};
+ return {CachedCollectionRoutingInfo(nss, dbInfo, std::move(cm)), refreshActionTaken};
}
}
@@ -256,9 +271,20 @@ StatusWith<CachedDatabaseInfo> CatalogCache::getDatabaseWithRefresh(OperationCon
}
StatusWith<CachedCollectionRoutingInfo> CatalogCache::getCollectionRoutingInfoWithRefresh(
- OperationContext* opCtx, const NamespaceString& nss) {
+ OperationContext* opCtx, const NamespaceString& nss, bool forceRefreshFromThisThread) {
invalidateShardedCollection(nss);
- return getCollectionRoutingInfo(opCtx, nss);
+ auto refreshResult = _getCollectionRoutingInfo(opCtx, nss);
+ // We want to ensure that we don't join an in-progress refresh because that
+ // could violate causal consistency for this client. We don't need to actually perform the
+ // refresh ourselves but we do need the refresh to begin *after* this function is
+ // called, so calling it twice is enough regardless of what happens the
+ // second time. See SERVER-33954 for reasoning.
+ if (forceRefreshFromThisThread &&
+ refreshResult.actionTaken == RefreshAction::kDidNotPerformRefresh) {
+ invalidateShardedCollection(nss);
+ refreshResult = _getCollectionRoutingInfo(opCtx, nss);
+ }
+ return refreshResult.statusWithInfo;
}
StatusWith<CachedCollectionRoutingInfo> CatalogCache::getShardedCollectionRoutingInfoWithRefresh(
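
The comment above is the heart of the change. The following self-contained sketch (not MongoDB code: ToyCache is an invented stand-in for the CatalogCache, which really schedules refreshes asynchronously and returns routing info) shows the same control flow: refresh once, and if that call only joined a refresh another thread had already started, refresh a second time so that at least one refresh is guaranteed to have begun after the caller entered the function.

#include <iostream>
#include <utility>

enum class RefreshAction { kPerformedRefresh, kDidNotPerformRefresh };

struct ToyCache {
    bool refreshAlreadyInFlight = false;

    // Stand-in for _getCollectionRoutingInfo(): reports whether this call
    // scheduled its own refresh or merely waited on one already in flight.
    std::pair<int /*routingInfo*/, RefreshAction> getRoutingInfo() {
        if (refreshAlreadyInFlight) {
            refreshAlreadyInFlight = false;  // the in-flight refresh completes
            return {1, RefreshAction::kDidNotPerformRefresh};
        }
        return {2, RefreshAction::kPerformedRefresh};
    }
};

int getRoutingInfoWithRefresh(ToyCache& cache, bool forceRefreshFromThisThread) {
    auto result = cache.getRoutingInfo();
    if (forceRefreshFromThisThread &&
        result.second == RefreshAction::kDidNotPerformRefresh) {
        // The refresh we joined may have started before this call, so it cannot
        // guarantee causal consistency; run one more refresh, which necessarily
        // starts after we entered this function.
        result = cache.getRoutingInfo();
    }
    return result.first;
}

int main() {
    ToyCache cacheA{true};  // another thread's refresh is already in flight
    std::cout << getRoutingInfoWithRefresh(cacheA, true) << '\n';   // refreshes twice -> 2

    ToyCache cacheB{true};
    std::cout << getRoutingInfoWithRefresh(cacheB, false) << '\n';  // joins the in-flight one -> 1
}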
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index 483f86ef164..158e8d52a4f 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -52,6 +52,66 @@ class OperationContext;
static constexpr int kMaxNumStaleVersionRetries = 10;
/**
+ * Constructed exclusively by the CatalogCache, contains a reference to the cached information for
+ * the specified database.
+ */
+class CachedDatabaseInfo {
+public:
+ const ShardId& primaryId() const;
+ std::shared_ptr<Shard> primary() const {
+ return _primaryShard;
+ };
+
+ bool shardingEnabled() const;
+ boost::optional<DatabaseVersion> databaseVersion() const;
+
+private:
+ friend class CatalogCache;
+ CachedDatabaseInfo(DatabaseType dbt, std::shared_ptr<Shard> primaryShard);
+
+ DatabaseType _dbt;
+ std::shared_ptr<Shard> _primaryShard;
+};
+
+/**
+ * Constructed exclusively by the CatalogCache.
+ *
+ * This RoutingInfo can be considered a "package" of routing info for the database and for the
+ * collection. Once unsharded collections are treated as sharded collections with a single chunk,
+ * they will also have a ChunkManager with a "chunk distribution." At that point, this "package" can
+ * be dismantled: routing for commands that route by database can directly retrieve the
+ * CachedDatabaseInfo, while routing for commands that route by collection can directly retrieve the
+ * ChunkManager.
+ */
+class CachedCollectionRoutingInfo {
+public:
+ CachedDatabaseInfo db() const {
+ return _db;
+ };
+
+ std::shared_ptr<ChunkManager> cm() const {
+ return _cm;
+ }
+
+private:
+ friend class CatalogCache;
+ friend class CachedDatabaseInfo;
+
+ CachedCollectionRoutingInfo(NamespaceString nss,
+ CachedDatabaseInfo db,
+ std::shared_ptr<ChunkManager> cm);
+
+ NamespaceString _nss;
+
+ // Copy of the database's cached info.
+ CachedDatabaseInfo _db;
+
+ // Shared reference to the collection's cached chunk distribution if sharded, otherwise null.
+ // This is a shared reference rather than a copy because the chunk distribution can be large.
+ std::shared_ptr<ChunkManager> _cm;
+};
+
+/**
* This is the root of the "read-only" hierarchy of cached catalog metadata. It is read only
* in the sense that it only reads from the persistent store, but never writes to it. Instead
* writes happen through the ShardingCatalogManager and the cache hierarchy needs to be invalidated.
@@ -104,9 +164,21 @@ public:
/**
* Same as getCollectionRoutingInfo above, but in addition causes the namespace to be refreshed.
+ *
+ * When forceRefreshFromThisThread is false, it's possible for this call to
+ * join an ongoing refresh from another thread. When it is true, the call
+ * checks whether it joined another thread's refresh and, if so, forces one
+ * more refresh of its own, which is necessary in cases where calls to
+ * getCollectionRoutingInfoWithRefresh must be causally consistent
+ *
+ * TODO: Remove this parameter in favor of using collection creation time +
+ * collection version to decide when a refresh is necessary and provide
+ * proper causal consistency
*/
StatusWith<CachedCollectionRoutingInfo> getCollectionRoutingInfoWithRefresh(
- OperationContext* opCtx, const NamespaceString& nss);
+ OperationContext* opCtx,
+ const NamespaceString& nss,
+ bool forceRefreshFromThisThread = false);
/**
* Same as getCollectionRoutingInfoWithRefresh above, but in addition returns a
@@ -227,8 +299,40 @@ private:
std::shared_ptr<CollectionRoutingInfoEntry> collEntry,
NamespaceString const& nss,
int refreshAttempt);
+ /**
+ * Used as a flag to indicate whether or not this thread performed its own
+ * refresh for certain helper functions
+ *
+ * kPerformedRefresh is used only when the calling thread performed the
+ * refresh *itself*
+ *
+ * kDidNotPerformRefresh is used either when there was an error or when
+ * this thread joined an ongoing refresh
+ */
+ enum class RefreshAction {
+ kPerformedRefresh,
+ kDidNotPerformRefresh,
+ };
+
+ /**
+ * Return type for helper functions performing refreshes so that they can
+ * indicate both status and whether or not this thread performed its own
+ * refresh
+ */
+ struct RefreshResult {
+ // Status containing result of refresh
+ StatusWith<CachedCollectionRoutingInfo> statusWithInfo;
+ RefreshAction actionTaken;
+ };
- StatusWith<CachedCollectionRoutingInfo> _getCollectionRoutingInfoAt(
+ /**
+ * Helper function used when we need the refresh action taken (e.g. when we
+ * want to force refresh)
+ */
+ CatalogCache::RefreshResult _getCollectionRoutingInfo(OperationContext* opCtx,
+ const NamespaceString& nss);
+
+ CatalogCache::RefreshResult _getCollectionRoutingInfoAt(
OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<Timestamp> atClusterTime);
@@ -283,64 +387,4 @@ private:
CollectionsByDbMap _collectionsByDb;
};
-/**
- * Constructed exclusively by the CatalogCache, contains a reference to the cached information for
- * the specified database.
- */
-class CachedDatabaseInfo {
-public:
- const ShardId& primaryId() const;
- std::shared_ptr<Shard> primary() const {
- return _primaryShard;
- };
-
- bool shardingEnabled() const;
- boost::optional<DatabaseVersion> databaseVersion() const;
-
-private:
- friend class CatalogCache;
- CachedDatabaseInfo(DatabaseType dbt, std::shared_ptr<Shard> primaryShard);
-
- DatabaseType _dbt;
- std::shared_ptr<Shard> _primaryShard;
-};
-
-/**
- * Constructed exclusively by the CatalogCache.
- *
- * This RoutingInfo can be considered a "package" of routing info for the database and for the
- * collection. Once unsharded collections are treated as sharded collections with a single chunk,
- * they will also have a ChunkManager with a "chunk distribution." At that point, this "package" can
- * be dismantled: routing for commands that route by database can directly retrieve the
- * CachedDatabaseInfo, while routing for commands that route by collection can directly retrieve the
- * ChunkManager.
- */
-class CachedCollectionRoutingInfo {
-public:
- CachedDatabaseInfo db() const {
- return _db;
- };
-
- std::shared_ptr<ChunkManager> cm() const {
- return _cm;
- }
-
-private:
- friend class CatalogCache;
- friend class CachedDatabaseInfo;
-
- CachedCollectionRoutingInfo(NamespaceString nss,
- CachedDatabaseInfo db,
- std::shared_ptr<ChunkManager> cm);
-
- NamespaceString _nss;
-
- // Copy of the database's cached info.
- CachedDatabaseInfo _db;
-
- // Shared reference to the collection's cached chunk distribution if sharded, otherwise null.
- // This is a shared reference rather than a copy because the chunk distribution can be large.
- std::shared_ptr<ChunkManager> _cm;
-};
-
} // namespace mongo
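
The RefreshResult/RefreshAction pair declared above is a small "result plus what happened" idiom. A minimal sketch (not MongoDB code; the std::string below stands in for StatusWith<CachedCollectionRoutingInfo>) of why it stays private: internal helpers expose the action taken so the force-refresh path can retry, while the public accessors keep their old return type by unwrapping statusWithInfo.

#include <iostream>
#include <string>

enum class RefreshAction { kPerformedRefresh, kDidNotPerformRefresh };

struct RefreshResult {
    std::string statusWithInfo;  // stand-in for StatusWith<CachedCollectionRoutingInfo>
    RefreshAction actionTaken;
};

// Private-style helper: callers that care (the force-refresh path) inspect
// actionTaken and may decide to run the refresh again.
RefreshResult getRoutingInfoInternal() {
    return {"routing info", RefreshAction::kPerformedRefresh};
}

// Public-style wrapper: existing callers keep seeing only the result, so the
// public return type of getCollectionRoutingInfo() does not change.
std::string getRoutingInfo() {
    return getRoutingInfoInternal().statusWithInfo;
}

int main() {
    std::cout << getRoutingInfo() << '\n';
}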
diff --git a/src/mongo/s/request_types/set_shard_version_request.cpp b/src/mongo/s/request_types/set_shard_version_request.cpp
index e3c8b4bb965..68f72f80132 100644
--- a/src/mongo/s/request_types/set_shard_version_request.cpp
+++ b/src/mongo/s/request_types/set_shard_version_request.cpp
@@ -46,6 +46,7 @@ const char kConfigServer[] = "configdb";
const char kShardName[] = "shard";
const char kShardConnectionString[] = "shardHost";
const char kInit[] = "init";
+const char kForceRefresh[] = "forceRefresh";
const char kAuthoritative[] = "authoritative";
const char kNoConnectionVersioning[] = "noConnectionVersioning";
@@ -65,9 +66,11 @@ SetShardVersionRequest::SetShardVersionRequest(ConnectionString configServer,
ConnectionString shardConnectionString,
NamespaceString nss,
ChunkVersion version,
- bool isAuthoritative)
+ bool isAuthoritative,
+ bool forceRefresh)
: _init(false),
_isAuthoritative(isAuthoritative),
+ _forceRefresh(forceRefresh),
_configServer(std::move(configServer)),
_shardName(std::move(shardName)),
_shardCS(std::move(shardConnectionString)),
@@ -89,10 +92,16 @@ SetShardVersionRequest SetShardVersionRequest::makeForVersioning(
const ConnectionString& shardConnectionString,
const NamespaceString& nss,
const ChunkVersion& nssVersion,
- bool isAuthoritative) {
+ bool isAuthoritative,
+ bool forceRefresh) {
invariant(nss.isValid());
- return SetShardVersionRequest(
- configServer, shardName, shardConnectionString, nss, nssVersion, isAuthoritative);
+ return SetShardVersionRequest(configServer,
+ shardName,
+ shardConnectionString,
+ nss,
+ nssVersion,
+ isAuthoritative,
+ forceRefresh);
}
SetShardVersionRequest SetShardVersionRequest::makeForVersioningNoPersist(
@@ -101,8 +110,10 @@ SetShardVersionRequest SetShardVersionRequest::makeForVersioningNoPersist(
const ConnectionString& shard,
const NamespaceString& nss,
const ChunkVersion& nssVersion,
- bool isAuthoritative) {
- auto ssv = makeForVersioning(configServer, shardName, shard, nss, nssVersion, isAuthoritative);
+ bool isAuthoritative,
+ bool forceRefresh) {
+ auto ssv = makeForVersioning(
+ configServer, shardName, shard, nss, nssVersion, isAuthoritative, forceRefresh);
ssv._noConnectionVersioning = true;
return ssv;
@@ -141,6 +152,13 @@ StatusWith<SetShardVersionRequest> SetShardVersionRequest::parseFromBSON(const B
{
Status status = bsonExtractBooleanFieldWithDefault(
+ cmdObj, kForceRefresh, false, &request._forceRefresh);
+ if (!status.isOK())
+ return status;
+ }
+
+ {
+ Status status = bsonExtractBooleanFieldWithDefault(
cmdObj, kAuthoritative, false, &request._isAuthoritative);
if (!status.isOK())
return status;
@@ -191,6 +209,7 @@ BSONObj SetShardVersionRequest::toBSON() const {
cmdBuilder.append(kCmdName, _init ? "" : _nss.get().ns());
cmdBuilder.append(kInit, _init);
+ cmdBuilder.append(kForceRefresh, _forceRefresh);
cmdBuilder.append(kAuthoritative, _isAuthoritative);
// 'configdb' field is only included for v3.4 backwards compatibility
cmdBuilder.append(kConfigServer, _configServer.toString());
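
The serialization above always writes the new field, while parseFromBSON treats a missing forceRefresh as false. A short standalone sketch (not MongoDB code; a std::map stands in for the BSON command document, and the helper mirrors bsonExtractBooleanFieldWithDefault) of why that combination stays backwards compatible with senders that predate the flag:

#include <cassert>
#include <map>
#include <string>

using ToyDoc = std::map<std::string, bool>;  // stand-in for a BSON command document

ToyDoc serialize(bool forceRefresh) {
    // toBSON() always appends the field, like the code above.
    return {{"forceRefresh", forceRefresh}, {"authoritative", false}};
}

bool parseForceRefresh(const ToyDoc& doc) {
    // Mirrors bsonExtractBooleanFieldWithDefault(cmdObj, kForceRefresh, false, ...):
    // a missing field parses as false.
    auto it = doc.find("forceRefresh");
    return it == doc.end() ? false : it->second;
}

int main() {
    assert(parseForceRefresh(serialize(true)));    // new sender, flag set
    assert(!parseForceRefresh(serialize(false)));  // new sender, flag clear

    ToyDoc fromOldSender{{"authoritative", true}};  // old sender never sets the field...
    assert(!parseForceRefresh(fromOldSender));      // ...and still parses as false
    return 0;
}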
diff --git a/src/mongo/s/request_types/set_shard_version_request.h b/src/mongo/s/request_types/set_shard_version_request.h
index 0fb5a149d93..7ebdf9de54a 100644
--- a/src/mongo/s/request_types/set_shard_version_request.h
+++ b/src/mongo/s/request_types/set_shard_version_request.h
@@ -73,7 +73,8 @@ public:
const ConnectionString& shard,
const NamespaceString& nss,
const ChunkVersion& nssVersion,
- bool isAuthoritative);
+ bool isAuthoritative,
+ bool forceRefresh = false);
/**
* Constructs a new set shard version request, which is of the "versioning" type, meaning it has
@@ -88,7 +89,8 @@ public:
const ConnectionString& shard,
const NamespaceString& nss,
const ChunkVersion& nssVersion,
- bool isAuthoritative);
+ bool isAuthoritative,
+ bool forceRefresh = false);
/**
* Parses an SSV request from a set shard version command.
@@ -118,6 +120,17 @@ public:
return _isAuthoritative;
}
+ /**
+ * Returns whether the set shard version catalog refresh is allowed to join
+ * an in-progress refresh triggered by another thread, or whether it's
+ * required to either a) trigger its own refresh or b) wait for a refresh
+ * to be started after it has entered the getCollectionRoutingInfoWithRefresh function
+ */
+ bool shouldForceRefresh() const {
+ return _forceRefresh;
+ }
+
+
const ShardId& getShardName() const {
return _shardName;
}
@@ -156,12 +169,14 @@ private:
ConnectionString shardConnectionString,
NamespaceString nss,
ChunkVersion version,
- bool isAuthoritative);
+ bool isAuthoritative,
+ bool forceRefresh = false);
SetShardVersionRequest();
bool _init{false};
bool _isAuthoritative{false};
+ bool _forceRefresh{false};
bool _noConnectionVersioning{false};
// Only required for v3.4 backwards compatibility.
diff --git a/src/mongo/s/request_types/set_shard_version_request_test.cpp b/src/mongo/s/request_types/set_shard_version_request_test.cpp
index 8f834c84c34..5163b6f1779 100644
--- a/src/mongo/s/request_types/set_shard_version_request_test.cpp
+++ b/src/mongo/s/request_types/set_shard_version_request_test.cpp
@@ -121,6 +121,7 @@ TEST(SetShardVersionRequest, ParseFull) {
<< chunkVersion.epoch())));
ASSERT(!request.isInit());
+ ASSERT(!request.shouldForceRefresh());
ASSERT(!request.isAuthoritative());
ASSERT(!request.getNoConnectionVersioning());
ASSERT_EQ(request.getShardName(), "TestShard");
@@ -149,6 +150,7 @@ TEST(SetShardVersionRequest, ParseFullWithAuthoritative) {
<< true)));
ASSERT(!request.isInit());
+ ASSERT(!request.shouldForceRefresh());
ASSERT(request.isAuthoritative());
ASSERT(!request.getNoConnectionVersioning());
ASSERT_EQ(request.getShardName(), "TestShard");
@@ -177,6 +179,7 @@ TEST(SetShardVersionRequest, ParseFullNoConnectionVersioning) {
<< true)));
ASSERT(!request.isInit());
+ ASSERT(!request.shouldForceRefresh());
ASSERT(!request.isAuthoritative());
ASSERT(request.getNoConnectionVersioning());
ASSERT_EQ(request.getShardName(), "TestShard");
@@ -226,6 +229,7 @@ TEST(SetShardVersionRequest, ToSSVCommandInit) {
SetShardVersionRequest::makeForInit(configCS, ShardId("TestShard"), shardCS);
ASSERT(ssv.isInit());
+ ASSERT(!ssv.shouldForceRefresh());
ASSERT(ssv.isAuthoritative());
ASSERT(!ssv.getNoConnectionVersioning());
ASSERT_EQ(ssv.getShardName(), "TestShard");
@@ -236,6 +240,8 @@ TEST(SetShardVersionRequest, ToSSVCommandInit) {
<< ""
<< "init"
<< true
+ << "forceRefresh"
+ << false
<< "authoritative"
<< true
<< "configdb"
@@ -255,6 +261,7 @@ TEST(SetShardVersionRequest, ToSSVCommandFull) {
configCS, ShardId("TestShard"), shardCS, NamespaceString("db.coll"), chunkVersion, false);
ASSERT(!ssv.isInit());
+ ASSERT(!ssv.shouldForceRefresh());
ASSERT(!ssv.isAuthoritative());
ASSERT(!ssv.getNoConnectionVersioning());
ASSERT_EQ(ssv.getShardName(), "TestShard");
@@ -268,6 +275,8 @@ TEST(SetShardVersionRequest, ToSSVCommandFull) {
<< "db.coll"
<< "init"
<< false
+ << "forceRefresh"
+ << false
<< "authoritative"
<< false
<< "configdb"
@@ -289,6 +298,7 @@ TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
configCS, ShardId("TestShard"), shardCS, NamespaceString("db.coll"), chunkVersion, true);
ASSERT(!ssv.isInit());
+ ASSERT(!ssv.shouldForceRefresh());
ASSERT(ssv.isAuthoritative());
ASSERT(!ssv.getNoConnectionVersioning());
ASSERT_EQ(ssv.getShardName(), "TestShard");
@@ -302,6 +312,8 @@ TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
<< "db.coll"
<< "init"
<< false
+ << "forceRefresh"
+ << false
<< "authoritative"
<< true
<< "configdb"
@@ -316,6 +328,49 @@ TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
<< chunkVersion.epoch()));
}
+TEST(SetShardVersionRequest, ToSSVCommandFullForceRefresh) {
+ const ChunkVersion chunkVersion(1, 2, OID::gen());
+
+ SetShardVersionRequest ssv =
+ SetShardVersionRequest::makeForVersioning(configCS,
+ ShardId("TestShard"),
+ shardCS,
+ NamespaceString("db.coll"),
+ chunkVersion,
+ false,
+ true);
+
+ ASSERT(!ssv.isInit());
+ ASSERT(ssv.shouldForceRefresh());
+ ASSERT(!ssv.isAuthoritative());
+ ASSERT(!ssv.getNoConnectionVersioning());
+ ASSERT_EQ(ssv.getShardName(), "TestShard");
+ ASSERT_EQ(ssv.getShardConnectionString().toString(), shardCS.toString());
+ ASSERT_EQ(ssv.getNS().ns(), "db.coll");
+ ASSERT_BSONOBJ_EQ(ssv.getNSVersion().toBSONWithPrefix("version"),
+ chunkVersion.toBSONWithPrefix("version"));
+
+ ASSERT_BSONOBJ_EQ(ssv.toBSON(),
+ BSON("setShardVersion"
+ << "db.coll"
+ << "init"
+ << false
+ << "forceRefresh"
+ << true
+ << "authoritative"
+ << false
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch()));
+}
+
TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioning) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
@@ -323,6 +378,7 @@ TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioning) {
configCS, ShardId("TestShard"), shardCS, NamespaceString("db.coll"), chunkVersion, true);
ASSERT(!ssv.isInit());
+ ASSERT(!ssv.shouldForceRefresh());
ASSERT(ssv.isAuthoritative());
ASSERT(ssv.getNoConnectionVersioning());
ASSERT_EQ(ssv.getShardName(), "TestShard");
@@ -336,6 +392,8 @@ TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioning) {
<< "db.coll"
<< "init"
<< false
+ << "forceRefresh"
+ << false
<< "authoritative"
<< true
<< "configdb"
@@ -352,5 +410,51 @@ TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioning) {
<< true));
}
+TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioningForceRefresh) {
+ const ChunkVersion chunkVersion(1, 2, OID::gen());
+
+ SetShardVersionRequest ssv =
+ SetShardVersionRequest::makeForVersioningNoPersist(configCS,
+ ShardId("TestShard"),
+ shardCS,
+ NamespaceString("db.coll"),
+ chunkVersion,
+ false,
+ true);
+
+ ASSERT(!ssv.isInit());
+ ASSERT(ssv.shouldForceRefresh());
+ ASSERT(!ssv.isAuthoritative());
+ ASSERT(ssv.getNoConnectionVersioning());
+ ASSERT_EQ(ssv.getShardName(), "TestShard");
+ ASSERT_EQ(ssv.getShardConnectionString().toString(), shardCS.toString());
+ ASSERT_EQ(ssv.getNS().ns(), "db.coll");
+ ASSERT_BSONOBJ_EQ(ssv.getNSVersion().toBSONWithPrefix("version"),
+ chunkVersion.toBSONWithPrefix("version"));
+
+ ASSERT_BSONOBJ_EQ(ssv.toBSON(),
+ BSON("setShardVersion"
+ << "db.coll"
+ << "init"
+ << false
+ << "forceRefresh"
+ << true
+ << "authoritative"
+ << false
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch()
+ << "noConnectionVersioning"
+ << true));
+}
+
+
} // namespace
} // namespace mongo