summary | refs | log | tree | commit | diff
path: root/src/mongo/s
diff options
context:
space:
mode:
author    Tommaso Tocci <tommaso.tocci@mongodb.com>    2022-07-21 16:52:32 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>    2022-07-21 18:10:04 +0000
commit    808c61d3153241613acabe1da2058c26d434f152 (patch)
tree      2c9eec7b810c7711bf677693616eff08a30b8079 /src/mongo/s
parent    75a7e2a20b345bcca9601756f64a84ffbcdaf4b1 (diff)
download  mongo-808c61d3153241613acabe1da2058c26d434f152.tar.gz
SERVER-68171 Do not initialize the ShardRegistry with an empty placeholder
Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/client/shard_registry.cpp | 61
-rw-r--r--  src/mongo/s/client/shard_registry.h   |  3
2 files changed, 25 insertions, 39 deletions
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index a2a50cf5b20..bf9c10f58f3 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -119,34 +119,38 @@ ShardRegistry::Cache::LookupResult ShardRegistry::_lookup(OperationContext* opCt
const Cache::ValueHandle& cachedData,
const Time& timeInStore) {
invariant(key == _kSingleton);
- invariant(cachedData, "ShardRegistry::_lookup called but the cache is empty");
+
+ auto lastForcedReloadIncrement = _forceReloadIncrement.load();
LOGV2_DEBUG(4620250,
2,
"Starting ShardRegistry::_lookup",
- "cachedData"_attr = cachedData->toBSON(),
- "cachedData.getTime()"_attr = cachedData.getTime(),
- "timeInStore"_attr = timeInStore);
+ "cachedData"_attr = cachedData ? cachedData->toBSON() : BSONObj{},
+ "cachedData.getTime()"_attr = cachedData ? cachedData.getTime() : Time{},
+ "timeInStore"_attr = timeInStore,
+ "lastForcedReloadIncrement"_attr = lastForcedReloadIncrement);
// Check if we need to refresh from the configsvrs. If so, then do that and get the results,
// otherwise (this is a lookup only to incorporate updated connection strings from the RSM),
// then get the equivalent values from the previously cached data.
- auto [returnData, returnTopologyTime, returnForceReloadIncrement, removedShards] =
- [&]() -> std::tuple<ShardRegistryData, Timestamp, Increment, ShardRegistryData::ShardMap> {
- if (timeInStore.topologyTime > cachedData.getTime().topologyTime ||
- timeInStore.forceReloadIncrement > cachedData.getTime().forceReloadIncrement) {
+ auto [returnData, returnTopologyTime, removedShards] =
+ [&]() -> std::tuple<ShardRegistryData, Timestamp, ShardRegistryData::ShardMap> {
+ if (!cachedData) {
+ auto [reloadedData, maxTopologyTime] =
+ ShardRegistryData::createFromCatalogClient(opCtx, _shardFactory.get());
+
+ return {std::move(reloadedData), std::move(maxTopologyTime), {}};
+ } else if (timeInStore.topologyTime > cachedData.getTime().topologyTime ||
+ lastForcedReloadIncrement > cachedData.getTime().forceReloadIncrement) {
auto [reloadedData, maxTopologyTime] =
ShardRegistryData::createFromCatalogClient(opCtx, _shardFactory.get());
auto [mergedData, removedShards] =
ShardRegistryData::mergeExisting(*cachedData, reloadedData);
- return {mergedData, maxTopologyTime, timeInStore.forceReloadIncrement, removedShards};
+ return {std::move(mergedData), std::move(maxTopologyTime), std::move(removedShards)};
} else {
- return {*cachedData,
- cachedData.getTime().topologyTime,
- cachedData.getTime().forceReloadIncrement,
- {}};
+ return {*cachedData, cachedData.getTime().topologyTime, {}};
}
}();
@@ -181,7 +185,7 @@ ShardRegistry::Cache::LookupResult ShardRegistry::_lookup(OperationContext* opCt
}
}
- Time returnTime{returnTopologyTime, rsmIncrementForConnStrings, returnForceReloadIncrement};
+ Time returnTime{returnTopologyTime, rsmIncrementForConnStrings, lastForcedReloadIncrement};
LOGV2_DEBUG(4620251,
2,
"Finished ShardRegistry::_lookup",
@@ -417,8 +421,9 @@ void ShardRegistry::toBSON(BSONObjBuilder* result) const {
BSONObjBuilder map;
BSONObjBuilder hosts;
BSONObjBuilder connStrings;
- auto data = _getCachedData();
- data->toBSON(&map, &hosts, &connStrings);
+ if (auto data = _getCachedData()) {
+ data->toBSON(&map, &hosts, &connStrings);
+ }
{
stdx::lock_guard<Latch> lk(_mutex);
_configShardData.toBSON(&map, &hosts, &connStrings);
@@ -494,26 +499,7 @@ void ShardRegistry::updateReplicaSetOnConfigServer(ServiceContext* serviceContex
}
}
-// Inserts the initial empty ShardRegistryData into the cache, if the cache is empty.
-void ShardRegistry::_initializeCacheIfNecessary() const {
- if (!_cache->peekLatestCached(_kSingleton)) {
- stdx::lock_guard<Latch> lk(_mutex);
- if (!_cache->peekLatestCached(_kSingleton)) {
- LOGV2_DEBUG(
- 6471801, 1, "Initializing Shard Registry's cache to an empty set of shards");
- _cache->insertOrAssign(_kSingleton, {}, Date_t::now(), Time());
- }
- }
-}
-
SharedSemiFuture<ShardRegistry::Cache::ValueHandle> ShardRegistry::_getDataAsync() {
- _initializeCacheIfNecessary();
-
- // If the forceReloadIncrement is 0, then we've never done a lookup, so we should be sure to do
- // one now.
- Increment uninitializedIncrement{0};
- _forceReloadIncrement.compareAndSwap(&uninitializedIncrement, 1);
-
// Update the time the cache should be aiming for.
auto now = VectorClock::get(_service)->getTime();
// The topologyTime should be advanced to the gossiped topologyTime.
@@ -537,7 +523,6 @@ bool ShardRegistry::isConfigServer(const HostAndPort& host) const {
// TODO SERVER-50206: Remove usage of these non-causally consistent accessors.
ShardRegistry::Cache::ValueHandle ShardRegistry::_getCachedData() const {
- _initializeCacheIfNecessary();
return _cache->peekLatestCached(_kSingleton);
}
@@ -550,6 +535,10 @@ std::shared_ptr<Shard> ShardRegistry::getShardForHostNoReload(const HostAndPort&
}
}
auto data = _getCachedData();
+ if (!data) {
+ return nullptr;
+ }
+
return data->findByHostAndPort(host);
}
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index d606a3c0976..2d7286e0504 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -461,9 +461,6 @@ private:
AtomicWord<bool> _isInitialized{false};
- // The ShardRegistry is "up" once there has been a successful refresh.
- AtomicWord<bool> _isUp{false};
-
// Set to true in shutdown call to prevent calling it twice.
AtomicWord<bool> _isShutdown{false};