author     Sergi Mateo Bellido <sergi.mateo-bellido@mongodb.com>  2021-05-18 10:19:04 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>       2021-05-21 08:37:39 +0000
commit     fb131f649e238c388b0af118d4c716ea5d182f28 (patch)
tree       ba827f31dfda8c904551ea1333376edc3e6dc416 /src/mongo
parent     b779af6606bd4133ed29904f8db737872a527832 (diff)
download   mongo-fb131f649e238c388b0af118d4c716ea5d182f28.tar.gz
SERVER-57022 The CollectionShardingRuntime should be updated when there is an update of the metadata format
(cherry picked from commit 6edab4953a0186e0d062d444682f9f4135a0a4b8)
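The core of the change is how two versions that compare equal on epoch/major/minor are treated: after this patch, an otherwise-equal incoming version is ignored only when its timestamp also matches, so a switch between the old and the new (timestamped) metadata format still forces the cached sharding metadata to be replaced. The standalone sketch below illustrates that predicate under simplified assumptions; Timestamp, SimpleVersion, and shouldIgnoreRemote are illustrative stand-ins, not the ChunkVersion/DatabaseVersion API that the patch itself modifies.

    // Minimal sketch of the "ignore incoming metadata?" rule introduced by this
    // patch. All types here are simplified stand-ins for illustration only.
    #include <cassert>
    #include <optional>

    struct Timestamp {
        unsigned long long secs = 0;
        unsigned int inc = 0;
        bool operator==(const Timestamp& o) const {
            return secs == o.secs && inc == o.inc;
        }
    };

    struct SimpleVersion {
        // Major/minor pair, as in a chunk or database version.
        unsigned int major = 0;
        unsigned int minor = 0;
        // Present only when the metadata uses the new, timestamped format.
        std::optional<Timestamp> timestamp;

        bool operator==(const SimpleVersion& o) const {
            return major == o.major && minor == o.minor;
        }
        bool isOlderThan(const SimpleVersion& o) const {
            return major < o.major || (major == o.major && minor < o.minor);
        }
    };

    // Before the patch the check was effectively "isOlderOrEqualThan": an equal
    // remote version was always ignored. After the patch an equal version is
    // ignored only if the timestamps also match, so a metadata-format change
    // (timestamp appearing or disappearing) still triggers an update.
    bool shouldIgnoreRemote(const SimpleVersion& remote, const SimpleVersion& active) {
        return remote.isOlderThan(active) ||
            (remote == active && remote.timestamp == active.timestamp);
    }

    int main() {
        SimpleVersion active{1, 1, std::nullopt};
        SimpleVersion sameNoTimestamp{1, 1, std::nullopt};
        SimpleVersion sameWithTimestamp{1, 1, Timestamp{42, 0}};

        assert(shouldIgnoreRemote(sameNoTimestamp, active));    // identical: ignore
        assert(!shouldIgnoreRemote(sameWithTimestamp, active)); // format changed: refresh
        return 0;
    }

The same pattern appears in each touched site in the diff below: MetadataManager::setFilteringMetadata, onDbVersionMismatch, onShardVersionMismatch, and forceDatabaseRefresh.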
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/s/SConscript                            |   1
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime_test.cpp  | 185
-rw-r--r--  src/mongo/db/s/database_sharding_state_test.cpp      | 225
-rw-r--r--  src/mongo/db/s/metadata_manager.cpp                  |   9
-rw-r--r--  src/mongo/db/s/shard_filtering_metadata_refresh.cpp  |  28
-rw-r--r--  src/mongo/s/database_version.h                       |   2
6 files changed, 439 insertions, 11 deletions
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 0c50ffb75ff..cff37406ec1 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -458,6 +458,7 @@ env.CppUnitTest(
'collection_metadata_filtering_test.cpp',
'collection_metadata_test.cpp',
'collection_sharding_runtime_test.cpp',
+ 'database_sharding_state_test.cpp',
'dist_lock_catalog_mock.cpp',
'dist_lock_catalog_replset_test.cpp',
'dist_lock_manager_replset_test.cpp',
diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp
index cb7cbf2b6fc..3f2ce37dea5 100644
--- a/src/mongo/db/s/collection_sharding_runtime_test.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp
@@ -29,6 +29,7 @@
#include "mongo/platform/basic.h"
+#include "boost/optional/optional_io.hpp"
#include "mongo/db/catalog/create_collection.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/db_raii.h"
@@ -36,7 +37,11 @@
#include "mongo/db/repl/wait_for_majority_service.h"
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/operation_sharding_state.h"
+#include "mongo/db/s/shard_filtering_metadata_refresh.h"
#include "mongo/db/s/shard_server_test_fixture.h"
+#include "mongo/db/s/sharding_state.h"
+#include "mongo/s/catalog/sharding_catalog_client_mock.h"
+#include "mongo/s/catalog_cache_loader_mock.h"
#include "mongo/util/fail_point.h"
namespace mongo {
@@ -183,6 +188,186 @@ TEST_F(CollectionShardingRuntimeTest,
csr.getCollectionDescription(opCtx).uuidMatches(*newMetadata.getChunkManager()->getUUID()));
}
+class CollectionShardingRuntimeTestWithMockedLoader : public ShardServerTestFixture {
+public:
+ const NamespaceString kNss{"test.foo"};
+ const UUID kCollUUID = UUID::gen();
+ const std::string kShardKey = "x";
+ const HostAndPort kConfigHostAndPort{"DummyConfig", 12345};
+ const std::vector<ShardType> kShardList = {ShardType("shard0", "Host0:12345")};
+
+ void setUp() override {
+ // Don't call ShardServerTestFixture::setUp so we can install a mock catalog cache
+ // loader.
+ ShardingMongodTestFixture::setUp();
+
+ replicationCoordinator()->alwaysAllowWrites(true);
+ serverGlobalParams.clusterRole = ClusterRole::ShardServer;
+
+ _clusterId = OID::gen();
+ ShardingState::get(getServiceContext())
+ ->setInitialized(kShardList[0].getName(), _clusterId);
+
+ auto mockLoader = std::make_unique<CatalogCacheLoaderMock>();
+ _mockCatalogCacheLoader = mockLoader.get();
+ CatalogCacheLoader::set(getServiceContext(), std::move(mockLoader));
+
+ uassertStatusOK(
+ initializeGlobalShardingStateForMongodForTest(ConnectionString(kConfigHostAndPort)));
+
+ configTargeterMock()->setFindHostReturnValue(kConfigHostAndPort);
+
+ WaitForMajorityService::get(getServiceContext()).startup(getServiceContext());
+
+ for (const auto& shard : kShardList) {
+ std::unique_ptr<RemoteCommandTargeterMock> targeter(
+ std::make_unique<RemoteCommandTargeterMock>());
+ HostAndPort host(shard.getHost());
+ targeter->setConnectionStringReturnValue(ConnectionString(host));
+ targeter->setFindHostReturnValue(host);
+ targeterFactory()->addTargeterToReturn(ConnectionString(host), std::move(targeter));
+ }
+ }
+
+ void tearDown() override {
+ WaitForMajorityService::get(getServiceContext()).shutDown();
+
+ ShardServerTestFixture::tearDown();
+ }
+
+ class StaticCatalogClient final : public ShardingCatalogClientMock {
+ public:
+ StaticCatalogClient(std::vector<ShardType> shards) : _shards(std::move(shards)) {}
+
+ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) override {
+ return repl::OpTimeWith<std::vector<ShardType>>(_shards);
+ }
+
+ std::vector<CollectionType> getCollections(
+ OperationContext* opCtx,
+ StringData dbName,
+ repl::ReadConcernLevel readConcernLevel) override {
+ return _colls;
+ }
+
+ void setCollections(std::vector<CollectionType> colls) {
+ _colls = std::move(colls);
+ }
+
+ private:
+ const std::vector<ShardType> _shards;
+ std::vector<CollectionType> _colls;
+ };
+
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override {
+ return std::make_unique<StaticCatalogClient>(kShardList);
+ }
+
+ CollectionType createCollection(const OID& epoch,
+ boost::optional<Timestamp> timestamp = boost::none) {
+ CollectionType res(kNss, epoch, timestamp, Date_t::now(), kCollUUID);
+ res.setKeyPattern(BSON(kShardKey << 1));
+ res.setUnique(false);
+ res.setAllowMigrations(false);
+ return res;
+ }
+
+ std::vector<ChunkType> createChunks(const OID& epoch,
+ boost::optional<Timestamp> timestamp = boost::none) {
+ auto range1 = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << 5));
+ ChunkType chunk1(
+ kNss, range1, ChunkVersion(1, 0, epoch, timestamp), kShardList[0].getName());
+
+ auto range2 = ChunkRange(BSON(kShardKey << 5), BSON(kShardKey << MAXKEY));
+ ChunkType chunk2(
+ kNss, range2, ChunkVersion(1, 1, epoch, timestamp), kShardList[0].getName());
+
+ return {chunk1, chunk2};
+ }
+
+protected:
+ CatalogCacheLoaderMock* _mockCatalogCacheLoader;
+};
+
+TEST_F(CollectionShardingRuntimeTestWithMockedLoader,
+ ForceShardFilteringMetadataRefreshWithUpdateMetadataFormat) {
+ const DatabaseType dbType(
+ kNss.db().toString(), kShardList[0].getName(), true, DatabaseVersion(UUID::gen()));
+
+ const auto epoch = OID::gen();
+ const Timestamp timestamp(42);
+
+ const auto coll = createCollection(epoch);
+ const auto chunks = createChunks(epoch);
+
+ const auto timestampedColl = createCollection(epoch, timestamp);
+ const auto timestampedChunks = createChunks(epoch, timestamp);
+
+ auto checkForceFilteringMetadataRefresh = [&](const auto& coll, const auto& chunks) {
+ auto opCtx = operationContext();
+
+ _mockCatalogCacheLoader->setDatabaseRefreshReturnValue(dbType);
+ _mockCatalogCacheLoader->setCollectionRefreshValues(
+ kNss, coll, chunks, boost::none /* reshardingFields */);
+ forceShardFilteringMetadataRefresh(opCtx, kNss);
+ AutoGetCollection autoColl(opCtx, kNss, LockMode::MODE_IS);
+ const auto currentMetadata =
+ CollectionShardingRuntime::get(opCtx, kNss)->getCurrentMetadataIfKnown();
+ ASSERT_TRUE(currentMetadata);
+ ASSERT_EQ(currentMetadata->getCollVersion().getTimestamp(), coll.getTimestamp());
+ };
+
+ // Testing the following transitions:
+ // CV<E, M, m> -> CV<E, T, M, m> -> CV<E, M, m>
+ // Note that the loader only returns the last chunk since we didn't modify any chunk.
+ checkForceFilteringMetadataRefresh(coll, chunks);
+ checkForceFilteringMetadataRefresh(timestampedColl, std::vector{timestampedChunks.back()});
+ checkForceFilteringMetadataRefresh(coll, std::vector{chunks.back()});
+}
+
+TEST_F(CollectionShardingRuntimeTestWithMockedLoader,
+ OnShardVersionMismatchWithUpdateMetadataFormat) {
+ const DatabaseType dbType(
+ kNss.db().toString(), kShardList[0].getName(), true, DatabaseVersion(UUID::gen()));
+
+ const auto epoch = OID::gen();
+ const Timestamp timestamp(42);
+
+ const auto coll = createCollection(epoch);
+ const auto chunks = createChunks(epoch);
+ const auto collVersion = chunks.back().getVersion();
+
+ const auto timestampedColl = createCollection(epoch, timestamp);
+ const auto timestampedChunks = createChunks(epoch, timestamp);
+ const auto timestampedCollVersion = timestampedChunks.back().getVersion();
+
+ auto opCtx = operationContext();
+
+ auto onShardVersionMismatchCheck =
+ [&](const auto& coll, const auto& chunks, const auto& receivedVersion) {
+ _mockCatalogCacheLoader->setDatabaseRefreshReturnValue(dbType);
+ _mockCatalogCacheLoader->setCollectionRefreshValues(
+ kNss, coll, chunks, boost::none /* reshardingFields */);
+
+ onShardVersionMismatch(opCtx, kNss, receivedVersion);
+
+ AutoGetCollection autoColl(opCtx, kNss, LockMode::MODE_IS);
+ auto currentMetadata =
+ CollectionShardingRuntime::get(opCtx, kNss)->getCurrentMetadataIfKnown();
+ ASSERT_TRUE(currentMetadata);
+ ASSERT_EQ(currentMetadata->getCollVersion(), receivedVersion);
+ };
+
+ // Testing the following transitions:
+ // CV<E, M, m> -> CV<E, T, M, m> -> CV<E, M, m>
+ // Note that the loader only returns the last chunk since we didn't modify any chunk.
+ onShardVersionMismatchCheck(coll, chunks, collVersion);
+ onShardVersionMismatchCheck(
+ timestampedColl, std::vector{timestampedChunks.back()}, timestampedCollVersion);
+ onShardVersionMismatchCheck(coll, std::vector{chunks.back()}, collVersion);
+}
+
/**
* Fixture for when range deletion functionality is required in CollectionShardingRuntime tests.
*/
diff --git a/src/mongo/db/s/database_sharding_state_test.cpp b/src/mongo/db/s/database_sharding_state_test.cpp
new file mode 100644
index 00000000000..4522c060433
--- /dev/null
+++ b/src/mongo/db/s/database_sharding_state_test.cpp
@@ -0,0 +1,225 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "boost/optional/optional_io.hpp"
+#include "mongo/db/catalog_raii.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/repl/wait_for_majority_service.h"
+#include "mongo/db/s/database_sharding_state.h"
+#include "mongo/db/s/shard_filtering_metadata_refresh.h"
+#include "mongo/db/s/shard_server_test_fixture.h"
+#include "mongo/db/s/sharding_state.h"
+#include "mongo/s/catalog/sharding_catalog_client_mock.h"
+#include "mongo/s/catalog_cache_loader_mock.h"
+
+namespace mongo {
+namespace {
+
+class DatabaseShardingStateTestWithMockedLoader : public ShardServerTestFixture {
+public:
+ const StringData kDbName{"test"};
+
+ const HostAndPort kConfigHostAndPort{"DummyConfig", 12345};
+ const std::vector<ShardType> kShardList = {ShardType("shard0", "Host0:12345")};
+
+ void setUp() override {
+ // Don't call ShardServerTestFixture::setUp so we can install a mock catalog cache
+ // loader.
+ ShardingMongodTestFixture::setUp();
+
+ replicationCoordinator()->alwaysAllowWrites(true);
+ serverGlobalParams.clusterRole = ClusterRole::ShardServer;
+
+ _clusterId = OID::gen();
+ ShardingState::get(getServiceContext())
+ ->setInitialized(kShardList[0].getName(), _clusterId);
+
+ auto mockLoader = std::make_unique<CatalogCacheLoaderMock>();
+ _mockCatalogCacheLoader = mockLoader.get();
+ CatalogCacheLoader::set(getServiceContext(), std::move(mockLoader));
+
+ uassertStatusOK(
+ initializeGlobalShardingStateForMongodForTest(ConnectionString(kConfigHostAndPort)));
+
+ configTargeterMock()->setFindHostReturnValue(kConfigHostAndPort);
+
+ WaitForMajorityService::get(getServiceContext()).startup(getServiceContext());
+
+ for (const auto& shard : kShardList) {
+ std::unique_ptr<RemoteCommandTargeterMock> targeter(
+ std::make_unique<RemoteCommandTargeterMock>());
+ HostAndPort host(shard.getHost());
+ targeter->setConnectionStringReturnValue(ConnectionString(host));
+ targeter->setFindHostReturnValue(host);
+ targeterFactory()->addTargeterToReturn(ConnectionString(host), std::move(targeter));
+ }
+ }
+
+ void tearDown() override {
+ WaitForMajorityService::get(getServiceContext()).shutDown();
+
+ ShardServerTestFixture::tearDown();
+ }
+
+ class StaticCatalogClient final : public ShardingCatalogClientMock {
+ public:
+ StaticCatalogClient(std::vector<ShardType> shards) : _shards(std::move(shards)) {}
+
+ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) override {
+ return repl::OpTimeWith<std::vector<ShardType>>(_shards);
+ }
+
+ std::vector<CollectionType> getCollections(
+ OperationContext* opCtx,
+ StringData dbName,
+ repl::ReadConcernLevel readConcernLevel) override {
+ return _colls;
+ }
+
+ void setCollections(std::vector<CollectionType> colls) {
+ _colls = std::move(colls);
+ }
+
+ private:
+ const std::vector<ShardType> _shards;
+ std::vector<CollectionType> _colls;
+ };
+
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient() override {
+ return std::make_unique<StaticCatalogClient>(kShardList);
+ }
+
+ DatabaseType createDatabase(const UUID& uuid,
+ boost::optional<Timestamp> timestamp = boost::none) {
+ return DatabaseType(
+ kDbName.toString(), kShardList[0].getName(), true, DatabaseVersion(uuid, timestamp));
+ }
+
+protected:
+ CatalogCacheLoaderMock* _mockCatalogCacheLoader;
+};
+
+TEST_F(DatabaseShardingStateTestWithMockedLoader, OnDbVersionMismatch) {
+ const auto oldDb = createDatabase(UUID::gen());
+ const auto newDb = createDatabase(UUID::gen());
+
+ auto checkOnDbVersionMismatch = [&](const auto& newDb) {
+ const auto newDbVersion = newDb.getVersion();
+ auto opCtx = operationContext();
+
+ auto getActiveDbVersion = [&] {
+ AutoGetDb autoDb(opCtx, kDbName, MODE_IS);
+ const auto dss = DatabaseShardingState::get(opCtx, kDbName);
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
+ return dss->getDbVersion(opCtx, dssLock);
+ };
+
+ boost::optional<DatabaseVersion> activeDbVersion = getActiveDbVersion();
+
+ _mockCatalogCacheLoader->setDatabaseRefreshReturnValue(newDb);
+ ASSERT_OK(onDbVersionMismatchNoExcept(opCtx, kDbName, newDbVersion, activeDbVersion));
+
+ activeDbVersion = getActiveDbVersion();
+ ASSERT_TRUE(activeDbVersion);
+ ASSERT_EQ(newDbVersion.getTimestamp(), activeDbVersion->getTimestamp());
+ };
+
+ checkOnDbVersionMismatch(oldDb);
+ checkOnDbVersionMismatch(newDb);
+ checkOnDbVersionMismatch(oldDb);
+}
+
+TEST_F(DatabaseShardingStateTestWithMockedLoader, OnDbVersionMismatchWithUpdateMetadataFormat) {
+ const auto uuid = UUID::gen();
+ const Timestamp timestamp(42);
+
+ const auto db = createDatabase(uuid);
+ const auto timestampedDb = createDatabase(uuid, timestamp);
+
+ auto checkOnDbVersionMismatch = [&](const auto& newDb) {
+ auto opCtx = operationContext();
+
+ _mockCatalogCacheLoader->setDatabaseRefreshReturnValue(newDb);
+
+ auto getActiveDbVersion = [&] {
+ AutoGetDb autoDb(opCtx, kDbName, MODE_IS);
+ const auto dss = DatabaseShardingState::get(opCtx, kDbName);
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
+ return dss->getDbVersion(opCtx, dssLock);
+ };
+
+ boost::optional<DatabaseVersion> activeDbVersion = getActiveDbVersion();
+
+ const auto& newDbVersion = newDb.getVersion();
+ ASSERT_OK(onDbVersionMismatchNoExcept(opCtx, kDbName, newDbVersion, activeDbVersion));
+
+ activeDbVersion = getActiveDbVersion();
+ ASSERT_TRUE(activeDbVersion);
+ ASSERT_EQ(newDbVersion.getTimestamp(), activeDbVersion->getTimestamp());
+ };
+
+ checkOnDbVersionMismatch(db);
+ checkOnDbVersionMismatch(timestampedDb);
+ checkOnDbVersionMismatch(db);
+}
+
+TEST_F(DatabaseShardingStateTestWithMockedLoader, ForceDatabaseRefreshWithUpdateMetadataFormat) {
+ const auto uuid = UUID::gen();
+ const Timestamp timestamp(42);
+
+ const auto db = createDatabase(uuid);
+ const auto timestampedDb = createDatabase(uuid, timestamp);
+
+ auto checkForceDatabaseRefresh = [&](const auto& newDb) {
+ const auto newDbVersion = newDb.getVersion();
+ auto opCtx = operationContext();
+
+ _mockCatalogCacheLoader->setDatabaseRefreshReturnValue(newDb);
+ forceDatabaseRefresh(opCtx, kDbName);
+
+ boost::optional<DatabaseVersion> activeDbVersion = [&] {
+ AutoGetDb autoDb(opCtx, kDbName, MODE_IS);
+ const auto dss = DatabaseShardingState::get(opCtx, kDbName);
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
+ return dss->getDbVersion(opCtx, dssLock);
+ }();
+ ASSERT_TRUE(activeDbVersion);
+ ASSERT_EQ(newDbVersion.getTimestamp(), activeDbVersion->getTimestamp());
+ };
+
+ checkForceDatabaseRefresh(db);
+ checkForceDatabaseRefresh(timestampedDb);
+ checkForceDatabaseRefresh(db);
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 4a857f4952c..4376b32a2c6 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -178,8 +178,13 @@ void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
invariant(_metadata.back()->metadata);
const auto& activeMetadata = _metadata.back()->metadata.get();
- // We already have the same or newer version
- if (remoteMetadata.getCollVersion().isOlderOrEqualThan(activeMetadata.getCollVersion())) {
+ const auto remoteCollVersion = remoteMetadata.getCollVersion();
+ const auto activeCollVersion = activeMetadata.getCollVersion();
+ // Do nothing if the remote version is older than the current active one,
+ // or it is the same and there was not an update on the metadata format.
+ if (remoteCollVersion.isOlderThan(activeCollVersion) ||
+ (remoteCollVersion == activeCollVersion &&
+ remoteCollVersion.getTimestamp() == activeCollVersion.getTimestamp())) {
LOGV2_DEBUG(21984,
1,
"Ignoring incoming metadata update {activeMetadata} for {namespace} because "
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index 6a04f9b916b..d6b24fdd79f 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -76,7 +76,9 @@ void onDbVersionMismatch(OperationContext* opCtx,
const ComparableDatabaseVersion comparableClientDbVersion =
ComparableDatabaseVersion::makeComparableDatabaseVersion(clientDbVersion);
- if (comparableClientDbVersion <= comparableServerDbVersion) {
+ if (comparableClientDbVersion < comparableServerDbVersion ||
+ (comparableClientDbVersion == comparableServerDbVersion &&
+ clientDbVersion.getTimestamp() == serverDbVersion->getTimestamp())) {
// The client was stale; do not trigger server-side refresh.
return;
}
@@ -244,7 +246,9 @@ void onShardVersionMismatch(OperationContext* opCtx,
const auto currentShardVersion = metadata->getShardVersion();
// Don't need to remotely reload if we're in the same epoch and the requested
// version is smaller than the known one. This means that the remote side is behind.
- if (shardVersionReceived->isOlderThan(currentShardVersion)) {
+ if (shardVersionReceived->isOlderThan(currentShardVersion) ||
+ (*shardVersionReceived == currentShardVersion &&
+ shardVersionReceived->getTimestamp() == currentShardVersion.getTimestamp())) {
return;
}
}
@@ -440,7 +444,9 @@ ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx,
if (optMetadata) {
const auto& metadata = *optMetadata;
if (metadata.isSharded() &&
- cm.getVersion().isOlderOrEqualThan(metadata.getCollVersion())) {
+ (cm.getVersion().isOlderThan(metadata.getCollVersion()) ||
+ (cm.getVersion() == metadata.getCollVersion() &&
+ cm.getVersion().getTimestamp() == metadata.getCollVersion().getTimestamp()))) {
LOGV2_DEBUG(
22063,
1,
@@ -472,7 +478,9 @@ ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx,
if (optMetadata) {
const auto& metadata = *optMetadata;
if (metadata.isSharded() &&
- cm.getVersion().isOlderOrEqualThan(metadata.getCollVersion())) {
+ (cm.getVersion().isOlderThan(metadata.getCollVersion()) ||
+ (cm.getVersion() == metadata.getCollVersion() &&
+ cm.getVersion().getTimestamp() == metadata.getCollVersion().getTimestamp()))) {
LOGV2_DEBUG(
22064,
1,
@@ -532,6 +540,7 @@ void forceDatabaseRefresh(OperationContext* opCtx, const StringData dbName) {
}
auto refreshedDbInfo = uassertStatusOK(std::move(swRefreshedDbInfo));
+ const auto refreshedDBVersion = refreshedDbInfo.databaseVersion();
// First, check under a shared lock if another thread already updated the cached version.
// This is a best-effort optimization to make as few threads as possible to convoy on the
@@ -546,15 +555,16 @@ void forceDatabaseRefresh(OperationContext* opCtx, const StringData dbName) {
const auto cachedDbVersion = dss->getDbVersion(opCtx, dssLock);
if (cachedDbVersion) {
// Do not reorder these two statements! if the comparison is done through epochs, the
- // construction order matters: we are pessimistically assuming that the client version
- // is newer when they have different uuids
+ // construction order matters: we are pessimistically assuming that the refreshed
+ // version is newer when they have different uuids
const ComparableDatabaseVersion comparableCachedDbVersion =
ComparableDatabaseVersion::makeComparableDatabaseVersion(*cachedDbVersion);
const ComparableDatabaseVersion comparableRefreshedDbVersion =
- ComparableDatabaseVersion::makeComparableDatabaseVersion(
- refreshedDbInfo.databaseVersion());
+ ComparableDatabaseVersion::makeComparableDatabaseVersion(refreshedDBVersion);
- if (comparableRefreshedDbVersion <= comparableCachedDbVersion) {
+ if (comparableRefreshedDbVersion < comparableCachedDbVersion ||
+ (comparableRefreshedDbVersion == comparableCachedDbVersion &&
+ cachedDbVersion->getTimestamp() == refreshedDBVersion.getTimestamp())) {
LOGV2_DEBUG(5369130,
2,
"Skipping updating cached database info from refreshed version "
diff --git a/src/mongo/s/database_version.h b/src/mongo/s/database_version.h
index 1bd95297328..546272ba625 100644
--- a/src/mongo/s/database_version.h
+++ b/src/mongo/s/database_version.h
@@ -45,6 +45,8 @@ namespace mongo {
*/
class DatabaseVersion : public DatabaseVersionBase {
public:
+ using DatabaseVersionBase::getTimestamp;
+
DatabaseVersion() = default;
explicit DatabaseVersion(const BSONObj& obj) {