summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEnrico Golfieri <enrico.golfieri@mongodb.com>2023-02-07 09:48:59 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2023-02-07 12:19:07 +0000
commitf314903119bfe09ee0e54c7c00f7f814c370efb5 (patch)
tree1e4057970ad862549342e72e851b7ed2ce9b69a0
parent39d3dd4e28c188cb112a10039a783d6bacd7588e (diff)
downloadmongo-f314903119bfe09ee0e54c7c00f7f814c370efb5.tar.gz
SERVER-73052 ShardingCatalogClient::getShardsThatOwnData...AtClusterTime() methods need a richer data structure to describe the returned Placement Metadata
-rw-r--r--src/mongo/db/namespace_string.cpp4
-rw-r--r--src/mongo/db/namespace_string.h5
-rw-r--r--src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp4
-rw-r--r--src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp452
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client.h11
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.cpp263
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.h15
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_mock.cpp8
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_mock.h10
-rw-r--r--src/mongo/s/catalog/type_namespace_placement.idl17
-rw-r--r--src/mongo/s/request_types/get_historical_placement_info.idl18
11 files changed, 574 insertions, 233 deletions
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index b6c5b1c3d2b..2ba1c52a614 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -184,6 +184,10 @@ const NamespaceString NamespaceString::kShardCollectionCatalogNamespace(Namespac
const NamespaceString NamespaceString::kConfigsvrPlacementHistoryNamespace(
NamespaceString::kConfigDb, "placementHistory");
+const NamespaceString NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace("",
+ "",
+ boost::none);
+
const NamespaceString NamespaceString::kLockpingsNamespace(NamespaceString::kConfigDb, "lockpings");
const NamespaceString NamespaceString::kDistLocksNamepsace(NamespaceString::kConfigDb, "locks");
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 619a4e6a592..fc451951373 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -263,6 +263,11 @@ public:
// Namespace used for storing NamespacePlacementType docs on the CSRS.
static const NamespaceString kConfigsvrPlacementHistoryNamespace;
+ // Namespace value used to identify the "fcv marker entry" of
+ // kConfigsvrPlacementHistoryNamespace collection which marks the start or the end of a FCV
+ // upgrade/downgrade.
+ static const NamespaceString kConfigsvrPlacementHistoryFcvMarkerNamespace;
+
// TODO SERVER-68551: remove once 7.0 becomes last-lts
static const NamespaceString kLockpingsNamespace;
diff --git a/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp b/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp
index 9e71224c7a0..9fc46c92577 100644
--- a/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp
+++ b/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp
@@ -69,8 +69,8 @@ public:
shardsWithOpTime.value.end(),
std::back_inserter(shardIds),
[](const ShardType& s) { return s.getName(); });
- ConfigsvrGetHistoricalPlacementResponse response(std::move(shardIds));
- response.setIsExact(false);
+ HistoricalPlacement historicalPlacement{std::move(shardIds), false};
+ ConfigsvrGetHistoricalPlacementResponse response(std::move(historicalPlacement));
return response;
}
diff --git a/src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp b/src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp
index 8689c9ee77a..c273fc9d486 100644
--- a/src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp
+++ b/src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp
@@ -32,6 +32,7 @@
#include "mongo/bson/timestamp.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
+#include "mongo/idl/server_parameter_test_util.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_namespace_placement_gen.h"
@@ -47,9 +48,7 @@ namespace {
class CatalogClientAggregationsTest : public ConfigServerTestFixture {
public:
struct PlacementDescriptor {
- PlacementDescriptor(Timestamp&& timestamp,
- std::string&& ns,
- std::vector<std::string>&& shardsIds)
+ PlacementDescriptor(Timestamp timestamp, std::string ns, std::vector<std::string> shardsIds)
: timestamp(std::move(timestamp)), ns(std::move(ns)), shardsIds(std::move(shardsIds)) {}
Timestamp timestamp;
@@ -127,19 +126,24 @@ private:
}
}; // CatalogClientAggregationsTest
-void assertSameShardSet(std::vector<ShardId>& retrievedSet,
- std::vector<std::string>&& expectedSet) {
+void assertSameHistoricalPlacement(HistoricalPlacement historicalPlacement,
+ std::vector<std::string>&& expectedSet,
+ bool expectedIsExact = true) {
+ auto retrievedSet = historicalPlacement.getShards();
ASSERT_EQ(retrievedSet.size(), expectedSet.size());
std::sort(retrievedSet.begin(), retrievedSet.end());
std::sort(expectedSet.begin(), expectedSet.end());
for (size_t i = 0; i < retrievedSet.size(); i++) {
ASSERT_EQ(retrievedSet[i], expectedSet[i]);
}
+ ASSERT_EQ(historicalPlacement.getIsExact(), expectedIsExact);
}
} // namespace
// ######################## PlacementHistory: Query by collection ##########################
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_ShardedCollection) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Quering the placementHistory for a sharded collection should return the shards that owned the
* collection at the given clusterTime*/
auto opCtx = operationContext();
@@ -151,16 +155,16 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_S
setupConfigShard(opCtx, 4 /*nShards*/);
// 2 shards must own collection1
- auto shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(4, 0));
- assertSameShardSet(shards, {"shard1", "shard2"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2"});
// 2 shards must own collection2
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection2"), Timestamp(4, 0));
- assertSameShardSet(shards, {"shard3", "shard4"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard3", "shard4"});
}
TEST_F(CatalogClientAggregationsTest,
@@ -178,10 +182,10 @@ TEST_F(CatalogClientAggregationsTest,
setupConfigShard(opCtx, 4 /*nShards*/);
// 3 shards must own collection1 at timestamp 4
- auto shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(4, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
}
TEST_F(CatalogClientAggregationsTest,
@@ -197,23 +201,25 @@ TEST_F(CatalogClientAggregationsTest,
setupConfigShard(opCtx, 3 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection"), Timestamp(3, 0));
- assertSameShardSet(shards, {"shard1"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1"});
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db2.collection"), Timestamp(3, 0));
- assertSameShardSet(shards, {"shard2"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard2"});
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db3.collection"), Timestamp(3, 0));
- assertSameShardSet(shards, {"shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard3"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_DifferentTimestamp) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Query the placementHistory at different timestamp should return different results*/
auto opCtx = operationContext();
@@ -227,33 +233,35 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_D
setupConfigShard(opCtx, 4 /*nShards*/);
// no shards at timestamp 0
- auto shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(0, 0));
- assertSameShardSet(shards, {});
+ assertSameHistoricalPlacement(historicalPlacement, {});
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(1, 0));
- assertSameShardSet(shards, {"shard1"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1"});
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(2, 0));
- assertSameShardSet(shards, {"shard1", "shard2"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2"});
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(4, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(5, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3", "shard4"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3", "shard4"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_SameTimestamp) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Having different namespaces for the same timestamp should not influece the expected result*/
auto opCtx = operationContext();
@@ -267,24 +275,26 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_S
setupConfigShard(opCtx, 9 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection"), Timestamp(1, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection2"), Timestamp(1, 0));
- assertSameShardSet(shards, {"shard1", "shard4", "shard5"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard4", "shard5"});
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db2.collection"), Timestamp(1, 0));
- assertSameShardSet(shards, {"shard6", "shard7", "shard8", "shard9"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard6", "shard7", "shard8", "shard9"});
}
TEST_F(CatalogClientAggregationsTest,
GetShardsThatOwnDataForCollAtClusterTime_InvertedTimestampOrder) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Ordering of document insertion into config.placementHistory must not matter*/
auto opCtx = operationContext();
@@ -297,14 +307,16 @@ TEST_F(CatalogClientAggregationsTest,
setupConfigShard(opCtx, 8 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(4, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3", "shard4"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3", "shard4"});
}
TEST_F(CatalogClientAggregationsTest,
GetShardsThatOwnDataForCollAtClusterTime_ReturnPrimaryShardWhenNoShards) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Quering the placementHistory must report only the primary shard when an empty list of shards
* is reported for the collection*/
auto opCtx = operationContext();
@@ -318,19 +330,21 @@ TEST_F(CatalogClientAggregationsTest,
setupConfigShard(opCtx, 3 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection2"), Timestamp(4, 0));
- assertSameShardSet(shards, {"shard1"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1"});
// Note: at timestamp 3 the collection's shard list is not empty
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection2"), Timestamp(3, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_AddPrimaryShard) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Quering the placementHistory must report the primary shard in addition to the list of shards
* related to db.collection. Primary shards must always be returned*/
auto opCtx = operationContext();
@@ -343,20 +357,22 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_A
setupConfigShard(opCtx, 5 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(2, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3", "shard4"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3", "shard4"});
// Note: the primary shard is shard5 at timestamp 3
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(3, 0));
- assertSameShardSet(shards, {"shard5", "shard2", "shard3", "shard4"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard5", "shard2", "shard3", "shard4"});
}
TEST_F(CatalogClientAggregationsTest,
GetShardsThatOwnDataForCollAtClusterTime_AddPrimaryShardAtSameTimestamp) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Quering the placementHistory must report the primary shard in addition to the list of shards
* related to db.collection. Primary shards must always be returned*/
auto opCtx = operationContext();
@@ -370,14 +386,63 @@ TEST_F(CatalogClientAggregationsTest,
setupConfigShard(opCtx, 8 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(1, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3", "shard4"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3", "shard4"});
+}
+
+TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_WithMarkers) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
+ auto opCtx = operationContext();
+ PlacementDescriptor _startFcvMarker = {
+ Timestamp(1, 0),
+ NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(),
+ {"shard1", "shard2", "shard3", "shard4", "shard5"}};
+ PlacementDescriptor _endFcvMarker = {
+ Timestamp(3, 0), NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(), {}};
+
+ // initialization
+ setupConfigPlacementHistory(
+ opCtx,
+ {_startFcvMarker,
+ {Timestamp(2, 0), "db.collection1", {"shard1", "shard2", "shard3"}},
+ {Timestamp(2, 0), "db", {"shard4"}},
+ {Timestamp(2, 0), "db.collection2", {"shard1", "shard2", "shard3"}},
+ _endFcvMarker});
+
+ // after initialization-
+ setupConfigPlacementHistory(
+ opCtx,
+ {{Timestamp(4, 0), "db", {"shard1"}},
+ {Timestamp(5, 0), "db.collection1", {"shard1", "shard2", "shard3"}},
+ {Timestamp(6, 0), "db.collection1", {}}});
+
+ setupConfigShard(opCtx, 4 /*nShards*/);
+
+ // Asking for a timestamp before the closing marker should return the shards from the first
+ // marker of the fcv upgrade. As result, "isExact" is expected to be false
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ opCtx, NamespaceString("db.collection1"), Timestamp(2, 0));
+ assertSameHistoricalPlacement(
+ historicalPlacement, {"shard1", "shard2", "shard3", "shard4", "shard5"}, false);
+
+ // Asking for a timestamp after the closing marker should return the expected shards
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ opCtx, NamespaceString("db.collection1"), Timestamp(3, 0));
+ assertSameHistoricalPlacement(
+ historicalPlacement, {"shard1", "shard2", "shard3", "shard4"}, true);
+
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ opCtx, NamespaceString("db.collection1"), Timestamp(6, 0));
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1"}, true);
}
// ######################## PlacementHistory: Query by database ############################
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_SingleDatabase) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Quering the placementHistory must report all the shards for every collection belonging to
* the input db*/
auto opCtx = operationContext();
@@ -389,13 +454,16 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_Sin
setupConfigShard(opCtx, 5 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(3, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3", "shard4", "shard5"});
+ assertSameHistoricalPlacement(historicalPlacement,
+ {"shard1", "shard2", "shard3", "shard4", "shard5"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_MultipleDatabases) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Quering the placementHistory must report all the shards for every collection belonging to
* the input db*/
auto opCtx = operationContext();
@@ -409,23 +477,25 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_Mul
setupConfigShard(opCtx, 7 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(5, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
- shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db2"), Timestamp(5, 0));
- assertSameShardSet(shards, {"shard4", "shard5", "shard6"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard4", "shard5", "shard6"});
- shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db3"), Timestamp(5, 0));
- assertSameShardSet(shards, {"shard7"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard7"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_DifferentTimestamp) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Query the placementHistory at different timestamp should return different results*/
auto opCtx = operationContext();
@@ -439,33 +509,35 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_Dif
setupConfigShard(opCtx, 4 /*nShards*/);
// no shards at timestamp 0
- auto shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(0, 0));
- assertSameShardSet(shards, {});
+ assertSameHistoricalPlacement(historicalPlacement, {});
- shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(1, 0));
- assertSameShardSet(shards, {"shard1"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1"});
- shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(2, 0));
- assertSameShardSet(shards, {"shard1", "shard2"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2"});
- shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(4, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
- shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(5, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3", "shard4"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3", "shard4"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_SameTimestamp) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Having different namespaces for the same timestamp should not influece the expected result*/
auto opCtx = operationContext();
@@ -479,19 +551,22 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_Sam
setupConfigShard(opCtx, 9 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(1, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3", "shard4", "shard5"});
+ assertSameHistoricalPlacement(historicalPlacement,
+ {"shard1", "shard2", "shard3", "shard4", "shard5"});
- shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db2"), Timestamp(1, 0));
- assertSameShardSet(shards, {"shard6", "shard7", "shard8", "shard9"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard6", "shard7", "shard8", "shard9"});
}
TEST_F(CatalogClientAggregationsTest,
GetShardsThatOwnDataForDbAtClusterTime_InvertedTimestampOrder) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Ordering of document insertion into config.placementHistory must not matter*/
auto opCtx = operationContext();
@@ -504,13 +579,15 @@ TEST_F(CatalogClientAggregationsTest,
setupConfigShard(opCtx, 8 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(4, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3", "shard4"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3", "shard4"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_NoShardsForDb) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Quering the placementHistory must report no shards if the list of shards belonging to every
* collection and the db is empty*/
auto opCtx = operationContext();
@@ -524,19 +601,21 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_NoS
setupConfigShard(opCtx, 3 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(4, 0));
- assertSameShardSet(shards, {});
+ assertSameHistoricalPlacement(historicalPlacement, {});
// Note: at timestamp 3 the collection's shard list was not empty
- shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(3, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_NewShardForDb) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Quering the placementHistory must correctly identify a new primary for the db*/
auto opCtx = operationContext();
@@ -548,20 +627,69 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_New
setupConfigShard(opCtx, 4 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(2, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
// At timestamp 3 the db shard list was updated with a new primary
- shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ opCtx, NamespaceString("db"), Timestamp(3, 0));
+
+ assertSameHistoricalPlacement(historicalPlacement, {"shard4", "shard1", "shard2", "shard3"});
+}
+
+TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_WithMarkers) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
+ auto opCtx = operationContext();
+ PlacementDescriptor _startFcvMarker = {
+ Timestamp(1, 0),
+ NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(),
+ {"shard1", "shard2", "shard3", "shard4", "shard5"}};
+ PlacementDescriptor _endFcvMarker = {
+ Timestamp(3, 0), NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(), {}};
+
+ // initialization
+ setupConfigPlacementHistory(
+ opCtx,
+ {_startFcvMarker,
+ {Timestamp(2, 0), "db.collection1", {"shard1", "shard2", "shard3"}},
+ {Timestamp(2, 0), "db", {"shard4"}},
+ {Timestamp(2, 0), "db.collection2", {"shard1", "shard2", "shard3"}},
+ _endFcvMarker});
+
+ // after initialization-
+ setupConfigPlacementHistory(
+ opCtx,
+ {{Timestamp(4, 0), "db", {"shard1"}},
+ {Timestamp(5, 0), "db.collection1", {"shard1", "shard2", "shard3"}},
+ {Timestamp(6, 0), "db.collection1", {}}});
+
+ setupConfigShard(opCtx, 4 /*nShards*/);
+
+ // Asking for a timestamp before the closing marker should return the shards from the first
+ // marker of the fcv upgrade. As result, "isExact" is expected to be false
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ opCtx, NamespaceString("db"), Timestamp(2, 0));
+ assertSameHistoricalPlacement(
+ historicalPlacement, {"shard1", "shard2", "shard3", "shard4", "shard5"}, false);
+
+ // Asking for a timestamp after the closing marker should return the expected shards
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(3, 0));
+ assertSameHistoricalPlacement(
+ historicalPlacement, {"shard1", "shard2", "shard3", "shard4"}, true);
- assertSameShardSet(shards, {"shard4", "shard1", "shard2", "shard3"});
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ opCtx, NamespaceString("db"), Timestamp(7, 0));
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"}, true);
}
// ######################## PlacementHistory: Query the entire cluster ##################
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_SingleDatabase) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Quering the placementHistory must report all the shards for every collection and db*/
auto opCtx = operationContext();
@@ -572,12 +700,16 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_SingleDa
setupConfigShard(opCtx, 5 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(3, 0));
+ auto historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(3, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3", "shard4", "shard5"});
+ assertSameHistoricalPlacement(historicalPlacement,
+ {"shard1", "shard2", "shard3", "shard4", "shard5"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_MultipleDatabases) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Quering the placementHistory must report all the shards for every collection and db*/
auto opCtx = operationContext();
@@ -590,13 +722,17 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_Multiple
setupConfigShard(opCtx, 7 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(5, 0));
+ auto historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(5, 0));
- assertSameShardSet(shards,
- {"shard1", "shard2", "shard3", "shard4", "shard5", "shard6", "shard7"});
+ assertSameHistoricalPlacement(
+ historicalPlacement,
+ {"shard1", "shard2", "shard3", "shard4", "shard5", "shard6", "shard7"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_DifferentTimestamp) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Query the placementHistory at different timestamp should return different results*/
auto opCtx = operationContext();
@@ -610,28 +746,35 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_Differen
setupConfigShard(opCtx, 4 /*nShards*/);
// no shards at timestamp 0
- auto shards = catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(0, 0));
+ auto historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(0, 0));
- assertSameShardSet(shards, {});
+ assertSameHistoricalPlacement(historicalPlacement, {});
- shards = catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(1, 0));
+ historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(1, 0));
- assertSameShardSet(shards, {"shard1"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1"});
- shards = catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(2, 0));
+ historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(2, 0));
- assertSameShardSet(shards, {"shard1", "shard2"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2"});
- shards = catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(4, 0));
+ historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(4, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
- shards = catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(5, 0));
+ historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(5, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3", "shard4"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3", "shard4"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_SameTimestamp) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Having different namespaces for the same timestamp should not influence the expected
* result*/
auto opCtx = operationContext();
@@ -646,14 +789,17 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_SameTime
setupConfigShard(opCtx, 9 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(1, 0));
+ auto historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(1, 0));
- assertSameShardSet(
- shards,
+ assertSameHistoricalPlacement(
+ historicalPlacement,
{"shard1", "shard2", "shard3", "shard4", "shard5", "shard6", "shard7", "shard8", "shard9"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_InvertedTimestampOrder) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Ordering of document insertion into config.placementHistory must not matter*/
auto opCtx = operationContext();
@@ -666,13 +812,17 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_Inverted
setupConfigShard(opCtx, 8 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(4, 0));
+ auto historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(4, 0));
- assertSameShardSet(
- shards, {"shard1", "shard2", "shard3", "shard4", "shard5", "shard6", "shard7", "shard8"});
+ assertSameHistoricalPlacement(
+ historicalPlacement,
+ {"shard1", "shard2", "shard3", "shard4", "shard5", "shard6", "shard7", "shard8"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_NoShards) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Quering the placementHistory must report no shards if the list of shards belonging to
* every db.collection and db is empty*/
auto opCtx = operationContext();
@@ -686,18 +836,67 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_NoShards
setupConfigShard(opCtx, 3 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(4, 0));
+ auto historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(4, 0));
- assertSameShardSet(shards, {});
+ assertSameHistoricalPlacement(historicalPlacement, {});
// Note: at timestamp 3 the collection was still sharded
- shards = catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(3, 0));
+ historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(3, 0));
+
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
+}
+
+TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_WithMarkers) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
+ auto opCtx = operationContext();
+ PlacementDescriptor _startFcvMarker = {
+ Timestamp(1, 0),
+ NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(),
+ {"shard1", "shard2", "shard3", "shard4"}};
+ PlacementDescriptor _endFcvMarker = {
+ Timestamp(3, 0), NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(), {}};
+
+ // initialization
+ setupConfigPlacementHistory(
+ opCtx,
+ {_startFcvMarker,
+ {Timestamp(2, 0), "db.collection1", {"shard1", "shard2", "shard3"}},
+ {Timestamp(2, 0), "db", {"shard1"}},
+ {Timestamp(2, 0), "db.collection2", {"shard2"}},
+ _endFcvMarker});
+
+    // after initialization
+ setupConfigPlacementHistory(
+ opCtx,
+ {{Timestamp(4, 0), "db", {"shard2"}},
+ {Timestamp(5, 0), "db.collection2", {"shard1", "shard2", "shard3"}}});
+
+ setupConfigShard(opCtx, 3 /*nShards*/);
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ // Asking for a timestamp before the closing marker should return the shards from the first
+ // marker of the fcv upgrade
+ auto historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(2, 0));
+ assertSameHistoricalPlacement(
+ historicalPlacement, {"shard1", "shard2", "shard3", "shard4"}, false);
+
+ // Asking for a timestamp after the closing marker should return the expected shards
+ historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(3, 0));
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"}, true);
+
+ historicalPlacement =
+ catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(5, 0));
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"}, true);
}
// ######################## PlacementHistory: Regex Stage #####################
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_RegexStage_ConfigSystem) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*The regex stage must match correctly the config.system.namespaces collection*/
auto opCtx = operationContext();
@@ -710,13 +909,15 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_RegexSta
setupConfigShard(opCtx, 5 /*nShards*/);
// testing config.system.collections
- auto shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("config.system.collections"), Timestamp(7, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_RegexStage_NssWithPrefix) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*The regex stage must match correctly the input namespaces*/
auto opCtx = operationContext();
@@ -740,31 +941,34 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_RegexSta
setupConfigShard(opCtx, 9 /*nShards*/);
- auto shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(12, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
// no data must be returned since the namespace is not found
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("d.collection1"), Timestamp(12, 0));
- assertSameShardSet(shards, {});
+ assertSameHistoricalPlacement(historicalPlacement, {});
// database exists
- shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(12, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3", "shard7", "shard8", "shard9"});
+ assertSameHistoricalPlacement(historicalPlacement,
+ {"shard1", "shard2", "shard3", "shard7", "shard8", "shard9"});
// database does not exist
- shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("d"), Timestamp(12, 0));
- assertSameShardSet(shards, {});
+ assertSameHistoricalPlacement(historicalPlacement, {});
}
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_RegexStage_DbWithSymbols) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*The regex stage must correctly escape special character*/
auto opCtx = operationContext();
@@ -786,20 +990,22 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_RegexSta
setupConfigShard(opCtx, 14 /*nShards*/);
// db|db , db*db etc... must not be found when quering by database
- auto shards = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(10, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
// db|db , db*db etc... must not be found when quering by collection
- shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection"), Timestamp(10, 0));
- assertSameShardSet(shards, {"shard1", "shard2", "shard3"});
+ assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"});
}
// ######################## PlacementHistory: SnapshotTooOld #####################
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_SnapshotTooOld) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Quering the placementHistory must throw SnapshotTooOld when the returned list of shards
contains at least one shard no longer active*/
auto opCtx = operationContext();
@@ -829,29 +1035,33 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_Snapshot
// ######################## PlacementHistory: EmptyHistory #####################
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_EmptyHistory) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
// Quering an empty placementHistory must return an empty vector
auto opCtx = operationContext();
// no shards must be returned
- auto shards = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
+ auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime(
opCtx, NamespaceString("db.collection1"), Timestamp(4, 0));
- assertSameShardSet(shards, {});
+ assertSameHistoricalPlacement(historicalPlacement, {});
// no shards must be returned
- auto shards2 = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
+ auto historicalPlacement2 = catalogClient()->getShardsThatOwnDataForDbAtClusterTime(
opCtx, NamespaceString("db"), Timestamp(4, 0));
- ASSERT_EQ(0U, shards2.size());
+    ASSERT_EQ(0U, historicalPlacement2.getShards().size());
// no shards must be returned
auto shards3 = catalogClient()->getShardsThatOwnDataAtClusterTime(opCtx, Timestamp(4, 0));
- ASSERT_EQ(0U, shards3.size());
+    ASSERT_EQ(0U, shards3.getShards().size());
}
// ######################## PlacementHistory: InvalidOptions #####################
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_InvalidOptions) {
+ RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{
+ "featureFlagHistoricalPlacementShardingCatalog", true};
/*Testing input validation*/
auto opCtx = operationContext();
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index e37a6f830f6..36fa1290999 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -42,6 +42,7 @@
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/index_version.h"
+#include "mongo/s/request_types/get_historical_placement_info_gen.h"
namespace mongo {
@@ -342,7 +343,7 @@ public:
* that used to contain data for the specified collection at clusterTime >= input clusterTime
* based on placementHistory
*/
- virtual std::vector<ShardId> getShardsThatOwnDataForCollAtClusterTime(
+ virtual HistoricalPlacement getShardsThatOwnDataForCollAtClusterTime(
OperationContext* opCtx, const NamespaceString& collName, const Timestamp& clusterTime) = 0;
/**
@@ -350,15 +351,15 @@ public:
* that used to contain data for the specified database at clusterTime >= input clusterTime
* based on placementHistory
*/
- virtual std::vector<ShardId> getShardsThatOwnDataForDbAtClusterTime(
+ virtual HistoricalPlacement getShardsThatOwnDataForDbAtClusterTime(
OperationContext* opCtx, const NamespaceString& dbName, const Timestamp& clusterTime) = 0;
/**
* Returns the list of active shards that still contains data or that used to contain data
* at clusterTime >= input clusterTime based on placementHistory
*/
- virtual std::vector<ShardId> getShardsThatOwnDataAtClusterTime(
- OperationContext* opCtx, const Timestamp& clusterTime) = 0;
+ virtual HistoricalPlacement getShardsThatOwnDataAtClusterTime(OperationContext* opCtx,
+ const Timestamp& clusterTime) = 0;
/**
* Queries config.placementHistory to retrieve placement metadata on the requested namespace at
@@ -368,7 +369,7 @@ public:
*
* TODO (SERVER-73029): convert to private method of ShardingCatalogClientImpl
*/
- virtual std::vector<ShardId> getHistoricalPlacement(
+ virtual HistoricalPlacement getHistoricalPlacement(
OperationContext* opCtx,
const Timestamp& atClusterTime,
const boost::optional<NamespaceString>& nss) = 0;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index c79e9c314ba..78c89f29d1f 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/pipeline/aggregate_command_gen.h"
#include "mongo/db/pipeline/document_source_add_fields.h"
+#include "mongo/db/pipeline/document_source_facet.h"
#include "mongo/db/pipeline/document_source_group.h"
#include "mongo/db/pipeline/document_source_lookup.h"
#include "mongo/db/pipeline/document_source_match.h"
@@ -61,6 +62,7 @@
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_config_version.h"
#include "mongo/s/catalog/type_database_gen.h"
+#include "mongo/s/catalog/type_namespace_placement_gen.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/client/shard.h"
@@ -495,7 +497,7 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchData
}
}
-std::vector<ShardId> ShardingCatalogClientImpl::_fetchPlacementMetadata(
+HistoricalPlacement ShardingCatalogClientImpl::_fetchPlacementMetadata(
OperationContext* opCtx, ConfigsvrGetHistoricalPlacement&& request) {
auto remoteResponse = uassertStatusOK(_getConfigShard(opCtx)->runCommandWithFixedRetryAttempts(
opCtx,
@@ -510,7 +512,7 @@ std::vector<ShardId> ShardingCatalogClientImpl::_fetchPlacementMetadata(
auto placementDetails = ConfigsvrGetHistoricalPlacementResponse::parse(
IDLParserContext("ShardingCatalogClient"), remoteResponse.response);
- return placementDetails.getShards();
+ return placementDetails.getHistoricalPlacement();
}
CollectionType ShardingCatalogClientImpl::getCollection(OperationContext* opCtx,
@@ -1273,7 +1275,7 @@ StatusWith<std::vector<KeysCollectionDocument>> ShardingCatalogClientImpl::getNe
}
-std::vector<ShardId> ShardingCatalogClientImpl::getShardsThatOwnDataForCollAtClusterTime(
+HistoricalPlacement ShardingCatalogClientImpl::getShardsThatOwnDataForCollAtClusterTime(
OperationContext* opCtx, const NamespaceString& collName, const Timestamp& clusterTime) {
uassert(ErrorCodes::InvalidOptions,
@@ -1288,7 +1290,7 @@ std::vector<ShardId> ShardingCatalogClientImpl::getShardsThatOwnDataForCollAtClu
}
-std::vector<ShardId> ShardingCatalogClientImpl::getShardsThatOwnDataForDbAtClusterTime(
+HistoricalPlacement ShardingCatalogClientImpl::getShardsThatOwnDataForDbAtClusterTime(
OperationContext* opCtx, const NamespaceString& dbName, const Timestamp& clusterTime) {
uassert(ErrorCodes::InvalidOptions,
@@ -1302,7 +1304,7 @@ std::vector<ShardId> ShardingCatalogClientImpl::getShardsThatOwnDataForDbAtClust
return _fetchPlacementMetadata(opCtx, ConfigsvrGetHistoricalPlacement(dbName, clusterTime));
}
-std::vector<ShardId> ShardingCatalogClientImpl::getShardsThatOwnDataAtClusterTime(
+HistoricalPlacement ShardingCatalogClientImpl::getShardsThatOwnDataAtClusterTime(
OperationContext* opCtx, const Timestamp& clusterTime) {
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
@@ -1314,7 +1316,7 @@ std::vector<ShardId> ShardingCatalogClientImpl::getShardsThatOwnDataAtClusterTim
return _fetchPlacementMetadata(opCtx, std::move(request));
}
-std::vector<ShardId> ShardingCatalogClientImpl::getHistoricalPlacement(
+HistoricalPlacement ShardingCatalogClientImpl::getHistoricalPlacement(
OperationContext* opCtx,
const Timestamp& atClusterTime,
const boost::optional<NamespaceString>& nss) {
@@ -1323,66 +1325,141 @@ std::vector<ShardId> ShardingCatalogClientImpl::getHistoricalPlacement(
invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
auto configShard = _getConfigShard(opCtx);
/*
- Stage 1. Select only the entry with timestamp <= clusterTime and filter out all nss that are
- not the collection or the database
+ The aggregation pipeline is split in 2 sub pipelines:
+ - one pipeline "exactPlacementData" describing the list of shards currently active in the
+ cluster in which the data belonging to the input nss were placed at the given clusterTime. The
+ primary shard is always included in the list. In case the input nss is empty, the list of shards
+ includes all the shards in the cluster containing data at the given clusterTime. Stages can be
+ described as follow:
+ Stage 1. Select only the entry with timestamp <= clusterTime and filter out
    all nss that are not the collection or the database using a regex. We also exclude all the
    entries related to the fcv marker. In case the whole cluster info is searched, we filter any nss
    with at least 1 character
Stage 2. sort by timestamp
Stage 3. Extract the first document for each database and collection matching the received
- namespace
+ namespace
Stage 4. Discard the entries with empty shards (i.e. the collection was dropped or
- renamed)
+ renamed)
Stage 5. Group all documents and concat shards (this will generate an array of arrays)
- Stage 6. Flatten the array of arrays into a set (this will also remove duplicates)
-
+ Stage 6. Flatten the array of arrays into a set
+ (this will also remove duplicates)
Stage 7. Access to the list of shards currently active in the cluster
Stage 8. Count the number of shards obtained on stage 6 that also appear in the list of
- active shards
+ active shards
Stage 9. Do not return the list of active shards (used only for the count)
- regex=^db(\.collection)?$ // matches db or db.collection ( this is skipped in case the whole
- cluster info is searched)
- [
- {
- "$match": { "timestamp": { "$lte": clusterTime } , "nss" : { $regex: regex} }
- },
- {
- "$sort": { "timestamp": -1 }
- },
- {
+    - one pipeline "approximatePlacementData" retrieving the last "marker" which is a special entry
+ where the nss is empty and the list of shard can be either empty or not.
+ - In case the list is not empty: it means the clusterTime requested was during an fcv
+ upgrade/downgrade. Thus we cannot guarantee the result of 'exactPlacementData' to
+ be correct. We therefore report the list of shards present in the "marker" entry, which
+ correspond to the list of shards in the cluster at the time the fcv upgrade/downgrade started.
+ - The pipeline selects only the fcv markers, sorts by decreasing timestamp and gets the
+ first element.
+
+ regex=^db(\.collection)?$ // matches db or db.collection
+ {
+ "$facet": {
+ "exactPlacementData": [
+ {
+ "$match": {
+ "timestamp": {
+ "$lte": <clusterTime>
+ },
+ "nss": {
+ $regex: regex
+ }
+ }
+ },
+ {
+ "$sort": {
+ "timestamp": -1
+ }
+ },
+ {
"$group": {
_id: "$nss",
- shards: { $first: "$shards" }
+ shards: {
+ $first: "$shards"
+ }
+ }
+ },
+ {
+ "$match": {
+ shards: {
+ $not: {
+ $size: 0
+ }
+ }
}
- },
- { "$match": { shards: { $not: { $size: 0 } } } },
- {
+ },
+ {
"$group": {
_id: "",
- shards: { $push: "$shards" }
+ shards: {
+ $push: "$shards"
+ }
}
- },
- {
+ },
+ {
$project: {
"shards": {
- $reduce: {
- input: "$shards",
- initialValue: [],
- in: { "$setUnion": [ "$$this", "$$value"] }
- }
+ $reduce: {
+ input: "$shards",
+ initialValue: [],
+ in: {
+ "$setUnion": [
+ "$$this",
+ "$$value"
+ ]
+ }
+ }
}
}
- },
- {
- $lookup:
+ },
{
+ $lookup: {
from: "shards",
localField: "shards",
foreignField: "_id",
as: "activeShards"
}
- },
- { "$set" : { "numActiveShards" : { "$size" : "$activeShards" } } },
- { "$project": { "activeShards" : 0 } }
- ]
+ },
+ {
+ "$set": {
+ "numActiveShards": {
+ "$size": "$activeShards"
+ }
+ }
+ },
+ {
+ "$project": {
+ "activeShards": 0,
+ "_id": 0
+ }
+ }
+ ],
+ "approximatePlacementData": [
+ {
+ "$match": {
+ "timestamp": {
                        "$lte": <clusterTime>
                    },
                    "nss": <kConfigsvrPlacementHistoryFcvMarkerNamespace>
                }
+ },
+ {
+ "$sort": {
+ "timestamp": -1
+ }
+ },
+ {
+ "$limit": 1
+ }
+ }
+ ]
+ }
+
*/
auto expCtx = make_intrusive<ExpressionContext>(
@@ -1395,23 +1472,25 @@ std::vector<ShardId> ShardingCatalogClientImpl::getHistoricalPlacement(
std::vector<BSONObj>() /* pipeline */};
expCtx->setResolvedNamespaces(resolvedNamespaces);
+ // Build the pipeline for the exact placement data.
// 1. Get all the history entries prior to the requested time concerning either the collection
// or the parent database.
-
-
+ const auto& kMarkerNss = NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns();
auto matchStage = [&]() {
bool isClusterSearch = !nss.has_value();
if (isClusterSearch)
- return DocumentSourceMatch::create(BSON("timestamp" << BSON("$lte" << atClusterTime)),
+ return DocumentSourceMatch::create(BSON("nss" << BSON("$ne" << kMarkerNss)
+ << "timestamp"
+ << BSON("$lte" << atClusterTime)),
expCtx);
bool isCollectionSearch = !nss->db().empty() && !nss->coll().empty();
auto collMatchExpression = isCollectionSearch ? pcre_util::quoteMeta(nss->coll()) : ".*";
auto regexString =
"^" + pcre_util::quoteMeta(nss->db()) + "(\\." + collMatchExpression + ")?$";
- return DocumentSourceMatch::create(BSON("timestamp" << BSON("$lte" << atClusterTime)
- << "nss"
- << BSON("$regex" << regexString)),
+ return DocumentSourceMatch::create(BSON("nss" << BSON("$regex" << regexString)
+ << "timestamp"
+ << BSON("$lte" << atClusterTime)),
expCtx);
}();
@@ -1467,10 +1546,9 @@ std::vector<ShardId> ShardingCatalogClientImpl::getHistoricalPlacement(
// Stage 9. Disable activeShards field to avoid sending it to the client
auto projectStageDoc = Document{{"activeShards", 0}};
- auto projectStage = DocumentSourceProject::create(
- projectStageDoc.toBson(), expCtx, "getShardsThatOwnDataForNamespaceAtClusterTime");
+ auto projectStageHideActiveShards = DocumentSourceProject::createFromBson(
+ Document{{"$project", projectStageDoc.toBson()}}.toBson().firstElement(), expCtx);
- // Create pipeline
Pipeline::SourceContainer stages;
stages.emplace_back(std::move(matchStage));
stages.emplace_back(std::move(sortStage));
@@ -1480,9 +1558,29 @@ std::vector<ShardId> ShardingCatalogClientImpl::getHistoricalPlacement(
stages.emplace_back(std::move(projectStageFlatten));
stages.emplace_back(std::move(lookupStage));
stages.emplace_back(std::move(setStage));
- stages.emplace_back(std::move(projectStage));
-
- const auto pipeline = Pipeline::create(stages, expCtx);
+ stages.emplace_back(std::move(projectStageHideActiveShards));
+ auto exactDataPipeline = Pipeline::create(stages, expCtx);
+
+ // Build the pipeline for the approximate data.
+ auto MatchFcvMarkerStage = DocumentSourceMatch::create(
+ BSON("timestamp" << BSON("$lte" << atClusterTime) << "nss" << kMarkerNss), expCtx);
+ auto SortFcvMarkerStage = DocumentSourceSort::create(expCtx, BSON("timestamp" << -1));
+ auto LimitFcvMarkerStage = DocumentSourceLimit::create(expCtx, 1);
+ Pipeline::SourceContainer stages2;
+ stages2.emplace_back(std::move(MatchFcvMarkerStage));
+ stages2.emplace_back(std::move(SortFcvMarkerStage));
+ stages2.emplace_back(std::move(LimitFcvMarkerStage));
+ auto approximateDataPipeline = Pipeline::create(stages2, expCtx);
+
+
+ // Build the facet pipeline
+ auto facetStageBson = BSON("approximatePlacementData"
+ << approximateDataPipeline->serializeToBson() << "exactPlacementData"
+ << exactDataPipeline->serializeToBson());
+ auto facetStage = DocumentSourceFacet::createFromBson(
+ Document{{"$facet", std::move(facetStageBson)}}.toBson().firstElement(), expCtx);
+
+ const auto pipeline = Pipeline::create({facetStage}, expCtx);
auto aggRequest = AggregateCommandRequest(NamespaceString::kConfigsvrPlacementHistoryNamespace,
pipeline->serializeToBson());
@@ -1503,27 +1601,46 @@ std::vector<ShardId> ShardingCatalogClientImpl::getHistoricalPlacement(
auto aggrResult = runCatalogAggregation(
opCtx, configShard, aggRequest, readConcern, Shard::kDefaultConfigCommandTimeout);
- // Parse the result
- std::vector<ShardId> activeShards;
- if (!aggrResult.empty()) {
- invariant(aggrResult.size() == 1);
-
- // Extract the result
- const auto& doc = aggrResult.front();
- auto numActiveShards = doc.getField("numActiveShards").Int();
- // Use Obj() instead of Array() to avoid instantiating a temporary std::vector.
- const auto& shards = doc.getField("shards").Obj();
-
- uassert(ErrorCodes::SnapshotTooOld,
- "Part of the history may no longer be retrieved because of one or more removed "
- "shards.",
- numActiveShards == static_cast<int>(shards.nFields()));
-
- for (const auto& shard : shards) {
- activeShards.push_back(shard.String());
+ auto extractShardIds = [](const BSONObj& obj, const std::string& pipelineName) {
+ // each sub-pipeline of $facet produces an array with a single element containing a 'shards'
+ // field. for this aggregation, every pipeline result is an array of one element
+ auto pipelineResult = obj[pipelineName].Array();
+ if (pipelineResult.empty()) {
+ return std::vector<ShardId>{};
+ } else {
+ auto shards = pipelineResult[0]["shards"].Obj();
+ std::vector<ShardId> activeShards;
+ for (const auto& shard : shards) {
+ activeShards.push_back(shard.String());
+ }
+ return activeShards;
}
+ };
+
+ invariant(aggrResult.size() == 1);
+ // if there is an fcv marker and the shards array is not empty, return the shards
+ // array, declaring the retrieved data as "not exact".
+ auto fcvMarkerShards = extractShardIds(aggrResult.front(), "approximatePlacementData");
+ if (!fcvMarkerShards.empty()) {
+ return HistoricalPlacement{fcvMarkerShards, false};
+ }
+
+ // if the fcv marker shards array is empty, return the shards array from the exact data
+ auto exactShards = extractShardIds(aggrResult.front(), "exactPlacementData");
+ if (exactShards.empty()) {
+ return HistoricalPlacement{{}, true};
}
- return activeShards;
+
+ // check that the shards in the exact data are all active shards
+ const int numActiveShards =
+ aggrResult.front()["exactPlacementData"].Array()[0]["numActiveShards"].Int();
+
+ uassert(ErrorCodes::SnapshotTooOld,
+ "Part of the history may no longer be retrieved because of one or more removed "
+ "shards.",
+ numActiveShards == static_cast<int>(exactShards.size()));
+
+ return HistoricalPlacement{exactShards, true};
}
std::shared_ptr<Shard> ShardingCatalogClientImpl::_getConfigShard(OperationContext* opCtx) {
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index 6bd6f16bcc3..34b535000f0 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -34,7 +34,6 @@
#include "mongo/platform/mutex.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/request_types/get_historical_placement_info_gen.h"
namespace mongo {
@@ -181,7 +180,7 @@ public:
3. An empty array if the collection and the database are not found
* In case at least one of the shard is no longer active, a SnapshotTooOld error is thrown.
*/
- std::vector<ShardId> getShardsThatOwnDataForCollAtClusterTime(
+ HistoricalPlacement getShardsThatOwnDataForCollAtClusterTime(
OperationContext* opCtx,
const NamespaceString& collName,
const Timestamp& clusterTime) override;
@@ -193,7 +192,7 @@ public:
2. An empty array if the collection and the database are not found
* In case at least one of the shard is no longer active, a SnapshotTooOld error is thrown.
*/
- std::vector<ShardId> getShardsThatOwnDataForDbAtClusterTime(
+ HistoricalPlacement getShardsThatOwnDataForDbAtClusterTime(
OperationContext* opCtx,
const NamespaceString& dbName,
const Timestamp& clusterTime) override;
@@ -202,10 +201,10 @@ public:
* Returns the list of active shards that still contains data or that used to contain data
* at clusterTime >= input clusterTime based on placementHistory
*/
- std::vector<ShardId> getShardsThatOwnDataAtClusterTime(OperationContext* opCtx,
- const Timestamp& clusterTime) override;
+ HistoricalPlacement getShardsThatOwnDataAtClusterTime(OperationContext* opCtx,
+ const Timestamp& clusterTime) override;
- std::vector<ShardId> getHistoricalPlacement(
+ HistoricalPlacement getHistoricalPlacement(
OperationContext* opCtx,
const Timestamp& atClusterTime,
const boost::optional<NamespaceString>& nss) override;
@@ -257,8 +256,8 @@ private:
* TODO (SERVER-73029): Remove the method - and replace its invocations with
* runPlacementHistoryAggregation()
*/
- std::vector<ShardId> _fetchPlacementMetadata(OperationContext* opCtx,
- ConfigsvrGetHistoricalPlacement&& request);
+ HistoricalPlacement _fetchPlacementMetadata(OperationContext* opCtx,
+ ConfigsvrGetHistoricalPlacement&& request);
/**
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index 60feb3833a3..b803b561185 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -209,22 +209,22 @@ ShardingCatalogClientMock::_exhaustiveFindOnConfig(OperationContext* opCtx,
return {ErrorCodes::InternalError, "Method not implemented"};
}
-std::vector<ShardId> ShardingCatalogClientMock::getShardsThatOwnDataForCollAtClusterTime(
+HistoricalPlacement ShardingCatalogClientMock::getShardsThatOwnDataForCollAtClusterTime(
OperationContext* opCtx, const NamespaceString& collName, const Timestamp& clusterTime) {
uasserted(ErrorCodes::InternalError, "Method not implemented");
}
-std::vector<ShardId> ShardingCatalogClientMock::getShardsThatOwnDataForDbAtClusterTime(
+HistoricalPlacement ShardingCatalogClientMock::getShardsThatOwnDataForDbAtClusterTime(
OperationContext* opCtx, const NamespaceString& dbName, const Timestamp& clusterTime) {
uasserted(ErrorCodes::InternalError, "Method not implemented");
}
-std::vector<ShardId> ShardingCatalogClientMock::getShardsThatOwnDataAtClusterTime(
+HistoricalPlacement ShardingCatalogClientMock::getShardsThatOwnDataAtClusterTime(
OperationContext* opCtx, const Timestamp& clusterTime) {
uasserted(ErrorCodes::InternalError, "Method not implemented");
}
-std::vector<ShardId> ShardingCatalogClientMock::getHistoricalPlacement(
+HistoricalPlacement ShardingCatalogClientMock::getHistoricalPlacement(
OperationContext* opCtx,
const Timestamp& atClusterTime,
const boost::optional<NamespaceString>& nss) {
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 30cf2ed0826..eb88d04989b 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -149,20 +149,20 @@ public:
const LogicalTime& newerThanThis,
repl::ReadConcernLevel readConcernLevel) override;
- std::vector<ShardId> getShardsThatOwnDataForCollAtClusterTime(
+ HistoricalPlacement getShardsThatOwnDataForCollAtClusterTime(
OperationContext* opCtx,
const NamespaceString& collName,
const Timestamp& clusterTime) override;
- std::vector<ShardId> getShardsThatOwnDataForDbAtClusterTime(
+ HistoricalPlacement getShardsThatOwnDataForDbAtClusterTime(
OperationContext* opCtx,
const NamespaceString& dbName,
const Timestamp& clusterTime) override;
- std::vector<ShardId> getShardsThatOwnDataAtClusterTime(OperationContext* opCtx,
- const Timestamp& clusterTime) override;
+ HistoricalPlacement getShardsThatOwnDataAtClusterTime(OperationContext* opCtx,
+ const Timestamp& clusterTime) override;
- std::vector<ShardId> getHistoricalPlacement(
+ HistoricalPlacement getHistoricalPlacement(
OperationContext* opCtx,
const Timestamp& atClusterTime,
const boost::optional<NamespaceString>& nss) override;
diff --git a/src/mongo/s/catalog/type_namespace_placement.idl b/src/mongo/s/catalog/type_namespace_placement.idl
index 70efc024470..f745bbf1d09 100644
--- a/src/mongo/s/catalog/type_namespace_placement.idl
+++ b/src/mongo/s/catalog/type_namespace_placement.idl
@@ -61,3 +61,20 @@ structs:
description: "The list of shard IDs containing one or more chunks of 'nss'
at time 'timestamp' (may be empty)."
+ HistoricalPlacement:
+ description: "Result of the placement search query at a point in time over the
+ config.placementHistory"
+ strict: false
+ fields:
+ shards:
+ type: array<shard_id>
+ description: "The set of shard IDs containing data on the requested nss/cluster at
+ the point in time"
+ isExact:
+ type: bool
+ description: "When true, the returned list of shards is an accurate recording of
+ the placement info at the requested point in time.
+ When false, the result value represents an approximation based on
+ a present/past reading of config.shards"
+
+
diff --git a/src/mongo/s/request_types/get_historical_placement_info.idl b/src/mongo/s/request_types/get_historical_placement_info.idl
index aca7f49a4cf..c2e6026e93e 100644
--- a/src/mongo/s/request_types/get_historical_placement_info.idl
+++ b/src/mongo/s/request_types/get_historical_placement_info.idl
@@ -33,27 +33,15 @@ global:
imports:
- "mongo/db/basic_types.idl"
- "mongo/s/sharding_types.idl"
-
+ - "mongo/s/catalog/type_namespace_placement.idl"
+
structs:
-
ConfigsvrGetHistoricalPlacementResponse:
description: "Response for the _configsvrGetHistoricalPlacement command"
strict: false
is_command_reply: true
fields:
- shards:
- type: array<shard_id>
- description: "The set of shard IDs containing data on the requested nss/cluster at
- the point in time"
- isExact:
- type: bool
- description: "When true, the returned list of shards is an accurate recording of
- the placement info at the requested point in time.
- When false, the result value represents an approximation based on
- a present/past reading of config.shards"
- default: true
-
-
+ historicalPlacement: HistoricalPlacement
commands:
_configsvrGetHistoricalPlacement: