author     Paolo Polato <paolo.polato@mongodb.com>           2022-09-27 08:11:28 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-09-27 08:41:57 +0000
commit     98edae346676d65c402467f38f4563f1aa163bce (patch)
tree       adeb90e1b8c8654508d6b679baad2ee4ae335f9c
parent     2cfc78ff27a7ed95e7632f7749f8d09383be308b (diff)
download   mongo-98edae346676d65c402467f38f4563f1aa163bce.tar.gz
SERVER-68926 Generate historical placement data for createDatabase
-rw-r--r--  jstests/sharding/store_historical_placement_data.js                      39
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp   57
2 files changed, 82 insertions, 14 deletions
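
When featureFlagHistoricalPlacementShardingCatalog is enabled, createDatabase now inserts the config.databases entry and a matching placement document into config.placementHistory within a single transaction. A minimal sketch of the resulting placement document follows; only the nss, timestamp and shards fields are asserted on by the new test, and the concrete values shown are illustrative placeholders, not output of this patch:

    // Hypothetical config.placementHistory entry produced by
    // {enableSharding: "test"} with shard0 chosen as the primary shard.
    {
        nss: "test",                          // database targeted by the DDL operation
        timestamp: Timestamp(1664265088, 5),  // placeholder; equals config.databases version.timestamp
        shards: ["shard0000"]                 // single element: the primary shard at creation time
    }
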
diff --git a/jstests/sharding/store_historical_placement_data.js b/jstests/sharding/store_historical_placement_data.js
new file mode 100644
index 00000000000..65a02f70f2a
--- /dev/null
+++ b/jstests/sharding/store_historical_placement_data.js
@@ -0,0 +1,39 @@
+/*
+ * Verifies that each Sharding DDL operation that completes successfully also
+ * produces a document detailing the changes to the placement of the targeted nss.
+ */
+(function() {
+"use strict";
+load("jstests/libs/feature_flag_util.js");
+
+const st = new ShardingTest({shards: 2});
+// TODO SERVER-69106 remove the logic that skips the test execution
+const historicalPlacementDataFeatureFlag = FeatureFlagUtil.isEnabled(
+ st.configRS.getPrimary().getDB('admin'), "HistoricalPlacementShardingCatalog");
+if (!historicalPlacementDataFeatureFlag) {
+ jsTestLog("Skipping as featureFlagHistoricalPlacementShardingCatalog is disabled");
+ st.stop();
+ return;
+}
+
+const dbName = 'test';
+const configDB = st.s.getDB('config');
+
+jsTest.log('Verifying placement data generated by createDatabase()');
+assert.commandWorked(
+ st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.shardName}));
+
+const placementHistoryEntries = configDB.placementHistory.find().toArray();
+assert.eq(placementHistoryEntries.length, 1);
+const placementDetails = placementHistoryEntries[0];
+const databaseEntries = configDB.databases.find({_id: placementDetails.nss}).toArray();
+assert.eq(1, databaseEntries.length);
+const databaseDetails = databaseEntries[0];
+assert(timestampCmp(databaseDetails.version.timestamp, placementDetails.timestamp) == 0);
+assert.eq(1, placementDetails.shards.length);
+assert.eq(databaseDetails.primary, placementDetails.shards[0]);
+
+st.stop();
+}());
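
The test skips itself when featureFlagHistoricalPlacementShardingCatalog is disabled (see the TODO referencing SERVER-69106). For a local run while the flag is not yet on by default, the sketch below shows one way to force it at startup; it assumes the standard setParameter mechanism for feature flags and ShardingTest's configOptions pass-through, and is not part of this patch:

    // Hypothetical fixture setup that forces the feature flag on; only the config
    // server needs it for the flag check and the catalog writes exercised here.
    const st = new ShardingTest({
        shards: 2,
        other: {
            configOptions:
                {setParameter: {featureFlagHistoricalPlacementShardingCatalog: true}}
        }
    });
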
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index 95328a9b248..868e94b1a77 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/write_concern.h"
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/type_database_gen.h"
+#include "mongo/s/catalog/type_namespace_placement_gen.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/grid.h"
@@ -152,13 +153,14 @@ DatabaseType ShardingCatalogManager::createDatabase(
const auto catalogClient = Grid::get(opCtx)->catalogClient();
const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
- // Check if a database already exists with the same name (case sensitive), and if so, return the
- // existing entry.
+ // Check if a database already exists with the same name (case insensitive), and if so, return
+ // the existing entry.
BSONObjBuilder queryBuilder;
queryBuilder.appendRegex(
DatabaseType::kNameFieldName, "^{}$"_format(pcre_util::quoteMeta(dbName)), "i");
auto dbDoc = client.findOne(NamespaceString::kConfigDatabasesNamespace, queryBuilder.obj());
+ bool waitForMajorityAfterAccessingCatalog = true;
auto const [primaryShardPtr, database] = [&] {
if (!dbDoc.isEmpty()) {
auto actualDb = DatabaseType::parse(IDLParserContext("DatabaseType"), dbDoc);
@@ -204,23 +206,50 @@ DatabaseType ShardingCatalogManager::createDatabase(
"Registering new database in sharding catalog",
"db"_attr = db);
- // Do this write with majority writeConcern to guarantee that the shard sees the write
- // when it receives the _flushDatabaseCacheUpdates.
- uassertStatusOK(
- catalogClient->insertConfigDocument(opCtx,
- NamespaceString::kConfigDatabasesNamespace,
- db.toBSON(),
- ShardingCatalogClient::kMajorityWriteConcern));
+ if (feature_flags::gHistoricalPlacementShardingCatalog.isEnabled(
+ serverGlobalParams.featureCompatibility)) {
+ NamespacePlacementType placementInfo(
+ NamespaceString(dbName),
+ clusterTime,
+ std::vector<mongo::ShardId>{shardPtr->getId()});
+ withTransaction(
+ opCtx,
+ NamespaceString::kConfigDatabasesNamespace,
+ [&](OperationContext* opCtx, TxnNumber txnNumber) {
+ insertConfigDocuments(opCtx,
+ NamespaceString::kConfigDatabasesNamespace,
+ {db.toBSON()},
+ txnNumber);
+ insertConfigDocuments(opCtx,
+ NamespaceString::kConfigsvrPlacementHistoryNamespace,
+ {placementInfo.toBSON()},
+ txnNumber);
+ });
+
+ // Skip the wait for majority; the transaction semantics already guarantee the
+ // needed write concern.
+ waitForMajorityAfterAccessingCatalog = false;
+ } else {
+ // Do this write with majority writeConcern to guarantee that the shard sees the
+ // write when it receives the _flushDatabaseCacheUpdates.
+ uassertStatusOK(catalogClient->insertConfigDocument(
+ opCtx,
+ NamespaceString::kConfigDatabasesNamespace,
+ db.toBSON(),
+ ShardingCatalogClient::kMajorityWriteConcern));
+ }
return std::make_pair(shardPtr, db);
}
}();
- WriteConcernResult unusedResult;
- uassertStatusOK(waitForWriteConcern(opCtx,
- replClient.getLastOp(),
- ShardingCatalogClient::kMajorityWriteConcern,
- &unusedResult));
+ if (waitForMajorityAfterAccessingCatalog) {
+ WriteConcernResult unusedResult;
+ uassertStatusOK(waitForWriteConcern(opCtx,
+ replClient.getLastOp(),
+ ShardingCatalogClient::kMajorityWriteConcern,
+ &unusedResult));
+ }
// Note, making the primary shard refresh its databaseVersion here is not required for
// correctness, since either: