author    jannaerin <golden.janna@gmail.com>  2019-07-03 13:24:58 -0400
committer jannaerin <golden.janna@gmail.com>  2019-07-08 17:09:04 -0400
commit    c6468f7d3ac783b4bcdeb6744fc976c8ecee2969
tree      1f425d3bc85d26cd24ff1fdf0d8c9ad81aca8d66
parent    13434801be12a72d370147aed4c2ac194e658062
download  mongo-c6468f7d3ac783b4bcdeb6744fc976c8ecee2969.tar.gz
SERVER-34431 MoveDatabaseShardingState into its own map
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml  |   1
-rw-r--r--  jstests/sharding/database_and_shard_versioning_all_commands.js  |  64
-rw-r--r--  jstests/sharding/database_versioning_safe_secondary_reads.js  | 119
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp  |  31
-rw-r--r--  src/mongo/db/catalog_raii.cpp  |  14
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp  |  28
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp  |   9
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_database_command.cpp  |  26
-rw-r--r--  src/mongo/db/s/database_sharding_state.cpp  |  79
-rw-r--r--  src/mongo/db/s/database_sharding_state.h  |  17
-rw-r--r--  src/mongo/db/s/flush_database_cache_updates_command.cpp  |  13
-rw-r--r--  src/mongo/db/s/get_database_version_command.cpp  |  11
-rw-r--r--  src/mongo/db/s/move_primary_source_manager.cpp  |  36
-rw-r--r--  src/mongo/db/s/shard_filtering_metadata_refresh.cpp  |  44
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.cpp  |  16
15 files changed, 276 insertions(+), 232 deletions(-)
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
index 5c09197d829..72ebad3a59e 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
@@ -141,6 +141,7 @@ selector:
- jstests/sharding/explain_exec_stats_on_shards.js
- jstests/sharding/refine_collection_shard_key_basic.js
- jstests/sharding/move_primary_clone_test.js
+ - jstests/sharding/database_versioning_safe_secondary_reads.js
# Enable if SERVER-41813 is backported or 4.4 becomes last-stable
- jstests/sharding/invalid_system_views_sharded_collection.js
diff --git a/jstests/sharding/database_and_shard_versioning_all_commands.js b/jstests/sharding/database_and_shard_versioning_all_commands.js
index a1627387aa1..6927a756252 100644
--- a/jstests/sharding/database_and_shard_versioning_all_commands.js
+++ b/jstests/sharding/database_and_shard_versioning_all_commands.js
@@ -449,7 +449,7 @@
testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
});
- class AllCommandsTestRunner {
+ class TestRunner {
constructor() {
this.st = new ShardingTest(this.getShardingTestOptions());
let db = this.st.s.getDB(dbName);
@@ -474,10 +474,26 @@
}
getShardingTestOptions() {
- throw new Error("not implemented");
+ return {shards: 2};
}
makeShardDatabaseCacheStale() {
- throw new Error("not implemented");
+ let fromShard = this.st.getPrimaryShard(dbName);
+ let toShard = this.st.getOther(fromShard);
+
+ this.primaryShard = toShard;
+ this.previousDbVersion = this.dbVersion;
+
+ assert.commandWorked(this.st.s0.adminCommand({movePrimary: dbName, to: toShard.name}));
+ this.dbVersion =
+ this.st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
+
+ // The dbVersion should have changed due to the movePrimary operation.
+ assert.eq(this.dbVersion.lastMod, this.previousDbVersion.lastMod + 1);
+
+ // The fromShard should have cleared its in-memory database info.
+ const res = fromShard.adminCommand({getDatabaseVersion: dbName});
+ assert.commandWorked(res);
+ assert.eq({}, res.dbVersion);
}
assertSentDatabaseVersion(testCase, commandProfile) {
@@ -586,43 +602,7 @@
}
}
- class DropDatabaseTestRunner extends AllCommandsTestRunner {
- getShardingTestOptions() {
- return {shards: 1};
- }
-
- makeShardDatabaseCacheStale() {
- // Drop the database from the shard to clear the shard's cached in-memory database info.
- assert.commandWorked(this.primaryShard.getDB(dbName).runCommand({dropDatabase: 1}));
- }
- }
-
- class MovePrimaryTestRunner extends AllCommandsTestRunner {
- getShardingTestOptions() {
- return {shards: 2};
- }
-
- makeShardDatabaseCacheStale() {
- let fromShard = this.st.getPrimaryShard(dbName);
- let toShard = this.st.getOther(fromShard);
-
- this.primaryShard = toShard;
- this.previousDbVersion = this.dbVersion;
-
- assert.commandWorked(this.st.s0.adminCommand({movePrimary: dbName, to: toShard.name}));
- this.dbVersion =
- this.st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
-
- // The dbVersion should have changed due to the movePrimary operation.
- assert.eq(this.dbVersion.lastMod, this.previousDbVersion.lastMod + 1);
- }
- }
-
- let dropDatabaseTestRunner = new DropDatabaseTestRunner();
- dropDatabaseTestRunner.runCommands();
- dropDatabaseTestRunner.shutdown();
-
- let movePrimaryTestRunner = new MovePrimaryTestRunner();
- movePrimaryTestRunner.runCommands();
- movePrimaryTestRunner.shutdown();
+ let testRunner = new TestRunner();
+ testRunner.runCommands();
+ testRunner.shutdown();
})();
diff --git a/jstests/sharding/database_versioning_safe_secondary_reads.js b/jstests/sharding/database_versioning_safe_secondary_reads.js
index 301f246f4e7..f97896b3f45 100644
--- a/jstests/sharding/database_versioning_safe_secondary_reads.js
+++ b/jstests/sharding/database_versioning_safe_secondary_reads.js
@@ -13,8 +13,8 @@
const st = new ShardingTest({
mongos: 2,
- rs0: {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]},
- rs1: {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}]},
+ rs0: {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0}}]},
+ rs1: {nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0}}]},
verbose: 2
});
@@ -30,42 +30,55 @@
checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
// Use 'enableSharding' to create the database only in the sharding catalog (the database will
- // not exist on any shards).
+ // not exist on any shards). Manually update the 'primary' field in the new dbEntry in
+ // config.databases so that we know which shard is the primary.
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ assert.commandWorked(st.s.getDB("config").getCollection("databases").update({_id: dbName}, {
+ $set: {primary: st.rs1.name}
+ }));
// Check that a command that attaches databaseVersion returns empty results, even though the
- // database does not actually exist on any shard (because the version won't be checked).
+ // database does not actually exist on any shard.
assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
- // Once SERVER-34431 goes in, this should have caused the primary shard's primary to refresh its
- // in-memory and on-disk caches.
+ // Ensure the current primary shard's primary has written the new database entry to disk.
+ st.rs1.getPrimary().adminCommand({_flushDatabaseCacheUpdates: dbName, syncFromConfig: false});
+
+ // Ensure the database entry on the current primary shard has replicated to the secondary.
+ st.rs1.awaitReplication();
+
+ const dbEntry0 = st.s.getDB("config").getCollection("databases").findOne({_id: dbName});
+
+ // The primary shard's primary should have refreshed its in-memory and on-disk caches. The
+ // primary shard's secondary should have refreshed its on-disk entry.
checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+ checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry0.version);
checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry0);
checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry0);
assert.commandWorked(st.s.getDB(dbName).runCommand(
{listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
- // Once SERVER-34431 goes in, this should have caused the primary shard's secondary to refresh
- // its in-memory cache (its on-disk cache was updated when the primary refreshed, above).
+ // The primary shard's secondary should have refreshed its in-memory cache (its on-disk cache
+ // was updated when the primary refreshed, above).
checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
+ checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry0.version);
checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
+ checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry0.version);
checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry0);
checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry0);
- // Use 'movePrimary' to ensure shard0 is the primary shard. This will create the database on the
- // shards only if shard0 was not already the primary shard.
+ // Use 'movePrimary' to ensure shard0 is the primary shard.
st.ensurePrimaryShard(dbName, st.shard0.shardName);
const dbEntry1 = st.s.getDB("config").getCollection("databases").findOne({_id: dbName});
+ assert.eq(dbEntry1.version.uuid, dbEntry0.version.uuid);
+ assert.eq(dbEntry1.version.lastMod, dbEntry0.version.lastMod + 1);
// Ensure the database actually gets created on the primary shard by creating a collection in
// it.
@@ -82,15 +95,16 @@
// Ensure the database entry on the current primary shard has replicated to the secondary.
st.rs0.awaitReplication();
- // The primary shard's primary should have refreshed.
+ // The primary shard's primary should have refreshed its in-memory cache and on-disk entry. The
+ // primary shard's secondary should have refreshed its on-disk entry.
checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1.version);
checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry0);
checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry0);
// Now run a command that attaches databaseVersion with readPref=secondary to make the current
// primary shard's secondary refresh its in-memory database version from its on-disk entry.
@@ -98,15 +112,15 @@
assert.commandWorked(st.s.getDB(dbName).runCommand(
{listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
- // The primary shard's secondary should have refreshed.
+ // The primary shard's secondary should have refreshed its in-memory cache.
checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1.version);
checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1.version);
checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry0);
checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry0);
// Make "staleMongos" load the stale database info into memory.
const freshMongos = st.s0;
@@ -135,9 +149,9 @@
checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry0);
checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry1);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry0);
// Run listCollections with readPref=secondary from the stale mongos. First, this should cause
// the old primary shard's secondary to provoke the old primary shard's primary to refresh. Then
@@ -157,8 +171,7 @@
checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
- // Ensure that dropping the database drops it from all shards, which clears their in-memory
- // caches but not their on-disk caches.
+ // Drop the database.
jsTest.log("About to drop database from the cluster");
assert.commandWorked(freshMongos.getDB(dbName).runCommand({dropDatabase: 1}));
@@ -166,19 +179,16 @@
st.rs0.awaitReplication();
st.rs1.awaitReplication();
- // Once SERVER-34431 goes in, this should not have caused the in-memory versions to be cleared.
+ // The in-memory caches will be cleared on both shards' primary nodes and the on-disk database
+ // entries will be cleared on all shards.
checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
-
- // Confirm that we have a bug (SERVER-34431), where if a database is dropped and recreated on a
- // different shard, a stale mongos that has cached the old database's primary shard will *not*
- // be routed to the new database's primary shard (and will see an empty database).
+ checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
// Use 'enableSharding' to create the database only in the sharding catalog (the database will
// not exist on any shards).
@@ -186,37 +196,42 @@
// Simulate that the database was created on 'shard0' by directly modifying the database entry
// (we cannot use movePrimary, since movePrimary creates the database on the shards).
- const dbEntry = st.s.getDB("config").getCollection("databases").findOne({_id: dbName}).version;
assert.writeOK(st.s.getDB("config").getCollection("databases").update({_id: dbName}, {
$set: {primary: st.shard0.shardName}
}));
+ const dbEntry = st.s.getDB("config").getCollection("databases").findOne({_id: dbName});
assert.commandWorked(st.s.getDB(dbName).runCommand({listCollections: 1}));
- // Once SERVER-34431 goes in, this should have caused the primary shard's primary to refresh its
- // in-memory and on-disk caches.
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+ // Ensure the current primary shard's primary has written the new database entry to disk.
+ st.rs0.getPrimary().adminCommand({_flushDatabaseCacheUpdates: dbName, syncFromConfig: false});
+
+ // Ensure the database entry on the current primary shard has replicated to the secondary.
+ st.rs0.awaitReplication();
+
+ // The primary shard's primary should have refreshed its in-memory and on-disk caches.
+ checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry.version);
checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
+ checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry);
+ checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry);
+ checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
assert.commandWorked(st.s.getDB(dbName).runCommand(
{listCollections: 1, $readPreference: {mode: "secondary"}, readConcern: {level: "local"}}));
- // Once SERVER-34431 goes in, this should have caused the primary shard's secondary to refresh
- // its in-memory cache (its on-disk cache was already updated when the primary refreshed).
- checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, {});
+ // The primary shard's secondary should have refreshed its in-memory cache (its on-disk cache
+ // was already updated when the primary refreshed).
+ checkInMemoryDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry.version);
checkInMemoryDatabaseVersion(st.rs1.getPrimary(), dbName, {});
- checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, {});
+ checkInMemoryDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry.version);
checkInMemoryDatabaseVersion(st.rs1.getSecondary(), dbName, {});
- checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry2);
- checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, dbEntry2);
+ checkOnDiskDatabaseVersion(st.rs0.getPrimary(), dbName, dbEntry);
+ checkOnDiskDatabaseVersion(st.rs1.getPrimary(), dbName, undefined);
+ checkOnDiskDatabaseVersion(st.rs0.getSecondary(), dbName, dbEntry);
+ checkOnDiskDatabaseVersion(st.rs1.getSecondary(), dbName, undefined);
st.stop();
})();
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index bb3d6f97f32..2492fbac0d4 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -108,12 +108,6 @@ Status checkSourceAndTargetNamespaces(OperationContext* opCtx,
if (!db)
return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist");
- {
- auto& dss = DatabaseShardingState::get(db);
- auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, &dss);
- dss.checkDbVersion(opCtx, dssLock);
- }
-
Collection* const sourceColl = db->getCollection(opCtx, source);
if (!sourceColl) {
if (ViewCatalog::get(db)->lookup(opCtx, source.ns()))
@@ -286,6 +280,13 @@ Status renameCollectionWithinDB(OperationContext* opCtx,
DisableDocumentValidation validationDisabler(opCtx);
Lock::DBLock dbWriteLock(opCtx, source.db(), MODE_IX);
+
+ {
+ auto dss = DatabaseShardingState::get(opCtx, source.db());
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
+ dss->checkDbVersion(opCtx, dssLock);
+ }
+
boost::optional<Lock::CollectionLock> sourceLock;
boost::optional<Lock::CollectionLock> targetLock;
// To prevent deadlock, always lock system.views collection in the end because concurrent
@@ -334,6 +335,12 @@ Status renameCollectionWithinDBForApplyOps(OperationContext* opCtx,
Lock::DBLock dbWriteLock(opCtx, source.db(), MODE_X);
+ {
+ auto dss = DatabaseShardingState::get(opCtx, source.db());
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
+ dss->checkDbVersion(opCtx, dssLock);
+ }
+
auto status = checkSourceAndTargetNamespaces(opCtx, source, target, options);
if (!status.isOK())
return status;
@@ -420,18 +427,18 @@ Status renameBetweenDBs(OperationContext* opCtx,
if (!opCtx->lockState()->isW())
globalWriteLock.emplace(opCtx);
+ {
+ auto dss = DatabaseShardingState::get(opCtx, source.db());
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
+ dss->checkDbVersion(opCtx, dssLock);
+ }
+
DisableDocumentValidation validationDisabler(opCtx);
auto sourceDB = DatabaseHolder::get(opCtx)->getDb(opCtx, source.db());
if (!sourceDB)
return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist");
- {
- auto& dss = DatabaseShardingState::get(sourceDB);
- auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, &dss);
- dss.checkDbVersion(opCtx, dssLock);
- }
-
boost::optional<AutoStatsTracker> statsTracker(boost::in_place_init,
opCtx,
source,
diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp
index b3f109c90e5..3fe1fee4bc2 100644
--- a/src/mongo/db/catalog_raii.cpp
+++ b/src/mongo/db/catalog_raii.cpp
@@ -49,11 +49,9 @@ AutoGetDb::AutoGetDb(OperationContext* opCtx, StringData dbName, LockMode mode,
auto databaseHolder = DatabaseHolder::get(opCtx);
return databaseHolder->getDb(opCtx, dbName);
}()) {
- if (_db) {
- auto& dss = DatabaseShardingState::get(_db);
- auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, &dss);
- dss.checkDbVersion(opCtx, dssLock);
- }
+ auto dss = DatabaseShardingState::get(opCtx, dbName);
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
+ dss->checkDbVersion(opCtx, dssLock);
}
AutoGetCollection::AutoGetCollection(OperationContext* opCtx,
@@ -183,9 +181,9 @@ AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* opCtx,
_db = databaseHolder->openDb(opCtx, dbName, &_justCreated);
}
- auto& dss = DatabaseShardingState::get(_db);
- auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, &dss);
- dss.checkDbVersion(opCtx, dssLock);
+ auto dss = DatabaseShardingState::get(opCtx, dbName);
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
+ dss->checkDbVersion(opCtx, dssLock);
}
ConcealCollectionCatalogChangesBlock::ConcealCollectionCatalogChangesBlock(OperationContext* opCtx)
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 227c4d843a1..ea679e2bca7 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -339,6 +339,15 @@ bool indexesAlreadyExist(OperationContext* opCtx,
}
/**
+ * Checks database sharding state. Throws exception on error.
+ */
+void checkDatabaseShardingState(OperationContext* opCtx, StringData dbName) {
+ auto dss = DatabaseShardingState::get(opCtx, dbName);
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
+ dss->checkDbVersion(opCtx, dssLock);
+}
+
+/**
* Opens or creates database for index creation.
* On database creation, the lock will be made exclusive.
*/
@@ -353,16 +362,9 @@ Database* getOrCreateDatabase(OperationContext* opCtx, StringData dbName, Lock::
// replication state from changing. Abandon the current snapshot to see changed metadata.
opCtx->recoveryUnit()->abandonSnapshot();
dbLock->relockWithMode(MODE_X);
- return databaseHolder->openDb(opCtx, dbName);
-}
-/**
- * Checks database sharding state. Throws exception on error.
- */
-void checkDatabaseShardingState(OperationContext* opCtx, Database* db) {
- auto& dss = DatabaseShardingState::get(db);
- auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, &dss);
- dss.checkDbVersion(opCtx, dssLock);
+ checkDatabaseShardingState(opCtx, dbName);
+ return databaseHolder->openDb(opCtx, dbName);
}
/**
@@ -428,6 +430,7 @@ bool runCreateIndexes(OperationContext* opCtx,
// Do not use AutoGetOrCreateDb because we may relock the database in mode X.
Lock::DBLock dbLock(opCtx, ns.db(), MODE_IX);
+ checkDatabaseShardingState(opCtx, ns.db());
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns)) {
uasserted(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating indexes in " << ns.ns());
@@ -439,8 +442,6 @@ bool runCreateIndexes(OperationContext* opCtx,
auto db = getOrCreateDatabase(opCtx, ns.db(), &dbLock);
- checkDatabaseShardingState(opCtx, db);
-
opCtx->recoveryUnit()->abandonSnapshot();
boost::optional<Lock::CollectionLock> exclusiveCollectionLock(
boost::in_place_init, opCtx, ns, MODE_X);
@@ -566,8 +567,6 @@ bool runCreateIndexes(OperationContext* opCtx,
db = databaseHolder->getDb(opCtx, ns.db());
invariant(db->getCollection(opCtx, ns));
- checkDatabaseShardingState(opCtx, db);
-
// Perform the third and final drain while holding the exclusive collection lock.
uassertStatusOK(indexer.drainBackgroundWrites(opCtx));
@@ -624,6 +623,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
{
// Do not use AutoGetOrCreateDb because we may relock the database in mode X.
Lock::DBLock dbLock(opCtx, ns.db(), MODE_IX);
+ checkDatabaseShardingState(opCtx, ns.db());
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns)) {
uasserted(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating indexes in " << ns.ns());
@@ -635,8 +635,6 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
auto db = getOrCreateDatabase(opCtx, ns.db(), &dbLock);
- checkDatabaseShardingState(opCtx, db);
-
opCtx->recoveryUnit()->abandonSnapshot();
Lock::CollectionLock collLock(opCtx, ns, MODE_X);
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 935462681ec..2b424821c7b 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -925,10 +925,11 @@ void IndexBuildsCoordinator::_buildIndex(OperationContext* opCtx,
// We hold the database MODE_IX lock throughout the index build.
auto db = DatabaseHolder::get(opCtx)->getDb(opCtx, nss.db());
- if (db) {
- auto& dss = DatabaseShardingState::get(db);
- auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, &dss);
- dss.checkDbVersion(opCtx, dssLock);
+
+ {
+ auto dss = DatabaseShardingState::get(opCtx, nss.db());
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
+ dss->checkDbVersion(opCtx, dssLock);
}
invariant(db,
diff --git a/src/mongo/db/s/config/configsvr_drop_database_command.cpp b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
index daec057bd58..5440eec9f83 100644
--- a/src/mongo/db/s/config/configsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
@@ -34,6 +34,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/repl_client_info.h"
+#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/sharding_logging.h"
#include "mongo/s/catalog/dist_lock_manager.h"
@@ -156,15 +157,13 @@ public:
_dropDatabaseFromShard(opCtx, dbType.getPrimary(), dbname);
// Drop the database from each of the remaining shards.
- {
- std::vector<ShardId> allShardIds;
- Grid::get(opCtx)->shardRegistry()->getAllShardIdsNoReload(&allShardIds);
-
- for (const ShardId& shardId : allShardIds) {
- _dropDatabaseFromShard(opCtx, shardId, dbname);
- }
+ std::vector<ShardId> allShardIds;
+ Grid::get(opCtx)->shardRegistry()->getAllShardIdsNoReload(&allShardIds);
+ for (const ShardId& shardId : allShardIds) {
+ _dropDatabaseFromShard(opCtx, shardId, dbname);
}
+
// Remove the database entry from the metadata.
const Status status =
catalogClient->removeConfigDocuments(opCtx,
@@ -174,6 +173,19 @@ public:
uassertStatusOKWithContext(
status, str::stream() << "Could not remove database '" << dbname << "' from metadata");
+ // Send _flushDatabaseCacheUpdates to all shards.
+ for (const ShardId& shardId : allShardIds) {
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
+ auto cmdResponse = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ BSON("_flushDatabaseCacheUpdates" << dbname),
+ Shard::RetryPolicy::kIdempotent));
+ // TODO SERVER-42112: uassert on the cmdResponse.
+ }
+
ShardingLogging::get(opCtx)->logChange(
opCtx, "dropDatabase", dbname, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
diff --git a/src/mongo/db/s/database_sharding_state.cpp b/src/mongo/db/s/database_sharding_state.cpp
index 5871cb3f689..f3557c5791e 100644
--- a/src/mongo/db/s/database_sharding_state.cpp
+++ b/src/mongo/db/s/database_sharding_state.cpp
@@ -41,34 +41,77 @@
#include "mongo/util/log.h"
namespace mongo {
+namespace {
-const Database::Decoration<DatabaseShardingState> DatabaseShardingState::get =
- Database::declareDecoration<DatabaseShardingState>();
+class DatabaseShardingStateMap {
+ DatabaseShardingStateMap& operator=(const DatabaseShardingStateMap&) = delete;
+ DatabaseShardingStateMap(const DatabaseShardingStateMap&) = delete;
-DatabaseShardingState::DatabaseShardingState() = default;
+public:
+ static const ServiceContext::Decoration<DatabaseShardingStateMap> get;
+
+ DatabaseShardingStateMap() {}
+
+ DatabaseShardingState& getOrCreate(const StringData dbName) {
+ stdx::lock_guard<stdx::mutex> lg(_mutex);
+
+ auto it = _databases.find(dbName);
+ if (it == _databases.end()) {
+ auto inserted =
+ _databases.try_emplace(dbName, std::make_unique<DatabaseShardingState>(dbName));
+ invariant(inserted.second);
+ it = std::move(inserted.first);
+ }
+
+ return *it->second;
+ }
+
+private:
+ using DatabasesMap = StringMap<std::shared_ptr<DatabaseShardingState>>;
+
+ stdx::mutex _mutex;
+ DatabasesMap _databases;
+};
+
+const ServiceContext::Decoration<DatabaseShardingStateMap> DatabaseShardingStateMap::get =
+ ServiceContext::declareDecoration<DatabaseShardingStateMap>();
+
+} // namespace
+
+DatabaseShardingState::DatabaseShardingState(const StringData dbName)
+ : _dbName(dbName.toString()) {}
+
+DatabaseShardingState* DatabaseShardingState::get(OperationContext* opCtx,
+ const StringData dbName) {
+ // The DB lock must be held to obtain a reference to the database sharding state.
+ dassert(opCtx->lockState()->isDbLockedForMode(dbName, MODE_IS));
+
+ auto& databasesMap = DatabaseShardingStateMap::get(opCtx->getServiceContext());
+ return &databasesMap.getOrCreate(dbName);
+}
void DatabaseShardingState::enterCriticalSectionCatchUpPhase(OperationContext* opCtx, DSSLock&) {
- invariant(opCtx->lockState()->isDbLockedForMode(get.owner(this)->name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(_dbName, MODE_X));
_critSec.enterCriticalSectionCatchUpPhase();
}
void DatabaseShardingState::enterCriticalSectionCommitPhase(OperationContext* opCtx, DSSLock&) {
- invariant(opCtx->lockState()->isDbLockedForMode(get.owner(this)->name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(_dbName, MODE_X));
_critSec.enterCriticalSectionCommitPhase();
}
void DatabaseShardingState::exitCriticalSection(OperationContext* opCtx,
boost::optional<DatabaseVersion> newDbVersion,
DSSLock&) {
- invariant(opCtx->lockState()->isDbLockedForMode(get.owner(this)->name(), MODE_IX));
+ invariant(opCtx->lockState()->isDbLockedForMode(_dbName, MODE_IX));
_critSec.exitCriticalSection();
_dbVersion = newDbVersion;
}
boost::optional<DatabaseVersion> DatabaseShardingState::getDbVersion(OperationContext* opCtx,
DSSLock&) const {
- if (!opCtx->lockState()->isDbLockedForMode(get.owner(this)->name(), MODE_X)) {
- invariant(opCtx->lockState()->isDbLockedForMode(get.owner(this)->name(), MODE_IS));
+ if (!opCtx->lockState()->isDbLockedForMode(_dbName, MODE_X)) {
+ invariant(opCtx->lockState()->isDbLockedForMode(_dbName, MODE_IS));
}
return _dbVersion;
}
@@ -76,20 +119,18 @@ boost::optional<DatabaseVersion> DatabaseShardingState::getDbVersion(OperationCo
void DatabaseShardingState::setDbVersion(OperationContext* opCtx,
boost::optional<DatabaseVersion> newDbVersion,
DSSLock&) {
- invariant(opCtx->lockState()->isDbLockedForMode(get.owner(this)->name(), MODE_X));
- log() << "setting this node's cached database version for " << get.owner(this)->name() << " to "
+ invariant(opCtx->lockState()->isDbLockedForMode(_dbName, MODE_X));
+ log() << "setting this node's cached database version for " << _dbName << " to "
<< (newDbVersion ? newDbVersion->toBSON() : BSONObj());
_dbVersion = newDbVersion;
}
void DatabaseShardingState::checkDbVersion(OperationContext* opCtx, DSSLock&) const {
invariant(opCtx->lockState()->isLocked());
- const auto dbName = get.owner(this)->name();
- const auto clientDbVersion = OperationShardingState::get(opCtx).getDbVersion(dbName);
- if (!clientDbVersion) {
+ const auto clientDbVersion = OperationShardingState::get(opCtx).getDbVersion(_dbName);
+ if (!clientDbVersion)
return;
- }
auto criticalSectionSignal = _critSec.getSignal(opCtx->lockState()->isWriteLocked()
? ShardingMigrationCriticalSection::kWrite
@@ -98,14 +139,14 @@ void DatabaseShardingState::checkDbVersion(OperationContext* opCtx, DSSLock&) co
OperationShardingState::get(opCtx).setMovePrimaryCriticalSectionSignal(
criticalSectionSignal);
- uasserted(StaleDbRoutingVersion(dbName, *clientDbVersion, boost::none),
+ uasserted(StaleDbRoutingVersion(_dbName, *clientDbVersion, boost::none),
"movePrimary critical section active");
}
- uassert(StaleDbRoutingVersion(dbName, *clientDbVersion, boost::none),
+ uassert(StaleDbRoutingVersion(_dbName, *clientDbVersion, boost::none),
"don't know dbVersion",
_dbVersion);
- uassert(StaleDbRoutingVersion(dbName, *clientDbVersion, *_dbVersion),
+ uassert(StaleDbRoutingVersion(_dbName, *clientDbVersion, *_dbVersion),
"dbVersion mismatch",
databaseVersion::equal(*clientDbVersion, *_dbVersion));
}
@@ -117,7 +158,7 @@ MovePrimarySourceManager* DatabaseShardingState::getMovePrimarySourceManager(DSS
void DatabaseShardingState::setMovePrimarySourceManager(OperationContext* opCtx,
MovePrimarySourceManager* sourceMgr,
DSSLock&) {
- invariant(opCtx->lockState()->isDbLockedForMode(get.owner(this)->name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(_dbName, MODE_X));
invariant(sourceMgr);
invariant(!_sourceMgr);
@@ -125,7 +166,7 @@ void DatabaseShardingState::setMovePrimarySourceManager(OperationContext* opCtx,
}
void DatabaseShardingState::clearMovePrimarySourceManager(OperationContext* opCtx, DSSLock&) {
- invariant(opCtx->lockState()->isDbLockedForMode(get.owner(this)->name(), MODE_IX));
+ invariant(opCtx->lockState()->isDbLockedForMode(_dbName, MODE_IX));
_sourceMgr = nullptr;
}
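
The map above replaces the old Database::Decoration: per-database sharding state now hangs off the ServiceContext, keyed by database name, so it exists (and can be checked) even when no Database object does. Below is a minimal standalone sketch of the pattern, with std::map and std::mutex standing in for MongoDB's ServiceContext decoration and StringMap; every name in it is illustrative rather than MongoDB API:

#include <cassert>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <string>

// Per-database state, analogous to DatabaseShardingState.
struct DbState {
    explicit DbState(std::string name) : dbName(std::move(name)) {}
    const std::string dbName;
    int cachedVersion = 0;  // stand-in for boost::optional<DatabaseVersion>
};

// Service-wide registry, analogous to DatabaseShardingStateMap. Entries are
// created on first lookup and stay in the map afterwards.
class DbStateMap {
public:
    DbState* getOrCreate(const std::string& dbName) {
        std::lock_guard<std::mutex> lg(_mutex);
        auto it = _databases.find(dbName);
        if (it == _databases.end()) {
            auto inserted = _databases.try_emplace(dbName, std::make_shared<DbState>(dbName));
            assert(inserted.second);
            it = inserted.first;
        }
        return it->second.get();
    }

private:
    std::mutex _mutex;
    std::map<std::string, std::shared_ptr<DbState>> _databases;
};

int main() {
    DbStateMap map;
    // State is reachable by name alone; no Database object is required, which
    // is why callers in this commit can check versions for absent databases.
    DbState* state = map.getOrCreate("testDb");
    state->cachedVersion = 1;
    assert(map.getOrCreate("testDb") == state);  // same entry on re-lookup
    std::cout << state->dbName << " v" << state->cachedVersion << '\n';
}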
diff --git a/src/mongo/db/s/database_sharding_state.h b/src/mongo/db/s/database_sharding_state.h
index a580e5bf4dc..938431cbf8e 100644
--- a/src/mongo/db/s/database_sharding_state.h
+++ b/src/mongo/db/s/database_sharding_state.h
@@ -53,12 +53,19 @@ public:
*/
using DSSLock = ShardingStateLock<DatabaseShardingState>;
- static const Database::Decoration<DatabaseShardingState> get;
-
- DatabaseShardingState();
+ DatabaseShardingState(const StringData dbName);
~DatabaseShardingState() = default;
/**
+ * Obtains the sharding state for the specified database. If it does not exist, it will be
+ * created and will remain in memory until the database is dropped.
+ *
+ * Must be called with some lock held on the database being looked up and the returned
+ * pointer must not be stored.
+ */
+ static DatabaseShardingState* get(OperationContext* opCtx, const StringData dbName);
+
+ /**
* Methods to control the databases's critical section. Must be called with the database X lock
* held.
*/
@@ -123,9 +130,11 @@ private:
// within.
Lock::ResourceMutex _stateChangeMutex{"DatabaseShardingState"};
+ const std::string _dbName;
+
// Modifying the state below requires holding the DBLock in X mode; holding the DBLock in any
// mode is acceptable for reading it. (Note: accessing this class at all requires holding the
- // DBLock in some mode, since it requires having a pointer to the Database).
+ // DBLock in some mode).
ShardingMigrationCriticalSection _critSec;
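
The header comment above spells out the access contract for the new get(): some lock on the database must be held, and the returned pointer must not be stored. A standalone sketch of one way such a contract can be made explicit, using a lock-token parameter; DbLock, DbState, and getDbState are hypothetical names, and the real code instead dasserts isDbLockedForMode:

#include <cassert>
#include <mutex>
#include <string>

struct DbState {
    std::string name;
};

// A lock token the caller must present to obtain the state pointer. Holding
// one is proof that the database lock is held for the pointer's lifetime.
class DbLock {
public:
    explicit DbLock(std::mutex& m) : _lg(m) {}

private:
    std::lock_guard<std::mutex> _lg;
};

DbState* getDbState(const DbLock&, const std::string& name) {
    static DbState state{"testDb"};  // illustrative single entry
    assert(name == state.name);
    return &state;  // valid only while the caller's DbLock is alive
}

int main() {
    std::mutex dbMutex;
    {
        DbLock lock(dbMutex);
        DbState* s = getDbState(lock, "testDb");
        (void)s;  // use s here; do not store it past the lock's scope
    }
}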
diff --git a/src/mongo/db/s/flush_database_cache_updates_command.cpp b/src/mongo/db/s/flush_database_cache_updates_command.cpp
index f1c6009329d..5e4fadfaae0 100644
--- a/src/mongo/db/s/flush_database_cache_updates_command.cpp
+++ b/src/mongo/db/s/flush_database_cache_updates_command.cpp
@@ -115,23 +115,16 @@ public:
{
AutoGetDb autoDb(opCtx, _dbName(), MODE_IS);
- if (!autoDb.getDb()) {
- uasserted(ErrorCodes::NamespaceNotFound,
- str::stream()
- << "Can't issue _flushDatabaseCacheUpdates on the database "
- << _dbName()
- << " because it does not exist on this shard.");
- }
// If the primary is in the critical section, secondaries must wait for the commit
// to finish on the primary in case a secondary's caller has an afterClusterTime
// inclusive of the commit (and new writes to the committed chunk) that hasn't yet
// propagated back to this shard. This ensures the read your own writes causal
// consistency guarantee.
- auto& dss = DatabaseShardingState::get(autoDb.getDb());
- auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, &dss);
+ const auto dss = DatabaseShardingState::get(opCtx, _dbName());
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
- if (auto criticalSectionSignal = dss.getCriticalSectionSignal(
+ if (auto criticalSectionSignal = dss->getCriticalSectionSignal(
ShardingMigrationCriticalSection::kRead, dssLock)) {
oss.setMigrationCriticalSectionSignal(criticalSectionSignal);
}
diff --git a/src/mongo/db/s/get_database_version_command.cpp b/src/mongo/db/s/get_database_version_command.cpp
index 899c8c1e33f..dd5229fdb76 100644
--- a/src/mongo/db/s/get_database_version_command.cpp
+++ b/src/mongo/db/s/get_database_version_command.cpp
@@ -78,13 +78,12 @@ public:
serverGlobalParams.clusterRole == ClusterRole::ShardServer);
BSONObj versionObj;
AutoGetDb autoDb(opCtx, _targetDb(), MODE_IS);
- if (auto db = autoDb.getDb()) {
- auto& dss = DatabaseShardingState::get(db);
- auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, &dss);
- if (auto dbVersion = dss.getDbVersion(opCtx, dssLock)) {
- versionObj = dbVersion->toBSON();
- }
+ const auto dss = DatabaseShardingState::get(opCtx, _targetDb());
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
+
+ if (auto dbVersion = dss->getDbVersion(opCtx, dssLock)) {
+ versionObj = dbVersion->toBSON();
}
result->getBodyBuilder().append("dbVersion", versionObj);
}
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index 63a1ebb7bd6..099c918af6d 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -89,10 +89,10 @@ Status MovePrimarySourceManager::clone(OperationContext* opCtx) {
// data was inserted into the database.
AutoGetOrCreateDb autoDb(opCtx, getNss().toString(), MODE_X);
- auto& dss = DatabaseShardingState::get(autoDb.getDb());
- auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, &dss);
+ auto dss = DatabaseShardingState::get(opCtx, getNss().toString());
+ auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, dss);
- dss.setMovePrimarySourceManager(opCtx, this, dssLock);
+ dss->setMovePrimarySourceManager(opCtx, this, dssLock);
}
_state = kCloning;
@@ -149,11 +149,11 @@ Status MovePrimarySourceManager::enterCriticalSection(OperationContext* opCtx) {
<< " was dropped during the movePrimary operation.");
}
- auto& dss = DatabaseShardingState::get(autoDb.getDb());
- auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, &dss);
+ auto dss = DatabaseShardingState::get(opCtx, getNss().toString());
+ auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, dss);
// IMPORTANT: After this line, the critical section is in place and needs to be signaled
- dss.enterCriticalSectionCatchUpPhase(opCtx, dssLock);
+ dss->enterCriticalSectionCatchUpPhase(opCtx, dssLock);
}
_state = kCriticalSection;
@@ -201,12 +201,12 @@ Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
<< " was dropped during the movePrimary operation.");
}
- auto& dss = DatabaseShardingState::get(autoDb.getDb());
- auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, &dss);
+ auto dss = DatabaseShardingState::get(opCtx, getNss().toString());
+ auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, dss);
// Read operations must begin to wait on the critical section just before we send the
// commit operation to the config server
- dss.enterCriticalSectionCommitPhase(opCtx, dssLock);
+ dss->enterCriticalSectionCommitPhase(opCtx, dssLock);
}
auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
@@ -262,10 +262,10 @@ Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) {
}
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, getNss())) {
- auto& dss = DatabaseShardingState::get(autoDb.getDb());
- auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, &dss);
+ auto dss = DatabaseShardingState::get(opCtx, getNss().toString());
+ auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, dss);
- dss.setDbVersion(opCtx, boost::none, dssLock);
+ dss->setDbVersion(opCtx, boost::none, dssLock);
uassertStatusOK(validateStatus.withContext(
str::stream() << "Unable to verify movePrimary commit for database: "
<< getNss().ns()
@@ -359,15 +359,13 @@ void MovePrimarySourceManager::_cleanup(OperationContext* opCtx) {
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
AutoGetDb autoDb(opCtx, getNss().toString(), MODE_IX);
- if (autoDb.getDb()) {
- auto& dss = DatabaseShardingState::get(autoDb.getDb());
- auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, &dss);
+ auto dss = DatabaseShardingState::get(opCtx, getNss().toString());
+ auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, dss);
- dss.clearMovePrimarySourceManager(opCtx, dssLock);
+ dss->clearMovePrimarySourceManager(opCtx, dssLock);
- // Leave the critical section if we're still registered.
- dss.exitCriticalSection(opCtx, boost::none, dssLock);
- }
+ // Leave the critical section if we're still registered.
+ dss->exitCriticalSection(opCtx, boost::none, dssLock);
}
if (_state == kCriticalSection || _state == kCloneCompleted) {
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index 155be7189d5..fe3790eaffd 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -228,29 +228,32 @@ void forceDatabaseRefresh(OperationContext* opCtx, const StringData dbName) {
auto const shardingState = ShardingState::get(opCtx);
invariant(shardingState->canAcceptShardedCommands());
- const auto refreshedDbVersion =
- uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabaseWithRefresh(opCtx, dbName))
- .databaseVersion();
+ DatabaseVersion refreshedDbVersion;
+ try {
+ refreshedDbVersion =
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabaseWithRefresh(opCtx, dbName))
+ .databaseVersion();
+ } catch (const ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
+ // The database has been dropped, so set the cached db version to boost::none.
+ Lock::DBLock dbLock(opCtx, dbName, MODE_X);
+ auto dss = DatabaseShardingState::get(opCtx, dbName);
+ auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, dss);
+
+ dss->setDbVersion(opCtx, boost::none, dssLock);
+ return;
+ }
// First, check under a shared lock if another thread already updated the cached version.
// This is a best-effort optimization to make as few threads as possible to convoy on the
// exclusive lock below.
- auto databaseHolder = DatabaseHolder::get(opCtx);
{
// Take the DBLock directly rather than using AutoGetDb, to prevent a recursive call
// into checkDbVersion().
Lock::DBLock dbLock(opCtx, dbName, MODE_IS);
- auto db = databaseHolder->getDb(opCtx, dbName);
- if (!db) {
- log() << "Database " << dbName
- << " has been dropped; not caching the refreshed databaseVersion";
- return;
- }
+ auto dss = DatabaseShardingState::get(opCtx, dbName);
+ auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
- auto& dss = DatabaseShardingState::get(db);
- auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, &dss);
-
- const auto cachedDbVersion = dss.getDbVersion(opCtx, dssLock);
+ const auto cachedDbVersion = dss->getDbVersion(opCtx, dssLock);
if (cachedDbVersion && cachedDbVersion->getUuid() == refreshedDbVersion.getUuid() &&
cachedDbVersion->getLastMod() >= refreshedDbVersion.getLastMod()) {
LOG(2) << "Skipping setting cached databaseVersion for " << dbName
@@ -263,17 +266,10 @@ void forceDatabaseRefresh(OperationContext* opCtx, const StringData dbName) {
// The cached version is older than the refreshed version; update the cached version.
Lock::DBLock dbLock(opCtx, dbName, MODE_X);
- auto db = databaseHolder->getDb(opCtx, dbName);
- if (!db) {
- log() << "Database " << dbName
- << " has been dropped; not caching the refreshed databaseVersion";
- return;
- }
-
- auto& dss = DatabaseShardingState::get(db);
- auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, &dss);
+ auto dss = DatabaseShardingState::get(opCtx, dbName);
+ auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, dss);
- dss.setDbVersion(opCtx, std::move(refreshedDbVersion), dssLock);
+ dss->setDbVersion(opCtx, std::move(refreshedDbVersion), dssLock);
}
} // namespace mongo
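
forceDatabaseRefresh keeps its two-phase shape: a best-effort check under a shared DB lock, then a fresh exclusive lock to write the cached version. Below is a minimal standalone sketch of that check-then-upgrade pattern using std::shared_mutex; the names are illustrative, and the re-check under the exclusive lock is added here for a generic setting (the real function relies on refreshed versions being monotonically newer):

#include <iostream>
#include <mutex>
#include <shared_mutex>

std::shared_mutex dbLock;  // stand-in for the per-database lock
int cachedVersion = 0;     // stand-in for the cached DatabaseVersion

// Update the cache only if the refreshed version is newer, checking first
// under a shared lock so concurrent refreshers rarely convoy on the
// exclusive lock (the same best-effort optimization as above).
void setIfNewer(int refreshedVersion) {
    {
        std::shared_lock<std::shared_mutex> shared(dbLock);
        if (cachedVersion >= refreshedVersion) {
            return;  // another thread already cached an equal or newer version
        }
    }
    // The shared lock is released before upgrading; re-check under the
    // exclusive lock because another thread may have won the race.
    std::unique_lock<std::shared_mutex> exclusive(dbLock);
    if (cachedVersion < refreshedVersion) {
        cachedVersion = refreshedVersion;
    }
}

int main() {
    setIfNewer(2);
    setIfNewer(1);  // stale refresh; leaves the newer cached version in place
    std::cout << "cachedVersion = " << cachedVersion << '\n';  // prints 2
}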
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index 43653b40a2a..9969d9c9f30 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -320,11 +320,9 @@ void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateE
if (setField.hasField(ShardDatabaseType::enterCriticalSectionCounter.name())) {
AutoGetDb autoDb(opCtx, db, MODE_X);
- if (autoDb.getDb()) {
- auto& dss = DatabaseShardingState::get(autoDb.getDb());
- auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, &dss);
- dss.setDbVersion(opCtx, boost::none, dssLock);
- }
+ auto dss = DatabaseShardingState::get(opCtx, db);
+ auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, dss);
+ dss->setDbVersion(opCtx, boost::none, dssLock);
}
}
}
@@ -368,11 +366,9 @@ void ShardServerOpObserver::onDelete(OperationContext* opCtx,
bsonExtractStringField(documentKey, ShardDatabaseType::name.name(), &deletedDatabase));
AutoGetDb autoDb(opCtx, deletedDatabase, MODE_X);
- if (autoDb.getDb()) {
- auto& dss = DatabaseShardingState::get(autoDb.getDb());
- auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, &dss);
- dss.setDbVersion(opCtx, boost::none, dssLock);
- }
+ auto dss = DatabaseShardingState::get(opCtx, deletedDatabase);
+ auto dssLock = DatabaseShardingState::DSSLock::lockExclusive(opCtx, dss);
+ dss->setDbVersion(opCtx, boost::none, dssLock);
}
if (nss == NamespaceString::kServerConfigurationNamespace) {