author     Esha Maharishi <esha.maharishi@mongodb.com>   2018-04-13 17:40:04 -0400
committer  Esha Maharishi <esha.maharishi@mongodb.com>   2018-04-16 13:59:13 -0400
commit     c02574298a711b6de8a3d89cedcfe98040a6f55b (patch)
tree       678c5a9035c1511f0333e5d01f6047592478c2ea
parent     721b194f8c194db500a687e62526c8e1f8a65a10 (diff)
download   mongo-c02574298a711b6de8a3d89cedcfe98040a6f55b.tar.gz
SERVER-33356 Ensure shards' persisted collection cache picks up collection UUIDs after setFCV=4.0
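
Before this change, a shard running FCV 3.6 could persist entries in its
'config.cache.collections' collection without the collection's UUID. After
setFCV=4.0, the first versioned request for the collection forces a refresh
from the config server, which rewrites the cached entry with the UUID from
the authoritative 'config.collections' entry.

A minimal mongo-shell sketch of the invariant this patch establishes (the
'mongos' and 'shard' connection handles and the namespace are illustrative,
not part of the patch):

    // 'mongos' and 'shard' are illustrative connection handles.
    // Authoritative entry on the config server, reached through mongos.
    const authoritative =
        mongos.getDB("config").getCollection("collections").findOne({_id: "db1.foo"});
    // Force the shard to refresh its persisted routing cache for the namespace.
    assert.commandWorked(shard.adminCommand({_flushRoutingTableCacheUpdates: "db1.foo"}));
    // The shard's on-disk cached entry should now carry the same UUID.
    const cached =
        shard.getDB("config").getCollection("cache.collections").findOne({_id: "db1.foo"});
    assert.eq(authoritative.uuid, cached.uuid);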
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml |   1
-rw-r--r--  jstests/sharding/shard_collection_cache_upgrade_downgrade.js                       | 118
-rw-r--r--  src/mongo/db/commands/set_feature_compatibility_version_command.cpp                |  20
3 files changed, 134 insertions(+), 5 deletions(-)
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
index 0b2804c05cd..84821b463f6 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
@@ -11,6 +11,7 @@ selector:
- jstests/sharding/max_time_ms_sharded_new_commands.js
# Calls setFCV=4.0, which cannot be run on last-stable shards.
- jstests/sharding/database_versioning_upgrade_downgrade.js
+ - jstests/sharding/shard_collection_cache_upgrade_downgrade.js
#### Enable when 4.0 becomes last-stable.
- jstests/sharding/change_streams_unsharded_becomes_sharded.js
- jstests/sharding/create_database.js
diff --git a/jstests/sharding/shard_collection_cache_upgrade_downgrade.js b/jstests/sharding/shard_collection_cache_upgrade_downgrade.js
new file mode 100644
index 00000000000..0a699535257
--- /dev/null
+++ b/jstests/sharding/shard_collection_cache_upgrade_downgrade.js
@@ -0,0 +1,118 @@
+/**
+ * Tests that 'config.cache.collections' on shards will get updated with UUIDs the first time
+ * the shard receives a versioned request for the collection after setFCV=4.0.
+ */
+(function() {
+ "use strict";
+
+ load("jstests/libs/feature_compatibility_version.js");
+
+ function checkCachedCollectionEntry(conn, ns, authoritativeEntry) {
+ const res = conn.getDB("config").runCommand({find: "cache.collections", filter: {_id: ns}});
+ assert.commandWorked(res);
+ const cacheEntry = res.cursor.firstBatch[0];
+ if (authoritativeEntry === undefined) {
+ assert.eq(undefined, cacheEntry);
+ } else {
+ assert.eq(cacheEntry.uuid,
+ authoritativeEntry.uuid,
+ conn + " did not have expected on-disk cached collection UUID for " + ns);
+ }
+ }
+
+ const st = new ShardingTest({shards: 2});
+ assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+ const db1Name = "db1";
+ const db2Name = "db2";
+ const collName = "foo";
+ const ns1 = db1Name + "." + collName;
+ const ns2 = db2Name + "." + collName;
+
+ // Create both collections in the sharding catalog and ensure they are on different shards.
+ assert.commandWorked(st.s.adminCommand({enableSharding: db1Name}));
+ assert.commandWorked(st.s.adminCommand({enableSharding: db2Name}));
+ st.ensurePrimaryShard(db1Name, st.shard0.shardName);
+ st.ensurePrimaryShard(db2Name, st.shard1.shardName);
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns1, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns2, key: {_id: 1}}));
+
+ // Ensure the collection entries have UUIDs.
+ const ns1EntryOriginal = st.s.getDB("config").getCollection("collections").findOne({_id: ns1});
+ const ns2EntryOriginal = st.s.getDB("config").getCollection("collections").findOne({_id: ns2});
+ assert.neq(null, ns1EntryOriginal.uuid);
+ assert.neq(null, ns2EntryOriginal.uuid);
+
+ // Force each shard to refresh for the collection it owns to ensure it writes a cache entry.
+
+ assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns1}));
+ assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns2}));
+
+ checkCachedCollectionEntry(st.shard0, ns1, ns1EntryOriginal);
+ checkCachedCollectionEntry(st.shard0, ns2, undefined);
+ checkCachedCollectionEntry(st.shard1, ns1, undefined);
+ checkCachedCollectionEntry(st.shard1, ns2, ns2EntryOriginal);
+
+ // Simulate that the cache entry was written without a UUID (SERVER-33356).
+ assert.writeOK(st.shard0.getDB("config")
+ .getCollection("cache.collections")
+ .update({}, {$unset: {uuid: ""}}, {multi: true}));
+ assert.writeOK(st.shard1.getDB("config")
+ .getCollection("cache.collections")
+ .update({}, {$unset: {uuid: ""}}, {multi: true}));
+
+ //
+ // setFCV 4.0 (upgrade)
+ //
+
+ assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+
+ // The UUID in the authoritative collection entries should not have changed.
+ const ns1EntryFCV40 = st.s.getDB("config").getCollection("collections").findOne({_id: ns1});
+ const ns2EntryFCV40 = st.s.getDB("config").getCollection("collections").findOne({_id: ns2});
+ assert.docEq(ns1EntryFCV40, ns1EntryOriginal);
+ assert.docEq(ns2EntryFCV40, ns2EntryOriginal);
+
+ // TODO (SERVER-33783): Add 'syncFromConfig: false' once the cached collection state is cleared
+ // on shards on setFCV.
+ assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns1}));
+ assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns2}));
+
+ // The shards' collection caches should have been updated with UUIDs.
+ checkCachedCollectionEntry(st.shard0, ns1, ns1EntryOriginal);
+ checkCachedCollectionEntry(st.shard0, ns2, undefined);
+ checkCachedCollectionEntry(st.shard1, ns1, undefined);
+ checkCachedCollectionEntry(st.shard1, ns2, ns2EntryOriginal);
+
+ //
+ // setFCV 3.6 (downgrade)
+ //
+
+ assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+ // The UUID in the authoritative collection entries should still not have changed.
+ const ns1EntryFCV36 = st.s.getDB("config").getCollection("collections").findOne({_id: ns1});
+ const ns2EntryFCV36 = st.s.getDB("config").getCollection("collections").findOne({_id: ns2});
+ assert.docEq(ns1EntryFCV36, ns1EntryOriginal);
+ assert.docEq(ns2EntryFCV36, ns2EntryOriginal);
+
+ // TODO (SERVER-33783): Add 'syncFromConfig: false' once the cached collection state is cleared
+ // on shards on setFCV.
+ assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns1}));
+ assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns2}));
+
+ // Also refresh the sessions collection so that the UUID consistency check at the end of
+ // ShardingTest, which will check for its UUID on the shards, passes.
+ assert.commandWorked(
+ st.shard0.adminCommand({_flushRoutingTableCacheUpdates: "config.system.sessions"}));
+ assert.commandWorked(
+ st.shard1.adminCommand({_flushRoutingTableCacheUpdates: "config.system.sessions"}));
+
+ // The shards' collection caches should not have changed.
+ checkCachedCollectionEntry(st.shard0, ns1, ns1EntryOriginal);
+ checkCachedCollectionEntry(st.shard0, ns2, undefined);
+ checkCachedCollectionEntry(st.shard1, ns1, undefined);
+ checkCachedCollectionEntry(st.shard1, ns2, ns2EntryOriginal);
+
+ st.stop();
+})();
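
The test simulates the pre-fix state by stripping the 'uuid' field from the
shards' cache entries and then forcing a refresh. A condensed sketch of that
repro against a single shard ('shard' and 'ns' are illustrative stand-ins for
a direct shard connection and a sharded namespace it owns):

    // Drop the uuid field to mimic a cache entry written before this fix.
    assert.writeOK(shard.getDB("config").getCollection("cache.collections")
                       .update({_id: ns}, {$unset: {uuid: ""}}));
    // A forced refresh should restore the uuid from the config server.
    assert.commandWorked(shard.adminCommand({_flushRoutingTableCacheUpdates: ns}));
    assert.neq(undefined,
               shard.getDB("config").getCollection("cache.collections")
                   .findOne({_id: ns})
                   .uuid);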
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 2d62ac9f343..c377eb1ef53 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -173,10 +173,15 @@ public:
// Upgrade shards before config finishes its upgrade.
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
auto allDbs = uassertStatusOK(Grid::get(opCtx)->catalogClient()->getAllDBs(
- opCtx, repl::ReadConcernLevel::kLocalReadConcern));
+ opCtx, repl::ReadConcernLevel::kLocalReadConcern))
+ .value;
+
+ // The 'config' database contains the sharded 'config.system.sessions' collection,
+ // but does not have an entry in config.databases.
+ allDbs.emplace_back("config", ShardId("config"), true);
auto clusterTime = LogicalClock::get(opCtx)->getClusterTime().asTimestamp();
- for (const auto& db : allDbs.value) {
+ for (const auto& db : allDbs) {
const auto dbVersion = databaseVersion::makeNew();
uassertStatusOK(Grid::get(opCtx)->catalogClient()->updateConfigDocument(
@@ -258,10 +263,15 @@ public:
BSON(FeatureCompatibilityVersionCommandParser::kCommandName
<< requestedVersion)))));
- const auto allDbs = uassertStatusOK(Grid::get(opCtx)->catalogClient()->getAllDBs(
- opCtx, repl::ReadConcernLevel::kLocalReadConcern));
+ auto allDbs = uassertStatusOK(Grid::get(opCtx)->catalogClient()->getAllDBs(
+ opCtx, repl::ReadConcernLevel::kLocalReadConcern))
+ .value;
+
+ // The 'config' database contains the sharded 'config.system.sessions' collection,
+ // but does not have an entry in config.databases.
+ allDbs.emplace_back("config", ShardId("config"), true);
- for (const auto& db : allDbs.value) {
+ for (const auto& db : allDbs) {
uassertStatusOK(Grid::get(opCtx)->catalogClient()->updateConfigDocument(
opCtx,
DatabaseType::ConfigNS,
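
The C++ change appends the 'config' database to the list returned by
getAllDBs() because it hosts the sharded 'config.system.sessions' collection
yet has no document in 'config.databases', so iterating that collection alone
would skip it. A shell sketch of the check the test performs for the sessions
collection (the 'shard' connection handle is illustrative):

    // Refresh the shard's cached entry for the sessions collection, then
    // confirm the entry carries a UUID like any other sharded collection.
    assert.commandWorked(
        shard.adminCommand({_flushRoutingTableCacheUpdates: "config.system.sessions"}));
    const sessionsEntry =
        shard.getDB("config").getCollection("cache.collections")
            .findOne({_id: "config.system.sessions"});
    assert.neq(undefined, sessionsEntry.uuid);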