author     Martin Neupauer <martin.neupauer@mongodb.com>  2018-04-16 13:22:38 -0400
committer  Martin Neupauer <martin.neupauer@mongodb.com>  2018-04-16 18:10:40 -0400
commit     a58a24439b972bb8af00caf0cf6c0a8696a5899c (patch)
tree       d1d127c2f7237ad68d51afcdb83293b2e2eb89a2
parent     05fd295d14a37bb092e61b89cf7495af8ff7a471 (diff)
download   mongo-a58a24439b972bb8af00caf0cf6c0a8696a5899c.tar.gz
SERVER-33783 Make shards and mongos do full routing/filtering metadata refresh after FCV change
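In short: after this change, completing a setFeatureCompatibilityVersion transition wipes the shards' cached routing/filtering metadata, so the next access performs a full refresh from the config servers. The test below pivots on the chunk "history" field, which chunk metadata carries only in FCV 4.0. A minimal sketch of the invariant being checked, using the test's own names (lastStableFCV comes from the jstest harness; latestFCV is assumed to be its matching upgrade constant):

    // Under FCV 3.6 the authoritative chunk entries on the config server
    // carry no "history" field...
    assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
    assert(!st.s.getDB("config").chunks.findOne({ns: ns1}).hasOwnProperty("history"));

    // ...and under FCV 4.0 they gain one; after this patch the shards'
    // persisted caches (config.cache.chunks.<ns>) follow suit on refresh.
    assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
    assert(st.s.getDB("config").chunks.findOne({ns: ns1}).hasOwnProperty("history"));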
-rw-r--r--  jstests/sharding/shard_collection_cache_upgrade_downgrade.js  78
-rw-r--r--  src/mongo/db/commands/feature_compatibility_version.cpp        18
2 files changed, 84 insertions, 12 deletions
diff --git a/jstests/sharding/shard_collection_cache_upgrade_downgrade.js b/jstests/sharding/shard_collection_cache_upgrade_downgrade.js
index 0a699535257..d810f7bab1d 100644
--- a/jstests/sharding/shard_collection_cache_upgrade_downgrade.js
+++ b/jstests/sharding/shard_collection_cache_upgrade_downgrade.js
@@ -20,6 +20,22 @@
}
}
+ function checkCachedChunksEntry(conn, ns, authoritativeEntry) {
+ const res = conn.getDB("config").runCommand({find: "cache.chunks." + ns});
+ assert.commandWorked(res);
+ const cacheEntry = res.cursor.firstBatch[0];
+ if (authoritativeEntry === undefined) {
+ assert.eq(undefined, cacheEntry);
+ } else {
+ if (authoritativeEntry.hasOwnProperty("history")) {
+ assert(cacheEntry.hasOwnProperty("history"),
+ conn + "did not have expected on-disk cached history for " + ns);
+ } else {
+ assert(!cacheEntry.hasOwnProperty("history"),
+ conn + "did have unexpected on-disk cached history for " + ns);
+ }
+ }
+ }
const st = new ShardingTest({shards: 2});
assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
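For reference, the new checkCachedChunksEntry helper is driven with the authoritative chunk entry read from the config server through mongos; a condensed usage sketch (the same pattern the assertions later in this test follow):

    // Compare shard0's persisted chunk cache for ns1 against the authoritative
    // config.chunks entry; pass undefined to assert that no cache entry exists.
    const authoritative = st.s.getDB("config").getCollection("chunks").findOne({ns: ns1});
    checkCachedChunksEntry(st.shard0, ns1, authoritative);
    checkCachedChunksEntry(st.shard1, ns1, undefined);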
@@ -43,6 +59,13 @@
assert.neq(null, ns1EntryOriginal.uuid);
assert.neq(null, ns2EntryOriginal.uuid);
+ const ns1ChunkEntryOriginal = st.s.getDB("config").getCollection("chunks").findOne({ns: ns1});
+ const ns2ChunkEntryOriginal = st.s.getDB("config").getCollection("chunks").findOne({ns: ns2});
+ assert.neq(null, ns1ChunkEntryOriginal);
+ assert(!ns1ChunkEntryOriginal.hasOwnProperty("history"));
+ assert.neq(null, ns2ChunkEntryOriginal);
+ assert(!ns2ChunkEntryOriginal.hasOwnProperty("history"));
+
// Force each shard to refresh for the collection it owns to ensure it writes a cache entry.
assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns1}));
@@ -53,6 +76,11 @@
checkCachedCollectionEntry(st.shard1, ns1, undefined);
checkCachedCollectionEntry(st.shard1, ns2, ns2EntryOriginal);
+ checkCachedChunksEntry(st.shard0, ns1, ns1ChunkEntryOriginal);
+ checkCachedChunksEntry(st.shard0, ns2, undefined);
+ checkCachedChunksEntry(st.shard1, ns1, undefined);
+ checkCachedChunksEntry(st.shard1, ns2, ns2ChunkEntryOriginal);
+
// Simulate that the cache entry was written without a UUID (SERVER-33356).
assert.writeOK(st.shard0.getDB("config")
.getCollection("cache.collections")
@@ -73,10 +101,22 @@
assert.docEq(ns1EntryFCV40, ns1EntryOriginal);
assert.docEq(ns2EntryFCV40, ns2EntryOriginal);
- // TODO (SERVER-33783): Add 'syncFromConfig: false' once the cached collection state is cleared
- // on shards on setFCV.
- assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns1}));
- assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns2}));
+ const ns1ChunkEntryFCV40 = st.s.getDB("config").getCollection("chunks").findOne({ns: ns1});
+ const ns2ChunkEntryFCV40 = st.s.getDB("config").getCollection("chunks").findOne({ns: ns2});
+ assert.neq(null, ns1ChunkEntryFCV40);
+ assert(ns1ChunkEntryFCV40.hasOwnProperty("history"));
+ assert.neq(null, ns2ChunkEntryFCV40);
+ assert(ns2ChunkEntryFCV40.hasOwnProperty("history"));
+
+ st.s.getDB(db1Name).getCollection(collName).findOne();
+ st.s.getDB(db2Name).getCollection(collName).findOne();
+
+ // We wait for the refresh triggered by the finds above to persist the new
+ // cache entry to disk, since that persistence happens asynchronously.
+ assert.commandWorked(
+ st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns1, syncFromConfig: false}));
+ assert.commandWorked(
+ st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns2, syncFromConfig: false}));
// The shards' collection caches should have been updated with UUIDs.
checkCachedCollectionEntry(st.shard0, ns1, ns1EntryOriginal);
@@ -84,6 +124,12 @@
checkCachedCollectionEntry(st.shard1, ns1, undefined);
checkCachedCollectionEntry(st.shard1, ns2, ns2EntryOriginal);
+ // The shards' chunk caches should have been updated with histories.
+ checkCachedChunksEntry(st.shard0, ns1, ns1ChunkEntryFCV40);
+ checkCachedChunksEntry(st.shard0, ns2, undefined);
+ checkCachedChunksEntry(st.shard1, ns1, undefined);
+ checkCachedChunksEntry(st.shard1, ns2, ns2ChunkEntryFCV40);
+
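The pattern above deserves a note: the findOne() calls through mongos only trigger the shards' refreshes, and each refreshed routing table is persisted to the shard's config.cache.* collections asynchronously. Flushing with syncFromConfig: false then waits for that in-flight persistence without forcing another round trip to the config server. Condensed (a sketch, not new test code):

    st.s.getDB(db1Name).getCollection(collName).findOne();  // kicks off the refresh
    assert.commandWorked(st.shard0.adminCommand(            // waits for it to persist
        {_flushRoutingTableCacheUpdates: ns1, syncFromConfig: false}));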
//
// setFCV 3.6 (downgrade)
//
@@ -96,10 +142,20 @@
assert.docEq(ns1EntryFCV36, ns1EntryOriginal);
assert.docEq(ns2EntryFCV36, ns2EntryOriginal);
- // TODO (SERVER-33783): Add 'syncFromConfig: false' once the cached collection state is cleared
- // on shards on setFCV.
- assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns1}));
- assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns2}));
+ const ns1ChunkEntryFCV36 = st.s.getDB("config").getCollection("chunks").findOne({ns: ns1});
+ const ns2ChunkEntryFCV36 = st.s.getDB("config").getCollection("chunks").findOne({ns: ns2});
+ assert.neq(null, ns1ChunkEntryFCV36);
+ assert(!ns1ChunkEntryFCV36.hasOwnProperty("history"));
+ assert.neq(null, ns2ChunkEntryFCV36);
+ assert(!ns2ChunkEntryFCV36.hasOwnProperty("history"));
+
+ st.s.getDB(db1Name).getCollection(collName).findOne();
+ st.s.getDB(db2Name).getCollection(collName).findOne();
+
+ assert.commandWorked(
+ st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns1, syncFromConfig: false}));
+ assert.commandWorked(
+ st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns2, syncFromConfig: false}));
// Also refresh the sessions collection so that the UUID consistency check at the end of
// ShardingTest, which will check for its UUID on the shards, passes.
@@ -114,5 +170,11 @@
checkCachedCollectionEntry(st.shard1, ns1, undefined);
checkCachedCollectionEntry(st.shard1, ns2, ns2EntryOriginal);
+ // The shards' chunk caches should have been updated to remove the histories.
+ checkCachedChunksEntry(st.shard0, ns1, ns1ChunkEntryFCV36);
+ checkCachedChunksEntry(st.shard0, ns2, undefined);
+ checkCachedChunksEntry(st.shard1, ns1, undefined);
+ checkCachedChunksEntry(st.shard1, ns2, ns2ChunkEntryFCV36);
+
st.stop();
})();
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index 932f8d29553..20935c0a4f2 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/db/s/database_sharding_state.h"
+#include "mongo/db/s/sharding_state.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine.h"
@@ -50,6 +51,8 @@
#include "mongo/db/write_concern_options.h"
#include "mongo/executor/egress_tag_closer_manager.h"
#include "mongo/rpc/get_status_from_command_result.h"
+#include "mongo/s/catalog_cache.h"
+#include "mongo/s/grid.h"
#include "mongo/transport/service_entry_point.h"
#include "mongo/util/log.h"
@@ -160,10 +163,11 @@ void FeatureCompatibilityVersion::onInsertOrUpdate(OperationContext* opCtx, cons
serverGlobalParams.featureCompatibility.setVersion(newVersion);
updateMinWireVersion();
- if (newVersion == ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo36) {
- // Clear the in-memory cached database versions on downgrade, so that we do not check
- // databaseVersion in FCV 3.6 (it would be meaningless, since databases do not have
- // versions in FCV 3.6).
+ if (ShardingState::get(opCtx)->enabled() &&
+ (newVersion ==
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo36 ||
+ newVersion == ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo40)) {
+ // Clear the in-memory cached database versions and collection metadata.
// TODO: Once SERVER-34431 goes in, just clear the DatabaseShardingStateMap.
std::vector<std::string> dbNames;
getGlobalServiceContext()->getGlobalStorageEngine()->listDatabases(&dbNames);
@@ -176,8 +180,14 @@ void FeatureCompatibilityVersion::onInsertOrUpdate(OperationContext* opCtx, cons
AutoGetDb autoDb(opCtx, dbName, MODE_X);
if (autoDb.getDb()) {
DatabaseShardingState::get(autoDb.getDb()).setDbVersion(opCtx, boost::none);
+ for (const auto& collection : *autoDb.getDb()) {
+ CollectionShardingState::get(opCtx, collection->ns())
+ ->refreshMetadata(opCtx, nullptr);
+ }
}
}
+
+ Grid::get(opCtx)->catalogCache()->purgeAllDatabases();
}
if (newVersion != ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo36) {
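Taken together, the C++ hook above is what lets the test drop its TODOs: when an FCV transition completes in either direction (kFullyDowngradedTo36 or kFullyUpgradedTo40), a sharding-enabled node clears its in-memory database versions and per-collection filtering metadata and purges its catalog cache, so no stale routing state survives. The downgrade leg can therefore be verified end to end with the same sequence as the upgrade leg; a hedged sketch using the test's names:

    assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
    // The shard's cached metadata was just wiped by the hook above, so this
    // findOne() forces a full refresh whose persisted result has no history.
    st.s.getDB(db1Name).getCollection(collName).findOne();
    assert.commandWorked(st.shard0.adminCommand(
        {_flushRoutingTableCacheUpdates: ns1, syncFromConfig: false}));
    checkCachedChunksEntry(
        st.shard0, ns1, st.s.getDB("config").getCollection("chunks").findOne({ns: ns1}));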