-rw-r--r--  jstests/sharding/shard_collection_cache_upgrade_downgrade.js         49
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp  28
2 files changed, 59 insertions, 18 deletions
diff --git a/jstests/sharding/shard_collection_cache_upgrade_downgrade.js b/jstests/sharding/shard_collection_cache_upgrade_downgrade.js
index 0baa6bd7954..1cc534ce22c 100644
--- a/jstests/sharding/shard_collection_cache_upgrade_downgrade.js
+++ b/jstests/sharding/shard_collection_cache_upgrade_downgrade.js
@@ -41,45 +41,66 @@
const db1Name = "db1";
const db2Name = "db2";
+ const db3Name = "db3";
const collName = "foo";
const ns1 = db1Name + "." + collName;
const ns2 = db2Name + "." + collName;
+ const ns3 = db3Name + "." + collName;
// Create both collections in the sharding catalog and ensure they are on different shards.
assert.commandWorked(st.s.adminCommand({enableSharding: db1Name}));
assert.commandWorked(st.s.adminCommand({enableSharding: db2Name}));
+ assert.commandWorked(st.s.adminCommand({enableSharding: db3Name}));
st.ensurePrimaryShard(db1Name, st.shard0.shardName);
st.ensurePrimaryShard(db2Name, st.shard1.shardName);
+ st.ensurePrimaryShard(db3Name, st.shard0.shardName);
assert.commandWorked(st.s.adminCommand({shardCollection: ns1, key: {_id: 1}}));
assert.commandWorked(st.s.adminCommand({shardCollection: ns2, key: {_id: 1}}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns3, key: {_id: 1}}));
+
+ // Ensure ns3 has chunks on both shards.
+ assert.commandWorked(st.s.adminCommand({split: ns3, middle: {_id: 0}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: ns3, find: {_id: 0}, to: st.shard1.shardName}));
// Ensure the collection entries have UUIDs.
const ns1EntryOriginal = st.s.getDB("config").getCollection("collections").findOne({_id: ns1});
const ns2EntryOriginal = st.s.getDB("config").getCollection("collections").findOne({_id: ns2});
+ const ns3EntryOriginal = st.s.getDB("config").getCollection("collections").findOne({_id: ns3});
assert.neq(null, ns1EntryOriginal.uuid);
assert.neq(null, ns2EntryOriginal.uuid);
+ assert.neq(null, ns3EntryOriginal.uuid);
const ns1ChunkEntryOriginal = st.s.getDB("config").getCollection("chunks").findOne({ns: ns1});
const ns2ChunkEntryOriginal = st.s.getDB("config").getCollection("chunks").findOne({ns: ns2});
+ const ns3ChunkEntryOriginal = st.s.getDB("config").getCollection("chunks").findOne({ns: ns3});
assert.neq(null, ns1ChunkEntryOriginal);
assert(!ns1ChunkEntryOriginal.hasOwnProperty("history"));
assert.neq(null, ns2ChunkEntryOriginal);
assert(!ns2ChunkEntryOriginal.hasOwnProperty("history"));
+ assert.neq(null, ns3ChunkEntryOriginal);
+ assert(!ns3ChunkEntryOriginal.hasOwnProperty("history"));
// Force each shard to refresh for the collection it owns to ensure it writes a cache entry.
assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns1}));
assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns2}));
+ assert.commandWorked(st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns3}));
+ assert.commandWorked(st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns3}));
checkCachedCollectionEntry(st.shard0, ns1, ns1EntryOriginal);
checkCachedCollectionEntry(st.shard0, ns2, undefined);
checkCachedCollectionEntry(st.shard1, ns1, undefined);
checkCachedCollectionEntry(st.shard1, ns2, ns2EntryOriginal);
+ checkCachedCollectionEntry(st.shard0, ns3, ns3EntryOriginal);
+ checkCachedCollectionEntry(st.shard1, ns3, ns3EntryOriginal);
checkCachedChunksEntry(st.shard0, ns1, ns1ChunkEntryOriginal);
checkCachedChunksEntry(st.shard0, ns2, undefined);
checkCachedChunksEntry(st.shard1, ns1, undefined);
checkCachedChunksEntry(st.shard1, ns2, ns2ChunkEntryOriginal);
+ checkCachedChunksEntry(st.shard0, ns3, ns3ChunkEntryOriginal);
+ checkCachedChunksEntry(st.shard1, ns3, ns3ChunkEntryOriginal);
// Simulate that the cache entry was written without a UUID (SERVER-33356).
assert.writeOK(st.shard0.getDB("config")
@@ -98,18 +119,24 @@
// The UUID in the authoritative collection entries should not have changed.
const ns1EntryFCV40 = st.s.getDB("config").getCollection("collections").findOne({_id: ns1});
const ns2EntryFCV40 = st.s.getDB("config").getCollection("collections").findOne({_id: ns2});
+ const ns3EntryFCV40 = st.s.getDB("config").getCollection("collections").findOne({_id: ns3});
assert.docEq(ns1EntryFCV40, ns1EntryOriginal);
assert.docEq(ns2EntryFCV40, ns2EntryOriginal);
+ assert.docEq(ns3EntryFCV40, ns3EntryOriginal);
const ns1ChunkEntryFCV40 = st.s.getDB("config").getCollection("chunks").findOne({ns: ns1});
const ns2ChunkEntryFCV40 = st.s.getDB("config").getCollection("chunks").findOne({ns: ns2});
+ const ns3ChunkEntryFCV40 = st.s.getDB("config").getCollection("chunks").findOne({ns: ns3});
assert.neq(null, ns1ChunkEntryFCV40);
assert(ns1ChunkEntryFCV40.hasOwnProperty("history"));
assert.neq(null, ns2ChunkEntryFCV40);
assert(ns2ChunkEntryFCV40.hasOwnProperty("history"));
+ assert.neq(null, ns3ChunkEntryFCV40);
+ assert(ns3ChunkEntryFCV40.hasOwnProperty("history"));
st.s.getDB(db1Name).getCollection(collName).findOne();
st.s.getDB(db2Name).getCollection(collName).findOne();
+ st.s.getDB(db3Name).getCollection(collName).findOne();
// We wait for the refresh triggered by the finds to persist the new cache entry to disk,
// because it's done asynchronously.
@@ -117,18 +144,26 @@
st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns1, syncFromConfig: false}));
assert.commandWorked(
st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns2, syncFromConfig: false}));
+ assert.commandWorked(
+ st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns3, syncFromConfig: false}));
+ assert.commandWorked(
+ st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns3, syncFromConfig: false}));
// The shards' collection caches should have been updated with UUIDs.
checkCachedCollectionEntry(st.shard0, ns1, ns1EntryOriginal);
checkCachedCollectionEntry(st.shard0, ns2, undefined);
checkCachedCollectionEntry(st.shard1, ns1, undefined);
checkCachedCollectionEntry(st.shard1, ns2, ns2EntryOriginal);
+ checkCachedCollectionEntry(st.shard0, ns3, ns3EntryOriginal);
+ checkCachedCollectionEntry(st.shard1, ns3, ns3EntryOriginal);
// The shards' chunk caches should have been updated with histories.
checkCachedChunksEntry(st.shard0, ns1, ns1ChunkEntryFCV40);
checkCachedChunksEntry(st.shard0, ns2, undefined);
checkCachedChunksEntry(st.shard1, ns1, undefined);
checkCachedChunksEntry(st.shard1, ns2, ns2ChunkEntryFCV40);
+ checkCachedChunksEntry(st.shard0, ns3, ns3ChunkEntryFCV40);
+ checkCachedChunksEntry(st.shard1, ns3, ns3ChunkEntryFCV40);
//
// setFCV 3.6 (downgrade)
@@ -139,23 +174,33 @@
// The UUID in the authoritative collection entries should still not have changed.
const ns1EntryFCV36 = st.s.getDB("config").getCollection("collections").findOne({_id: ns1});
const ns2EntryFCV36 = st.s.getDB("config").getCollection("collections").findOne({_id: ns2});
+ const ns3EntryFCV36 = st.s.getDB("config").getCollection("collections").findOne({_id: ns3});
assert.docEq(ns1EntryFCV36, ns1EntryOriginal);
assert.docEq(ns2EntryFCV36, ns2EntryOriginal);
+ assert.docEq(ns3EntryFCV36, ns3EntryOriginal);
const ns1ChunkEntryFCV36 = st.s.getDB("config").getCollection("chunks").findOne({ns: ns1});
const ns2ChunkEntryFCV36 = st.s.getDB("config").getCollection("chunks").findOne({ns: ns2});
+ const ns3ChunkEntryFCV36 = st.s.getDB("config").getCollection("chunks").findOne({ns: ns3});
assert.neq(null, ns1ChunkEntryFCV36);
assert(!ns1ChunkEntryFCV36.hasOwnProperty("history"));
assert.neq(null, ns2ChunkEntryFCV36);
assert(!ns2ChunkEntryFCV36.hasOwnProperty("history"));
+ assert.neq(null, ns3ChunkEntryFCV36);
+ assert(!ns3ChunkEntryFCV36.hasOwnProperty("history"));
st.s.getDB(db1Name).getCollection(collName).findOne();
st.s.getDB(db2Name).getCollection(collName).findOne();
+ st.s.getDB(db3Name).getCollection(collName).findOne();
assert.commandWorked(
st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns1, syncFromConfig: false}));
assert.commandWorked(
st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns2, syncFromConfig: false}));
+ assert.commandWorked(
+ st.shard0.adminCommand({_flushRoutingTableCacheUpdates: ns3, syncFromConfig: false}));
+ assert.commandWorked(
+ st.shard1.adminCommand({_flushRoutingTableCacheUpdates: ns3, syncFromConfig: false}));
// Also refresh the sessions collection so that the UUID consistency check at the end of
// ShardingTest, which will check for its UUID on the shards, passes.
@@ -169,12 +214,16 @@
checkCachedCollectionEntry(st.shard0, ns2, undefined);
checkCachedCollectionEntry(st.shard1, ns1, undefined);
checkCachedCollectionEntry(st.shard1, ns2, ns2EntryOriginal);
+ checkCachedCollectionEntry(st.shard0, ns3, ns3EntryOriginal);
+ checkCachedCollectionEntry(st.shard1, ns3, ns3EntryOriginal);
// The shards' chunk caches should have been updated with histories removed.
checkCachedChunksEntry(st.shard0, ns1, ns1ChunkEntryFCV40);
checkCachedChunksEntry(st.shard0, ns2, undefined);
checkCachedChunksEntry(st.shard1, ns1, undefined);
checkCachedChunksEntry(st.shard1, ns2, ns2ChunkEntryFCV40);
+ checkCachedChunksEntry(st.shard0, ns3, ns3ChunkEntryFCV40);
+ checkCachedChunksEntry(st.shard1, ns3, ns3ChunkEntryFCV40);
st.stop();
})();
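
The test additions above exercise a namespace (ns3) whose chunks live on both shards, so both shards are expected to persist cache entries for it across the FCV 4.0 upgrade and the 3.6 downgrade. For readers unfamiliar with the checkCachedCollectionEntry/checkCachedChunksEntry helpers (they are defined earlier in the test file, outside these hunks), the sketch below shows where the on-disk routing-table cache they inspect lives on each shard. It assumes the test's ShardingTest fixture `st`; the namespace is just an example and the real helper implementations may differ.

// Hedged sketch, not part of the patch: inspecting the persisted routing-table cache
// that the checkCached*Entry helpers read. Shards store this metadata in their local
// "config" database (cache.collections and cache.chunks.<ns>); "db3.foo" is only an
// example namespace.
const exampleNs = "db3.foo";
const shardConfigDB = st.shard0.getDB("config");

// One document per sharded collection the shard has cached, including the uuid field
// the test asserts on.
printjson(shardConfigDB.getCollection("cache.collections").findOne({_id: exampleNs}));

// One document per cached chunk of that collection. After setFCV 4.0 and a refresh,
// each entry should carry a 'history' array; after downgrade to 3.6 it should be gone
// again, matching the comments in the hunks above.
printjson(shardConfigDB.getCollection("cache.chunks." + exampleNs).findOne());
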
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 033a96c6a25..564b3dcf4e7 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -51,6 +51,7 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_key_pattern.h"
+#include "mongo/stdx/unordered_set.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -940,6 +941,7 @@ Status ShardingCatalogManager::upgradeChunksHistory(OperationContext* opCtx,
0,
currentCollectionVersion.getValue().epoch());
+ stdx::unordered_set<ShardId, ShardId::Hasher> bumpedShards;
for (const auto& chunk : chunksVector) {
auto swChunk = ChunkType::fromConfigBSON(chunk);
if (!swChunk.isOK()) {
@@ -948,10 +950,14 @@ Status ShardingCatalogManager::upgradeChunksHistory(OperationContext* opCtx,
auto& upgradeChunk = swChunk.getValue();
if (upgradeChunk.getHistory().empty()) {
-
- // Bump the version.
- upgradeChunk.setVersion(newCollectionVersion);
- newCollectionVersion.incMajor();
+ // Bump the version for only one chunk per shard to satisfy the requirement imposed by
+ // SERVER-33356.
+ const auto& shardId = upgradeChunk.getShard();
+ if (!bumpedShards.count(shardId)) {
+ upgradeChunk.setVersion(newCollectionVersion);
+ newCollectionVersion.incMajor();
+ bumpedShards.emplace(shardId);
+ }
// Construct the fresh history.
upgradeChunk.setHistory({ChunkHistory{validAfter, upgradeChunk.getShard()}});
@@ -1004,16 +1010,6 @@ Status ShardingCatalogManager::downgradeChunksHistory(OperationContext* opCtx,
<< ", but found no chunks"};
}
- const auto currentCollectionVersion = _findCollectionVersion(opCtx, nss, collectionEpoch);
- if (!currentCollectionVersion.isOK()) {
- return currentCollectionVersion.getStatus();
- }
-
- // Bump the version.
- auto newCollectionVersion = ChunkVersion(currentCollectionVersion.getValue().majorVersion() + 1,
- 0,
- currentCollectionVersion.getValue().epoch());
-
for (const auto& chunk : chunksVector) {
auto swChunk = ChunkType::fromConfigBSON(chunk);
if (!swChunk.isOK()) {
@@ -1021,10 +1017,6 @@ Status ShardingCatalogManager::downgradeChunksHistory(OperationContext* opCtx,
}
auto& downgradeChunk = swChunk.getValue();
- // Bump the version.
- downgradeChunk.setVersion(newCollectionVersion);
- newCollectionVersion.incMajor();
-
// Clear the history.
downgradeChunk.setHistory({});
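
Taken together, the C++ hunks change upgradeChunksHistory so that the collection's major version is bumped for at most one history-less chunk per shard (tracked in the new stdx::unordered_set of ShardIds) instead of for every chunk, while downgradeChunksHistory now only clears histories and no longer touches chunk versions at all. As a rough illustration of the per-shard selection, here is the same pattern restated as shell JavaScript over plain chunk documents; upgradeChunkHistories, bumpVersion, and validAfter are hypothetical stand-ins, not names from the server code.

// Hedged sketch (illustration only, not the server code): the "bump at most one chunk
// per shard" selection from upgradeChunksHistory, restated over plain chunk documents.
function upgradeChunkHistories(chunks, bumpVersion, validAfter) {
    const bumpedShards = new Set();  // plays the role of stdx::unordered_set<ShardId>
    for (const chunk of chunks) {
        if (chunk.history && chunk.history.length > 0) {
            continue;  // chunk already carries a history; nothing to upgrade
        }
        // Only the first history-less chunk per shard gets a new version, matching the
        // bumpedShards check added in the C++ above.
        if (!bumpedShards.has(chunk.shard)) {
            bumpVersion(chunk);
            bumpedShards.add(chunk.shard);
        }
        // Every history-less chunk still gets a fresh history entry.
        chunk.history = [{validAfter: validAfter, shard: chunk.shard}];
    }
}
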