summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSergi Mateo Bellido <sergi.mateo-bellido@mongodb.com>2021-07-22 15:51:24 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-07-23 11:10:02 +0000
commit186c59cdf182e3d26c3443e6e2a0a17197aa8602 (patch)
tree61cd8ab5ea5d7c7debeb3036525bd89b485dcbce
parent7a8df38d284e16a60884569a90ddfa07586d9ec0 (diff)
downloadmongo-186c59cdf182e3d26c3443e6e2a0a17197aa8602.tar.gz
SERVER-58792 Disable the UUID-based logic that handles config.cache.chunks.* collections on the ShardServerCatalogCacheLoader
-rw-r--r--jstests/sharding/drop_collection.js4
-rw-r--r--jstests/sharding/initial_split_validate_shard_collections.js3
-rw-r--r--jstests/sharding/lookup_from_config_cache_chunks.js3
-rw-r--r--jstests/sharding/refine_collection_shard_key_drops_chunks.js4
-rw-r--r--jstests/sharding/refine_shardkey_config_cache_refresh.js3
-rw-r--r--jstests/sharding/rename_sharded.js3
-rw-r--r--src/mongo/db/s/shard_metadata_util.cpp15
-rw-r--r--src/mongo/db/s/shard_metadata_util_test.cpp2
-rw-r--r--src/mongo/db/s/shard_server_catalog_cache_loader.cpp2
-rw-r--r--src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp162
10 files changed, 13 insertions, 188 deletions
diff --git a/jstests/sharding/drop_collection.js b/jstests/sharding/drop_collection.js
index 6f02bf72cec..506fbaaedbe 100644
--- a/jstests/sharding/drop_collection.js
+++ b/jstests/sharding/drop_collection.js
@@ -349,9 +349,7 @@ jsTest.log("Test that dropping a sharded collection, the cached metadata on shar
// Get the chunks cache collection name
const configCollDoc = st.s0.getDB('config').collections.findOne({_id: coll.getFullName()});
- const chunksCollName = 'cache.chunks.' +
- (configCollDoc.hasOwnProperty('timestamp') ? extractUUIDFromObject(configCollDoc.uuid)
- : coll.getFullName());
+ const chunksCollName = 'cache.chunks.' + coll.getFullName();
// Drop the collection
assert.commandWorked(db.runCommand({drop: coll.getName()}));
diff --git a/jstests/sharding/initial_split_validate_shard_collections.js b/jstests/sharding/initial_split_validate_shard_collections.js
index beec643c535..088952bec10 100644
--- a/jstests/sharding/initial_split_validate_shard_collections.js
+++ b/jstests/sharding/initial_split_validate_shard_collections.js
@@ -65,8 +65,7 @@ assert.commandWorked(
const chunksOnConfigCount = findChunksUtil.countChunksForNs(st.config, 'test.user');
assert.eq(2, chunksOnConfigCount);
-const chunksCollName = "cache.chunks." +
- (collEntry.hasOwnProperty("timestamp") ? extractUUIDFromObject(collEntry.uuid) : 'test.user');
+const chunksCollName = "cache.chunks.test.user";
const chunksOnShard0 = st.shard0.getDB("config").getCollection(chunksCollName).find().toArray();
const chunksOnShard1 = st.shard1.getDB("config").getCollection(chunksCollName).find().toArray();
assert.eq(chunksOnConfigCount, chunksOnShard0.length);
diff --git a/jstests/sharding/lookup_from_config_cache_chunks.js b/jstests/sharding/lookup_from_config_cache_chunks.js
index f34d4331b99..2b66b5504a4 100644
--- a/jstests/sharding/lookup_from_config_cache_chunks.js
+++ b/jstests/sharding/lookup_from_config_cache_chunks.js
@@ -49,8 +49,7 @@ const setUp = () => {
const collNs = dbName + "." + collName;
const collEntry = st.config.collections.findOne({_id: collNs});
- chunksCollName = "cache.chunks." +
- (collEntry.hasOwnProperty("timestamp") ? extractUUIDFromObject(collEntry.uuid) : collNs);
+ chunksCollName = "cache.chunks." + collNs;
flushRoutersAndRefreshShardMetadata(st, {collNs});
};
diff --git a/jstests/sharding/refine_collection_shard_key_drops_chunks.js b/jstests/sharding/refine_collection_shard_key_drops_chunks.js
index c4226442ed3..76092bfc18c 100644
--- a/jstests/sharding/refine_collection_shard_key_drops_chunks.js
+++ b/jstests/sharding/refine_collection_shard_key_drops_chunks.js
@@ -42,9 +42,7 @@ assert.commandWorked(mongos.adminCommand({split: kNsName, middle: {a: 5, b: 5}})
// before refineCollectionShardKey.
assert.commandWorked(shard.adminCommand({_flushRoutingTableCacheUpdates: kNsName}));
-let collEntry = st.config.collections.findOne({_id: kNsName});
-let configCacheChunks = "config.cache.chunks." +
- (collEntry.hasOwnProperty("timestamp") ? extractUUIDFromObject(collEntry.uuid) : kNsName);
+let configCacheChunks = "config.cache.chunks." + kNsName;
let chunkArr = shard.getCollection(configCacheChunks).find({}).sort({min: 1}).toArray();
assert.eq(3, chunkArr.length);
assert.eq({a: MinKey, b: MinKey}, chunkArr[0]._id);
diff --git a/jstests/sharding/refine_shardkey_config_cache_refresh.js b/jstests/sharding/refine_shardkey_config_cache_refresh.js
index 0cb3271bde3..5418215c4cc 100644
--- a/jstests/sharding/refine_shardkey_config_cache_refresh.js
+++ b/jstests/sharding/refine_shardkey_config_cache_refresh.js
@@ -32,8 +32,7 @@ assert.commandWorked(
priConn.adminCommand({_flushRoutingTableCacheUpdates: 'test.user', syncFromConfig: true}));
let collEntry = st.config.collections.findOne({_id: 'test.user'});
-let chunksCollName = "cache.chunks." +
- (collEntry.hasOwnProperty("timestamp") ? extractUUIDFromObject(collEntry.uuid) : 'test.user');
+let chunksCollName = "cache.chunks.test.user";
let chunkCache = priConn.getDB('config').getCollection(chunksCollName);
let preRefineChunks = chunkCache.find().toArray();
assert.eq(3, preRefineChunks.length);
diff --git a/jstests/sharding/rename_sharded.js b/jstests/sharding/rename_sharded.js
index 037d0468b1b..05bcacbeb6b 100644
--- a/jstests/sharding/rename_sharded.js
+++ b/jstests/sharding/rename_sharded.js
@@ -55,8 +55,7 @@ function testRename(st, dbName, toNs, dropTarget, mustFail) {
// namespace and get the actual chunks cache collection name of the target collection
const toConfigCollDoc = mongos.getDB('config').collections.findOne({_id: toNs});
const chunksNameByUUID = toConfigCollDoc.hasOwnProperty('timestamp');
- const toChunksCollName =
- 'cache.chunks.' + (chunksNameByUUID ? extractUUIDFromObject(toConfigCollDoc.uuid) : toNs);
+ const toChunksCollName = 'cache.chunks.' + toNs;
// Validate the correctness of the collections metadata in the catalog cache on shards
for (let db of [st.shard0.getDB('config'), st.shard1.getDB('config')]) {
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index 38315ea0d1c..07bcb7c22bd 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -282,9 +282,7 @@ StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
boost::optional<long long> limit,
const OID& epoch,
const boost::optional<Timestamp>& timestamp) {
- const NamespaceString chunksNss{
- ChunkType::ShardNSPrefix +
- (!nss.isTemporaryReshardingCollection() && uuid ? uuid->toString() : nss.ns())};
+ const NamespaceString chunksNss{ChunkType::ShardNSPrefix + nss.ns()};
try {
DBDirectClient client(opCtx);
@@ -324,9 +322,7 @@ Status updateShardChunks(OperationContext* opCtx,
const OID& currEpoch) {
invariant(!chunks.empty());
- const NamespaceString chunksNss{
- ChunkType::ShardNSPrefix +
- (!nss.isTemporaryReshardingCollection() && uuid ? uuid->toString() : nss.ns())};
+ const NamespaceString chunksNss{ChunkType::ShardNSPrefix + nss.ns()};
try {
DBDirectClient client(opCtx);
@@ -457,12 +453,11 @@ Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx, const Namesp
void dropChunks(OperationContext* opCtx,
const NamespaceString& nss,
const boost::optional<UUID>& uuid) {
- const auto chunksNs = ChunkType::ShardNSPrefix +
- (!nss.isTemporaryReshardingCollection() && uuid ? uuid->toString() : nss.ns());
+ const NamespaceString chunksNss{ChunkType::ShardNSPrefix + nss.ns()};
DBDirectClient client(opCtx);
BSONObj result;
- if (!client.dropCollection(chunksNs, kLocalWriteConcern, &result)) {
+ if (!client.dropCollection(chunksNss.ns(), kLocalWriteConcern, &result)) {
auto status = getStatusFromCommandResult(result);
if (status != ErrorCodes::NamespaceNotFound) {
uassertStatusOK(status);
@@ -472,7 +467,7 @@ void dropChunks(OperationContext* opCtx,
LOGV2_DEBUG(3463201,
1,
"Dropped chunks cache",
- "chunksNamespace"_attr = chunksNs,
+ "chunksNamespace"_attr = chunksNss,
"collectionNamespace"_attr = nss,
"error"_attr = getStatusFromCommandResult(result));
}
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index c4a30df04fd..3fc733cd333 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -140,7 +140,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
<< chunk.getMin() << ChunkType::max() << chunk.getMax()));
query.readPref(ReadPreference::Nearest, BSONArray());
- NamespaceString chunkMetadataNss{ChunkType::ShardNSPrefix + uuid.toString()};
+ NamespaceString chunkMetadataNss{ChunkType::ShardNSPrefix + kNss.ns()};
std::unique_ptr<DBClientCursor> cursor = client.query(chunkMetadataNss, query, 1);
ASSERT(cursor);
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index ee4ff25d751..a94d360bcf5 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -827,7 +827,7 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
<< "'."};
}
- if (maxLoaderVersion.isSet() &&
+ if (false && maxLoaderVersion.isSet() &&
maxLoaderVersion.getTimestamp().is_initialized() !=
collAndChunks.creationTime.is_initialized() &&
maxLoaderVersion.epoch() == collAndChunks.epoch) {
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
index edf5e1a435f..675fd1ee336 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp
@@ -438,168 +438,6 @@ TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindMixedChun
}
}
-TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedWithChangeOnMetadataFormat) {
- // First set up the shard chunk loader as sharded.
- auto collAndChunks = setUpChunkLoaderWithFiveChunks();
- auto& collType = collAndChunks.first;
- auto& chunks = collAndChunks.second;
-
- auto changeMetadataFormat = [&](const boost::optional<Timestamp>& timestamp) {
- auto lastChunk = chunks.back();
- lastChunk.setVersion([&]() {
- const auto v = lastChunk.getVersion();
- return ChunkVersion(v.majorVersion(), v.minorVersion(), v.epoch(), timestamp);
- }());
-
- collType.setTimestamp(timestamp);
- _remoteLoaderMock->setCollectionRefreshReturnValue(collType);
- _remoteLoaderMock->setChunkRefreshReturnValue(std::vector{lastChunk});
-
- auto collAndChunksRes = _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
- ASSERT_EQUALS(collAndChunksRes.epoch, collType.getEpoch());
- ASSERT_EQUALS(collAndChunksRes.creationTime, timestamp);
- ASSERT_EQUALS(collAndChunksRes.changedChunks.size(), 5UL);
- for (const auto& changedChunk : collAndChunksRes.changedChunks) {
- ASSERT_EQUALS(changedChunk.getVersion().getTimestamp(), timestamp);
- ASSERT_EQUALS(changedChunk.getVersion().epoch(), collAndChunksRes.epoch);
- }
- };
-
- // Upgrading the metadata format to 5.0
- changeMetadataFormat(Timestamp(42));
- // Downgrading the metadata format to 4.4
- changeMetadataFormat(boost::none /* timestamp */);
-}
-
-TEST_F(ShardServerCatalogCacheLoaderTest,
- PrimaryLoadFromShardedWithChangeOnMetadataFormatBecauseUpgrade) {
- const auto timestamp = Timestamp(42);
- ChunkVersion collectionVersion(1, 0, OID::gen(), boost::none /* timestamp */);
- CollectionType collectionType = makeCollectionType(collectionVersion);
- vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
-
- // 1st refresh as if we were in 4.4: the loader discovers one new chunk without timestamp
- {
- _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
- _remoteLoaderMock->setChunkRefreshReturnValue(std::vector{chunks[0]});
- const auto collAndChunksRes =
- _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
- ASSERT_EQUALS(collAndChunksRes.changedChunks.size(), 1UL);
- ASSERT_EQUALS(collAndChunksRes.creationTime, boost::none);
- ASSERT_EQUALS(collAndChunksRes.changedChunks[0].getVersion().getTimestamp(), boost::none);
- }
-
- // 2nd refresh as if we were in the phase 1 of the setFCV process to upgrade to 5.0: the loader
- // discovers a few new chunks with timestamp but the collection doesn't have it yet.
- {
- for (size_t i = 1; i < chunks.size() - 1; ++i) {
- chunks[i].setVersion([&]() {
- const auto v = chunks[i].getVersion();
- return ChunkVersion(v.majorVersion(), v.minorVersion(), v.epoch(), timestamp);
- }());
- }
-
- _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
- _remoteLoaderMock->setChunkRefreshReturnValue(
- std::vector<ChunkType>(chunks.begin() + 1, chunks.end() - 1));
- const auto collAndChunksRes =
- _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
- const auto& changedChunks = collAndChunksRes.changedChunks;
- ASSERT_EQUALS(changedChunks.size(), 4UL);
- ASSERT_EQUALS(collAndChunksRes.creationTime, boost::none);
- ASSERT_EQUALS(changedChunks[0].getVersion().getTimestamp(), boost::none);
- for (size_t i = 1; i < chunks.size() - 1; ++i)
- ASSERT_EQUALS(changedChunks[i].getVersion().getTimestamp(), timestamp);
- }
-
- // 3rd refresh as if we were in 5.0: the loader discovers a new chunk. All chunks and the
- // collection have timestamps.
- {
- chunks.back().setVersion([&]() {
- const auto v = chunks.back().getVersion();
- return ChunkVersion(v.majorVersion(), v.minorVersion(), v.epoch(), timestamp);
- }());
- collectionType.setTimestamp(timestamp);
-
- _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
- _remoteLoaderMock->setChunkRefreshReturnValue(std::vector{chunks.back()});
- const auto collAndChunksRes =
- _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
- const auto& changedChunks = collAndChunksRes.changedChunks;
- ASSERT_EQUALS(changedChunks.size(), 5UL);
- ASSERT_EQUALS(collAndChunksRes.creationTime, timestamp);
- for (size_t i = 0; i < chunks.size(); ++i)
- ASSERT_EQUALS(changedChunks[i].getVersion().getTimestamp(), timestamp);
- }
-}
-
-TEST_F(ShardServerCatalogCacheLoaderTest,
- PrimaryLoadFromShardedWithChangeOnMetadataFormatBecauseDowngrade) {
- const auto timestamp = Timestamp(42);
- ChunkVersion collectionVersion(1, 0, OID::gen(), timestamp);
- CollectionType collectionType = makeCollectionType(collectionVersion);
- vector<ChunkType> chunks = makeFiveChunks(collectionVersion);
-
- // 1st refresh as if we were in 5.0: the loader discovers one new chunk with timestamp. The
- // collection also has timestamps.
- {
- _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
- _remoteLoaderMock->setChunkRefreshReturnValue(std::vector{chunks[0]});
- const auto collAndChunksRes =
- _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
- ASSERT_EQUALS(collAndChunksRes.changedChunks.size(), 1UL);
- ASSERT_EQUALS(collAndChunksRes.creationTime, timestamp);
- ASSERT_EQUALS(collAndChunksRes.changedChunks[0].getVersion().getTimestamp(), timestamp);
- }
-
- // 2nd refresh: the loader discovers a few new chunks without timestamp but the collection still
- // has it.
- {
- for (size_t i = 1; i < chunks.size() - 1; ++i) {
- chunks[i].setVersion([&]() {
- const auto v = chunks[i].getVersion();
- return ChunkVersion(
- v.majorVersion(), v.minorVersion(), v.epoch(), boost::none /* timestamp */);
- }());
- }
-
- _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
- _remoteLoaderMock->setChunkRefreshReturnValue(
- std::vector<ChunkType>(chunks.begin() + 1, chunks.end() - 1));
- const auto collAndChunksRes =
- _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
- const auto& changedChunks = collAndChunksRes.changedChunks;
- ASSERT_EQUALS(changedChunks.size(), 4UL);
- ASSERT_EQUALS(collAndChunksRes.creationTime, timestamp);
- ASSERT_EQUALS(changedChunks[0].getVersion().getTimestamp(), timestamp);
- for (size_t i = 1; i < chunks.size() - 1; ++i)
- ASSERT_EQUALS(changedChunks[i].getVersion().getTimestamp(),
- boost::none /* timestamp */);
- }
-
- // 3rd refresh as if we were in 4.4: the loader discovers a new chunk. All chunks and the
- // collection don't have timestamps.
- {
- chunks.back().setVersion([&]() {
- const auto v = chunks.back().getVersion();
- return ChunkVersion(
- v.majorVersion(), v.minorVersion(), v.epoch(), boost::none /* timestamp */);
- }());
- collectionType.setTimestamp(boost::none /* timestamp */);
-
- _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType);
- _remoteLoaderMock->setChunkRefreshReturnValue(std::vector{chunks.back()});
- const auto collAndChunksRes =
- _shardLoader->getChunksSince(kNss, chunks[0].getVersion()).get();
- const auto& changedChunks = collAndChunksRes.changedChunks;
- ASSERT_EQUALS(changedChunks.size(), 5UL);
- ASSERT_EQUALS(collAndChunksRes.creationTime, boost::none /* timestamp */);
- for (size_t i = 0; i < chunks.size(); ++i)
- ASSERT_EQUALS(changedChunks[i].getVersion().getTimestamp(),
- boost::none /* timestamp */);
- }
-}
-
TEST_F(ShardServerCatalogCacheLoaderTest, PrimaryLoadFromShardedAndFindDbMetadataFormatChanged) {
const std::string dbName("dbName");
DatabaseVersion version(UUID::gen());