author     Antonio Fuschetto <antonio.fuschetto@mongodb.com>   2021-08-17 06:55:08 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2021-08-18 07:19:28 +0000
commit     1d6b7bab47918cbf1b6361e9f29f3f91e8441698 (patch)
tree       65a6c8dd77e75ab30475bdd42ed17e55ffce6a61 /src/mongo
parent     090204664c59243ac6bd2e8629b969c6837df2d9 (diff)
download   mongo-1d6b7bab47918cbf1b6361e9f29f3f91e8441698.tar.gz
SERVER-58575 Replace the logic that decides whether the SSCCL should use namespaces or uuids for config.cache.chunks collections
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/s/shard_metadata_util.cpp               |  79
-rw-r--r--  src/mongo/db/s/shard_metadata_util.h                 |  25
-rw-r--r--  src/mongo/db/s/shard_metadata_util_test.cpp          |  20
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.cpp | 160
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.h   |  17
-rw-r--r--  src/mongo/db/s/type_shard_collection.h               |   1
-rw-r--r--  src/mongo/s/catalog/type_collection.h                |   1
7 files changed, 183 insertions, 120 deletions
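
For orientation, the naming rule this patch applies to the config.cache.chunks.* collections reduces to the sketch below. The helper name is illustrative only; the patch itself keeps the ternary inline in readShardChunks, updateShardChunks and dropChunks.

NamespaceString resolveShardChunksNss(const NamespaceString& nss,
                                      const UUID& uuid,
                                      SupportingLongNameStatusEnum supportingLongName) {
    // Key the cache by namespace while long-name support is disabled or for temporary
    // resharding collections; otherwise key it by the collection UUID.
    const auto chunksNsPostfix{supportingLongName == SupportingLongNameStatusEnum::kDisabled ||
                                       nss.isTemporaryReshardingCollection()
                                   ? nss.ns()
                                   : uuid.toString()};
    return NamespaceString{ChunkType::ShardNSPrefix + chunksNsPostfix};
}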
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index 07bcb7c22bd..d7bf11091bf 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -276,13 +276,18 @@ Status updateShardDatabasesEntry(OperationContext* opCtx,
StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
const NamespaceString& nss,
- const boost::optional<UUID>& uuid,
+ const UUID& uuid,
+ SupportingLongNameStatusEnum supportingLongName,
const BSONObj& query,
const BSONObj& sort,
boost::optional<long long> limit,
const OID& epoch,
const boost::optional<Timestamp>& timestamp) {
- const NamespaceString chunksNss{ChunkType::ShardNSPrefix + nss.ns()};
+ const auto chunksNsPostfix{supportingLongName == SupportingLongNameStatusEnum::kDisabled ||
+ nss.isTemporaryReshardingCollection()
+ ? nss.ns()
+ : uuid.toString()};
+ const NamespaceString chunksNss{ChunkType::ShardNSPrefix + chunksNsPostfix};
try {
DBDirectClient client(opCtx);
@@ -317,12 +322,17 @@ StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
Status updateShardChunks(OperationContext* opCtx,
const NamespaceString& nss,
- const boost::optional<UUID>& uuid,
+ const UUID& uuid,
+ SupportingLongNameStatusEnum supportingLongName,
const std::vector<ChunkType>& chunks,
const OID& currEpoch) {
invariant(!chunks.empty());
- const NamespaceString chunksNss{ChunkType::ShardNSPrefix + nss.ns()};
+ const auto chunksNsPostfix{supportingLongName == SupportingLongNameStatusEnum::kDisabled ||
+ nss.isTemporaryReshardingCollection()
+ ? nss.ns()
+ : uuid.toString()};
+ const NamespaceString chunksNss{ChunkType::ShardNSPrefix + chunksNsPostfix};
try {
DBDirectClient client(opCtx);
@@ -390,6 +400,27 @@ Status updateShardChunks(OperationContext* opCtx,
}
}
+void updateSupportingLongNameOnShardCollections(OperationContext* opCtx,
+ const NamespaceString& nss,
+ SupportingLongNameStatusEnum supportingLongName) {
+ write_ops::UpdateCommandRequest commandRequest(
+ NamespaceString::kShardConfigCollectionsNamespace, [&] {
+ BSONObj modifiers = supportingLongName != SupportingLongNameStatusEnum::kDisabled
+ ? BSON("$set" << BSON(CollectionType::kSupportingLongNameFieldName
+ << SupportingLongNameStatus_serializer(supportingLongName)))
+ : BSON("$unset" << BSON(CollectionType::kSupportingLongNameFieldName << 1));
+
+ write_ops::UpdateOpEntry updateOp;
+ updateOp.setQ(BSON(ShardCollectionType::kNssFieldName << nss.ns()));
+ updateOp.setU(write_ops::UpdateModification::parseFromClassicUpdate(modifiers));
+ return std::vector{updateOp};
+ }());
+
+ DBDirectClient dbClient(opCtx);
+ const auto commandResponse = dbClient.runCommand(commandRequest.serialize({}));
+ uassertStatusOK(getStatusFromWriteCommandReply(commandResponse->getCommandReply()));
+}
+
void updateTimestampOnShardCollections(OperationContext* opCtx,
const NamespaceString& nss,
const boost::optional<Timestamp>& timestamp) {
@@ -411,17 +442,17 @@ void updateTimestampOnShardCollections(OperationContext* opCtx,
}
Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx, const NamespaceString& nss) {
- try {
- const auto localUuid = [&] {
- const auto statusWithCollectionEntry = readShardCollectionsEntry(opCtx, nss);
- if (!statusWithCollectionEntry.isOK()) {
- return boost::optional<UUID>(boost::none);
- }
- const auto collectionEntry = statusWithCollectionEntry.getValue();
- return collectionEntry.getTimestamp() ? collectionEntry.getUuid()
- : boost::optional<UUID>(boost::none);
- }();
+ // TODO (SERVER-58361): Reduce the access to local collections.
+ const auto statusWithCollectionEntry = readShardCollectionsEntry(opCtx, nss);
+ if (statusWithCollectionEntry.getStatus() == ErrorCodes::NamespaceNotFound) {
+ return Status::OK();
+ }
+ uassertStatusOKWithContext(statusWithCollectionEntry,
+ str::stream() << "Failed to read persisted collection entry for '"
+ << nss.ns() << "'.");
+ const auto& collectionEntry = statusWithCollectionEntry.getValue();
+ try {
DBDirectClient client(opCtx);
auto deleteCommandResponse = client.runCommand([&] {
write_ops::DeleteCommandRequest deleteOp(
@@ -437,12 +468,12 @@ Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx, const Namesp
uassertStatusOK(
getStatusFromWriteCommandResponse(deleteCommandResponse->getCommandReply()));
- dropChunks(opCtx, nss, localUuid);
+ dropChunks(opCtx, nss, collectionEntry.getUuid(), collectionEntry.getSupportingLongName());
LOGV2(3463200,
"Dropped chunks and collection caches",
"collectionNamespace"_attr = nss,
- "collectionUuid"_attr = localUuid);
+ "collectionUUID"_attr = collectionEntry.getUuid());
return Status::OK();
} catch (const DBException& ex) {
@@ -452,8 +483,13 @@ Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx, const Namesp
void dropChunks(OperationContext* opCtx,
const NamespaceString& nss,
- const boost::optional<UUID>& uuid) {
- const NamespaceString chunksNss{ChunkType::ShardNSPrefix + nss.ns()};
+ const UUID& uuid,
+ SupportingLongNameStatusEnum supportingLongName) {
+ const auto chunksNsPostfix{supportingLongName == SupportingLongNameStatusEnum::kDisabled ||
+ nss.isTemporaryReshardingCollection()
+ ? nss.ns()
+ : uuid.toString()};
+ const NamespaceString chunksNss{ChunkType::ShardNSPrefix + chunksNsPostfix};
DBDirectClient client(opCtx);
BSONObj result;
@@ -463,13 +499,6 @@ void dropChunks(OperationContext* opCtx,
uassertStatusOK(status);
}
}
-
- LOGV2_DEBUG(3463201,
- 1,
- "Dropped chunks cache",
- "chunksNamespace"_attr = chunksNss,
- "collectionNamespace"_attr = nss,
- "error"_attr = getStatusFromCommandResult(result));
}
Status deleteDatabasesEntry(OperationContext* opCtx, StringData dbName) {
diff --git a/src/mongo/db/s/shard_metadata_util.h b/src/mongo/db/s/shard_metadata_util.h
index 2e31f972a77..3bbfaa6123c 100644
--- a/src/mongo/db/s/shard_metadata_util.h
+++ b/src/mongo/db/s/shard_metadata_util.h
@@ -48,6 +48,7 @@ class ShardCollectionType;
class ShardDatabaseType;
template <typename T>
class StatusWith;
+enum class SupportingLongNameStatusEnum : std::int32_t;
/**
* Function helpers to locally, using a DBDirectClient, read and write sharding metadata on a shard.
@@ -165,13 +166,11 @@ Status updateShardDatabasesEntry(OperationContext* opCtx,
* 'query', returning at most 'limit' chunks in 'sort' order. 'epoch' populates the returned chunks'
* version fields, because we do not yet have UUIDs to replace epochs nor UUIDs associated with
* namespaces.
- *
- * Starting with FCV 5.0, the collection UUID is used to read chunk metadata. Instead, the
- * collection namespace is used with previous FCV.
*/
StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
const NamespaceString& nss,
- const boost::optional<UUID>& uuid,
+ const UUID& uuid,
+ SupportingLongNameStatusEnum supportingLongName,
const BSONObj& query,
const BSONObj& sort,
boost::optional<long long> limit,
@@ -184,13 +183,12 @@ StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
* as the updated chunk document is inserted. If the epoch of a chunk in 'chunks' does not match
* 'currEpoch', a ConflictingOperationInProgress error is returned and no more updates are applied.
*
- * Starting with FCV 5.0, the collection UUID is used to update chunk metadata. Insteed, the
- * collection namespace is used with previous FCV.
- *
* Note: two threads running this function in parallel for the same collection can corrupt the
* collection data!
*
* nss - the collection namespace for which chunk metadata is being updated.
+ * supportingLongName - when enabled, chunk metadata is accessed by collection 'uuid' rather than
+ * 'nss'.
* uuid - the collection UUID for which chunk metadata is being updated.
* chunks - chunks retrieved from the config server, sorted in ascending chunk version order.
* currEpoch - what this shard server expects the collection epoch to be.
@@ -202,11 +200,19 @@ StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
*/
Status updateShardChunks(OperationContext* opCtx,
const NamespaceString& nss,
- const boost::optional<UUID>& uuid,
+ const UUID& uuid,
+ SupportingLongNameStatusEnum supportingLongName,
const std::vector<ChunkType>& chunks,
const OID& currEpoch);
/**
+ * Update the 'supportingLongName' field of the 'nss' entry in config.cache.collections.
+ */
+void updateSupportingLongNameOnShardCollections(OperationContext* opCtx,
+ const NamespaceString& nss,
+ SupportingLongNameStatusEnum supportingLongName);
+
+/**
* Adds/removes the timestamp of the 'nss' entry in config.cache.collections
*/
void updateTimestampOnShardCollections(OperationContext* opCtx,
@@ -229,7 +235,8 @@ Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx, const Namesp
*/
void dropChunks(OperationContext* opCtx,
const NamespaceString& nss,
- const boost::optional<UUID>& uuid);
+ const UUID& uuid,
+ SupportingLongNameStatusEnum supportingLongName);
/**
* Deletes locally persisted database metadata associated with 'dbName': removes the databases
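
With the boost::optional<UUID> parameter gone, callers are expected to fetch both the UUID and the long-name status from the persisted collection entry before touching the chunks cache. A sketch of the recurring caller pattern, assuming the entry exists (includes and error handling trimmed):

const auto entry = uassertStatusOK(readShardCollectionsEntry(opCtx, nss));
const auto chunks = uassertStatusOK(readShardChunks(opCtx,
                                                    nss,
                                                    entry.getUuid(),
                                                    entry.getSupportingLongName(),
                                                    BSONObj() /* query: all chunks */,
                                                    BSON(ChunkType::lastmod() << -1) /* sort */,
                                                    1LL /* limit */,
                                                    entry.getEpoch(),
                                                    entry.getTimestamp()));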
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index 3fc733cd333..1739c32da5d 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -51,6 +51,8 @@ using std::vector;
using unittest::assertGet;
const NamespaceString kNss = NamespaceString("test.foo");
+const SupportingLongNameStatusEnum kSupportingLongName =
+ SupportingLongNameStatusEnum::kExplicitlyEnabled;
const NamespaceString kChunkMetadataNss = NamespaceString("config.cache.chunks.test.foo");
const ShardId kShardId = ShardId("shard0");
const bool kUnique = false;
@@ -68,6 +70,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
<< ShardCollectionType::kDefaultCollationFieldName << defaultCollation
<< ShardCollectionType::kUniqueFieldName << kUnique));
shardCollectionType.setRefreshing(true);
+ shardCollectionType.setSupportingLongName(kSupportingLongName);
ASSERT_OK(updateShardCollectionsEntry(operationContext(),
BSON(ShardCollectionType::kNssFieldName << kNss.ns()),
@@ -81,8 +84,8 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
* Inserts 'chunks' into the shard's chunks collection.
*/
void setUpChunks(const std::vector<ChunkType> chunks) {
- ASSERT_OK(
- updateShardChunks(operationContext(), kNss, uuid, chunks, maxCollVersion.epoch()));
+ ASSERT_OK(updateShardChunks(
+ operationContext(), kNss, uuid, kSupportingLongName, chunks, maxCollVersion.epoch()));
}
/**
@@ -140,7 +143,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
<< chunk.getMin() << ChunkType::max() << chunk.getMax()));
query.readPref(ReadPreference::Nearest, BSONArray());
- NamespaceString chunkMetadataNss{ChunkType::ShardNSPrefix + kNss.ns()};
+ NamespaceString chunkMetadataNss{ChunkType::ShardNSPrefix + uuid.toString()};
std::unique_ptr<DBClientCursor> cursor = client.query(chunkMetadataNss, query, 1);
ASSERT(cursor);
@@ -227,7 +230,8 @@ TEST_F(ShardMetadataUtilTest, PersistedRefreshSignalStartAndFinish) {
TEST_F(ShardMetadataUtilTest, WriteAndReadChunks) {
std::vector<ChunkType> chunks = makeFourChunks();
- ASSERT_OK(updateShardChunks(operationContext(), kNss, uuid, chunks, maxCollVersion.epoch()));
+ ASSERT_OK(updateShardChunks(
+ operationContext(), kNss, uuid, kSupportingLongName, chunks, maxCollVersion.epoch()));
checkChunks(chunks);
// read all the chunks
@@ -236,6 +240,7 @@ TEST_F(ShardMetadataUtilTest, WriteAndReadChunks) {
std::vector<ChunkType> readChunks = assertGet(readShardChunks(operationContext(),
kNss,
uuid,
+ kSupportingLongName,
allChunkDiff.query,
allChunkDiff.sort,
boost::none,
@@ -252,6 +257,7 @@ TEST_F(ShardMetadataUtilTest, WriteAndReadChunks) {
readChunks = assertGet(readShardChunks(operationContext(),
kNss,
uuid,
+ kSupportingLongName,
oneChunkDiff.query,
oneChunkDiff.sort,
boost::none,
@@ -266,7 +272,8 @@ TEST_F(ShardMetadataUtilTest, UpdateWithWriteNewChunks) {
// Load some chunk metadata.
std::vector<ChunkType> chunks = makeFourChunks();
- ASSERT_OK(updateShardChunks(operationContext(), kNss, uuid, chunks, maxCollVersion.epoch()));
+ ASSERT_OK(updateShardChunks(
+ operationContext(), kNss, uuid, kSupportingLongName, chunks, maxCollVersion.epoch()));
checkChunks(chunks);
// Load some changes and make sure it's applied correctly.
@@ -310,7 +317,8 @@ TEST_F(ShardMetadataUtilTest, UpdateWithWriteNewChunks) {
frontChunkControl.setVersion(collVersion);
newChunks.push_back(frontChunkControl);
- ASSERT_OK(updateShardChunks(operationContext(), kNss, uuid, newChunks, collVersion.epoch()));
+ ASSERT_OK(updateShardChunks(
+ operationContext(), kNss, uuid, kSupportingLongName, newChunks, collVersion.epoch()));
chunks.push_back(splitChunkOne);
chunks.push_back(splitChunkTwoMoved);
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 8756b9bf3df..16dc1fd545a 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -71,14 +71,24 @@ AtomicWord<unsigned long long> taskIdGenerator{0};
void dropChunksIfEpochChanged(OperationContext* opCtx,
const NamespaceString& nss,
- const boost::optional<UUID>& uuid,
const CollectionAndChangedChunks& collAndChunks,
const ChunkVersion& maxLoaderVersion) {
if (collAndChunks.epoch != maxLoaderVersion.epoch() &&
maxLoaderVersion != ChunkVersion::UNSHARDED()) {
// If the collection has a new epoch, delete all existing chunks in the persisted routing
// table cache.
- dropChunks(opCtx, nss, uuid);
+
+ // TODO (SERVER-58361): Reduce the access to local collections.
+ const auto statusWithCollectionEntry = readShardCollectionsEntry(opCtx, nss);
+ if (statusWithCollectionEntry.getStatus() == ErrorCodes::NamespaceNotFound) {
+ return;
+ }
+ uassertStatusOKWithContext(
+ statusWithCollectionEntry,
+ str::stream() << "Failed to read persisted collection entry for '" << nss.ns() << "'.");
+ const auto& collectionEntry = statusWithCollectionEntry.getValue();
+
+ dropChunks(opCtx, nss, collectionEntry.getUuid(), collectionEntry.getSupportingLongName());
if (MONGO_unlikely(hangPersistCollectionAndChangedChunksAfterDropChunks.shouldFail())) {
LOGV2(22093, "Hit hangPersistCollectionAndChangedChunksAfterDropChunks failpoint");
@@ -103,15 +113,12 @@ Status persistCollectionAndChangedChunks(OperationContext* opCtx,
const NamespaceString& nss,
const CollectionAndChangedChunks& collAndChunks,
const ChunkVersion& maxLoaderVersion) {
- const auto localUuid = [&] {
- const auto statusWithCollectionEntry = readShardCollectionsEntry(opCtx, nss);
- if (!statusWithCollectionEntry.isOK()) {
- return boost::optional<UUID>(boost::none);
- }
- const auto collectionEntry = statusWithCollectionEntry.getValue();
- return collectionEntry.getTimestamp() ? collectionEntry.getUuid()
- : boost::optional<UUID>(boost::none);
- }();
+ // If the collection has a new epoch, delete all existing chunks in the persisted cache.
+ try {
+ dropChunksIfEpochChanged(opCtx, nss, collAndChunks, maxLoaderVersion);
+ } catch (const DBException& ex) {
+ return ex.toStatus();
+ }
// Update the collections collection entry for 'nss' in case there are any new updates.
ShardCollectionType update(nss,
@@ -141,15 +148,10 @@ Status persistCollectionAndChangedChunks(OperationContext* opCtx,
}
// Update the chunks.
- try {
- dropChunksIfEpochChanged(opCtx, nss, localUuid, collAndChunks, maxLoaderVersion);
- } catch (const DBException& ex) {
- return ex.toStatus();
- }
-
status = updateShardChunks(opCtx,
nss,
- collAndChunks.creationTime ? collAndChunks.uuid : boost::none,
+ *collAndChunks.uuid,
+ collAndChunks.supportingLongName,
collAndChunks.changedChunks,
collAndChunks.epoch);
if (!status.isOK()) {
@@ -189,21 +191,23 @@ Status persistDbVersion(OperationContext* opCtx, const DatabaseType& dbt) {
/**
* This function will throw on error!
*
- * Retrieves the persisted max chunk version for 'nss', if there are any persisted chunks. If there
- * are none -- meaning there's no persisted metadata for 'nss' --, returns a
- * ChunkVersion::UNSHARDED() version.
+ * Retrieves the max chunk version and the latest status of the support for long names of the
+ * persisted metadata. If there is no persisted collection entry or no associated chunks, returns
+ * 'ChunkVersion::UNSHARDED()' version and 'SupportingLongNameStatusEnum::kDisabled' supporting long
+ * name.
*
* It is unsafe to call this when a task for 'nss' is running concurrently because the collection
* could be dropped and recreated or have its shard key refined between reading the collection epoch
* and retrieving the chunk, which would make the returned ChunkVersion corrupt.
*/
-ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const NamespaceString& nss) {
+const auto getPersistedMaxChunkVersionAndLastestSupportingLongName(OperationContext* opCtx,
+ const NamespaceString& nss) {
// Must read the collections entry to get the epoch to pass into ChunkType for shard's chunk
// collection.
auto statusWithCollection = readShardCollectionsEntry(opCtx, nss);
if (statusWithCollection == ErrorCodes::NamespaceNotFound) {
// There is no persisted metadata.
- return ChunkVersion::UNSHARDED();
+ return std::make_tuple(ChunkVersion::UNSHARDED(), SupportingLongNameStatusEnum::kDisabled);
}
uassertStatusOKWithContext(statusWithCollection,
@@ -220,26 +224,28 @@ ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const Namespac
// Therefore, we have no choice but to just throw away the cache and start from scratch.
uassertStatusOK(dropChunksAndDeleteCollectionsEntry(opCtx, nss));
- return ChunkVersion::UNSHARDED();
+ return std::make_tuple(ChunkVersion::UNSHARDED(), SupportingLongNameStatusEnum::kDisabled);
}
- auto statusWithChunk = shardmetadatautil::readShardChunks(
- opCtx,
- nss,
- cachedCollection.getTimestamp() ? boost::optional<UUID>(cachedCollection.getUuid())
- : boost::none,
- BSONObj(),
- BSON(ChunkType::lastmod() << -1),
- 1LL,
- cachedCollection.getEpoch(),
- cachedCollection.getTimestamp());
+ auto statusWithChunk =
+ shardmetadatautil::readShardChunks(opCtx,
+ nss,
+ cachedCollection.getUuid(),
+ cachedCollection.getSupportingLongName(),
+ BSONObj(),
+ BSON(ChunkType::lastmod() << -1),
+ 1LL,
+ cachedCollection.getEpoch(),
+ cachedCollection.getTimestamp());
uassertStatusOKWithContext(
statusWithChunk,
str::stream() << "Failed to read highest version persisted chunk for collection '"
<< nss.ns() << "'.");
- return statusWithChunk.getValue().empty() ? ChunkVersion::UNSHARDED()
- : statusWithChunk.getValue().front().getVersion();
+ const auto maxChunkVersion = statusWithChunk.getValue().empty()
+ ? ChunkVersion::UNSHARDED()
+ : statusWithChunk.getValue().front().getVersion();
+ return std::make_tuple(maxChunkVersion, cachedCollection.getSupportingLongName());
}
/**
@@ -269,16 +275,16 @@ CollectionAndChangedChunks getPersistedMetadataSinceVersion(OperationContext* op
QueryAndSort diff = createShardChunkDiffQuery(startingVersion);
- auto changedChunks = uassertStatusOK(readShardChunks(
- opCtx,
- nss,
- shardCollectionEntry.getTimestamp() ? boost::optional<UUID>(shardCollectionEntry.getUuid())
- : boost::none,
- diff.query,
- diff.sort,
- boost::none,
- startingVersion.epoch(),
- startingVersion.getTimestamp()));
+ auto changedChunks =
+ uassertStatusOK(readShardChunks(opCtx,
+ nss,
+ shardCollectionEntry.getUuid(),
+ shardCollectionEntry.getSupportingLongName(),
+ diff.query,
+ diff.sort,
+ boost::none,
+ startingVersion.epoch(),
+ startingVersion.getTimestamp()));
return CollectionAndChangedChunks{shardCollectionEntry.getEpoch(),
shardCollectionEntry.getTimestamp(),
@@ -712,7 +718,7 @@ void ShardServerCatalogCacheLoader::_waitForTasksToCompleteAndRenameChunks(
OperationContext* opCtx,
const NamespaceString& nss,
const UUID& uuid,
- const boost::optional<Timestamp>& timestamp) {
+ SupportingLongNameStatusEnum supportingLongName) {
LOGV2_DEBUG(3463205,
1,
@@ -725,16 +731,17 @@ void ShardServerCatalogCacheLoader::_waitForTasksToCompleteAndRenameChunks(
waitForCollectionFlush(opCtx, nss);
- // Determine the renaming logic according to the current FCV. Namely:
- // - FCV 5.0 (or higher): NS-based chunks collection to be converted to UUID-based one
- // - FCV 4.4 (or lower): UUID-based chunks collection to be converted to NS-based one
+ // Determine the renaming logic according to the current collection setup. Namely:
+ // - kImplicitlyEnabled or kExplicitlyEnabled: NS-based chunks collection to be converted to
+ // UUID-based one
+ // - kDisabled: UUID-based chunks collection to be converted to NS-based one
const auto [fromChunksNss, toChunksNss] = [&] {
- if (timestamp) {
- return std::make_tuple(NamespaceString{ChunkType::ShardNSPrefix + nss.toString()},
- NamespaceString{ChunkType::ShardNSPrefix + uuid.toString()});
- } else {
+ if (supportingLongName == SupportingLongNameStatusEnum::kDisabled) {
return std::make_tuple(NamespaceString{ChunkType::ShardNSPrefix + uuid.toString()},
NamespaceString{ChunkType::ShardNSPrefix + nss.toString()});
+ } else {
+ return std::make_tuple(NamespaceString{ChunkType::ShardNSPrefix + nss.toString()},
+ NamespaceString{ChunkType::ShardNSPrefix + uuid.toString()});
}
}();
@@ -742,10 +749,9 @@ void ShardServerCatalogCacheLoader::_waitForTasksToCompleteAndRenameChunks(
uassertStatusOK(renameCollection(opCtx, fromChunksNss, toChunksNss, {}));
}
- // Update the timestamp on the specific shard collection according to the current FCV. This is
- // necessary to allow access to the shards cache using the correct namespace, i.e. based on
- // collection namespace or UUID.
- updateTimestampOnShardCollections(opCtx, nss, timestamp);
+ // Update the support for long name of the specific shard collection to allow cache access
+ // using the correct namespace, i.e., by collection namespace or UUID.
+ updateSupportingLongNameOnShardCollections(opCtx, nss, supportingLongName);
WriteConcernResult ignoreResult;
const auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
@@ -767,7 +773,7 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
long long termScheduled) {
// Get the max version the loader has.
- const ChunkVersion maxLoaderVersion = [&] {
+ const auto [maxLoaderVersion, lastSupportLongName] = [&] {
{
stdx::lock_guard<Latch> lock(_mutex);
auto taskListIt = _collAndChunkTaskLists.find(nss);
@@ -775,12 +781,13 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
if (taskListIt != _collAndChunkTaskLists.end() &&
taskListIt->second.hasTasksFromThisTerm(termScheduled)) {
// Enqueued tasks have the latest metadata
- return taskListIt->second.getHighestVersionEnqueued();
+ return std::make_tuple(taskListIt->second.getHighestVersionEnqueued(),
+ taskListIt->second.getLastSupportingLongNameEnqueued());
}
}
// If there are no enqueued tasks, get the max persisted
- return getPersistedMaxChunkVersion(opCtx, nss);
+ return getPersistedMaxChunkVersionAndLastestSupportingLongName(opCtx, nss);
}();
// Refresh the loader's metadata from the config server. The caller's request will
@@ -821,12 +828,10 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
<< "'."};
}
- if (false && maxLoaderVersion.isSet() &&
- maxLoaderVersion.getTimestamp().is_initialized() !=
- collAndChunks.creationTime.is_initialized() &&
- maxLoaderVersion.epoch() == collAndChunks.epoch) {
+ if (maxLoaderVersion.isSet() && maxLoaderVersion.epoch() == collAndChunks.epoch &&
+ lastSupportLongName != collAndChunks.supportingLongName) {
_waitForTasksToCompleteAndRenameChunks(
- opCtx, nss, *collAndChunks.uuid, collAndChunks.creationTime);
+ opCtx, nss, *collAndChunks.uuid, collAndChunks.supportingLongName);
// This task will update the metadata format of the collection and all its chunks.
// It doesn't apply the changes of the ChangedChunks, we will do that in the next task
@@ -838,14 +843,13 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
termScheduled,
true /* metadataFormatChanged */});
- LOGV2_FOR_CATALOG_REFRESH(5310400,
- 1,
- "Cache loader update metadata format for collection {namespace}"
- "{oldTimestamp} and {newTimestamp}",
- "Cache loader update metadata format for collection",
- "namespace"_attr = nss,
- "oldTimestamp"_attr = maxLoaderVersion.getTimestamp(),
- "newTimestamp"_attr = collAndChunks.creationTime);
+ LOGV2_FOR_CATALOG_REFRESH(
+ 5857500,
+ 1,
+ "Chunks cache collection renamed due to a change of the long name support",
+ "namespace"_attr = nss,
+ "oldSupportingLongName"_attr = lastSupportLongName,
+ "newSupportingLongName"_attr = collAndChunks.supportingLongName);
}
if ((collAndChunks.epoch != maxLoaderVersion.epoch()) ||
@@ -1566,6 +1570,14 @@ ChunkVersion ShardServerCatalogCacheLoader::CollAndChunkTaskList::getHighestVers
return _tasks.back().maxQueryVersion;
}
+SupportingLongNameStatusEnum
+ShardServerCatalogCacheLoader::CollAndChunkTaskList::getLastSupportingLongNameEnqueued() const {
+ invariant(!_tasks.empty());
+ const auto lastCollectionAndChangedChunks = _tasks.back().collectionAndChangedChunks;
+ return lastCollectionAndChangedChunks ? lastCollectionAndChangedChunks->supportingLongName
+ : SupportingLongNameStatusEnum::kDisabled;
+}
+
ShardServerCatalogCacheLoader::EnqueuedMetadataResults
ShardServerCatalogCacheLoader::CollAndChunkTaskList::getEnqueuedMetadataForTerm(
const long long term) const {
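
Taken together, the loader path in this file now behaves roughly as follows; this is a condensed sketch of the hunks above, with local names following the patch and surrounding code omitted.

// 1. Determine the newest known state for the collection (enqueued tasks first,
//    otherwise the persisted metadata).
const auto [maxLoaderVersion, lastSupportLongName] =
    getPersistedMaxChunkVersionAndLastestSupportingLongName(opCtx, nss);

// 2. After refreshing from the config server, a change of the long-name support for an
//    otherwise unchanged collection (same epoch) triggers a rename of the chunks cache
//    between config.cache.chunks.<nss> and config.cache.chunks.<uuid>.
if (maxLoaderVersion.isSet() && maxLoaderVersion.epoch() == collAndChunks.epoch &&
    lastSupportLongName != collAndChunks.supportingLongName) {
    _waitForTasksToCompleteAndRenameChunks(
        opCtx, nss, *collAndChunks.uuid, collAndChunks.supportingLongName);
}

// 3. The refreshed metadata is then persisted; updateShardChunks always receives the UUID
//    and the long-name status, so it addresses the correct cache collection.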
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.h b/src/mongo/db/s/shard_server_catalog_cache_loader.h
index 5d673ec6855..f686e9737bb 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.h
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.h
@@ -239,8 +239,14 @@ private:
ChunkVersion getHighestVersionEnqueued() const;
/**
- * Iterates over the task list to retrieve the enqueued metadata. Only retrieves collects
- * data from tasks that have terms matching the specified 'term'.
+ * Gets the last task's supporting long name status -- this is the most up to date
+ * supporting long name status.
+ */
+ SupportingLongNameStatusEnum getLastSupportingLongNameEnqueued() const;
+
+ /**
+ * Iterates over the task list to retrieve the enqueued metadata. Only collects
+ * data from tasks that have terms matching the specified 'term'.
*/
EnqueuedMetadataResults getEnqueuedMetadataForTerm(long long term) const;
@@ -343,14 +349,13 @@ private:
typedef std::map<NamespaceString, CollAndChunkTaskList> CollAndChunkTaskLists;
/**
- * Waits for processing of any pending task on the namespace, then renames the chunks cache
- * based on the collection namespace with the collection UUID and vice versa. The renaming using
- * the collection UUID is applied in FCV 5.0, while the collection namespace in lower FCV.
+ * Waits for any pending task on the collection to be processed, then renames the chunks cache
+ * collection using the collection namespace or UUID.
*/
void _waitForTasksToCompleteAndRenameChunks(OperationContext* opCtx,
const NamespaceString& nss,
const UUID& uuid,
- const boost::optional<Timestamp>& timestamp);
+ SupportingLongNameStatusEnum supportingLongName);
/**
* Forces the primary to refresh its metadata for 'nss' and waits until this node's metadata
diff --git a/src/mongo/db/s/type_shard_collection.h b/src/mongo/db/s/type_shard_collection.h
index a10de2ea999..f2d8123655e 100644
--- a/src/mongo/db/s/type_shard_collection.h
+++ b/src/mongo/db/s/type_shard_collection.h
@@ -46,6 +46,7 @@ public:
using ShardCollectionTypeBase::kNssFieldName;
using ShardCollectionTypeBase::kRefreshingFieldName;
using ShardCollectionTypeBase::kReshardingFieldsFieldName;
+ using ShardCollectionTypeBase::kSupportingLongNameFieldName;
using ShardCollectionTypeBase::kTimeseriesFieldsFieldName;
using ShardCollectionTypeBase::kUniqueFieldName;
using ShardCollectionTypeBase::kUuidFieldName;
diff --git a/src/mongo/s/catalog/type_collection.h b/src/mongo/s/catalog/type_collection.h
index 1e1adbd8ae8..79a4896306a 100644
--- a/src/mongo/s/catalog/type_collection.h
+++ b/src/mongo/s/catalog/type_collection.h
@@ -88,6 +88,7 @@ public:
using CollectionTypeBase::kNoAutoSplitFieldName;
using CollectionTypeBase::kNssFieldName;
using CollectionTypeBase::kReshardingFieldsFieldName;
+ using CollectionTypeBase::kSupportingLongNameFieldName;
using CollectionTypeBase::kTimeseriesFieldsFieldName;
using CollectionTypeBase::kTimestampFieldName;
using CollectionTypeBase::kUniqueFieldName;