3 files changed, 123 insertions, 9 deletions
diff --git a/jstests/sharding/check_metadata_consistency_large.js b/jstests/sharding/check_metadata_consistency_large.js
new file mode 100644
index 00000000000..a0fcdf08ce6
--- /dev/null
+++ b/jstests/sharding/check_metadata_consistency_large.js
@@ -0,0 +1,91 @@
+/*
+ * Tests to validate the correct behaviour of checkMetadataConsistency command with a lot of
+ * inconsistencies.
+ *
+ * @tags: [
+ *   featureFlagCheckMetadataConsistency,
+ *   requires_fcv_71,
+ *   resource_intensive,
+ * ]
+ */
+
+(function() {
+'use strict';
+
+load("jstests/libs/fail_point_util.js");
+load("jstests/sharding/libs/create_sharded_collection_util.js");
+
+// Configure initial sharding cluster
+const st = new ShardingTest({});
+const mongos = st.s;
+
+const dbName = "testCheckMetadataConsistencyDB";
+var dbCounter = 0;
+
+function getNewDb() {
+    return mongos.getDB(dbName + dbCounter++);
+}
+
+(function testManyInconsistencies() {
+    // Introduce a misplaced inconsistency
+    const db = getNewDb();
+    assert.commandWorked(
+        mongos.adminCommand({enableSharding: db.getName(), primaryShard: st.shard0.shardName}));
+    assert.commandWorked(st.shard1.getDB(db.getName()).coll.insert({_id: 'foo'}));
+
+    const kFakeInconsistenciesPerShard = 1000;
+    const data = {numInconsistencies: NumberInt(kFakeInconsistenciesPerShard)};
+    const fp1 = configureFailPoint(st.shard0, 'insertFakeInconsistencies', data);
+    const fp2 = configureFailPoint(st.shard1, 'insertFakeInconsistencies', data);
+
+    // If catalog shard is enabled, there will be introduced inconsistencies in shard0, shard1 and
+    // config. Otherwise, only shard0 and shard1.
+    const kExpectedInconsistencies = TestData.configShard ? 3 * kFakeInconsistenciesPerShard + 1
+                                                          : 2 * kFakeInconsistenciesPerShard + 1;
+
+    let inconsistencies = db.checkMetadataConsistency().toArray();
+    assert.eq(kExpectedInconsistencies, inconsistencies.length, tojson(inconsistencies));
+
+    // Clean up the database to pass the hooks that detect inconsistencies
+    fp1.off();
+    fp2.off();
+    db.dropDatabase();
+    inconsistencies = mongos.getDB("admin").checkMetadataConsistency().toArray();
+    assert.eq(0, inconsistencies.length, tojson(inconsistencies));
+})();
+
+(function testMissingManyIndexes() {
+    const db = getNewDb();
+    const checkOptions = {'checkIndexes': 1};
+    const kIndexes = 60;
+
+    assert.commandWorked(st.s.adminCommand({enableSharding: db.getName()}));
+    st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+    CreateShardedCollectionUtil.shardCollectionWithChunks(db.coll, {x: 1}, [
+        {min: {x: MinKey}, max: {x: 1}, shard: st.shard0.shardName},
+        {min: {x: 1}, max: {x: MaxKey}, shard: st.shard1.shardName},
+    ]);
+
+    const shard0Coll = st.shard0.getDB(db.getName()).coll;
+    const shard1Coll = st.shard1.getDB(db.getName()).coll;
+
+    const shard0Indexes = Array.from({length: kIndexes}, (_, i) => ({['index0' + i]: 1}));
+    const shard1Indexes = Array.from({length: kIndexes}, (_, i) => ({['index1' + i]: 1}));
+    assert.commandWorked(shard0Coll.createIndexes(shard0Indexes));
+    assert.commandWorked(shard1Coll.createIndexes(shard1Indexes));
+
+    // Check that the number of inconsistencies is correct
+    let inconsistencies = db.checkMetadataConsistency(checkOptions).toArray();
+    assert.eq(kIndexes * 2, inconsistencies.length, tojson(inconsistencies));
+    inconsistencies.forEach(inconsistency => {
+        assert.eq("InconsistentIndex", inconsistency.type, tojson(inconsistency));
+    });
+
+    // Clean up the database to pass the hooks that detect inconsistencies
+    assert.commandWorked(db.coll.dropIndexes());
+    inconsistencies = db.checkMetadataConsistency({'checkIndexes': 1}).toArray();
+    assert.eq(0, inconsistencies.length, tojson(inconsistencies));
+})();
+
+st.stop();
+})();
diff --git a/src/mongo/db/s/metadata_consistency_util.cpp b/src/mongo/db/s/metadata_consistency_util.cpp
index 42481d41fe4..f98b2fd35a6 100644
--- a/src/mongo/db/s/metadata_consistency_util.cpp
+++ b/src/mongo/db/s/metadata_consistency_util.cpp
@@ -49,6 +49,8 @@ namespace metadata_consistency_util {
 
 namespace {
 
+MONGO_FAIL_POINT_DEFINE(insertFakeInconsistencies);
+
 /*
  * Emit a warning log containing information about the given inconsistency
  */
@@ -153,6 +155,15 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> makeQueuedPlanExecutor(
     auto ws = std::make_unique<WorkingSet>();
     auto root = std::make_unique<QueuedDataStage>(expCtx.get(), ws.get());
 
+    insertFakeInconsistencies.execute([&](const BSONObj& data) {
+        const auto numInconsistencies = data["numInconsistencies"].safeNumberLong();
+        for (int i = 0; i < numInconsistencies; i++) {
+            inconsistencies.emplace_back(makeInconsistency(
+                MetadataInconsistencyTypeEnum::kCollectionUUIDMismatch,
+                CollectionUUIDMismatchDetails{nss, ShardId{"shard"}, UUID::gen(), UUID::gen()}));
+        }
+    });
+
     for (auto&& inconsistency : inconsistencies) {
         // Every inconsistency encountered need to be logged with the same format
         // to allow log injestion systems to correctly detect them.
diff --git a/src/mongo/db/s/shardsvr_check_metadata_consistency_participant_command.cpp b/src/mongo/db/s/shardsvr_check_metadata_consistency_participant_command.cpp
index 4fe358478a0..0d57f8494ed 100644
--- a/src/mongo/db/s/shardsvr_check_metadata_consistency_participant_command.cpp
+++ b/src/mongo/db/s/shardsvr_check_metadata_consistency_participant_command.cpp
@@ -32,13 +32,13 @@
 
 #include "mongo/db/catalog/collection_catalog.h"
 #include "mongo/db/clientcursor.h"
 #include "mongo/db/commands.h"
-#include "mongo/db/cursor_manager.h"
 #include "mongo/db/s/ddl_lock_manager.h"
 #include "mongo/db/s/metadata_consistency_util.h"
 #include "mongo/db/s/sharding_state.h"
 #include "mongo/logv2/log.h"
 #include "mongo/s/grid.h"
 #include "mongo/s/query/cluster_aggregate.h"
+#include "mongo/s/query/cluster_cursor_manager.h"
 #include "mongo/s/request_types/sharded_ddl_commands_gen.h"
 #include "mongo/s/stale_shard_version_helpers.h"
@@ -157,15 +157,27 @@ std::vector<MetadataInconsistencyItem> checkIndexesInconsistencies(
             return;
         }
 
-        auto cursorPin = uassertStatusOK(
-            CursorManager::get(opCtx)->pinCursor(opCtx, indexStatsCursor.getCursorId()));
-        auto exec = cursorPin->getExecutor();
+        const auto authzSession = AuthorizationSession::get(opCtx->getClient());
+        const auto authChecker =
+            [&authzSession](const boost::optional<UserName>& userName) -> Status {
+            return authzSession->isCoauthorizedWith(userName)
+                ? Status::OK()
+                : Status(ErrorCodes::Unauthorized, "User not authorized to access cursor");
+        };
+
+        // Check out the cursor. If the cursor is not found, all data was retrieve in the
+        // first batch.
+        const auto cursorManager = Grid::get(opCtx)->getCursorManager();
+        auto pinnedCursor = uassertStatusOK(cursorManager->checkOutCursor(
+            indexStatsCursor.getCursorId(), opCtx, authChecker));
+        while (true) {
+            auto next = pinnedCursor->next();
+            if (!next.isOK() || next.getValue().isEOF()) {
+                break;
+            }
 
-        BSONObj nextDoc;
-        while (!exec->isEOF()) {
-            auto state = exec->getNext(&nextDoc, nullptr);
-            if (state == PlanExecutor::ADVANCED) {
-                results.emplace_back(nextDoc);
+            if (auto data = next.getValue().getResult()) {
+                results.emplace_back(data.get().getOwned());
             }
         }
     });
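For reference, the insertFakeInconsistencies fail point defined in metadata_consistency_util.cpp is an ordinary server fail point, so besides the configureFailPoint() helper from fail_point_util.js used in the new test, it can also be driven by hand through the configureFailPoint admin command (test commands must be enabled, as they are under ShardingTest). A minimal sketch, assuming a direct connection to one shard's mongod; the host/port and the count of 5 are placeholders, not part of this patch:

// Hypothetical manual use of the new fail point; the test above uses
// configureFailPoint() from fail_point_util.js instead.
const shardConn = new Mongo("localhost:20001");  // assumed shard mongod address

// Make checkMetadataConsistency on this shard report 5 extra fake
// kCollectionUUIDMismatch inconsistencies (the field name matches what the
// fail point reads from its data document).
assert.commandWorked(shardConn.adminCommand({
    configureFailPoint: 'insertFakeInconsistencies',
    mode: 'alwaysOn',
    data: {numInconsistencies: NumberInt(5)}
}));

// ... run db.checkMetadataConsistency() through mongos and inspect the output ...

// Disable the fail point again so the metadata consistency hooks pass.
assert.commandWorked(shardConn.adminCommand(
    {configureFailPoint: 'insertFakeInconsistencies', mode: 'off'}));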