11 files changed, 282 insertions, 150 deletions
diff --git a/jstests/core/list_catalog.js b/jstests/core/list_catalog.js
index d3b3f31bb20..a9554bcacb9 100644
--- a/jstests/core/list_catalog.js
+++ b/jstests/core/list_catalog.js
@@ -2,10 +2,6 @@
  * Basic tests for the $listCatalog aggregation stage.
  *
  * @tags: [
- *   # This test creates views and time-series collections, and drops these namespaces
- *   # as a pre-processing step. We should not allow the test framework to create these
- *   # namespaces as regular collections.
- *   assumes_no_implicit_collection_creation_after_drop,
  *   # Time-series collection inserts are not supported in multi-document transactions.
  *   does_not_support_transactions,
  *   requires_fcv_60,
@@ -16,73 +12,95 @@
 (function() {
 'use strict';
 
-const documentSourceListCatalogEnabled =
-    assert
-        .commandWorked(
-            db.getMongo().adminCommand({getParameter: 1, featureFlagDocumentSourceListCatalog: 1}))
-        .featureFlagDocumentSourceListCatalog.value;
+load('jstests/libs/fixture_helpers.js');
 
-if (!documentSourceListCatalogEnabled) {
-    jsTestLog('Skipping test because the $listCatalog aggregation stage feature flag is disabled.');
-    return;
-}
-
-const collNamePrefix = 'list_catalog_';
+const testDB = db.getSiblingDB(jsTestName());
+assert.commandWorked(testDB.dropDatabase());
 
 // Simple collection with one secondary index.
-const collSimple = db.getCollection(collNamePrefix + 'simple');
-collSimple.drop();
+const collSimple = testDB.simple;
 assert.commandWorked(collSimple.createIndex({a: 1}));
 assert.commandWorked(collSimple.insert({_id: 0, a: 0}));
 
-// Simple view with no pipeline.
-const viewSimple = db.getCollection(collNamePrefix + 'simple_view');
-viewSimple.drop();
-assert.commandWorked(db.createView(viewSimple.getName(), collSimple.getName(), []));
+// Simple view.
+const viewSimpleName = 'simple_view';
+assert.commandWorked(testDB.createView(viewSimpleName, collSimple.getName(), [{$project: {a: 0}}]));
 
 // Time-series collection.
-const collTimeseries = db.getCollection(collNamePrefix + 'ts');
-collTimeseries.drop();
-assert.commandWorked(
-    db.createCollection(collTimeseries.getName(), {timeseries: {timeField: 'tt'}}));
+assert.commandWorked(testDB.createCollection('ts', {timeseries: {timeField: 'tt'}}));
+const collTimeseries = testDB.ts;
 assert.commandWorked(collTimeseries.insert({_id: 1, tt: ISODate(), x: 123}));
 
 // Collection with clustered index.
-const collClustered = db.getCollection(collNamePrefix + 'clustered');
-collClustered.drop();
 assert.commandWorked(
-    db.createCollection(collClustered.getName(), {clusteredIndex: {key: {_id: 1}, unique: true}}));
+    testDB.createCollection('clustered', {clusteredIndex: {key: {_id: 1}, unique: true}}));
+const collClustered = testDB.clustered;
 assert.commandWorked(collClustered.insert({_id: 2, y: 'abc'}));
 
-const adminDB = db.getSiblingDB('admin');
-const result = adminDB.aggregate([{$listCatalog: {}}]).toArray();
-jsTestLog('$listCatalog result: ' + tojson(result));
-
-const catalogEntries = Object.assign({}, ...result.map(doc => ({[doc.ns]: doc})));
-jsTestLog('Catalog entries keyed by namespace: ' + tojson(catalogEntries));
-
-const entryCollSimple = catalogEntries[collSimple.getFullName()];
-assert(entryCollSimple, 'simple collection not found: ' + tojson(result));
-assert.eq('collection', entryCollSimple.type, tojson(entryCollSimple));
-const hasIdIndexCollSimple = !entryCollSimple.md.options.clusteredIndex;
-assert.eq(
-    hasIdIndexCollSimple ? 2 : 1, entryCollSimple.md.indexes.length, tojson(entryCollSimple));
-
-const entryViewSimple = catalogEntries[viewSimple.getFullName()];
-assert(entryViewSimple, 'simple view not found: ' + tojson(result));
-assert.eq('view', entryViewSimple.type, tojson(entryViewSimple));
-assert.eq(collSimple.getName(), entryViewSimple.viewOn, tojson(entryViewSimple));
-
-const entryTimeseries = catalogEntries[collTimeseries.getFullName()];
-assert(entryTimeseries, 'time-series collection not found: ' + tojson(result));
-assert.eq('timeseries', entryTimeseries.type, tojson(entryTimeseries));
-const bucketsCollectionName = 'system.buckets.' + collTimeseries.getName();
-assert.eq(bucketsCollectionName, entryTimeseries.viewOn, tojson(entryTimeseries));
-const entryBucketsCollection =
-    catalogEntries[db.getCollection(bucketsCollectionName).getFullName()];
-assert(entryBucketsCollection, 'buckets collection not found: ' + tojson(result));
-
-const entryCollClustered = catalogEntries[collClustered.getFullName()];
-assert(entryCollClustered, 'clustered collection not found: ' + tojson(result));
-assert.eq('collection', entryCollClustered.type, tojson(entryCollClustered));
-assert.sameMembers([], entryCollClustered.md.indexes, tojson(entryCollClustered));
+const numIndexes = function(coll, entry, numSecondaryIndexes) {
+    let numIndexes = numSecondaryIndexes + 1;
+    if (entry.md.options.clusteredIndex) {
+        --numIndexes;
+    }
+    if (FixtureHelpers.isSharded(coll)) {
+        ++numIndexes;
+    }
+    return numIndexes;
+};
+
+const checkEntries = function(collName, entries, type, {numSecondaryIndexes, viewOn}) {
+    const ns = testDB.getName() + '.' + collName;
+    assert(entries.some((entry) => entry.ns === ns));
+    for (const entry of entries) {
+        if (entry.ns !== ns) {
+            continue;
+        }
+
+        assert.eq(entry.db, testDB.getName());
+        assert.eq(entry.name, collName);
+        assert.eq(entry.type, type);
+        if (FixtureHelpers.isMongos(testDB)) {
+            assert(entry.shard);
+        }
+        if (type === 'collection') {
+            assert.eq(entry.md.indexes.length,
+                      numIndexes(testDB[collName], entry, numSecondaryIndexes));
+        }
+        if (type === 'view' || type === 'timeseries') {
+            assert.eq(entry.viewOn, viewOn);
+        }
+    }
+};
+
+let result = collSimple.aggregate([{$listCatalog: {}}]).toArray();
+jsTestLog(collSimple.getFullName() + ' $listCatalog: ' + tojson(result));
+checkEntries(collSimple.getName(), result, 'collection', {numSecondaryIndexes: 1});
+
+result = collClustered.aggregate([{$listCatalog: {}}]).toArray();
+jsTestLog(collClustered.getFullName() + ' $listCatalog: ' + tojson(result));
+checkEntries(collClustered.getName(), result, 'collection', {numSecondaryIndexes: 0});
+
+assert.commandFailedWithCode(
+    testDB.runCommand({aggregate: viewSimpleName, pipeline: [{$listCatalog: {}}], cursor: {}}),
+    40602);
+
+assert.commandFailedWithCode(
+    testDB.runCommand(
+        {aggregate: collTimeseries.getName(), pipeline: [{$listCatalog: {}}], cursor: {}}),
+    40602);
+
+assert.commandFailedWithCode(
+    testDB.runCommand({aggregate: 1, pipeline: [{$listCatalog: {}}], cursor: {}}),
+    ErrorCodes.InvalidNamespace);
+
+const adminDB = testDB.getSiblingDB('admin');
+result = adminDB.aggregate([{$listCatalog: {}}]).toArray();
+jsTestLog('Collectionless $listCatalog: ' + tojson(result));
+
+checkEntries(collSimple.getName(), result, 'collection', {numSecondaryIndexes: 1});
+checkEntries(collClustered.getName(), result, 'collection', {numSecondaryIndexes: 0});
+checkEntries(viewSimpleName, result, 'view', {viewOn: collSimple.getName()});
+checkEntries(collTimeseries.getName(), result, 'timeseries', {
+    viewOn: 'system.buckets.' + collTimeseries.getName()
+});
 })();
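The reworked test above exercises both invocation forms this change introduces. As a minimal sketch of the two styles (database and collection names here are illustrative, not taken from the patch):

    // Collection-level $listCatalog: returns only the catalog entry for 'mydb.mycoll'.
    const collEntries = db.getSiblingDB('mydb').mycoll.aggregate([{$listCatalog: {}}]).toArray();

    // Collectionless $listCatalog: must be run against 'admin' with {aggregate: 1}
    // and returns an entry for every namespace in the durable catalog.
    const allEntries = db.getSiblingDB('admin').aggregate([{$listCatalog: {}}]).toArray();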
diff --git a/jstests/noPassthrough/list_catalog_read_concern.js b/jstests/noPassthrough/list_catalog_read_concern.js
index 5391d497d1a..23806897f4d 100644
--- a/jstests/noPassthrough/list_catalog_read_concern.js
+++ b/jstests/noPassthrough/list_catalog_read_concern.js
@@ -1,5 +1,6 @@
 /**
- * Tests listCatalog aggregation stage with local and majority read concerns.
+ * Tests the $listCatalog aggregation stage with local and majority read concerns.
+ *
  * @tags: [
  *   requires_majority_read_concern,
  *   requires_replication,
@@ -8,87 +9,65 @@
 (function() {
 'use strict';
 
-load("jstests/libs/fail_point_util.js");  // For configureFailPoint
+load('jstests/libs/write_concern_util.js');
 
-const rst = new ReplSetTest({nodes: 3});
+const rst = new ReplSetTest({nodes: 2});
 rst.startSet();
 rst.initiate();
 
 const primary = rst.getPrimary();
 
-const documentSourceListCatalogEnabled =
-    assert
-        .commandWorked(
-            primary.adminCommand({getParameter: 1, featureFlagDocumentSourceListCatalog: 1}))
-        .featureFlagDocumentSourceListCatalog.value;
+const db = primary.getDB(jsTestName());
+const coll1 = db.coll_1;
+const coll2 = db.coll_2;
+const view = db.view;
 
-if (!documentSourceListCatalogEnabled) {
-    jsTestLog('Skipping test because the $listCatalog aggregation stage feature flag is disabled.');
-    rst.stopSet();
-    return;
-}
+assert.commandWorked(
    db.runCommand({createIndexes: coll1.getName(), indexes: [{key: {a: 1}, name: 'a_1'}]}));
+assert.commandWorked(
+    db.runCommand({create: view.getName(), viewOn: coll1.getName(), pipeline: []}));
 
-const testDB = primary.getDB('test');
-const coll = testDB.getCollection('t');
-assert.commandWorked(coll.insert({_id: 0}));
-const view = testDB.getCollection('view1');
-assert.commandWorked(testDB.createView(view.getName(), coll.getName(), []));
-rst.awaitReplication();
+stopReplicationOnSecondaries(rst);
 
-const secondaries = rst.getSecondaries();
-assert.eq(2, secondaries.length);
+assert.commandWorked(db.runCommand({
+    createIndexes: coll1.getName(),
+    indexes: [{key: {b: 1}, name: 'b_1'}],
+    writeConcern: {w: 1},
+    commitQuorum: 0,
+}));
+assert.commandWorked(db.runCommand({create: coll2.getName(), writeConcern: {w: 1}}));
+assert.commandWorked(db.runCommand({collMod: view.getName(), viewOn: coll2.getName()}));
 
-let failpoints = [];
-try {
-    failpoints.push(configureFailPoint(secondaries[0], 'rsSyncApplyStop'));
-    failpoints.push(configureFailPoint(secondaries[1], 'rsSyncApplyStop'));
+let entries = coll1.aggregate([{$listCatalog: {}}], {readConcern: {level: 'local'}}).toArray();
+jsTestLog(coll1.getFullName() + ' local $listCatalog: ' + tojson(entries));
+assert.eq(entries.length, 1);
+assert.eq(entries[0].ns, coll1.getFullName());
+assert.eq(entries[0].md.indexes.length, 3);
 
-    const collOnPrimaryOnly = testDB.getCollection('w');
-    assert.commandWorked(collOnPrimaryOnly.insert({_id: 1}, {writeConcern: {w: 1}}));
+entries = coll1.aggregate([{$listCatalog: {}}], {readConcern: {level: 'majority'}}).toArray();
+jsTestLog(coll1.getFullName() + ' majority $listCatalog: ' + tojson(entries));
+assert.eq(entries.length, 1);
+assert.eq(entries[0].ns, coll1.getFullName());
+assert.eq(entries[0].md.indexes.length, 2);
 
-    const viewOnPrimaryOnly = testDB.getCollection('view2');
-    assert.commandWorked(
-        testDB.createView(viewOnPrimaryOnly.getName(), coll.getName(), [], {writeConcern: {w: 1}}));
+const adminDB = primary.getDB('admin');
 
-    const adminDB = testDB.getSiblingDB('admin');
-    const resultLocal = adminDB
-                            .aggregate([{$listCatalog: {}}, {$match: {db: testDB.getName()}}],
-                                       {readConcern: {level: 'local'}})
-                            .toArray();
-    const resultMajority = adminDB
-                               .aggregate([{$listCatalog: {}}, {$match: {db: testDB.getName()}}],
-                                          {readConcern: {level: 'majority'}})
-                               .toArray();
+entries = adminDB
              .aggregate([{$listCatalog: {}}, {$match: {db: db.getName()}}],
                         {readConcern: {level: 'local'}})
              .toArray();
+jsTestLog('Collectionless local $listCatalog: ' + tojson(entries));
+assert.eq(entries.length, 4);
+assert.eq(entries.find((entry) => entry.name === view.getName()).viewOn, coll2.getName());
 
-    jsTestLog('$listCatalog result (local read concern): ' + tojson(resultLocal));
-    jsTestLog('$listCatalog result (majority read concern): ' + tojson(resultMajority));
+entries = adminDB
              .aggregate([{$listCatalog: {}}, {$match: {db: db.getName()}}],
                         {readConcern: {level: 'majority'}})
              .toArray();
+jsTestLog('Collectionless majority $listCatalog: ' + tojson(entries));
+assert.eq(entries.length, 3);
+assert.eq(entries.find((entry) => entry.name === view.getName()).viewOn, coll1.getName());
 
-    const catalogEntriesLocal = Object.assign({}, ...resultLocal.map(doc => ({[doc.ns]: doc})));
-    const catalogEntriesMajority =
-        Object.assign({}, ...resultMajority.map(doc => ({[doc.ns]: doc})));
-    jsTestLog('Catalog entries keyed by namespace (local read concern): ' +
-              tojson(catalogEntriesLocal));
-    jsTestLog('Catalog entries keyed by namespace (majority read concern): ' +
-              tojson(catalogEntriesMajority));
-
-    // $listCatalog result should have all the collections and views we have created.
-    assert.hasFields(catalogEntriesLocal, [
-        coll.getFullName(),
-        view.getFullName(),
-        collOnPrimaryOnly.getFullName(),
-        viewOnPrimaryOnly.getFullName()
-    ]);
-
-    // $listCatalog result should not contain the namespaces not replicated to the secondaries.
-    assert.hasFields(catalogEntriesMajority, [coll.getFullName(), view.getFullName()]);
-    assert(!catalogEntriesMajority.hasOwnProperty(collOnPrimaryOnly.getFullName()),
-           tojson(catalogEntriesMajority));
-    assert(!catalogEntriesMajority.hasOwnProperty(viewOnPrimaryOnly.getFullName()),
-           tojson(catalogEntriesMajority));
-} finally {
-    for (const fp of failpoints) {
-        fp.off();
-    }
-}
+restartReplicationOnSecondaries(rst);
 
 rst.stopSet();
 })();
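The assertions above hinge on writes acknowledged at {w: 1} while secondary replication is paused. A minimal sketch of the pattern, assuming the same paused-replication setup and collection names as the test:

    // The {w: 1} index build is already visible to the catalog at 'local' read
    // concern, but not yet at 'majority', so the local entry reports one extra index.
    let local =
        db.coll_1.aggregate([{$listCatalog: {}}], {readConcern: {level: 'local'}}).toArray();
    let majority =
        db.coll_1.aggregate([{$listCatalog: {}}], {readConcern: {level: 'majority'}}).toArray();
    assert.gt(local[0].md.indexes.length, majority[0].md.indexes.length);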
+assert.commandWorked(st.splitAt(coll.getFullName(), {_id: 1}));
+assert.commandWorked(
+    st.s.adminCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard0.shardName}));
+assert.commandWorked(
+    st.s.adminCommand({moveChunk: coll.getFullName(), find: {_id: 1}, to: st.shard1.shardName}));
+
+const checkResult = function(res) {
+    for (const entry of res) {
+        assert.eq(entry.db, db.getName());
+        assert.eq(entry.name, coll.getName());
+        assert.eq(entry.ns, coll.getFullName());
+        assert.eq(entry.type, "collection");
+        assert.eq(entry.md.indexes.length, entry.md.options.clusteredIndex ? 0 : 1);
+    }
+};
+
+let res = coll.aggregate([{$listCatalog: {}}]).toArray();
+jsTestLog('$listCatalog with multiple chunk-owning shards: ' + tojson(res));
+assert.eq(res.length, 2);
+assert(res.some((entry) => entry.shard === st.shard0.shardName));
+assert(res.some((entry) => entry.shard === st.shard1.shardName));
+checkResult(res);
+
+// Move {_id: 0} to shard1 so that shard0 does not own any chunks for the collection.
+assert.commandWorked(
+    st.s.adminCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
+
+res = coll.aggregate([{$listCatalog: {}}]).toArray();
+jsTestLog('$listCatalog with one chunk-owning shard: ' + tojson(res));
+assert.eq(res.length, 1);
+assert.eq(res[0].shard, st.shard1.shardName);
+checkResult(res);
+
+st.stop();
+})();
diff --git a/src/mongo/db/pipeline/aggregate_command.idl b/src/mongo/db/pipeline/aggregate_command.idl
index 98ab3572924..0e69d47c6ba 100644
--- a/src/mongo/db/pipeline/aggregate_command.idl
+++ b/src/mongo/db/pipeline/aggregate_command.idl
@@ -126,6 +126,10 @@ commands:
                 action_type: listCachedAndActiveUsers
             - privilege: # $listCatalog
                 agg_stage: listCatalog
+                resource_pattern: exact_namespace
+                action_type: [listCollections, listIndexes]
+            - privilege: # $listCatalog
+                agg_stage: listCatalog
                 resource_pattern: cluster
                 action_type: listDatabases
             - privilege: # $listCatalog
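The new exact_namespace privilege above means the collection-level form requires only listCollections and listIndexes on the target namespace, rather than cluster-wide rights. A sketch of a custom role granting just that (role, database, and collection names are hypothetical):

    db.getSiblingDB('admin').createRole({
        role: 'listCatalogSingleColl',  // hypothetical role name
        privileges: [{
            resource: {db: 'mydb', collection: 'mycoll'},
            actions: ['listCollections', 'listIndexes'],
        }],
        roles: [],
    });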
diff --git a/src/mongo/db/pipeline/document_source_list_catalog.cpp b/src/mongo/db/pipeline/document_source_list_catalog.cpp
index a1163c8b2b3..1e4e9fde9f1 100644
--- a/src/mongo/db/pipeline/document_source_list_catalog.cpp
+++ b/src/mongo/db/pipeline/document_source_list_catalog.cpp
@@ -47,7 +47,7 @@ REGISTER_DOCUMENT_SOURCE_WITH_MIN_VERSION(listCatalog,
                                           DocumentSourceListCatalog::LiteParsed::parse,
                                           DocumentSourceListCatalog::createFromBson,
                                           AllowedWithApiStrict::kNeverInVersion1,
-                                          multiversion::FeatureCompatibilityVersion::kVersion_5_3);
+                                          multiversion::FeatureCompatibilityVersion::kVersion_6_0);
 
 const char* DocumentSourceListCatalog::getSourceName() const {
     return kStageName.rawData();
@@ -60,22 +60,34 @@ PrivilegeVector DocumentSourceListCatalog::LiteParsed::requiredPrivileges(
     // See builtin_roles.cpp.
     ActionSet listCollectionsAndIndexesActions{ActionType::listCollections,
                                                ActionType::listIndexes};
-    return {Privilege(ResourcePattern::forClusterResource(), ActionType::listDatabases),
-            Privilege(ResourcePattern::forAnyNormalResource(), listCollectionsAndIndexesActions),
-            Privilege(ResourcePattern::forCollectionName("system.js"),
-                      listCollectionsAndIndexesActions),
-            Privilege(ResourcePattern::forAnySystemBuckets(), listCollectionsAndIndexesActions)};
+    return _ns.isCollectionlessAggregateNS()
+        ? PrivilegeVector{Privilege(ResourcePattern::forClusterResource(),
+                                    ActionType::listDatabases),
+                          Privilege(ResourcePattern::forAnyNormalResource(),
+                                    listCollectionsAndIndexesActions),
+                          Privilege(ResourcePattern::forCollectionName("system.js"),
+                                    listCollectionsAndIndexesActions),
+                          Privilege(ResourcePattern::forAnySystemBuckets(),
+                                    listCollectionsAndIndexesActions)}
+        : PrivilegeVector{
+              Privilege(ResourcePattern::forExactNamespace(_ns), listCollectionsAndIndexesActions)};
 }
 
 DocumentSource::GetNextResult DocumentSourceListCatalog::doGetNext() {
-    if (!_catalogDocsInitialized) {
-        _catalogDocs = pExpCtx->mongoProcessInterface->listCatalog(pExpCtx->opCtx);
-        _catalogDocsInitialized = true;
+    if (!_catalogDocs) {
+        if (pExpCtx->ns.isCollectionlessAggregateNS()) {
+            _catalogDocs = pExpCtx->mongoProcessInterface->listCatalog(pExpCtx->opCtx);
+        } else if (auto catalogDoc = pExpCtx->mongoProcessInterface->getCatalogEntry(pExpCtx->opCtx,
+                                                                                     pExpCtx->ns)) {
+            _catalogDocs = {{std::move(*catalogDoc)}};
+        } else {
+            _catalogDocs.emplace();
+        }
     }
 
-    if (!_catalogDocs.empty()) {
-        Document doc{_catalogDocs.front()};
-        _catalogDocs.pop_front();
+    if (!_catalogDocs->empty()) {
+        Document doc{std::move(_catalogDocs->front())};
+        _catalogDocs->pop_front();
         return doc;
     }
 
@@ -94,9 +106,10 @@ intrusive_ptr<DocumentSource> DocumentSourceListCatalog::createFromBson(
 
     const NamespaceString& nss = pExpCtx->ns;
 
-    uassert(ErrorCodes::InvalidNamespace,
-            "$listCatalog must be run against the 'admin' database with {aggregate: 1}",
-            nss.db() == NamespaceString::kAdminDb && nss.isCollectionlessAggregateNS());
+    uassert(
+        ErrorCodes::InvalidNamespace,
+        "Collectionless $listCatalog must be run against the 'admin' database with {aggregate: 1}",
+        nss.db() == NamespaceString::kAdminDb || !nss.isCollectionlessAggregateNS());
 
     uassert(ErrorCodes::QueryFeatureNotAllowed,
             fmt::format("The {} aggregation stage is not enabled", kStageName),
diff --git a/src/mongo/db/pipeline/document_source_list_catalog.h b/src/mongo/db/pipeline/document_source_list_catalog.h
index c0cb3829a00..c3bcb52fe77 100644
--- a/src/mongo/db/pipeline/document_source_list_catalog.h
+++ b/src/mongo/db/pipeline/document_source_list_catalog.h
@@ -51,11 +51,11 @@ public:
     public:
         static std::unique_ptr<LiteParsed> parse(const NamespaceString& nss,
                                                  const BSONElement& spec) {
-            return std::make_unique<LiteParsed>(spec.fieldName());
+            return std::make_unique<LiteParsed>(spec.fieldName(), nss);
         }
 
-        explicit LiteParsed(std::string parseTimeName)
-            : LiteParsedDocumentSource(std::move(parseTimeName)) {}
+        explicit LiteParsed(std::string parseTimeName, NamespaceString ns)
+            : LiteParsedDocumentSource(std::move(parseTimeName)), _ns(std::move(ns)) {}
 
         stdx::unordered_set<NamespaceString> getInvolvedNamespaces() const final {
             return stdx::unordered_set<NamespaceString>();
@@ -68,6 +68,9 @@ public:
         bool isInitialSource() const final {
             return true;
         }
+
+    private:
+        NamespaceString _ns;
     };
 
     // virtuals from DocumentSource
@@ -84,7 +87,7 @@ public:
                                      LookupRequirement::kAllowed,
                                      UnionRequirement::kAllowed);
 
-        constraints.isIndependentOfAnyCollection = true;
+        constraints.isIndependentOfAnyCollection = pExpCtx->ns.isCollectionlessAggregateNS();
         constraints.requiresInputDocSource = false;
         return constraints;
     }
@@ -100,8 +103,7 @@ private:
     DocumentSourceListCatalog(const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
 
     GetNextResult doGetNext() final;
-    bool _catalogDocsInitialized = false;
-    std::deque<BSONObj> _catalogDocs;
+    boost::optional<std::deque<BSONObj>> _catalogDocs;
 };
 
 }  // namespace mongo
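With the relaxed uassert in createFromBson(), only the collectionless form remains pinned to the 'admin' database; the collection-level form is accepted on any database. The expected shell behavior, assuming an ordinary 'test' database with an existing collection 'mycoll' (names are illustrative):

    // Collectionless form outside 'admin' is still rejected.
    assert.commandFailedWithCode(
        db.getSiblingDB('test').runCommand(
            {aggregate: 1, pipeline: [{$listCatalog: {}}], cursor: {}}),
        ErrorCodes.InvalidNamespace);

    // Collection-level form on the same database now succeeds.
    assert.commandWorked(db.getSiblingDB('test').runCommand(
        {aggregate: 'mycoll', pipeline: [{$listCatalog: {}}], cursor: {}}));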
diff --git a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
index cf6225dddac..1532cfbf4ed 100644
--- a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
@@ -119,6 +119,7 @@ void assertIgnorePrepareConflictsBehavior(const boost::intrusive_ptr<ExpressionC
  * <db>.system.views namespaces found.
  */
 void listDurableCatalog(OperationContext* opCtx,
+                        StringData shardName,
                         std::deque<BSONObj>* docs,
                         std::vector<NamespaceStringOrUUID>* systemViewsNamespaces) {
     auto durableCatalog = DurableCatalog::get(opCtx);
@@ -146,6 +147,9 @@ void listDurableCatalog(OperationContext* opCtx,
         builder.append("db", ns.db());
         builder.append("name", ns.coll());
         builder.append("type", "collection");
+        if (!shardName.empty()) {
+            builder.append("shard", shardName);
+        }
         builder.appendElements(obj);
         docs->push_back(builder.obj());
     }
@@ -224,7 +228,7 @@ std::deque<BSONObj> CommonMongodProcessInterface::listCatalog(OperationContext*
     std::vector<NamespaceStringOrUUID> systemViewsNamespaces;
     {
         Lock::GlobalLock globalLock(opCtx, MODE_IS);
-        listDurableCatalog(opCtx, &docs, &systemViewsNamespaces);
+        listDurableCatalog(opCtx, getShardName(opCtx), &docs, &systemViewsNamespaces);
     }
 
     if (systemViewsNamespaces.empty()) {
@@ -257,7 +261,8 @@ std::deque<BSONObj> CommonMongodProcessInterface::listCatalog(OperationContext*
             // we read it, we should discard this set of results and retry from the top (with the
             // global read lock) of this loop.
             std::vector<NamespaceStringOrUUID> systemViewsNamespacesFromSecondCatalogRead;
-            listDurableCatalog(opCtx, &docs, &systemViewsNamespacesFromSecondCatalogRead);
+            listDurableCatalog(
+                opCtx, getShardName(opCtx), &docs, &systemViewsNamespacesFromSecondCatalogRead);
             if (!std::equal(
                     systemViewsNamespaces.cbegin(),
                     systemViewsNamespaces.cend(),
@@ -288,6 +293,9 @@ std::deque<BSONObj> CommonMongodProcessInterface::listCatalog(OperationContext*
             } else {
                 builder.append("type", "view");
             }
+            if (auto shardName = getShardName(opCtx); !shardName.empty()) {
+                builder.append("shard", shardName);
+            }
             builder.appendAs(obj["_id"], "ns");
             builder.appendElements(obj);
             docs.push_back(builder.obj());
@@ -298,6 +306,37 @@ std::deque<BSONObj> CommonMongodProcessInterface::listCatalog(OperationContext*
     }
 }
 
+boost::optional<BSONObj> CommonMongodProcessInterface::getCatalogEntry(
+    OperationContext* opCtx, const NamespaceString& ns) const {
+    Lock::GlobalLock globalLock{opCtx, MODE_IS};
+
+    auto rs = DurableCatalog::get(opCtx)->getRecordStore();
+    if (!rs) {
+        return boost::none;
+    }
+
+    auto cursor = rs->getCursor(opCtx);
+    while (auto record = cursor->next()) {
+        auto obj = record->data.toBson();
+        if (NamespaceString{obj.getStringField("ns")} != ns) {
+            continue;
+        }
+
+        BSONObjBuilder builder;
+        builder.append("db", ns.db());
+        builder.append("name", ns.coll());
+        builder.append("type", "collection");
+        if (auto shardName = getShardName(opCtx); !shardName.empty()) {
+            builder.append("shard", shardName);
+        }
+        builder.appendElements(obj);
+
+        return builder.obj();
+    }
+
+    return boost::none;
+}
+
 void CommonMongodProcessInterface::appendLatencyStats(OperationContext* opCtx,
                                                       const NamespaceString& nss,
                                                       bool includeHistograms,
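With getShardName() threaded through listDurableCatalog() and the new getCatalogEntry(), every catalog document emitted by a shard member now names its shard. An illustrative, abridged entry (field values are made up):

    // {
    //     db: 'mydb',
    //     name: 'coll',
    //     type: 'collection',
    //     shard: 'shard0001',  // appended only when the node has a shard name
    //     ns: 'mydb.coll',
    //     md: {options: {...}, indexes: [...]}
    // }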
diff --git a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.h b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.h
index 108c3cbdf39..79b455e8e6d 100644
--- a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.h
+++ b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.h
@@ -59,6 +59,9 @@ public:
 
     std::deque<BSONObj> listCatalog(OperationContext* opCtx) const final;
 
+    boost::optional<BSONObj> getCatalogEntry(OperationContext* opCtx,
+                                             const NamespaceString& ns) const final;
+
     void appendLatencyStats(OperationContext* opCtx,
                             const NamespaceString& nss,
                             bool includeHistograms,
diff --git a/src/mongo/db/pipeline/process_interface/mongo_process_interface.h b/src/mongo/db/pipeline/process_interface/mongo_process_interface.h
index 371021f492f..89e47af3d74 100644
--- a/src/mongo/db/pipeline/process_interface/mongo_process_interface.h
+++ b/src/mongo/db/pipeline/process_interface/mongo_process_interface.h
@@ -196,6 +196,12 @@ public:
     virtual std::deque<BSONObj> listCatalog(OperationContext* opCtx) const = 0;
 
     /**
+     * Returns the catalog entry for the given namespace, if it exists.
+     */
+    virtual boost::optional<BSONObj> getCatalogEntry(OperationContext* opCtx,
+                                                     const NamespaceString& ns) const = 0;
+
+    /**
      * Appends operation latency statistics for collection "nss" to "builder"
      */
     virtual void appendLatencyStats(OperationContext* opCtx,
diff --git a/src/mongo/db/pipeline/process_interface/mongos_process_interface.h b/src/mongo/db/pipeline/process_interface/mongos_process_interface.h
index f3ae8cae7b3..dba29a99e3d 100644
--- a/src/mongo/db/pipeline/process_interface/mongos_process_interface.h
+++ b/src/mongo/db/pipeline/process_interface/mongos_process_interface.h
@@ -102,6 +102,11 @@ public:
         MONGO_UNREACHABLE;
     }
 
+    boost::optional<BSONObj> getCatalogEntry(OperationContext* opCtx,
+                                             const NamespaceString& ns) const final {
+        MONGO_UNREACHABLE;
+    }
+
     void appendLatencyStats(OperationContext* opCtx,
                             const NamespaceString& nss,
                             bool includeHistograms,
diff --git a/src/mongo/db/pipeline/process_interface/stub_mongo_process_interface.h b/src/mongo/db/pipeline/process_interface/stub_mongo_process_interface.h
index 1e4f0dccb85..eaabb1f64a9 100644
--- a/src/mongo/db/pipeline/process_interface/stub_mongo_process_interface.h
+++ b/src/mongo/db/pipeline/process_interface/stub_mongo_process_interface.h
@@ -93,6 +93,11 @@ public:
         MONGO_UNREACHABLE;
     }
 
+    boost::optional<BSONObj> getCatalogEntry(OperationContext* opCtx,
+                                             const NamespaceString& ns) const override {
+        MONGO_UNREACHABLE;
+    }
+
     void appendLatencyStats(OperationContext* opCtx,
                             const NamespaceString& nss,
                             bool includeHistograms,
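The mongos and stub interfaces leave getCatalogEntry() marked MONGO_UNREACHABLE because the router never serves the stage from a local catalog: a collection-level $listCatalog sent through mongos is dispatched to the chunk-owning shards, as the new sharding test verifies. A rough usage sketch, assuming a ShardingTest fixture named st and an illustrative sharded namespace:

    // Each document in the result identifies the shard it came from.
    const viaMongos = st.s.getDB('mydb').coll.aggregate([{$listCatalog: {}}]).toArray();
    viaMongos.forEach((entry) => print(entry.shard + ' -> ' + entry.ns));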