diff options
34 files changed, 987 insertions, 44 deletions
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js index bf36b923932..5227dda8ebc 100644 --- a/jstests/auth/lib/commands_lib.js +++ b/jstests/auth/lib/commands_lib.js @@ -6003,6 +6003,77 @@ var authCommandsLib = { }, ] }, + { + testname: "validate_db_metadata_command_specific_db", + command: { + validateDBMetadata: 1, + db: secondDbName, + collection: "test", + apiParameters: {version: "1", strict: true} + }, + skipSharded: true, + setup: function(db) { + assert.commandWorked(db.getSiblingDB(firstDbName).createCollection("test")); + assert.commandWorked(db.getSiblingDB(secondDbName).createCollection("test")); + assert.commandWorked(db.getSiblingDB("ThirdDB").createCollection("test")); + }, + teardown: function(db) { + assert.commandWorked(db.getSiblingDB(firstDbName).dropDatabase()); + assert.commandWorked(db.getSiblingDB(secondDbName).dropDatabase()); + assert.commandWorked(db.getSiblingDB("ThirdDB").dropDatabase()); + }, + testcases: [ + { + runOnDb: secondDbName, + privileges: [{resource: {db: secondDbName, collection: ""}, actions: ["validate"]}] + }, + { + // Need to have permission on firstDBName to be able to the command on the db. 
+ runOnDb: firstDbName, + privileges: [{resource: {db: secondDbName, collection: ""}, actions: ["validate"]}], + expectAuthzFailure: true + }, + { + runOnDb: firstDbName, + privileges: [ + {resource: {db: firstDbName, collection: ""}, actions: ["validate"]}, + {resource: {db: secondDbName, collection: ""}, actions: ["validate"]} + ] + }, + ] + }, + { + testname: "validate_db_metadata_command_all_dbs", + command: {validateDBMetadata: 1, apiParameters: {version: "1", strict: true}}, + skipSharded: true, + setup: function(db) { + assert.commandWorked(db.getSiblingDB(firstDbName).createCollection("test")); + assert.commandWorked(db.getSiblingDB(secondDbName).createCollection("test")); + }, + teardown: function(db) { + assert.commandWorked(db.getSiblingDB(firstDbName).dropDatabase()); + assert.commandWorked(db.getSiblingDB(secondDbName).dropDatabase()); + }, + testcases: [ + { + // Since the command didn't specify a 'db', it validates all dbs and hence require + // permission to run on all dbs. 
+ runOnDb: secondDbName, + privileges: [{resource: {db: secondDbName, collection: ""}, actions: ["validate"]}], + expectAuthzFailure: true + }, + { + runOnDb: secondDbName, + privileges: [ + {resource: {db: "admin", collection: ""}, actions: ["validate"]}, + {resource: {db: "config", collection: ""}, actions: ["validate"]}, + {resource: {db: "local", collection: ""}, actions: ["validate"]}, + {resource: {db: firstDbName, collection: ""}, actions: ["validate"]}, + {resource: {db: secondDbName, collection: ""}, actions: ["validate"]} + ] + }, + ] + }, ], /************* SHARED TEST LOGIC ****************/ diff --git a/jstests/core/api_version_pipeline_stages.js b/jstests/core/api_version_pipeline_stages.js index b72f7336d3a..425e08730cd 100644 --- a/jstests/core/api_version_pipeline_stages.js +++ b/jstests/core/api_version_pipeline_stages.js @@ -26,6 +26,9 @@ const pipelines = [ [{$listLocalSessions: {}}], [{$listSessions: {}}], [{$planCacheStats: {}}], + [{$unionWith: {coll: "coll2", pipeline: [{$collStats: {count: {}}}]}}], + [{$lookup: {from: "coll2", pipeline: [{$indexStats: {}}]}}], + [{$facet: {field1: [], field2: [{$indexStats: {}}]}}], ]; for (let pipeline of pipelines) { diff --git a/jstests/core/api_version_test_expression.js b/jstests/core/api_version_test_expression.js index 5259fdc3dfa..fc2488f321f 100644 --- a/jstests/core/api_version_test_expression.js +++ b/jstests/core/api_version_test_expression.js @@ -135,16 +135,27 @@ assert.commandFailedWithCode( {aggregate: "view", pipeline: pipeline, cursor: {}, apiVersion: "1", apiStrict: true}), ErrorCodes.APIStrictError); -// Create a view with {unstable: true}. +// Create a view with 'unstable' parameter should fail with 'apiStrict'. 
db.unstableView.drop(); -assert.commandWorked(db.runCommand({ +assert.commandFailedWithCode(db.runCommand({ create: "unstableView", viewOn: collName, pipeline: pipeline, apiStrict: true, apiVersion: "1" +}), + ErrorCodes.APIStrictError); + +// Create a view with 'unstable' should be allowed without 'apiStrict'. +assert.commandWorked(db.runCommand({ + create: "unstableView", + viewOn: collName, + pipeline: pipeline, + apiVersion: "1", + apiStrict: false })); assert.commandWorked(db.runCommand({aggregate: "unstableView", pipeline: [], cursor: {}})); + // This commmand will fail even with the empty pipeline because of the view. assert.commandFailedWithCode( db.runCommand( @@ -198,18 +209,13 @@ assert.commandWorked(db[validatedCollName].runCommand({ // Test that API version parameters are inherited into the inner command of the explain command. function checkExplainInnerCommandGetsAPIVersionParameters(explainedCmd, errCode) { - let explainRes = db.runCommand( - {explain: explainedCmd, apiVersion: "1", apiDeprecationErrors: true, apiStrict: true}); - - assert(explainRes.hasOwnProperty('executionStats'), explainRes); - const execStats = explainRes['executionStats']; - - // 'execStats' will return APIStrictError if the inner command gets the APIVersionParameters. - assert.eq(execStats['executionSuccess'], false, execStats); - assert.eq(execStats['errorCode'], errCode, execStats); + assert.commandFailedWithCode( + db.runCommand( + {explain: explainedCmd, apiVersion: "1", apiDeprecationErrors: true, apiStrict: true}), + errCode); // If 'apiStrict: false' the inner aggregate command will execute successfully. 
- explainRes = db.runCommand({explain: explainedCmd, apiVersion: "1", apiStrict: false}); + const explainRes = db.runCommand({explain: explainedCmd, apiVersion: "1", apiStrict: false}); assert(explainRes.hasOwnProperty('executionStats'), explainRes); assert.eq(explainRes['executionStats']['executionSuccess'], true, explainRes); } @@ -234,4 +240,7 @@ findCmd = { projection: {v: {$_testApiVersion: {deprecated: true}}} }; checkExplainInnerCommandGetsAPIVersionParameters(findCmd, ErrorCodes.APIDeprecationError); + +db[validatedCollName].drop(); +db.unstableView.drop(); })(); diff --git a/jstests/core/validate_db_metadata_command.js b/jstests/core/validate_db_metadata_command.js new file mode 100644 index 00000000000..c0a3e21e6a8 --- /dev/null +++ b/jstests/core/validate_db_metadata_command.js @@ -0,0 +1,126 @@ +/** + * Tests the validateDBMetaData commands with various input parameters. + * @tags: [ + * requires_fcv_49, + * ] + */ +(function() { +"use strict"; + +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. + +const dbName = jsTestName(); + +const testDB = db.getSiblingDB(dbName); +assert.commandWorked(testDB.dropDatabase()); +const coll1 = testDB.coll1; + +// Drop all the unstable data that the other tests might have created. This will ensure that the +// validateDBMetadata command is validating only the data generated by this test. +(function dropAllUnstableData() { + const listDBRes = assert.commandWorked(db.adminCommand({listDatabases: 1, nameOnly: true})); + for (let listDBOutput of listDBRes.databases) { + // Skip non-user databases. 
+ if (Array.contains(["admin", "config", "local", "$external"], listDBOutput.name)) { + continue; + } + const currentDB = db.getSiblingDB(listDBOutput.name); + for (let collInfo of currentDB.getCollectionInfos()) { + if (collInfo.type == "collection" && !collInfo.name.startsWith("system")) { + assert.commandWorked(currentDB[collInfo.name].dropIndexes()); + } + } + } +})(); + +// Verify that the 'apiParameters' field is required. +const res = assert.commandFailedWithCode(testDB.runCommand({validateDBMetadata: 1}), 40414); + +function validate({dbName, coll, apiStrict, error}) { + dbName = dbName ? dbName : null; + coll = coll ? coll : null; + const res = assert.commandWorked(testDB.runCommand({ + validateDBMetadata: 1, + db: dbName, + collection: coll, + apiParameters: {version: "1", strict: apiStrict} + })); + + assert(res.apiVersionErrors); + const foundError = res.apiVersionErrors.length > 0; + + // Verify that 'apiVersionErrors' is not empty when 'error' is true, and vice versa. + assert((!error && !foundError) || (error && foundError), res); + + if (error) { + for (let apiError of res.apiVersionErrors) { + assert(apiError.ns); + if (error.code) { + assert.eq(apiError.code, error.code); + } + + if (FixtureHelpers.isMongos(testDB)) { + // Check that every error has an additional 'shard' field on sharded clusters. + assert(apiError.shard); + } + } + } +} + +// +// Tests for indexes. +// +assert.commandWorked(coll1.createIndex({p: "text"})); + +validate({apiStrict: false}); + +// All dbs but different collection name. +validate({coll: "coll2", apiStrict: true}); + +// Different db, and collection which has unstable index should not error. +validate({dbName: "new", coll: "coll1", apiStrict: true}); +validate({ + dbName: "new", + apiStrict: true, +}); + +// Cases where the command returns an error. 
+validate({apiStrict: true, error: true}); +validate({coll: "coll1", apiStrict: true, error: true}); +validate({ + dbName: testDB.getName(), + coll: "coll1", + apiStrict: true, + error: {code: ErrorCodes.APIStrictError} +}); +validate({dbName: testDB.getName(), apiStrict: true, error: true}); + +// +// Tests for views. +// +assert.commandWorked(coll1.dropIndexes()); +validate({apiStrict: true}); + +// Create a view which uses unstable expression and verify that validateDBMetadata commands throws +// an assertion. +const view = + testDB.createView("view1", "coll2", [{$project: {v: {$_testApiVersion: {unstable: true}}}}]); + +validate({apiStrict: true, error: true}); +validate({dbName: dbName, apiStrict: true, error: true}); + +validate({dbName: "otherDB", apiStrict: true}); +validate({dbName: dbName, coll: "coll", apiStrict: true}); + +// With view name in the input. +validate({coll: "view1", apiStrict: true, error: {code: ErrorCodes.APIStrictError}}); +validate( + {dbName: dbName, coll: "view1", apiStrict: true, error: {code: ErrorCodes.APIStrictError}}); + +validate({dbName: "new", coll: "view1", apiStrict: true}); + +// Collection named same as the view name in another db. +const testDB2 = db.getSiblingDB("testDB2"); +const collWithViewName = testDB2.view1; +validate({coll: "view1", apiStrict: true, error: {code: ErrorCodes.APIStrictError}}); +}());
\ No newline at end of file diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js index 93dc1803ffc..acd53276fc6 100644 --- a/jstests/core/views/views_all_commands.js +++ b/jstests/core/views/views_all_commands.js @@ -578,6 +578,8 @@ let viewsCommandTests = { updateZoneKeyRange: {skip: isUnrelated}, usersInfo: {skip: isUnrelated}, validate: {command: {validate: "view"}, expectFailure: true}, + validateDBMetadata: + {command: {validateDBMetadata: 1, apiParameters: {version: "1", strict: true}}}, waitForOngoingChunkSplits: {skip: isUnrelated}, voteCommitImportCollection: {skip: isUnrelated}, voteCommitIndexBuild: {skip: isUnrelated}, diff --git a/jstests/libs/parallelTester.js b/jstests/libs/parallelTester.js index c17dd67c0bb..511ba98b1cd 100644 --- a/jstests/libs/parallelTester.js +++ b/jstests/libs/parallelTester.js @@ -221,6 +221,9 @@ if (typeof _threadInject != "undefined") { // This test updates global memory usage counters in the bucket catalog in a way that // may affect other time-series tests running concurrently. "timeseries/timeseries_idle_buckets.js", + + // Assumes that other tests are not creating API version 1 incompatible data. + "validate_db_metadata_command.js", ]); // Get files, including files in subdirectories. diff --git a/jstests/noPassthrough/validate_db_metadata_limits.js b/jstests/noPassthrough/validate_db_metadata_limits.js new file mode 100644 index 00000000000..dce8af08b6f --- /dev/null +++ b/jstests/noPassthrough/validate_db_metadata_limits.js @@ -0,0 +1,28 @@ +/** + * Tests to verify that the validateDBMetadata command returns response correctly when the expected + * output data is larger than the max BSON size. + */ +(function() { +"use strict"; + +const conn = MongoRunner.runMongod(); +const testDB = conn.getDB("validate_db_metadaba"); +const coll = testDB.getCollection("test"); + +for (let i = 0; i < 100; i++) { + // Create a large index name. 
As the index name is returned in the output validateDBMetadata + // command, it can cause the output size to exceed max BSON size. + let largeName = "a".repeat(200000); + assert.commandWorked(testDB.runCommand( + {createIndexes: "test" + i, indexes: [{key: {p: 1}, name: largeName, sparse: true}]})); +} + +const res = assert.commandWorked( + testDB.runCommand({validateDBMetadata: 1, apiParameters: {version: "1", strict: true}})); + +assert(res.hasMoreErrors, res); +assert(res.apiVersionErrors, res); +assert(res.apiVersionErrors.length < 100, res); + +MongoRunner.stopMongod(conn); +})();
\ No newline at end of file diff --git a/jstests/replsets/db_reads_while_recovering_all_commands.js b/jstests/replsets/db_reads_while_recovering_all_commands.js index 78a557b4649..2e117b7138d 100644 --- a/jstests/replsets/db_reads_while_recovering_all_commands.js +++ b/jstests/replsets/db_reads_while_recovering_all_commands.js @@ -311,6 +311,11 @@ const allCommands = { updateUser: {skip: isPrimaryOnly}, usersInfo: {skip: isPrimaryOnly}, validate: {skip: isNotAUserDataRead}, + validateDBMetadata: { + command: {validateDBMetadata: 1, apiParameters: {version: "1", strict: true}}, + expectFailure: true, + expectedErrorCode: ErrorCodes.NotPrimaryOrSecondary, + }, voteCommitImportCollection: {skip: isNotAUserDataRead}, voteCommitIndexBuild: {skip: isNotAUserDataRead}, waitForFailPoint: {skip: isNotAUserDataRead}, diff --git a/jstests/sharding/database_versioning_all_commands.js b/jstests/sharding/database_versioning_all_commands.js index 2474c572502..1e5973d3654 100644 --- a/jstests/sharding/database_versioning_all_commands.js +++ b/jstests/sharding/database_versioning_all_commands.js @@ -666,6 +666,16 @@ let testCases = { }, } }, + validateDBMetadata: { + run: { + // validateDBMetadata is always broadcast to all shards. 
+ sendsDbVersion: false, + explicitlyCreateCollection: true, + command: function(dbName, collName) { + return {validateDBMetadata: 1, apiParameters: {version: "1"}}; + }, + } + }, waitForFailPoint: {skip: "executes locally on mongos (not sent to any remote node)"}, whatsmyuri: {skip: "executes locally on mongos (not sent to any remote node)"}, }; diff --git a/jstests/sharding/read_write_concern_defaults_application.js b/jstests/sharding/read_write_concern_defaults_application.js index 78881616721..f0beeb00a2c 100644 --- a/jstests/sharding/read_write_concern_defaults_application.js +++ b/jstests/sharding/read_write_concern_defaults_application.js @@ -663,6 +663,7 @@ let testCases = { updateZoneKeyRange: {skip: "does not accept read or write concern"}, usersInfo: {skip: "does not accept read or write concern"}, validate: {skip: "does not accept read or write concern"}, + validateDBMetadata: {skip: "does not accept read or write concern"}, voteCommitImportCollection: {skip: "internal command"}, voteCommitIndexBuild: {skip: "internal command"}, waitForFailPoint: {skip: "does not accept read or write concern"}, diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js index b207d25d91c..62c5558db53 100644 --- a/jstests/sharding/safe_secondary_reads_drop_recreate.js +++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js @@ -309,6 +309,7 @@ let testCases = { updateZoneKeyRange: {skip: "primary only"}, usersInfo: {skip: "primary only"}, validate: {skip: "does not return user data"}, + validateDBMetadata: {skip: "does not return user data"}, waitForFailPoint: {skip: "does not return user data"}, waitForOngoingChunkSplits: {skip: "does not return user data"}, whatsmyuri: {skip: "does not return user data"} diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js index 
f90dbb90f21..526e0617cf4 100644 --- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js +++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js @@ -381,6 +381,7 @@ let testCases = { updateZoneKeyRange: {skip: "primary only"}, usersInfo: {skip: "primary only"}, validate: {skip: "does not return user data"}, + validateDBMetadata: {skip: "does not return user data"}, waitForFailPoint: {skip: "does not return user data"}, waitForOngoingChunkSplits: {skip: "does not return user data"}, whatsmyuri: {skip: "does not return user data"} diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js index a81d7c0b47e..039071310a2 100644 --- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js +++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js @@ -316,6 +316,7 @@ let testCases = { updateZoneKeyRange: {skip: "primary only"}, usersInfo: {skip: "primary only"}, validate: {skip: "does not return user data"}, + validateDBMetadata: {skip: "does not return user data"}, waitForOngoingChunkSplits: {skip: "does not return user data"}, waitForFailPoint: {skip: "does not return user data"}, whatsmyuri: {skip: "does not return user data"} diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp index 44af31437c5..60f30b88eb2 100644 --- a/src/mongo/db/catalog/database_impl.cpp +++ b/src/mongo/db/catalog/database_impl.cpp @@ -282,7 +282,10 @@ void DatabaseImpl::getStats(OperationContext* opCtx, BSONObjBuilder* output, dou return true; }); - ViewCatalog::get(this)->iterate(opCtx, [&](const ViewDefinition& view) { nViews += 1; }); + ViewCatalog::get(this)->iterate(opCtx, [&](const ViewDefinition& view) { + nViews += 1; + return true; + }); output->appendNumber("collections", nCollections); output->appendNumber("views", nViews); diff --git 
a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp index 1cfbd968f5f..fdc6c9d58fe 100644 --- a/src/mongo/db/catalog/index_key_validate.cpp +++ b/src/mongo/db/catalog/index_key_validate.cpp @@ -703,6 +703,12 @@ Status validateIndexSpecTTL(const BSONObj& indexSpec) { return Status::OK(); } +bool isIndexAllowedInAPIVersion1(const IndexDescriptor& indexDesc) { + const auto indexName = IndexNames::findPluginName(indexDesc.keyPattern()); + return indexName != IndexNames::TEXT && indexName != IndexNames::GEO_HAYSTACK && + !indexDesc.isSparse(); +} + GlobalInitializerRegisterer filterAllowedIndexFieldNamesInitializer( "FilterAllowedIndexFieldNames", [](InitializerContext* service) { if (filterAllowedIndexFieldNames) diff --git a/src/mongo/db/catalog/index_key_validate.h b/src/mongo/db/catalog/index_key_validate.h index e3e29f08bef..a63be7ca0fd 100644 --- a/src/mongo/db/catalog/index_key_validate.h +++ b/src/mongo/db/catalog/index_key_validate.h @@ -93,6 +93,11 @@ StatusWith<BSONObj> validateIndexSpecCollation(OperationContext* opCtx, Status validateIndexSpecTTL(const BSONObj& indexSpec); /** + * Returns whether an index is allowed in API version 1. + */ +bool isIndexAllowedInAPIVersion1(const IndexDescriptor& indexDesc); + +/** * Optional filtering function to adjust allowed index field names at startup. * Set it in a MONGO_INITIALIZER with 'FilterAllowedIndexFieldNames' as a dependant. 
*/ diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript index dc5712ad1d7..441e9daa54d 100644 --- a/src/mongo/db/commands/SConscript +++ b/src/mongo/db/commands/SConscript @@ -282,6 +282,17 @@ env.Library( ], ) +env.Library( + target='validate_db_metadata_command', + source=[ + 'validate_db_metadata.idl', + ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/idl/idl_parser', + ], +) + # Commands that are present in both mongod and embedded env.Library( target="standalone", @@ -314,6 +325,7 @@ env.Library( "run_aggregate.cpp", "sleep_command.cpp", "validate.cpp", + "validate_db_metadata_cmd.cpp", "whats_my_sni_command.cpp", "write_commands/write_commands.cpp", ], @@ -361,6 +373,7 @@ env.Library( 'list_databases_command', 'rename_collection_idl', 'test_commands_enabled', + 'validate_db_metadata_command', 'write_commands_common', ], ) diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp index aa294c308eb..7a155f3e780 100644 --- a/src/mongo/db/commands/list_collections.cpp +++ b/src/mongo/db/commands/list_collections.cpp @@ -379,7 +379,7 @@ public: if (authorizedCollections && !as->isAuthorizedForAnyActionOnResource( ResourcePattern::forExactNamespace(view.name()))) { - return; + return true; } BSONObj viewBson = buildViewBson(view, nameOnly); @@ -387,6 +387,7 @@ public: _addWorkingSetMember( opCtx, viewBson, matcher.get(), ws.get(), root.get()); } + return true; }); } } diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp index 15fe66d7c37..37a930a5304 100644 --- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp +++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp @@ -359,6 +359,7 @@ public: "downgrading. 
First detected time-series collection: " << view.name(), !view.timeseries()); + return true; }); } diff --git a/src/mongo/db/commands/validate_db_metadata.idl b/src/mongo/db/commands/validate_db_metadata.idl new file mode 100644 index 00000000000..c44a9dd1807 --- /dev/null +++ b/src/mongo/db/commands/validate_db_metadata.idl @@ -0,0 +1,83 @@ +# Copyright (C) 2021-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# <http://www.mongodb.com/licensing/server-side-public-license>. +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. 
+# + +global: + cpp_namespace: "mongo" + cpp_includes: + - "mongo/util/uuid.h" + +imports: + - "mongo/idl/basic_types.idl" + +structs: + ErrorReplyElement: + description: "Error reply element of validateDBMetadata command's reply." + fields: + shard: + type: string + optional: true + ns: string + code: int + codeName: string + errmsg: string + + ValidateDBMetadataReply: + description: "The validateDBMetadata command's reply." + fields: + apiVersionErrors: + type: array<ErrorReplyElement> + optional: true + hasMoreErrors: + type: optionalBool + APIParamsForCmd: + description: "Structure defining the API parameters for validateDBMetadata." + fields: + version: + type: string + strict: + type: optionalBool + deprecationErrors: + type: optionalBool + +commands: + validateDBMetadata: + description: "Input request for validateDBMetadata command." + command_name: validateDBMetadata + namespace: ignored + api_version: "" + reply_type: ValidateDBMetadataReply + fields: + db: + type: string + optional: true + collection: + type: string + optional: true + apiParameters: + type: APIParamsForCmd + diff --git a/src/mongo/db/commands/validate_db_metadata_cmd.cpp b/src/mongo/db/commands/validate_db_metadata_cmd.cpp new file mode 100644 index 00000000000..ec468c9adde --- /dev/null +++ b/src/mongo/db/commands/validate_db_metadata_cmd.cpp @@ -0,0 +1,245 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. 
If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand + +#include "mongo/platform/basic.h" + +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog_helper.h" +#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_key_validate.h" +#include "mongo/db/commands.h" +#include "mongo/db/commands/validate_db_metadata_common.h" +#include "mongo/db/commands/validate_db_metadata_gen.h" +#include "mongo/db/db_raii.h" +#include "mongo/db/views/view_catalog.h" +#include "mongo/logv2/log.h" +namespace mongo { +namespace { +void assertUserCanRunValidateOnDb(OperationContext* opCtx, StringData dbName) { + uassert(ErrorCodes::Unauthorized, + str::stream() << "Not authorized to run validateDBMetadata command on database '" + << dbName << "'", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnNamespace(NamespaceString(dbName), ActionType::validate)); +} + +void overrideAPIParams(OperationContext* opCtx, const 
APIParamsForCmd& params) { + APIParameters apiParameters; + apiParameters.setAPIVersion(params.getVersion()); + apiParameters.setAPIStrict(params.getStrict()); + apiParameters.setAPIDeprecationErrors(params.getDeprecationErrors()); + APIParameters::get(opCtx) = std::move(apiParameters); +} + +} // namespace + +/** + * Example validate command: + * { + * validateDBMeta: 1, + * db: <string>, + * collection: <string>, + * apiParameters: {version: <string>, strict: <bool>, deprecationErrors: <bool>} + * } + */ +class ValidateDBMetadataCmd : public TypedCommand<ValidateDBMetadataCmd> { + using _TypedCommandInvocationBase = + typename TypedCommand<ValidateDBMetadataCmd>::InvocationBase; + +public: + using Request = ValidateDBMetadata; + using Reply = ValidateDBMetadataReply; + + AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { + return AllowedOnSecondary::kAlways; + } + + bool maintenanceOk() const { + return false; + } + + std::string help() const override { + return str::stream() + << "validateDBMetadata checks that the stored metadata of a database/collection is " + "valid within a particular API version. If 'db' parameter is specified, only runs " + "validation against that database, if not the validation will be run againt all " + "dbs. Similarly if 'collection' parameter is specified, the validation is only " + "run against that collection, if not the validation is run against all collections."; + } + class Invocation : public _TypedCommandInvocationBase { + public: + using _TypedCommandInvocationBase::_TypedCommandInvocationBase; + + bool supportsWriteConcern() const final { + return false; + } + NamespaceString ns() const final { + return NamespaceString(request().getDbName()); + } + void doCheckAuthorization(OperationContext* opCtx) const final { + // Note that we need to do addditional authorization checks if 'db' field is not + // specified. This is done while iterating through the indiviual databases. 
+ assertUserCanRunValidateOnDb(opCtx, request().getDbName()); + } + + Reply typedRun(OperationContext* opCtx) { + overrideAPIParams(opCtx, request().getApiParameters()); + runApiVersionValidation(opCtx); + + _reply.setApiVersionErrors(std::move(apiVersionErrors)); + + // Reset API parameters. + APIParameters::get(opCtx) = APIParameters(); + return _reply; + } + + private: + void runApiVersionValidation(OperationContext* opCtx) { + auto collectionCatalog = CollectionCatalog::get(opCtx); + auto validateCmdRequest = this->request(); + + // If there is no database name present in the input, run validation against all the + // databases. + auto dbNames = validateCmdRequest.getDb() + ? std::vector<std::string>{validateCmdRequest.getDb()->toString()} + : collectionCatalog->getAllDbNames(); + + for (const auto& dbName : dbNames) { + assertUserCanRunValidateOnDb(opCtx, dbName); + + AutoGetDb autoDb(opCtx, dbName, LockMode::MODE_IS); + if (!autoDb.getDb()) { + continue; + } + + if (validateCmdRequest.getCollection()) { + if (!_validateNamespace( + opCtx, NamespaceString(dbName, *validateCmdRequest.getCollection()))) { + return; + } + continue; + } + + // If there is no collection name present in the input, run validation against all + // the collections. + if (auto viewCatalog = DatabaseHolder::get(opCtx)->getViewCatalog(opCtx, dbName)) { + viewCatalog->iterate(opCtx, [this, opCtx](const ViewDefinition& view) { + return _validateView(opCtx, view); + }); + } + + for (auto collIt = collectionCatalog->begin(opCtx, dbName); + collIt != collectionCatalog->end(opCtx); + ++collIt) { + if (!_validateNamespace( + opCtx, + collectionCatalog->lookupNSSByUUID(opCtx, collIt.uuid().get()).get())) { + return; + } + } + } + } + + /** + * Returns false, if the evaluation needs to be aborted. 
+ */ + bool _validateView(OperationContext* opCtx, const ViewDefinition& view) { + auto pipelineStatus = ViewCatalog::validatePipeline(opCtx, view); + if (!pipelineStatus.isOK()) { + ErrorReplyElement error(view.name().ns(), + ErrorCodes::APIStrictError, + ErrorCodes::errorString(ErrorCodes::APIStrictError), + pipelineStatus.getStatus().reason()); + if (!_sizeTracker.incrementAndCheckOverflow(error)) { + _reply.setHasMoreErrors(true); + return false; + } + apiVersionErrors.push_back(error); + } + return true; + } + + /** + * Returns false, if the evaluation needs to be aborted. + */ + bool _validateNamespace(OperationContext* opCtx, const NamespaceStringOrUUID& coll) { + bool apiStrict = APIParameters::get(opCtx).getAPIStrict().value_or(false); + auto apiVersion = APIParameters::get(opCtx).getAPIVersion().value_or(""); + + // We permit views here so that user requested views can be allowed. + AutoGetCollection collection( + opCtx, coll, LockMode::MODE_IS, AutoGetCollectionViewMode::kViewsPermitted); + + // If it view, just do the validations for view. + if (auto viewDef = collection.getView()) { + return _validateView(opCtx, *viewDef); + } + + // TODO SERVER-53218: Add validation for collection validator. + + const auto* collPtr = collection.getCollection().get(); + if (!collPtr) { + return true; + } + + // Ensure there are no unstable indexes. + const auto* indexCatalog = collPtr->getIndexCatalog(); + std::unique_ptr<IndexCatalog::IndexIterator> ii = + indexCatalog->getIndexIterator(opCtx, true /* includeUnfinishedIndexes */); + while (ii->more()) { + // Check if the index is allowed in API version 1. 
+ const IndexDescriptor* desc = ii->next()->descriptor(); + if (apiStrict && apiVersion == "1" && + !index_key_validate::isIndexAllowedInAPIVersion1(*desc)) { + ErrorReplyElement error(coll.nss()->ns(), + ErrorCodes::APIStrictError, + ErrorCodes::errorString(ErrorCodes::APIStrictError), + str::stream() + << "The index with name " << desc->indexName() + << " is not allowed in API version 1."); + if (!_sizeTracker.incrementAndCheckOverflow(error)) { + _reply.setHasMoreErrors(true); + return false; + } + apiVersionErrors.push_back(error); + } + } + return true; + } + + ValidateDBMetadataSizeTracker _sizeTracker; + std::vector<ErrorReplyElement> apiVersionErrors; + ValidateDBMetadataReply _reply; + }; +} validateDBMetadataCmd; +} // namespace mongo diff --git a/src/mongo/db/commands/validate_db_metadata_common.h b/src/mongo/db/commands/validate_db_metadata_common.h new file mode 100644 index 00000000000..400121a06a1 --- /dev/null +++ b/src/mongo/db/commands/validate_db_metadata_common.h @@ -0,0 +1,50 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. 
You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include "mongo/bson/util/builder.h" +#include "mongo/db/commands/validate_db_metadata_gen.h" + +namespace mongo { + +struct ValidateDBMetadataSizeTracker { + bool incrementAndCheckOverflow(const ErrorReplyElement& obj) { + // The field name in the array should be at most 7 digits. In addition each element will use + // 2 additional bytes for type byte, and null termination of the field name. Note that we + // are intentionally overestimating the size of the delta here, so that we have sufficient + // space for other fields in the output. 
+ currentSize += obj.toBSON().objsize() + 15; + return currentSize < BSONObjMaxUserSize; + } + +private: + size_t currentSize = 0; +}; +} // namespace mongo diff --git a/src/mongo/db/pipeline/expression_test_api_version.cpp b/src/mongo/db/pipeline/expression_test_api_version.cpp index 8428d6d78ab..9c7cfbb1439 100644 --- a/src/mongo/db/pipeline/expression_test_api_version.cpp +++ b/src/mongo/db/pipeline/expression_test_api_version.cpp @@ -69,6 +69,16 @@ boost::intrusive_ptr<Expression> ExpressionTestApiVersion::parse(ExpressionConte str::stream() << field << " is not a valid argument for $_testApiVersion"); } + const auto& apiParams = expCtx->apiParameters; + if (apiParams.getAPIStrict().value_or(false) && unstableField) { + uasserted(ErrorCodes::APIStrictError, + "Provided apiStrict is true with an unstable command."); + } + if (apiParams.getAPIDeprecationErrors().value_or(false) && deprecatedField) { + uasserted(ErrorCodes::APIDeprecationError, + "Provided apiDeprecatedErrors is true with a deprecated command."); + } + return new ExpressionTestApiVersion(expCtx, unstableField, deprecatedField); } @@ -79,18 +89,6 @@ Value ExpressionTestApiVersion::serialize(bool explain) const { } Value ExpressionTestApiVersion::evaluate(const Document& root, Variables* variables) const { - APIParameters apiParams = getExpressionContext()->apiParameters; - - if (apiParams.getAPIStrict().value_or(false) && _unstable) { - uasserted(ErrorCodes::APIStrictError, - "Provided apiStrict is true with an unstable command."); - } - - if (apiParams.getAPIDeprecationErrors().value_or(false) && _deprecated) { - uasserted(ErrorCodes::APIDeprecationError, - "Provided apiDeprecatedErrors is true with a deprecated command."); - } - return Value(1); } diff --git a/src/mongo/db/pipeline/lite_parsed_pipeline.cpp b/src/mongo/db/pipeline/lite_parsed_pipeline.cpp index 65bb32194d9..c737d837ba7 100644 --- a/src/mongo/db/pipeline/lite_parsed_pipeline.cpp +++ b/src/mongo/db/pipeline/lite_parsed_pipeline.cpp 
@@ -132,6 +132,10 @@ void LiteParsedPipeline::validatePipelineStagesIfAPIStrict(const std::string& ve << " is not allowed with 'apiStrict: true' in API Version " << version, isStageInAPIVersion1(stage->getParseTimeName())); + + for (auto&& subPipeline : stage->getSubPipelines()) { + subPipeline.validatePipelineStagesIfAPIStrict(version); + } } } } diff --git a/src/mongo/db/pipeline/lite_parsed_pipeline.h b/src/mongo/db/pipeline/lite_parsed_pipeline.h index fbe50b26adf..00a01ff6ba5 100644 --- a/src/mongo/db/pipeline/lite_parsed_pipeline.h +++ b/src/mongo/db/pipeline/lite_parsed_pipeline.h @@ -172,7 +172,7 @@ public: /** * Returns true if 'stageName' is in API Version 1. */ - bool isStageInAPIVersion1(const std::string& stageName) const { + static bool isStageInAPIVersion1(const std::string& stageName) { // These stages are excluded from API Version1 with 'apiStrict: true'. static const stdx::unordered_set<std::string> stagesExcluded = {"$collStats", "$currentOp", diff --git a/src/mongo/db/views/view_catalog.cpp b/src/mongo/db/views/view_catalog.cpp index 298a4b26ca5..0132c61ee2c 100644 --- a/src/mongo/db/views/view_catalog.cpp +++ b/src/mongo/db/views/view_catalog.cpp @@ -269,7 +269,9 @@ void ViewCatalog::_requireValidCatalog() const { void ViewCatalog::iterate(OperationContext* opCtx, ViewIteratorCallback callback) const { _requireValidCatalog(); for (auto&& view : _viewMap) { - callback(*view.second); + if (!callback(*view.second)) { + break; + } } } @@ -338,7 +340,7 @@ Status ViewCatalog::_upsertIntoGraph(OperationContext* opCtx, const ViewDefiniti auto doInsert = [this, &opCtx](const ViewDefinition& viewDef, bool needsValidation) -> Status { // Validate that the pipeline is eligible to serve as a view definition. If it is, this // will also return the set of involved namespaces. 
- auto pipelineStatus = _validatePipeline(opCtx, viewDef); + auto pipelineStatus = validatePipeline(opCtx, viewDef); if (!pipelineStatus.isOK()) { if (needsValidation) { uassertStatusOKWithContext(pipelineStatus.getStatus(), @@ -390,8 +392,8 @@ Status ViewCatalog::_upsertIntoGraph(OperationContext* opCtx, const ViewDefiniti return doInsert(viewDef, true); } -StatusWith<stdx::unordered_set<NamespaceString>> ViewCatalog::_validatePipeline( - OperationContext* opCtx, const ViewDefinition& viewDef) const { +StatusWith<stdx::unordered_set<NamespaceString>> ViewCatalog::validatePipeline( + OperationContext* opCtx, const ViewDefinition& viewDef) { const LiteParsedPipeline liteParsedPipeline(viewDef.viewOn(), viewDef.pipeline()); const auto involvedNamespaces = liteParsedPipeline.getInvolvedNamespaces(); diff --git a/src/mongo/db/views/view_catalog.h b/src/mongo/db/views/view_catalog.h index cc07ba8a6ae..1b728819bd8 100644 --- a/src/mongo/db/views/view_catalog.h +++ b/src/mongo/db/views/view_catalog.h @@ -66,7 +66,7 @@ class Database; class ViewCatalog { public: using ViewMap = StringMap<std::shared_ptr<ViewDefinition>>; - using ViewIteratorCallback = std::function<void(const ViewDefinition& view)>; + using ViewIteratorCallback = std::function<bool(const ViewDefinition& view)>; static std::shared_ptr<const ViewCatalog> get(const Database* db); static void set(Database* db, std::unique_ptr<ViewCatalog> catalog); @@ -77,7 +77,8 @@ public: /** * Iterates through the catalog, applying 'callback' to each view. This callback function * executes under the catalog's mutex, so it must not access other methods of the catalog, - * acquire locks or run for a long time. + * acquire locks or run for a long time. If the 'callback' returns false, the iterator exits + * early. 
*/ void iterate(OperationContext* opCtx, ViewIteratorCallback callback) const; @@ -139,6 +140,13 @@ public: StatusWith<ResolvedView> resolveView(OperationContext* opCtx, const NamespaceString& nss) const; /** + * Returns Status::OK with the set of involved namespaces if the given pipeline is eligible to + * act as a view definition. Otherwise, returns ErrorCodes::OptionNotSupportedOnView. + */ + static StatusWith<stdx::unordered_set<NamespaceString>> validatePipeline( + OperationContext* opCtx, const ViewDefinition& viewDef); + + /** * Reloads the in-memory state of the view catalog from the 'system.views' collection catalog. * If the 'lookupBehavior' is 'kValidateDurableViews', then the durable view definitions will be * validated. Reading stops on the first invalid entry with errors logged and returned. Performs @@ -177,13 +185,6 @@ private: Status _upsertIntoGraph(OperationContext* opCtx, const ViewDefinition& viewDef); /** - * Returns Status::OK with the set of involved namespaces if the given pipeline is eligible to - * act as a view definition. Otherwise, returns ErrorCodes::OptionNotSupportedOnView. - */ - StatusWith<stdx::unordered_set<NamespaceString>> _validatePipeline( - OperationContext* opCtx, const ViewDefinition& viewDef) const; - - /** * Returns Status::OK if each view namespace in 'refs' has the same default collation as 'view'. * Otherwise, returns ErrorCodes::OptionNotSupportedOnView. 
*/ diff --git a/src/mongo/db/views/view_catalog_test.cpp b/src/mongo/db/views/view_catalog_test.cpp index dc7b25d4334..48539c03415 100644 --- a/src/mongo/db/views/view_catalog_test.cpp +++ b/src/mongo/db/views/view_catalog_test.cpp @@ -653,6 +653,7 @@ TEST_F(ViewCatalogFixture, Iterate) { std::string name = view.name().toString(); ASSERT(viewNames.end() != viewNames.find(name)); viewNames.erase(name); + return true; }); ASSERT(viewNames.empty()); diff --git a/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp b/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp index 0e44243cd78..dfe8705528d 100644 --- a/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp +++ b/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp @@ -602,6 +602,7 @@ TEST_F(MongodbCAPITest, RunListCommands) { "startSession", "update", "validate", + "validateDBMetadata", "waitForFailPoint", "whatsmysni"}; diff --git a/src/mongo/s/commands/SConscript b/src/mongo/s/commands/SConscript index 0d20f144e62..ac43a4527a2 100644 --- a/src/mongo/s/commands/SConscript +++ b/src/mongo/s/commands/SConscript @@ -93,6 +93,7 @@ env.Library( 'cluster_update_zone_key_range_cmd.cpp', 'cluster_user_management_commands.cpp', 'cluster_validate_cmd.cpp', + 'cluster_validate_db_metadata_cmd.cpp', 'cluster_whats_my_uri_cmd.cpp', 'cluster_write_cmd.cpp', 'document_shard_key_update_util.cpp', @@ -123,6 +124,7 @@ env.Library( '$BUILD_DIR/mongo/db/commands/set_index_commit_quorum_idl', '$BUILD_DIR/mongo/db/commands/shutdown_idl', '$BUILD_DIR/mongo/db/commands/test_commands_enabled', + '$BUILD_DIR/mongo/db/commands/validate_db_metadata_command', '$BUILD_DIR/mongo/db/commands/write_commands_common', '$BUILD_DIR/mongo/db/ftdc/ftdc_server', '$BUILD_DIR/mongo/db/initialize_api_parameters', @@ -165,6 +167,7 @@ env.CppUnitTest( "cluster_find_test.cpp", "cluster_insert_test.cpp", "cluster_update_test.cpp", + "cluster_validate_db_metadata_cmd_test.cpp", "document_shard_key_update_test.cpp", ], LIBDEPS=[ diff 
--git a/src/mongo/s/commands/cluster_command_test_fixture.cpp b/src/mongo/s/commands/cluster_command_test_fixture.cpp index 76f294518b5..d798e3543b1 100644 --- a/src/mongo/s/commands/cluster_command_test_fixture.cpp +++ b/src/mongo/s/commands/cluster_command_test_fixture.cpp @@ -127,10 +127,10 @@ DbResponse ClusterCommandTestFixture::runCommand(BSONObj cmd) { return Strategy::clientCommand(std::move(rec)).get(); } -void ClusterCommandTestFixture::runCommandSuccessful(BSONObj cmd, bool isTargeted) { +DbResponse ClusterCommandTestFixture::runCommandSuccessful(BSONObj cmd, bool isTargeted) { auto future = launchAsync([&] { // Shouldn't throw. - runCommand(cmd); + return runCommand(cmd); }); size_t numMocks = isTargeted ? 1 : numShards; @@ -138,7 +138,7 @@ void ClusterCommandTestFixture::runCommandSuccessful(BSONObj cmd, bool isTargete expectReturnsSuccess(i % numShards); } - future.default_timed_get(); + return future.default_timed_get(); } void ClusterCommandTestFixture::runTxnCommandOneError(BSONObj cmd, diff --git a/src/mongo/s/commands/cluster_command_test_fixture.h b/src/mongo/s/commands/cluster_command_test_fixture.h index 9c5406c073b..6a2f869afe6 100644 --- a/src/mongo/s/commands/cluster_command_test_fixture.h +++ b/src/mongo/s/commands/cluster_command_test_fixture.h @@ -60,7 +60,7 @@ protected: DbResponse runCommand(BSONObj cmd); - void runCommandSuccessful(BSONObj cmd, bool isTargeted); + DbResponse runCommandSuccessful(BSONObj cmd, bool isTargeted); void runTxnCommandOneError(BSONObj cmd, ErrorCodes::Error code, bool isTargeted); diff --git a/src/mongo/s/commands/cluster_validate_db_metadata_cmd.cpp b/src/mongo/s/commands/cluster_validate_db_metadata_cmd.cpp new file mode 100644 index 00000000000..2cf49097cce --- /dev/null +++ b/src/mongo/s/commands/cluster_validate_db_metadata_cmd.cpp @@ -0,0 +1,134 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand + +#include "mongo/platform/basic.h" + +#include "mongo/db/commands.h" +#include "mongo/db/commands/validate_db_metadata_common.h" +#include "mongo/db/commands/validate_db_metadata_gen.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/grid.h" + +namespace mongo { +namespace { + +class ValidateDBMetadataCmd : public BasicCommand { +public: + ValidateDBMetadataCmd() : BasicCommand("validateDBMetadata") {} + + std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override { + return NamespaceString(dbname).ns(); + } + + AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { + return AllowedOnSecondary::kAlways; + } + + bool adminOnly() const override { + return false; + } + + void addRequiredPrivileges(const std::string& dbname, + const BSONObj& cmdObj, + std::vector<Privilege>* out) const override { + ActionSet actions; + actions.addAction(ActionType::validate); + out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions)); + } + + bool supportsWriteConcern(const BSONObj& cmd) const override { + return false; + } + + bool run(OperationContext* opCtx, + const std::string& dbName, + const BSONObj& cmdObj, + BSONObjBuilder& output) override { + auto shardResponses = scatterGatherUnversionedTargetAllShards( + opCtx, + dbName, + applyReadWriteConcern( + opCtx, this, CommandHelpers::filterCommandRequestForPassthrough(cmdObj)), + ReadPreferenceSetting::get(opCtx), + Shard::RetryPolicy::kNotIdempotent); + + bool hasMoreErrors = false; + std::vector<ErrorReplyElement> apiVersionErrorsToReturn; + ValidateDBMetadataSizeTracker sizeTracker; + for (auto&& shardRes : shardResponses) { + // Re-throw errors from any shard. 
+ auto shardOutput = uassertStatusOK(shardRes.swResponse).data; + uassertStatusOK(getStatusFromCommandResult(shardOutput)); + + auto apiVersionErrors = + shardOutput[ValidateDBMetadataReply::kApiVersionErrorsFieldName]; + tassert(5287400, + "The 'apiVersionErrors' field returned from shards should be an array ", + apiVersionErrors && apiVersionErrors.type() == Array); + for (auto&& error : apiVersionErrors.Array()) { + tassert(5287401, + "The array element in 'apiVersionErrors' should be object", + error.type() == Object); + ErrorReplyElement apiVersionError = ErrorReplyElement::parse( + IDLParserErrorContext("ErrorReplyElement"), error.Obj()); + + // Ensure that the final output doesn't exceed max BSON size. + apiVersionError.setShard(StringData(shardRes.shardId.toString())); + if (!sizeTracker.incrementAndCheckOverflow(apiVersionError)) { + hasMoreErrors = true; + break; + } + + apiVersionErrorsToReturn.push_back(std::move(apiVersionError)); + } + if (hasMoreErrors || + shardOutput.getField(ValidateDBMetadataReply::kHasMoreErrorsFieldName) + .trueValue()) { + hasMoreErrors = true; + break; + } + } + + ValidateDBMetadataReply reply; + reply.setApiVersionErrors(std::move(apiVersionErrorsToReturn)); + if (hasMoreErrors) { + reply.setHasMoreErrors(true); + } + reply.serialize(&output); + return true; + } + +} validateDBMetadataCmd; + +} // namespace +} // namespace mongo diff --git a/src/mongo/s/commands/cluster_validate_db_metadata_cmd_test.cpp b/src/mongo/s/commands/cluster_validate_db_metadata_cmd_test.cpp new file mode 100644 index 00000000000..d3567e3a87b --- /dev/null +++ b/src/mongo/s/commands/cluster_validate_db_metadata_cmd_test.cpp @@ -0,0 +1,131 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * <http://www.mongodb.com/licensing/server-side-public-license>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault + +#include "mongo/platform/basic.h" + +#include "mongo/s/commands/cluster_command_test_fixture.h" + +namespace mongo { +namespace { + +class ClusterValidateDBMetadataTest : public ClusterCommandTestFixture { +protected: + const BSONObj kCommand{fromjson("{validateDBMetadata: 1}")}; + + void expectInspectRequest(int shardIndex, InspectionCallback cb) override { + onCommandForPoolExecutor([&](const executor::RemoteCommandRequest& request) { + cb(request); + BSONObjBuilder bob; + appendTxnResponseMetadata(bob); + return bob.obj(); + }); + } + + void expectReturnsSuccess(int shardIndex) override { + onCommandForPoolExecutor([this, shardIndex](const executor::RemoteCommandRequest& request) { + BSONObjBuilder bob; + if (shardIndex == 0) { + bob.append("apiVersionErrors", apiVersionErrorsShard1); + } else { + bob.append("apiVersionErrors", apiVersionErrorsShard2); + } + if (hasMoreErrors) { + bob.append("hasMoreErrors", true); + } + + appendTxnResponseMetadata(bob); + return bob.obj(); + }); + } + + std::vector<BSONObj> apiVersionErrorsShard1; + std::vector<BSONObj> apiVersionErrorsShard2; + bool hasMoreErrors = false; +}; + +TEST_F(ClusterValidateDBMetadataTest, AppendsErrorsFromShards) { + apiVersionErrorsShard1 = {BSON("ns" + << "test.ns" + << "code" << 9 << "codeName" + << "APIStrictError" + << "errmsg" + << " Error")}; + apiVersionErrorsShard2 = {BSON("ns" + << "test.ns" + << "code" << 19 << "codeName" + << "APIStrictError" + << "errmsg" + << " Error"), + BSON("ns" + << "test.ns" + << "code" << 19 << "codeName" + << "APIStrictError" + << "errmsg" + << " Error")}; + auto res = runCommandSuccessful(kCommand, false); + + const auto outputFromMongos = OpMsg::parse(res.response).body; + ASSERT(outputFromMongos.getField("apiVersionErrors").type() == Array); + ASSERT_EQ(outputFromMongos.getField("apiVersionErrors").Array().size(), 3); + 
ASSERT_FALSE(outputFromMongos.hasField("hasMoreErrors")); +} + +TEST_F(ClusterValidateDBMetadataTest, MaxBSONSizeAfterAccumulation) { + const auto errorObj = BSON("ns" + << "test.ns" + << "code" << 9 << "codeName" + << "APIStrictError" + << "errmsg" + << " Error"); + + // Create two arrays whose size is less than BSONObjMaxUserSize / 2, and verify that the mongos + // still returns 'hasMoreErrors' flag. This is because we add additional fields like 'shard' to + // the response from mongos. + BSONArrayBuilder bob; + while (bob.len() < BSONObjMaxUserSize / 2) { + bob.append(errorObj); + apiVersionErrorsShard1.push_back(errorObj); + } + + apiVersionErrorsShard2 = apiVersionErrorsShard1; + + auto res = runCommandSuccessful(kCommand, false); + + const auto outputFromMongos = OpMsg::parse(res.response).body; + ASSERT(outputFromMongos.getField("apiVersionErrors").type() == Array); + ASSERT(outputFromMongos.getField("apiVersionErrors").Array().size() < + 2 * apiVersionErrorsShard1.size()); + ASSERT(outputFromMongos.hasField("hasMoreErrors")); +} + + +} // namespace +} // namespace mongo |