author     Israel Hsu <israel.hsu@mongodb.com>  2022-10-03 16:43:15 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-10-03 18:12:16 +0000
commit     de38a0768992b14b13a010f80a350bc5f7f1d942 (patch)
tree       e5281fe2e1ceb7d272abbc5694d536ebfcf3420c
parent     a14665e02e841e43d2bc057e004aeacf0c01c719 (diff)
download   mongo-de38a0768992b14b13a010f80a350bc5f7f1d942.tar.gz
SERVER-69150 implement configureQueryAnalyzer on mongod
-rw-r--r--  jstests/sharding/analyze_shard_key/config_query_analyzer_persistence.js | 203
-rw-r--r--  src/mongo/db/namespace_string.cpp | 3
-rw-r--r--  src/mongo/db/namespace_string.h | 3
-rw-r--r--  src/mongo/db/s/configure_query_analyzer_cmd.cpp | 153
-rw-r--r--  src/mongo/db/s/drop_collection_coordinator.cpp | 2
-rw-r--r--  src/mongo/db/s/drop_collection_coordinator_document.idl | 2
-rw-r--r--  src/mongo/db/s/drop_database_coordinator.cpp | 3
-rw-r--r--  src/mongo/db/s/sharding_ddl_util.cpp | 37
-rw-r--r--  src/mongo/db/s/sharding_ddl_util.h | 7
-rw-r--r--  src/mongo/s/SConscript | 1
-rw-r--r--  src/mongo/s/analyze_shard_key_documents.idl | 49
-rw-r--r--  src/mongo/s/configure_query_analyzer_cmd.idl | 8
12 files changed, 401 insertions, 70 deletions
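
For context, the configureQueryAnalyzer command added by this change is run against the admin database with a collection namespace. A minimal shell sketch, mirroring the calls made in the new jstest (the namespace and sample rate below are placeholder values, not part of this commit):

    // Enable full query sampling for a collection (values are illustrative).
    db.adminCommand({configureQueryAnalyzer: "testDb.collection", mode: "full", sampleRate: 0.5});
    // Disable sampling; 'sampleRate' must be omitted when mode is "off".
    db.adminCommand({configureQueryAnalyzer: "testDb.collection", mode: "off"});
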
diff --git a/jstests/sharding/analyze_shard_key/config_query_analyzer_persistence.js b/jstests/sharding/analyze_shard_key/config_query_analyzer_persistence.js
new file mode 100644
index 00000000000..94251101ab4
--- /dev/null
+++ b/jstests/sharding/analyze_shard_key/config_query_analyzer_persistence.js
@@ -0,0 +1,203 @@
+/**
+ * Tests that the configureQueryAnalyzer command persists the configuration in a document
+ * in config.queryAnalyzers and that the document is deleted when the associated collection
+ * or its database is dropped.
+ *
+ * @tags: [requires_fcv_62, featureFlagAnalyzeShardKey]
+ */
+
+(function() {
+"use strict";
+
+const dbName = "testDb";
+
+/**
+ * TestCase: {
+ * command: {ns: "<collection namespace>",
+ * mode: "full"|"off",
+ * sampleRate: 1.2},
+ * }
+ */
+
+/**
+ * Create documents representing all option combinations for the configureQueryAnalyzer command.
+ * @returns array of documents
+ */
+function optionsAllCombinations() {
+ const testCases = [];
+ const collName = "collection";
+ for (const mode of ["off", "full"]) {
+ for (const sampleRate of [null, -1.0, 0.0, 0.2]) {
+ let testCase =
+ Object.assign({}, {command: {ns: dbName + "." + collName, mode, sampleRate}});
+ if (sampleRate == null) {
+ delete testCase.command.sampleRate;
+ }
+ if ((mode == "off" && sampleRate !== null) ||
+ (mode == "full" &&
+ (sampleRate == null || typeof sampleRate !== "number" || sampleRate <= 0.0))) {
+ continue; // These cases are tested in configure_query_analyzer_basic.js.
+ }
+ testCases.push(testCase);
+ }
+ }
+ return testCases;
+}
+
+function assertConfigQueryAnalyzerResponse(res, mode, sampleRate) {
+ assert.eq(res.ok, 1);
+ assert.eq(res.mode, mode);
+ assert.eq(res.sampleRate, sampleRate);
+}
+
+function assertQueryAnalyzerConfigDoc(configDb, db, collName, mode, sampleRate) {
+ const configColl = configDb.getCollection('queryAnalyzers');
+ const listCollRes =
+ assert.commandWorked(db.runCommand({listCollections: 1, filter: {name: collName}}));
+ const uuid = listCollRes.cursor.firstBatch[0].info.uuid;
+ const doc = configColl.findOne({_id: uuid});
+ assert.eq(doc.mode, mode, doc);
+ if (mode == "off") {
+ assert.eq(doc.hasOwnProperty("sampleRate"), false, doc);
+ } else if (mode == "full") {
+ assert.eq(doc.sampleRate, sampleRate, doc);
+ }
+}
+
+function assertNoQueryAnalyzerConfigDoc(configDb, db, collName) {
+ const configColl = configDb.getCollection('queryAnalyzers');
+ const ns = db.getName() + "." + collName;
+ const doc = configColl.findOne({ns: ns});
+ assert.eq(doc, null, doc);
+}
+
+function testConfigurationOptions(conn, testCases) {
+ const collName = "collection";
+ const db = conn.getDB(dbName);
+ const coll = db.getCollection(collName);
+ let config = conn.getDB('config');
+ assert.commandWorked(coll.remove({}));
+ assert.commandWorked(db.runCommand({insert: collName, documents: [{x: 1}]}));
+
+ testCases.forEach(testCase => {
+ jsTest.log(`Running configureQueryAnalyzer command on test case ${tojson(testCase)}`);
+
+ const res = conn.adminCommand({
+ configureQueryAnalyzer: testCase.command.ns,
+ mode: testCase.command.mode,
+ sampleRate: testCase.command.sampleRate
+ });
+ assert.commandWorked(res);
+ assertConfigQueryAnalyzerResponse(res, testCase.command.mode, testCase.command.sampleRate);
+ assertQueryAnalyzerConfigDoc(
+ config, db, collName, testCase.command.mode, testCase.command.sampleRate);
+ });
+}
+
+function testDropCollectionDeletesConfig(conn) {
+ const db = conn.getDB(dbName);
+
+ const collNameSh = "collection2DropSh";
+ const nsSh = dbName + "." + collNameSh;
+ const collSh = db.getCollection(collNameSh);
+ const collNameUnsh = "collection2DropUnsh";
+ const nsUnsh = dbName + "." + collNameUnsh;
+ const collUnsh = db.getCollection(collNameUnsh);
+
+ const config = conn.getDB('config');
+ const shardKey = {skey: 1};
+ const shardKeySplitPoint = {skey: 2};
+
+ jsTest.log('Testing drop collection deletes query analyzer config doc');
+
+ assert.commandWorked(conn.adminCommand({shardCollection: nsSh, key: shardKey}));
+ assert.commandWorked(conn.adminCommand({split: nsSh, middle: shardKeySplitPoint}));
+
+ assert.commandWorked(db.runCommand({insert: collNameSh, documents: [{skey: 1, y: 1}]}));
+ assert.commandWorked(db.runCommand({insert: collNameUnsh, documents: [{skey: 1, y: 1}]}));
+
+ // sharded collection
+
+ const mode = "full";
+ const sampleRate = 0.5;
+ const resSh =
+ conn.adminCommand({configureQueryAnalyzer: nsSh, mode: mode, sampleRate: sampleRate});
+ assert.commandWorked(resSh);
+ assertConfigQueryAnalyzerResponse(resSh, mode, sampleRate);
+ assertQueryAnalyzerConfigDoc(config, db, collNameSh, mode, sampleRate);
+
+ collSh.drop();
+ assertNoQueryAnalyzerConfigDoc(config, db, collNameSh);
+
+ // unsharded collection
+
+ const resUnsh =
+ conn.adminCommand({configureQueryAnalyzer: nsUnsh, mode: mode, sampleRate: sampleRate});
+ assert.commandWorked(resUnsh);
+ assertConfigQueryAnalyzerResponse(resUnsh, mode, sampleRate);
+ assertQueryAnalyzerConfigDoc(config, db, collNameUnsh, mode, sampleRate);
+
+ collUnsh.drop();
+ assertNoQueryAnalyzerConfigDoc(config, db, collNameUnsh);
+}
+
+function testDropDatabaseDeletesConfig(conn) {
+ let db = conn.getDB(dbName);
+ const collNameSh = "collection2DropSh";
+ const nsSh = dbName + "." + collNameSh;
+ const collSh = db.getCollection(collNameSh);
+
+ const config = conn.getDB('config');
+ const shardKey = {skey: 1};
+ const shardKeySplitPoint = {skey: 2};
+
+ jsTest.log('Testing drop database deletes query analyzer config doc');
+ assert.commandWorked(conn.adminCommand({shardCollection: nsSh, key: shardKey}));
+ assert.commandWorked(conn.adminCommand({split: nsSh, middle: shardKeySplitPoint}));
+ assert.commandWorked(db.runCommand({insert: collNameSh, documents: [{skey: 1, y: 1}]}));
+
+ // sharded collection
+
+ const mode = "full";
+ const sampleRate = 0.5;
+ const resSh =
+ conn.adminCommand({configureQueryAnalyzer: nsSh, mode: mode, sampleRate: sampleRate});
+ assert.commandWorked(resSh);
+ assertConfigQueryAnalyzerResponse(resSh, mode, sampleRate);
+ assertQueryAnalyzerConfigDoc(config, db, collNameSh, mode, sampleRate);
+ db.dropDatabase();
+ assertNoQueryAnalyzerConfigDoc(config, db, collNameSh);
+
+ // unsharded collection
+
+ db = conn.getDB(dbName);
+ const collNameUnsh = "collection2DropUnsh";
+ const nsUnsh = dbName + "." + collNameUnsh;
+ const collUnsh = db.getCollection(collNameUnsh);
+ assert.commandWorked(db.runCommand({insert: collNameUnsh, documents: [{skey: 1, y: 1}]}));
+
+ const resUnsh =
+ conn.adminCommand({configureQueryAnalyzer: nsUnsh, mode: mode, sampleRate: sampleRate});
+ assert.commandWorked(resUnsh);
+ assertConfigQueryAnalyzerResponse(resUnsh, mode, sampleRate);
+ assertQueryAnalyzerConfigDoc(config, db, collNameUnsh, mode, sampleRate);
+ db.dropDatabase();
+ assertNoQueryAnalyzerConfigDoc(config, db, collNameUnsh);
+}
+
+{
+ const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.name}));
+
+ const testCases = optionsAllCombinations();
+ testConfigurationOptions(st.s, testCases);
+
+ testDropCollectionDeletesConfig(st.s);
+ testDropDatabaseDeletesConfig(st.s);
+
+ st.stop();
+}
+})();
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index 52ebe26faad..b5b64570504 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -192,6 +192,9 @@ const NamespaceString NamespaceString::kSetChangeStreamStateCoordinatorNamespace
const NamespaceString NamespaceString::kGlobalIndexClonerNamespace(
NamespaceString::kConfigDb, "localGlobalIndexOperations.cloner");
+const NamespaceString NamespaceString::kConfigQueryAnalyzersNamespace(NamespaceString::kConfigDb,
+ "queryAnalyzers");
+
NamespaceString NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode(StringData ns) {
if (!gMultitenancySupport) {
return NamespaceString(ns, boost::none);
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 31689947d41..98928cb6a35 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -267,6 +267,9 @@ public:
// Namespace used for storing global index cloner state documents.
static const NamespaceString kGlobalIndexClonerNamespace;
+ // Namespace used for storing query analyzer settings.
+ static const NamespaceString kConfigQueryAnalyzersNamespace;
+
/**
* Constructs an empty NamespaceString.
*/
diff --git a/src/mongo/db/s/configure_query_analyzer_cmd.cpp b/src/mongo/db/s/configure_query_analyzer_cmd.cpp
index 34a4274eea9..73f8441c1db 100644
--- a/src/mongo/db/s/configure_query_analyzer_cmd.cpp
+++ b/src/mongo/db/s/configure_query_analyzer_cmd.cpp
@@ -33,7 +33,10 @@
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/list_collections_gen.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/persistent_task_store.h"
#include "mongo/logv2/log.h"
+#include "mongo/s/analyze_shard_key_documents_gen.h"
#include "mongo/s/analyze_shard_key_feature_flag_gen.h"
#include "mongo/s/cluster_commands_helpers.h"
#include "mongo/s/configure_query_analyzer_cmd_gen.h"
@@ -46,63 +49,6 @@ namespace mongo {
namespace {
-void validateCommandOptions(OperationContext* opCtx,
- const NamespaceString& nss,
- QueryAnalyzerModeEnum mode,
- boost::optional<double> sampleRate) {
- uassert(ErrorCodes::InvalidOptions,
- "Cannot specify 'sampleRate' when 'mode' is \"off\"",
- mode != QueryAnalyzerModeEnum::kOff || !sampleRate);
-
- uassert(ErrorCodes::InvalidOptions,
- str::stream() << "'sampleRate' must be greater than 0",
- !sampleRate || (*sampleRate > 0));
-
- if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- auto dbInfo =
- uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nss.db()));
-
- ListCollections listCollections;
- listCollections.setDbName(nss.db());
- listCollections.setFilter(BSON("name" << nss.coll()));
-
- auto cmdResponse = executeCommandAgainstDatabasePrimary(
- opCtx,
- nss.db(),
- dbInfo,
- CommandHelpers::filterCommandRequestForPassthrough(listCollections.toBSON({})),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- Shard::RetryPolicy::kIdempotent);
- auto remoteResponse = uassertStatusOK(cmdResponse.swResponse);
- uassertStatusOK(getStatusFromCommandResult(remoteResponse.data));
-
- auto firstBatch = remoteResponse.data.firstElement()["firstBatch"].Obj();
- BSONObjIterator it(firstBatch);
-
- uassert(ErrorCodes::NamespaceNotFound,
- str::stream() << "Cannot analyze queries for a non-existing collection",
- it.more());
-
- auto collection = it.next().Obj().getOwned();
- uassert(ErrorCodes::CommandNotSupportedOnView,
- "Cannot analyze queries for a view",
- collection.getStringField("type") != "view");
-
- uassert(6875000,
- str::stream() << "Found multiple collections with the same name '" << nss << "'",
- !it.more());
- } else {
- uassert(ErrorCodes::CommandNotSupportedOnView,
- "Cannot analyze queries for a view",
- !CollectionCatalog::get(opCtx)->lookupView(opCtx, nss));
-
- AutoGetCollectionForReadCommand collection(opCtx, nss);
- uassert(ErrorCodes::NamespaceNotFound,
- str::stream() << "Cannot analyze queries for a non-existing collection",
- collection);
- }
-}
-
class ConfigureQueryAnalyzerCmd : public TypedCommand<ConfigureQueryAnalyzerCmd> {
public:
using Request = ConfigureQueryAnalyzer;
@@ -120,15 +66,90 @@ public:
const auto& nss = ns();
const auto mode = request().getMode();
const auto sampleRate = request().getSampleRate();
- validateCommandOptions(opCtx, nss, mode, sampleRate);
-
- LOGV2(6875002,
- "Configuring query analysis",
- "nss"_attr = nss,
- "mode"_attr = mode,
- "sampleRate"_attr = sampleRate);
-
- return {};
+ uassert(ErrorCodes::InvalidOptions,
+ "Cannot specify 'sampleRate' when 'mode' is \"off\"",
+ mode != QueryAnalyzerModeEnum::kOff || !sampleRate);
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << "'sampleRate' must be greater than 0",
+ mode != QueryAnalyzerModeEnum::kFull || (sampleRate && *sampleRate > 0));
+
+ auto newConfig = request().getConfiguration();
+
+ if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+ auto dbInfo =
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nss.db()));
+
+ ListCollections listCollections;
+ listCollections.setDbName(nss.db());
+ listCollections.setFilter(BSON("name" << nss.coll()));
+
+ auto cmdResponse = executeCommandAgainstDatabasePrimary(
+ opCtx,
+ nss.db(),
+ dbInfo,
+ CommandHelpers::filterCommandRequestForPassthrough(listCollections.toBSON({})),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ Shard::RetryPolicy::kIdempotent);
+ auto remoteResponse = uassertStatusOK(cmdResponse.swResponse);
+ uassertStatusOK(getStatusFromCommandResult(remoteResponse.data));
+
+ auto firstBatch = remoteResponse.data.firstElement()["firstBatch"].Obj();
+ BSONObjIterator it(firstBatch);
+
+ uassert(ErrorCodes::NamespaceNotFound,
+ str::stream() << "Cannot analyze queries for a non-existing collection",
+ it.more());
+
+ auto doc = it.next().Obj().getOwned();
+
+ uassert(ErrorCodes::CommandNotSupportedOnView,
+ "Cannot analyze queries for a view",
+ doc.getStringField("type") != "view");
+ uassert(6875000,
+ str::stream()
+ << "Found multiple collections with the same name '" << nss << "'",
+ !it.more());
+
+ auto listCollRepItem = ListCollectionsReplyItem::parse(
+ IDLParserContext("ListCollectionsReplyItem"), doc);
+ auto info = listCollRepItem.getInfo();
+ invariant(info);
+ auto uuid = info->getUuid();
+
+ QueryAnalyzerDocument qad;
+ qad.setNs(nss);
+ qad.setCollectionUuid(*uuid);
+ qad.setConfiguration(newConfig);
+ // TODO SERVER-69804: Implement start/stop timestamp in config.queryAnalyzers
+ // document.
+ LOGV2(6915001,
+ "Persisting query analyzer configuration",
+ "nss"_attr = nss,
+ "collectionUuid"_attr = uuid,
+ "mode"_attr = mode,
+ "sampleRate"_attr = sampleRate);
+ PersistentTaskStore<QueryAnalyzerDocument> store{
+ NamespaceString::kConfigQueryAnalyzersNamespace};
+ store.upsert(opCtx,
+ BSON(QueryAnalyzerDocument::kCollectionUuidFieldName
+ << qad.getCollectionUuid()),
+ qad.toBSON(),
+ WriteConcerns::kMajorityWriteConcernNoTimeout);
+ } else {
+ uassert(ErrorCodes::CommandNotSupportedOnView,
+ "Cannot analyze queries for a view",
+ !CollectionCatalog::get(opCtx)->lookupView(opCtx, nss));
+
+ AutoGetCollectionForReadCommand collection(opCtx, nss);
+ uassert(ErrorCodes::NamespaceNotFound,
+ str::stream() << "Cannot analyze queries for a non-existing collection",
+ collection);
+ }
+
+ Response response;
+ // TODO SERVER-70019: Make configureQueryAnalyzer return the old configuration.
+ response.setNewConfiguration(newConfig);
+ return response;
}
private:
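
The persistence step above (a PersistentTaskStore upsert keyed by the collection UUID, written with majority write concern) is roughly equivalent to the shell sketch below. This is illustrative only: collUuid, the namespace, and the sampling settings are placeholders, and the server replaces the whole config.queryAnalyzers document rather than issuing a shell update.

    // Sketch of the effect of the mongod-side persistence (not the actual code path).
    db.getSiblingDB("config").queryAnalyzers.replaceOne(
        {_id: collUuid},  // _id is the collection UUID
        {_id: collUuid, ns: "testDb.collection", mode: "full", sampleRate: 0.5},
        {upsert: true, writeConcern: {w: "majority"}});
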
diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp
index 8c95d2074de..449d800368a 100644
--- a/src/mongo/db/s/drop_collection_coordinator.cpp
+++ b/src/mongo/db/s/drop_collection_coordinator.cpp
@@ -182,6 +182,8 @@ ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
"namespace"_attr = nss(),
"sharded"_attr = collIsSharded);
+ sharding_ddl_util::removeQueryAnalyzerMetadataFromConfig(opCtx, nss(), boost::none);
+
if (collIsSharded) {
invariant(_doc.getCollInfo());
const auto& coll = _doc.getCollInfo().value();
diff --git a/src/mongo/db/s/drop_collection_coordinator_document.idl b/src/mongo/db/s/drop_collection_coordinator_document.idl
index a0388729cac..4c71ec33d5c 100644
--- a/src/mongo/db/s/drop_collection_coordinator_document.idl
+++ b/src/mongo/db/s/drop_collection_coordinator_document.idl
@@ -72,6 +72,6 @@ structs:
optional: true
collectionUUID:
type: uuid
- description: "The expected UUID of the collection."
+ description: "The expected UUID of the collection, only set and used in C2C replication."
optional: true
diff --git a/src/mongo/db/s/drop_database_coordinator.cpp b/src/mongo/db/s/drop_database_coordinator.cpp
index 349d9ae4314..a492cee54de 100644
--- a/src/mongo/db/s/drop_database_coordinator.cpp
+++ b/src/mongo/db/s/drop_database_coordinator.cpp
@@ -130,6 +130,9 @@ void DropDatabaseCoordinator::_dropShardedCollection(
sharding_ddl_util::removeCollAndChunksMetadataFromConfig(
opCtx, coll, ShardingCatalogClient::kMajorityWriteConcern);
+ // Remove the collection's query analyzer configuration document, if it exists.
+ sharding_ddl_util::removeQueryAnalyzerMetadataFromConfig(opCtx, nss, coll.getUuid());
+
_updateSession(opCtx);
sharding_ddl_util::removeTagsMetadataFromConfig(opCtx, nss, getCurrentSession());
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index 5beca502d6a..37c0de48493 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -34,6 +34,7 @@
#include "mongo/db/commands/feature_compatibility_version.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/remove_tags_gen.h"
@@ -44,6 +45,7 @@
#include "mongo/db/write_block_bypass.h"
#include "mongo/logv2/log.h"
#include "mongo/rpc/metadata/impersonated_user_metadata.h"
+#include "mongo/s/analyze_shard_key_documents_gen.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_tags.h"
@@ -313,6 +315,40 @@ void removeTagsMetadataFromConfig(OperationContext* opCtx,
str::stream() << "Error removing tags for collection " << nss.toString());
}
+void removeQueryAnalyzerMetadataFromConfig(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const boost::optional<UUID>& uuid) {
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ write_ops::DeleteCommandRequest deleteCmd(NamespaceString::kConfigQueryAnalyzersNamespace);
+ if (uuid) {
+ deleteCmd.setDeletes({[&] {
+ write_ops::DeleteOpEntry entry;
+ entry.setQ(BSON(QueryAnalyzerDocument::kCollectionUuidFieldName << uuid->toString()));
+ entry.setMulti(false);
+ return entry;
+ }()});
+ } else {
+ deleteCmd.setDeletes({[&] {
+ write_ops::DeleteOpEntry entry;
+ entry.setQ(BSON(QueryAnalyzerDocument::kNsFieldName << nss.toString()));
+ entry.setMulti(true);
+ return entry;
+ }()});
+ }
+
+ const auto deleteResult = configShard->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ NamespaceString::kConfigDb.toString(),
+ CommandHelpers::appendMajorityWriteConcern(deleteCmd.toBSON({})),
+ Shard::RetryPolicy::kIdempotent);
+
+ uassertStatusOKWithContext(Shard::CommandResponse::getEffectiveStatus(std::move(deleteResult)),
+ str::stream()
+ << "Error removing query analyzer configurations for collection "
+ << nss.toString());
+}
+
void removeTagsMetadataFromConfig_notIdempotent(OperationContext* opCtx,
const NamespaceString& nss,
const WriteConcernOptions& writeConcern) {
@@ -603,6 +639,5 @@ BSONObj getCriticalSectionReasonForRename(const NamespaceString& from, const Nam
<< "rename"
<< "from" << from.toString() << "to" << to.toString());
}
-
} // namespace sharding_ddl_util
} // namespace mongo
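
In shell terms, the new removeQueryAnalyzerMetadataFromConfig helper issues one of the following deletes against the config server, depending on whether the collection UUID is available. This is a sketch only: collUuid and the namespace are placeholders, and the real code routes the delete through the shard registry with majority write concern.

    // UUID known (e.g. sharded-collection drop): remove the single matching document.
    db.getSiblingDB("config").queryAnalyzers.deleteOne({_id: collUuid});
    // UUID not known: remove any documents recorded for the namespace.
    db.getSiblingDB("config").queryAnalyzers.deleteMany({ns: "testDb.collection"});
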
diff --git a/src/mongo/db/s/sharding_ddl_util.h b/src/mongo/db/s/sharding_ddl_util.h
index 157a65399a1..d65998494a7 100644
--- a/src/mongo/db/s/sharding_ddl_util.h
+++ b/src/mongo/db/s/sharding_ddl_util.h
@@ -91,6 +91,13 @@ bool removeCollAndChunksMetadataFromConfig_notIdempotent(OperationContext* opCtx
const WriteConcernOptions& writeConcern);
/**
+ * Delete the config query analyzer document for the given collection, if it exists.
+ */
+void removeQueryAnalyzerMetadataFromConfig(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const boost::optional<UUID>& uuid);
+
+/**
* Rename sharded collection metadata as part of a renameCollection operation.
*
* - Update namespace associated with tags (FROM -> TO)
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 14ec26da8a1..142287abd0f 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -136,6 +136,7 @@ env.Library(
target='common_s',
source=[
'analyze_shard_key_cmd.idl',
+ 'analyze_shard_key_documents.idl',
'analyze_shard_key_feature_flag.idl',
'analyze_shard_key_server_parameters.idl',
'cannot_implicitly_create_collection_info.cpp',
diff --git a/src/mongo/s/analyze_shard_key_documents.idl b/src/mongo/s/analyze_shard_key_documents.idl
new file mode 100644
index 00000000000..c7254322a9e
--- /dev/null
+++ b/src/mongo/s/analyze_shard_key_documents.idl
@@ -0,0 +1,49 @@
+# Copyright (C) 2022-present MongoDB, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the Server Side Public License, version 1,
+# as published by MongoDB, Inc.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Server Side Public License for more details.
+#
+# You should have received a copy of the Server Side Public License
+# along with this program. If not, see
+# <http://www.mongodb.com/licensing/server-side-public-license>.
+#
+# As a special exception, the copyright holders give permission to link the
+# code of portions of this program with the OpenSSL library under certain
+# conditions as described in each individual source file and distribute
+# linked combinations including the program with the OpenSSL library. You
+# must comply with the Server Side Public License in all respects for
+# all of the code used other than as permitted herein. If you modify file(s)
+# with this exception, you may extend this exception to your version of the
+# file(s), but you are not obligated to do so. If you do not wish to do so,
+# delete this exception statement from your version. If you delete this
+# exception statement from all source files in the program, then also delete
+# it in the license file.
+#
+
+global:
+ cpp_namespace: "mongo"
+
+imports:
+ - "mongo/db/basic_types.idl"
+ - "mongo/s/configure_query_analyzer_cmd.idl"
+
+structs:
+ QueryAnalyzerDocument:
+ description: "Represents settings for query sampling for one collection."
+ fields:
+ _id:
+ type: uuid
+ description: "The UUID of the collection being sampled."
+ cpp_name: collectionUuid
+ ns:
+ type: namespacestring
+ description: "The namespace of the collectiong being sampled."
+ inline_chained_structs: true
+ chained_structs:
+ queryAnalyzerConfiguration: configuration
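
Putting the IDL fields together, a persisted config.queryAnalyzers document looks roughly like the sketch below. The values are illustrative; 'mode' and 'sampleRate' come from the chained queryAnalyzerConfiguration struct, which is inlined into the document.

    {
        _id: UUID("..."),          // collectionUuid
        ns: "testDb.collection",
        mode: "full",
        sampleRate: 0.5
    }
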
diff --git a/src/mongo/s/configure_query_analyzer_cmd.idl b/src/mongo/s/configure_query_analyzer_cmd.idl
index c0564c23d01..6dd0cc194f3 100644
--- a/src/mongo/s/configure_query_analyzer_cmd.idl
+++ b/src/mongo/s/configure_query_analyzer_cmd.idl
@@ -56,15 +56,19 @@ structs:
configureQueryAnalyzerResponse:
description: "The response for the 'configureQueryAnalyzer' command."
+ strict: false
+ inline_chained_structs: true
+ chained_structs:
+ queryAnalyzerConfiguration: newConfiguration
commands:
configureQueryAnalyzer:
description: "The command for setting the query analyzer configuration for a collection."
command_name: configureQueryAnalyzer
- strict: true
+ strict: false
namespace: type
api_version: ""
type: namespacestring
inline_chained_structs: true
chained_structs:
- queryAnalyzerConfiguration: queryAnalyzerConfiguration
+ queryAnalyzerConfiguration: configuration
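
With the response's chained struct inlined, the configuration fields appear at the top level of the reply, which is what assertConfigQueryAnalyzerResponse in the new jstest checks. A successful response would look roughly like this (generic reply fields such as operationTime omitted):

    { ok: 1, mode: "full", sampleRate: 0.5 }
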