summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorArun Banala <arun.banala@10gen.com>2019-09-24 16:50:09 +0000
committerevergreen <evergreen@mongodb.com>2019-09-24 16:50:09 +0000
commit60518c8920064b30df53129ea880dacfcb04be71 (patch)
treeaa9054360e25e2b3505dbced16bfb5922e606bb9
parent7edbbc86d4ac06fddd3ab3482d2985392811032b (diff)
downloadmongo-60518c8920064b30df53129ea880dacfcb04be71.tar.gz
SERVER-29794 Adding a comment to all commands
-rw-r--r--buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml1
-rw-r--r--buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml6
-rw-r--r--buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml1
-rw-r--r--buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml3
-rw-r--r--buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml6
-rw-r--r--jstests/auth/lib/commands_lib.js16
-rw-r--r--jstests/core/comment_field.js161
-rw-r--r--jstests/core/system_profile.js1
-rw-r--r--jstests/noPassthrough/comment_field_passthrough.js61
-rw-r--r--jstests/replsets/read_operations_during_step_down.js10
-rw-r--r--jstests/sharding/comment_field.js506
-rw-r--r--jstests/sharding/explain_agg_read_pref.js11
-rw-r--r--jstests/sharding/mongos_query_comment.js39
-rw-r--r--src/mongo/db/command_generic_argument.cpp5
-rw-r--r--src/mongo/db/commands/explain_cmd.cpp7
-rw-r--r--src/mongo/db/commands/find_cmd.cpp5
-rw-r--r--src/mongo/db/commands/getmore_cmd.cpp7
-rw-r--r--src/mongo/db/commands/map_reduce_agg.cpp9
-rw-r--r--src/mongo/db/curop.cpp35
-rw-r--r--src/mongo/db/curop.h9
-rw-r--r--src/mongo/db/curop_test.cpp4
-rw-r--r--src/mongo/db/introspect.cpp2
-rw-r--r--src/mongo/db/operation_context.h13
-rw-r--r--src/mongo/db/pipeline/aggregation_request.cpp10
-rw-r--r--src/mongo/db/pipeline/aggregation_request.h12
-rw-r--r--src/mongo/db/pipeline/aggregation_request_test.cpp15
-rw-r--r--src/mongo/db/pipeline/expression_context.cpp4
-rw-r--r--src/mongo/db/pipeline/expression_context.h4
-rw-r--r--src/mongo/db/pipeline/expression_context_for_test.h1
-rw-r--r--src/mongo/db/pipeline/mongos_process_interface.cpp1
-rw-r--r--src/mongo/db/query/count_command_test.cpp22
-rw-r--r--src/mongo/db/query/find.cpp6
-rw-r--r--src/mongo/db/query/parsed_distinct.cpp9
-rw-r--r--src/mongo/db/query/parsed_distinct_test.cpp6
-rw-r--r--src/mongo/db/query/query_request.cpp23
-rw-r--r--src/mongo/db/query/query_request.h10
-rw-r--r--src/mongo/db/query/query_request_test.cpp50
-rw-r--r--src/mongo/db/service_entry_point_common.cpp35
-rw-r--r--src/mongo/db/views/resolved_view.cpp1
-rw-r--r--src/mongo/db/views/resolved_view_test.cpp9
-rw-r--r--src/mongo/executor/remote_command_request.cpp7
-rw-r--r--src/mongo/s/commands/cluster_explain_cmd.cpp7
-rw-r--r--src/mongo/s/commands/strategy.cpp11
-rw-r--r--src/mongo/s/query/cluster_find.cpp6
44 files changed, 912 insertions, 255 deletions
diff --git a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml
index a5f45bfc3bb..879a2f77736 100644
--- a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml
@@ -22,6 +22,7 @@ selector:
- jstests/core/views/views_stats.js
# These test run commands using legacy queries, which are not supported on sessions.
+ - jstests/core/comment_field.js
- jstests/core/exhaust.js
- jstests/core/max_time_ms.js
- jstests/core/validate_cmd_ns.js
diff --git a/buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml b/buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml
index 1ee1a7dafae..320ae4d22a2 100644
--- a/buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml
@@ -11,9 +11,6 @@ selector:
- jstests/core/opcounters_write_cmd.js
- jstests/core/read_after_optime.js
- jstests/core/capped_update.js
- # These tests attempt to read from the "system.profile" collection, which may be missing entries
- # if a write was performed on the primary of the replica set instead.
- - jstests/core/*profile*.js
# Tests that fail for Causal Consistency with default injected readPreference 'secondary'
# "TODO SERVER-30384: These tests assume that documents are returned in the same order they are
@@ -71,6 +68,9 @@ selector:
# collStats and dbStats are not causally consistent
- requires_collstats
- requires_dbstats
+ # These tests attempt to read from the "system.profile" collection, which may be missing entries
+ # if a write was performed on the primary of the replica set instead.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml
index 4c709b1d53b..7dc37549ec4 100644
--- a/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml
@@ -8,6 +8,7 @@ selector:
- jstests/core/txns/**/*.js
# These test run commands using legacy queries, which are not supported on sessions.
+ - jstests/core/comment_field.js
- jstests/core/exhaust.js
- jstests/core/max_time_ms.js
- jstests/core/validate_cmd_ns.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
index 1bdf6fad452..81a68edf47b 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
@@ -24,10 +24,13 @@ selector:
- jstests/sharding/prepared_txn_metadata_refresh.js
# Enable when 4.4 becomes last stable
- jstests/sharding/bulk_insert.js
+ - jstests/sharding/comment_field.js
- jstests/sharding/covered_shard_key_indexes.js
+ - jstests/sharding/explain_agg_read_pref.js
- jstests/sharding/explain_exec_stats_on_shards.js
- jstests/sharding/extract_shard_key_values.js
- jstests/sharding/features1.js
+ - jstests/sharding/mongos_query_comment.js
- jstests/sharding/prefix_shard_key.js
- jstests/sharding/refine_collection_shard_key_basic.js
- jstests/sharding/refine_collection_shard_key_jumbo.js
diff --git a/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml
index 64d51fd6053..54c24d8630c 100644
--- a/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml
@@ -11,9 +11,6 @@ selector:
- jstests/core/opcounters_write_cmd.js
- jstests/core/read_after_optime.js
- jstests/core/capped_update.js
- # These tests attempt to read from the "system.profile" collection, which may be missing entries
- # if a write was performed on the primary of the replica set instead.
- - jstests/core/*profile*.js
# The shellkillop.js test spawns a parallel shell without using startParallelShell() and therefore
# doesn't inherit the w="majority" write concern when performing its writes.
- jstests/core/shellkillop.js
@@ -37,6 +34,9 @@ selector:
# "Cowardly refusing to run test with overridden read preference when it reads from a
# non-replicated collection: ..."
- assumes_read_preference_unchanged
+ # These tests attempt to read from the "system.profile" collection, which may be missing entries
+ # if a write was performed on the primary of the replica set instead.
+ - requires_profiling
executor:
archive:
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index 907ea046dd3..4ab37824919 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -2039,6 +2039,7 @@ var authCommandsLib = {
skipSharded: true,
setup: function(db) {
db.x.save({});
+ db.y.drop();
},
teardown: function(db) {
db.x.drop();
@@ -3132,6 +3133,12 @@ var authCommandsLib = {
{
testname: "dataSize_2",
command: {dataSize: secondDbName + ".x"},
+ setup: function(db) {
+ db.x.insert({});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
testcases: [{
runOnDb: secondDbName,
roles: roles_readAny,
@@ -4182,14 +4189,14 @@ var authCommandsLib = {
testname: "killAllSessionsByPattern",
command: {killAllSessionsByPattern: []},
testcases: [
- {runOnDb: adminDbName, roles: roles_hostManager},
+ {runOnDb: adminDbName, roles: roles_hostManager, expectFail: true},
]
},
{
testname: "listCommands",
command: {listCommands: 1},
testcases: [
- {runOnDb: adminDbName, roles: roles_all, privileges: []},
+ {runOnDb: adminDbName, roles: roles_all, privileges: [], expectFail: true},
{runOnDb: firstDbName, roles: roles_all, privileges: []},
{runOnDb: secondDbName, roles: roles_all, privileges: []}
]
@@ -5171,7 +5178,10 @@ var authCommandsLib = {
{
testname: "shutdown",
command: {shutdown: 1},
- testcases: [{runOnDb: firstDbName, roles: {}}, {runOnDb: secondDbName, roles: {}}]
+ testcases: [
+ {runOnDb: firstDbName, roles: {}, expectFail: true},
+ {runOnDb: secondDbName, roles: {}}
+ ]
},
{
testname: "split",
diff --git a/jstests/core/comment_field.js b/jstests/core/comment_field.js
new file mode 100644
index 00000000000..8ca3fddb325
--- /dev/null
+++ b/jstests/core/comment_field.js
@@ -0,0 +1,161 @@
+/**
+ * Test to verify that the 'comment' field set while running a command gets populated in $currentOp
+ * and profiler.
+ * @tags: [assumes_against_mongod_not_mongos, assumes_unsharded_collection,
+ * does_not_support_stepdowns, requires_profiling, assumes_read_concern_unchanged,
+ * assumes_write_concern_unchanged]
+ */
+
+(function() {
+"use strict";
+
+load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
+
+// This test runs manual getMores using different connections, which will not inherit the
+// implicit session of the cursor establishing command.
+TestData.disableImplicitSessions = true;
+
+const testDB = db.getSiblingDB(jsTestName());
+const adminDB = db.getSiblingDB("admin");
+
+const coll = testDB.coll;
+coll.drop();
+
+assert.commandWorked(coll.insert({x: 1, _id: 1}));
+assert.commandWorked(coll.insert({x: 1, _id: 2}));
+
+function setPostCommandFailpoint({mode, options}) {
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "waitAfterCommandFinishesExecution", mode: mode, data: options}));
+}
+
+function restartProfiler() {
+ // Restart profiler.
+ testDB.setProfilingLevel(0);
+ testDB.system.profile.drop();
+ testDB.setProfilingLevel(2);
+}
+
+function runCommentParamTest({coll, command, commentObj}) {
+ const cmdName = Object.keys(command)[0];
+ if (!commentObj) {
+ commentObj = {testName: jsTestName(), commentField: "comment_" + cmdName};
+ command["comment"] = commentObj;
+ }
+ restartProfiler();
+
+ let parallelShell;
+ try {
+ setPostCommandFailpoint(
+ {mode: "alwaysOn", options: {ns: coll.getFullName(), commands: [cmdName]}});
+
+ const parallelFn = `
+ const sourceDB = db.getSiblingDB(jsTestName());
+ let cmdRes = sourceDB.runCommand(${tojson(command)});
+ assert.commandWorked(cmdRes); `;
+
+ // Run the 'command' in a parallel shell.
+ parallelShell = startParallelShell(parallelFn);
+
+ // Wait for the parallel shell to hit the failpoint and verify that the 'comment' field is
+ // present in $currentOp.
+ const filter = {[`command.${cmdName}`]: {$exists: true}, "command.comment": commentObj};
+
+ assert.soon(
+ () => adminDB.aggregate([{$currentOp: {}}, {$match: filter}]).toArray().length == 1,
+ () => tojson(adminDB.aggregate([{$currentOp: {}}]).toArray()));
+
+ } finally {
+ // Ensure that we unset the failpoint, regardless of the outcome of the test.
+ setPostCommandFailpoint({mode: "off", options: {}});
+ }
+ // Wait for the parallel shell to complete.
+ parallelShell();
+
+ // Verify that profile entry has 'comment' field.
+ profilerHasSingleMatchingEntryOrThrow(
+ {profileDB: testDB, filter: {"command.comment": commentObj}});
+}
+
+// Verify that the comment attached to a find command appears in both currentOp and the profiler.
+runCommentParamTest({coll: coll, command: {find: coll.getName(), filter: {}}});
+
+// Verify that the comment attached to an insert command appears in both currentOp and the profiler.
+runCommentParamTest({
+ coll: coll,
+ command: {insert: coll.getName(), documents: [{x: 0.5}, {x: -0.5}], ordered: false}
+});
+
+// Verify that the comment attached to an aggregate command appears in both currentOp and the
+// profiler.
+runCommentParamTest({
+ coll: coll,
+ command: {aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 1}},
+});
+
+// Verify the 'comment' field on the aggregate command is propagated to the subsequent getMore
+// command.
+const comment = [{name: "agg_comment"}];
+const res = testDB.runCommand(
+ {aggregate: coll.getName(), pipeline: [], comment: comment, cursor: {batchSize: 1}});
+runCommentParamTest({
+ coll: coll,
+ command: {getMore: res.cursor.id, collection: coll.getName(), batchSize: 1},
+ commentObj: comment
+});
+
+// Verify the 'comment' field on the getMore command takes precedence over the 'comment' field on
+// the originating command.
+runCommentParamTest(
+ {coll: coll, command: {getMore: res.cursor.id, collection: coll.getName(), batchSize: 1}});
+
+// Verify that comment field gets populated on the profiler for aggregate with explain:true.
+runCommentParamTest({
+ coll: coll,
+ command: {aggregate: coll.getName(), pipeline: [], explain: true},
+});
+
+const innerComment = {
+ name: "innerComment_aggregation"
+};
+
+// Verify that a comment field attached to the inner command of an explain command gets populated in
+// profiler as top level 'comment'.
+runCommentParamTest({
+ coll: coll,
+ command:
+ {explain: {aggregate: coll.getName(), pipeline: [], cursor: {}, comment: innerComment}},
+ commentObj: innerComment
+});
+
+// Verify that when a comment field is attached to the inner command of an explain command and there
+// is another 'comment' field at the top level, top level comment takes precedence.
+runCommentParamTest({
+ coll: coll,
+ command: {explain: {aggregate: coll.getName(), pipeline: [], cursor: {}, comment: innerComment}}
+});
+
+//
+// Tests for Legacy query.
+//
+
+testDB.getMongo().forceReadMode("legacy");
+restartProfiler();
+
+// Verify that $comment meta-operator inside $query is not treated as a 'comment' field.
+assert.eq(testDB.coll.find({$query: {_id: 1, $comment: {a: 1}}}).itcount(), 1);
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: testDB,
+ filter: {"command.filter": {_id: 1, $comment: {a: 1}}, "command.comment": {$exists: false}}
+});
+
+// Verify that $comment at top level is treated as a 'comment' field.
+const expectedComment = {
+ commentName: "legacy_query"
+};
+assert.eq(testDB.coll.find({$query: {_id: 1}, $comment: expectedComment}).itcount(), 1);
+profilerHasSingleMatchingEntryOrThrow(
+ {profileDB: testDB, filter: {"command.comment": expectedComment}});
+
+testDB.getMongo().forceReadMode("commands");
+})();
diff --git a/jstests/core/system_profile.js b/jstests/core/system_profile.js
index 2919f607fb6..2624853cd0a 100644
--- a/jstests/core/system_profile.js
+++ b/jstests/core/system_profile.js
@@ -4,6 +4,7 @@
// requires_collstats,
// requires_non_retryable_commands,
// requires_non_retryable_writes,
+// requires_profiling,
// uses_map_reduce_with_temp_collections,
// ]
diff --git a/jstests/noPassthrough/comment_field_passthrough.js b/jstests/noPassthrough/comment_field_passthrough.js
new file mode 100644
index 00000000000..de41d8a87f5
--- /dev/null
+++ b/jstests/noPassthrough/comment_field_passthrough.js
@@ -0,0 +1,61 @@
+/**
+ * Verify that adding 'comment' field to any command shouldn't cause unexpected failures.
+ */
+(function() {
+
+"use strict";
+
+load("jstests/auth/lib/commands_lib.js"); // Provides an exhaustive list of commands.
+
+const tests = authCommandsLib.tests;
+
+// The following commands require additional start up configuration and hence need to be skipped.
+const blacklistedTests =
+ ["startRecordingTraffic", "stopRecordingTraffic", "addShardToZone", "removeShardFromZone"];
+
+function runTests(tests, conn, impls) {
+ const firstDb = conn.getDB(firstDbName);
+ const secondDb = conn.getDB(secondDbName);
+ const isMongos = authCommandsLib.isMongos(conn);
+ for (const test of tests) {
+ if (!blacklistedTests.includes(test.testname)) {
+ authCommandsLib.runOneTest(conn, test, impls, isMongos);
+ }
+ }
+}
+
+const impls = {
+ runOneTest: function(conn, testObj) {
+ const testCase = testObj.testcases[0];
+
+ const runOnDb = conn.getDB(testCase.runOnDb);
+ const state = testObj.setup && testObj.setup(runOnDb);
+
+ const command = (typeof (testObj.command) === "function")
+ ? testObj.command(state, testCase.commandArgs)
+ : testObj.command;
+ command['comment'] = "comment";
+ const res = runOnDb.runCommand(command);
+ assert(res.ok == 1 || testCase.expectFail || res.code == ErrorCodes.CommandNotSupported,
+ tojson(res));
+
+ if (testObj.teardown) {
+ testObj.teardown(runOnDb, res);
+ }
+ }
+};
+
+let conn = MongoRunner.runMongod();
+
+// Test with standalone mongod.
+runTests(tests, conn, impls);
+
+MongoRunner.stopMongod(conn);
+
+conn = new ShardingTest({shards: 1, mongos: 2});
+
+// Test with a sharded cluster.
+runTests(tests, conn, impls);
+
+conn.stop();
+})();
diff --git a/jstests/replsets/read_operations_during_step_down.js b/jstests/replsets/read_operations_during_step_down.js
index d6bdd779be3..29269b24db6 100644
--- a/jstests/replsets/read_operations_during_step_down.js
+++ b/jstests/replsets/read_operations_during_step_down.js
@@ -78,11 +78,11 @@ const joinStepDownThread = startParallelShell(() => {
// Wait until the step down has started to kill user operations.
checkLog.contains(primary, "Starting to kill user operations");
-// Enable "waitAfterReadCommandFinishesExecution" fail point to make sure the find and get more
+// Enable "waitAfterCommandFinishesExecution" fail point to make sure the find and get more
// commands on database 'test' does not complete before step down.
assert.commandWorked(primaryAdmin.runCommand({
- configureFailPoint: "waitAfterReadCommandFinishesExecution",
- data: {db: dbName},
+ configureFailPoint: "waitAfterCommandFinishesExecution",
+ data: {ns: primaryColl.getFullName(), commands: ["find", "getMore"]},
mode: "alwaysOn"
}));
@@ -96,11 +96,11 @@ assert.commandWorked(primaryAdmin.runCommand(
joinStepDownThread();
rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-// We don't want to check if we have reached "waitAfterReadCommandFinishesExecution" fail point
+// We don't want to check if we have reached "waitAfterCommandFinishesExecution" fail point
// because we already know that the primary has stepped down successfully. This implies that
// the find and get more commands are still running even after the node stepped down.
assert.commandWorked(primaryAdmin.runCommand(
- {configureFailPoint: "waitAfterReadCommandFinishesExecution", mode: "off"}));
+ {configureFailPoint: "waitAfterCommandFinishesExecution", mode: "off"}));
// Wait for find & getmore thread to join.
joinGetMoreThread();
diff --git a/jstests/sharding/comment_field.js b/jstests/sharding/comment_field.js
new file mode 100644
index 00000000000..e90b014f18b
--- /dev/null
+++ b/jstests/sharding/comment_field.js
@@ -0,0 +1,506 @@
+/**
+ * Test to verify that the 'comment' field set while running a command gets populated in $currentOp
+ * and profiler. This test also verifies that for a sharded collection, the 'comment' field gets
+ * passed on from mongos to the respective shards.
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/check_log.js"); // For checkLog.* helper functions.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
+
+// This test runs manual getMores using different connections, which will not inherit the
+// implicit session of the cursor establishing command.
+TestData.disableImplicitSessions = true;
+
+const st = new ShardingTest({shards: 2});
+const testDB = st.s.getDB(jsTestName());
+const shardedColl = testDB.coll;
+const unshardedColl = testDB.unsharded;
+const shard0DB = st.shard0.getDB(jsTestName());
+const shard1DB = st.shard1.getDB(jsTestName());
+
+assert.commandWorked(st.s0.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
+
+// Shard shardedColl on {x:1}, split it at {x:0}, and move chunk {x:1} to shard1.
+st.shardColl(shardedColl, {x: 1}, {x: 0}, {x: 1});
+
+// Insert one document on each shard.
+assert.commandWorked(shardedColl.insert({x: 1, _id: 1}));
+assert.commandWorked(shardedColl.insert({x: -1, _id: 0}));
+
+assert.commandWorked(unshardedColl.insert({x: 1, _id: 1}));
+
+// Changes the 'slowms' threshold to -1ms. This will log all the commands.
+assert.commandWorked(testDB.adminCommand({profile: 0, slowms: -1}));
+
+/**
+ * Verifies that there are 'expectedNumOccurrences' log lines that contain every element of
+ * 'inputArray'.
+ */
+function verifyLogContains(connections, inputArray, expectedNumOccurrences) {
+ let numOccurrences = 0;
+ for (let conn of connections) {
+ const logs = checkLog.getGlobalLog(conn);
+ for (let logMsg of logs) {
+ let numMatches = 0;
+ for (let input of inputArray) {
+ numMatches += logMsg.includes(input) ? 1 : 0;
+ }
+ numOccurrences += ((numMatches == inputArray.length) ? 1 : 0);
+ }
+ }
+ assert.eq(expectedNumOccurrences, numOccurrences);
+}
+
+function setPostCommandFailpointOnShards({mode, options}) {
+ FixtureHelpers.runCommandOnEachPrimary({
+ db: testDB.getSiblingDB("admin"),
+ cmdObj: {configureFailPoint: "waitAfterCommandFinishesExecution", data: options, mode: mode}
+ });
+}
+
+function runCommentParamTest(
+ {coll, command, expectedRunningOps, commentObj, cmdName, parallelFunction}) {
+ if (!cmdName) {
+ cmdName = Object.keys(command)[0];
+ }
+ setPostCommandFailpointOnShards(
+ {mode: "alwaysOn", options: {ns: coll.getFullName(), commands: [cmdName]}});
+
+ // Restart profiler.
+ for (let shardDB of [shard0DB, shard1DB]) {
+ shardDB.setProfilingLevel(0);
+ shardDB.system.profile.drop();
+
+ // Enable profiling and changes the 'slowms' threshold to -1ms. This will log all the
+ // commands.
+ shardDB.setProfilingLevel(2, -1);
+ }
+
+ if (!commentObj) {
+ commentObj = {
+ testName: jsTestName(),
+ commentField: "comment_" + cmdName,
+ uuid: UUID().toString()
+ };
+ command["comment"] = commentObj;
+ }
+
+ // If 'parallelFunction' is passed as a parameter, do not use the 'runCommand' to execute the
+ // command.
+ if (!parallelFunction) {
+ parallelFunction = `const sourceDB = db.getSiblingDB(jsTestName());
+ let cmdRes = sourceDB.runCommand(${tojson(command)});
+ assert.commandWorked(cmdRes); `;
+ }
+
+ // Run the 'command' in a parallel shell.
+ let outShell = startParallelShell(parallelFunction, st.s.port);
+
+ // Wait for the parallel shell to hit the failpoint and verify that the 'comment' field is
+ // present in $currentOp.
+ const filter = {
+ [`command.${cmdName}`]:
+ (cmdName == "explain" || cmdName == "getMore") ? {$exists: true} : coll.getName(),
+ "command.comment": commentObj
+ };
+ assert.soon(
+ () => testDB.getSiblingDB("admin")
+ .aggregate([{$currentOp: {localOps: false}}, {$match: filter}])
+ .toArray()
+ .length == expectedRunningOps,
+ () => tojson(
+ testDB.getSiblingDB("admin").aggregate([{$currentOp: {localOps: false}}]).toArray()));
+
+ // Verify that MongoS also shows the comment field in $currentOp.
+ assert.eq(testDB.getSiblingDB("admin")
+ .aggregate([{$currentOp: {localOps: true}}, {$match: filter}])
+ .toArray()
+ .length,
+ 1);
+
+ // Unset the failpoint to unblock the command and join with the parallel shell.
+ setPostCommandFailpointOnShards({mode: "off", options: {}});
+ outShell();
+
+ // For 'update' and 'delete' commands log lines and profiler entries are added for each
+ // sub-operation.
+ let expectedAdditionalEntries = (cmdName === "update") ? command["updates"].length : 0;
+ expectedAdditionalEntries += (cmdName === "delete") ? command["deletes"].length : 0;
+
+    // Run the 'checkLog' only for commands with uuid so that we know the log line belongs to
+    // the current operation.
+ if (commentObj["uuid"]) {
+ let expectedLogLines = expectedRunningOps + 1; // +1 for log line on mongos.
+ expectedLogLines += expectedAdditionalEntries;
+ verifyLogContains(
+ [testDB, shard0DB, shard1DB],
+ // Verify that a field with 'comment' exists in the same line as the command.
+ [
+ ", comment: ",
+ checkLog.formatAsLogLine(commentObj),
+ 'appName: "MongoDB Shell" command: ' + ((cmdName === "getMore") ? cmdName : "")
+ ],
+ expectedLogLines);
+ }
+
+ // Reset log level to zero.
+ for (let shardDB of [shard0DB, shard1DB]) {
+ shardDB.getSiblingDB("admin").setLogLevel(0);
+ }
+
+ // Verify that profile entry has 'comment' field.
+ const profileFilter = {"command.comment": commentObj};
+ assert.eq(shard0DB.system.profile.find(profileFilter).itcount() +
+ shard1DB.system.profile.find(profileFilter).itcount(),
+ (expectedAdditionalEntries > 0) ? expectedAdditionalEntries : expectedRunningOps);
+}
+
+// For find command on a sharded collection, when all the shards are targetted.
+runCommentParamTest(
+ {coll: shardedColl, command: {find: shardedColl.getName(), filter: {}}, expectedRunningOps: 2});
+
+// For find command on a sharded collection, when a single shard is targetted.
+runCommentParamTest({
+ coll: shardedColl,
+ command: {find: shardedColl.getName(), filter: {x: 3}},
+ expectedRunningOps: 1
+});
+
+// For find command on an unsharded collection. Query targets only the primary shard.
+runCommentParamTest({
+ coll: unshardedColl,
+ command: {find: unshardedColl.getName(), filter: {}},
+ expectedRunningOps: 1
+});
+
+// For insert command on a sharded collection, where all the shards are targetted.
+runCommentParamTest({
+ coll: shardedColl,
+ command: {insert: shardedColl.getName(), documents: [{x: 0.5}, {x: -0.5}], ordered: false},
+ expectedRunningOps: 2
+});
+
+// For insert command on a sharded collection, where a single shard is targetted.
+runCommentParamTest({
+ coll: shardedColl,
+ command: {insert: shardedColl.getName(), documents: [{x: 4}]},
+ expectedRunningOps: 1
+});
+
+// For insert command on an unsharded collection, where only primary shard is targetted.
+runCommentParamTest({
+ coll: unshardedColl,
+ command: {insert: unshardedColl.getName(), documents: [{x: 3}]},
+ expectedRunningOps: 1
+});
+
+// For update command on a sharded collection, when all the shards are targetted.
+runCommentParamTest({
+ coll: shardedColl,
+ command: {
+ update: shardedColl.getName(),
+ updates: [
+ {q: {x: 3}, u: {$set: {a: 1}}},
+ {q: {x: 2}, u: {$set: {a: 1}}},
+ {q: {x: -3}, u: {$set: {a: 1}}}
+ ],
+ ordered: false
+ },
+ expectedRunningOps: 2
+});
+
+// For update command on a sharded collection, where a single shard is targetted.
+runCommentParamTest({
+ coll: shardedColl,
+ command: {update: shardedColl.getName(), updates: [{q: {x: 3}, u: {x: 3, a: 1}}]},
+ expectedRunningOps: 1
+});
+
+// For update command on an unsharded collection, where only primary shard is targetted.
+runCommentParamTest({
+ coll: unshardedColl,
+ command: {
+ update: unshardedColl.getName(),
+ updates: [{q: {x: 1}, u: {x: 1}}, {q: {x: -1}, u: {x: -1, a: 1}}]
+ },
+ expectedRunningOps: 1
+});
+
+// For delete command on a sharded collection, where all the shards are targetted.
+runCommentParamTest({
+ coll: shardedColl,
+ command: {
+ delete: shardedColl.getName(),
+ deletes: [{q: {x: 3}, limit: 0}, {q: {x: -3}, limit: 0}, {q: {x: -1}, limit: 0}],
+ ordered: false
+ },
+ expectedRunningOps: 2
+});
+
+// For delete command on a sharded collection, where a single shard is targetted.
+runCommentParamTest({
+ coll: shardedColl,
+ command:
+ {delete: shardedColl.getName(), deletes: [{q: {x: 1}, limit: 0}, {q: {x: 3}, limit: 0}]},
+ expectedRunningOps: 1
+});
+
+// For delete command on an unsharded collection, where only primary shard is targetted.
+runCommentParamTest({
+ coll: unshardedColl,
+ command: {
+ delete: unshardedColl.getName(),
+ deletes: [{q: {_id: 1}, limit: 0}, {q: {_id: 0}, limit: 0}]
+ },
+ expectedRunningOps: 1
+});
+
+// For createIndexes command on a sharded collection, where all the shards are targetted.
+runCommentParamTest({
+ coll: shardedColl,
+ command:
+ {createIndexes: shardedColl.getName(), indexes: [{name: "newField_1", key: {newField: 1}}]},
+ expectedRunningOps: 2
+});
+
+// For createIndexes command on an unsharded collection, where only primary shard is targetted.
+runCommentParamTest({
+ coll: unshardedColl,
+ command: {
+ createIndexes: unshardedColl.getName(),
+ indexes: [{name: "newField_1", key: {newField: 1}}]
+ },
+ expectedRunningOps: 1
+});
+
+//
+// Tests for 'explain' on a sharded collection.
+//
+
+let innerComment = {name: "comment_field_explain", type: "innerComment"};
+let outerComment = {name: "comment_field_explain", type: "outerComment", uuid: UUID().toString()};
+
+// Verify that comment field gets populated on the profiler for the case where explain is within
+// aggregate.
+runCommentParamTest({
+ coll: shardedColl,
+ command: {
+ aggregate: shardedColl.getName(),
+ pipeline: [],
+ comment: innerComment,
+ cursor: {},
+ explain: true
+ },
+ expectedRunningOps: 2,
+ commentObj: innerComment,
+ cmdName: "explain"
+});
+
+// Verify that a comment field attached to an inner command of explain get passed on to the shards
+// and is visible on currentOp and profiler entry.
+runCommentParamTest({
+ coll: shardedColl,
+ command: {
+ explain: {aggregate: shardedColl.getName(), pipeline: [], comment: innerComment, cursor: {}}
+ },
+ expectedRunningOps: 2,
+ commentObj: innerComment
+});
+
+// Verify that when a comment field is attached to an inner command of explain and there is another
+// 'comment' field at the top level, top level comment gets the precedence.
+runCommentParamTest({
+ coll: shardedColl,
+ command: {
+ explain:
+ {aggregate: shardedColl.getName(), pipeline: [], comment: innerComment, cursor: {}},
+ comment: outerComment
+ },
+ expectedRunningOps: 2,
+ commentObj: outerComment
+});
+
+//
+// Tests for 'explain' on an unsharded collection.
+//
+
+innerComment = {
+ name: "comment_field_explain",
+ type: "innerComment_unsharded"
+};
+outerComment = {
+ name: "comment_field_explain",
+ type: "outerComment_unsharded",
+ uuid: UUID().toString()
+};
+
+// Verify that comment field gets populated on the profiler for the case where explain is within
+// aggregate.
+runCommentParamTest({
+ coll: unshardedColl,
+ command: {
+ aggregate: unshardedColl.getName(),
+ pipeline: [],
+ comment: innerComment,
+ cursor: {},
+ explain: true
+ },
+ expectedRunningOps: 1,
+ commentObj: innerComment,
+ cmdName: "explain"
+});
+
+// Verify that a comment field attached to an inner command of explain get passed on to the shard
+// and is visible on currentOp and profiler entry.
+runCommentParamTest({
+ coll: unshardedColl,
+ command: {
+ explain:
+ {aggregate: unshardedColl.getName(), pipeline: [], comment: innerComment, cursor: {}}
+ },
+ expectedRunningOps: 1,
+ commentObj: innerComment
+});
+
+// Verify that when a comment field is attached to an inner command of explain and there is another
+// 'comment' field at the top level, top level comment gets the precedence.
+runCommentParamTest({
+ coll: unshardedColl,
+ command: {
+ explain:
+ {aggregate: unshardedColl.getName(), pipeline: [], comment: innerComment, cursor: {}},
+ comment: outerComment
+ },
+ expectedRunningOps: 1,
+ commentObj: outerComment
+});
+
+//
+// Tests for 'getMore' comment propagation on a sharded collection.
+//
+
+// Verify the 'comment' field on the aggregate command is propagated to the subsequent getMore
+// command.
+let comment = {comment: "aggregate_comment", commandName: "aggregate", uuid: UUID().toString()};
+let res = assert.commandWorked(
+ testDB.runCommand({aggregate: "coll", pipeline: [], comment: comment, cursor: {batchSize: 0}}));
+runCommentParamTest({
+ coll: shardedColl,
+ command: {getMore: res.cursor.id, collection: shardedColl.getName()},
+ expectedRunningOps: 2,
+ commentObj: comment
+});
+
+// Verify the 'comment' field on the getMore command takes precedence over the 'comment' field on
+// the originating command.
+res = assert.commandWorked(
+ testDB.runCommand({aggregate: "coll", pipeline: [], comment: comment, cursor: {batchSize: 0}}));
+comment = {
+ comment: "getmore_comment",
+ commandName: "getmore",
+ uuid: UUID().toString()
+};
+runCommentParamTest({
+ coll: shardedColl,
+ command: {getMore: res.cursor.id, collection: shardedColl.getName(), comment: comment},
+ expectedRunningOps: 2,
+ commentObj: comment
+});
+
+// Verify the 'comment' field on the find command is propagated to the subsequent getMore command.
+comment = {
+ comment: "find_comment",
+ commandName: "find",
+ uuid: UUID().toString()
+};
+res = assert.commandWorked(
+ testDB.runCommand({find: shardedColl.getName(), filter: {}, comment: comment, batchSize: 0}));
+runCommentParamTest({
+ coll: shardedColl,
+ command: {getMore: res.cursor.id, collection: shardedColl.getName()},
+ expectedRunningOps: 2,
+ commentObj: comment
+});
+
+// Verify the 'comment' field on the getMore command takes precedence over the 'comment' field on
+// the originating command.
+res = assert.commandWorked(
+ testDB.runCommand({find: shardedColl.getName(), filter: {}, comment: comment, batchSize: 0}));
+comment = {
+ comment: "getmore_comment",
+ commandName: "getmore",
+ uuid: UUID().toString()
+};
+runCommentParamTest({
+ coll: shardedColl,
+ command: {getMore: res.cursor.id, collection: shardedColl.getName(), comment: comment},
+ expectedRunningOps: 2,
+ commentObj: comment
+});
+
+//
+// Tests for 'getMore' comment propagation on an unsharded collection.
+//
+
+// Verify the 'comment' field on the aggregate command is propagated to the subsequent getMore
+// command.
+comment = {
+ comment: "unsharded_aggregate_comment",
+ commandName: "aggregate",
+ uuid: UUID().toString()
+};
+res = assert.commandWorked(testDB.runCommand(
+ {aggregate: unshardedColl.getName(), pipeline: [], comment: comment, cursor: {batchSize: 0}}));
+runCommentParamTest({
+ coll: unshardedColl,
+ command: {getMore: res.cursor.id, collection: unshardedColl.getName(), batchSize: 1},
+ expectedRunningOps: 1,
+ commentObj: comment
+});
+
+// Verify the 'comment' field on the getMore command takes precedence over the 'comment' field on
+// the originating command.
+res = assert.commandWorked(testDB.runCommand(
+ {aggregate: unshardedColl.getName(), pipeline: [], comment: comment, cursor: {batchSize: 0}}));
+comment = {
+ comment: "unsharded_getmore_comment",
+ commandName: "getmore",
+ uuid: UUID().toString()
+};
+runCommentParamTest({
+ coll: unshardedColl,
+ command: {
+ getMore: res.cursor.id,
+ collection: unshardedColl.getName(),
+ comment: comment,
+ batchSize: 1
+ },
+ expectedRunningOps: 1,
+ commentObj: comment
+});
+
+//
+// Tests for Legacy query.
+//
+
+// Verify that $comment at top level is treated as a 'comment' field.
+const legacyComment = {
+ testName: jsTestName(),
+ commentField: "Legacy_find_comment"
+};
+runCommentParamTest({
+ coll: shardedColl,
+ expectedRunningOps: 2,
+ cmdName: "find",
+ commentObj: legacyComment,
+ parallelFunction: `const sourceDB = db.getSiblingDB(jsTestName());
+ sourceDB.getMongo().forceReadMode("legacy");
+ sourceDB.coll.find({$query: {a: 1}, $comment: ${tojson(legacyComment)}});`
+});
+
+st.stop();
+})();
diff --git a/jstests/sharding/explain_agg_read_pref.js b/jstests/sharding/explain_agg_read_pref.js
index 63e4f3362f7..57e5f639b1f 100644
--- a/jstests/sharding/explain_agg_read_pref.js
+++ b/jstests/sharding/explain_agg_read_pref.js
@@ -86,7 +86,7 @@ function confirmReadPreference(primary, secondary) {
filter: {
"ns": coll.getFullName(),
"command.explain.aggregate": coll.getName(),
- "command.explain.comment": comment,
+ "command.comment": comment,
"command.$readPreference.mode": pref == 'primary' ? null : pref,
"errMsg": {"$exists": false}
}
@@ -99,12 +99,7 @@ function confirmReadPreference(primary, secondary) {
comment = name + "_explain_wrapped_agg";
assert.commandWorked(mongosDB.runCommand({
$query: {
- explain: {
- aggregate: "coll",
- pipeline: [],
- comment: comment,
- cursor: {},
- }
+ explain: {aggregate: "coll", pipeline: [], cursor: {}, comment: comment},
},
$readPreference: {mode: pref, tags: tagSets}
}));
@@ -122,7 +117,7 @@ function confirmReadPreference(primary, secondary) {
filter: {
"ns": coll.getFullName(),
"command.explain.aggregate": coll.getName(),
- "command.explain.comment": comment,
+ "command.comment": comment,
"command.$readPreference.mode": pref == 'primary' ? null : pref,
"errMsg": {"$exists": false}
}
diff --git a/jstests/sharding/mongos_query_comment.js b/jstests/sharding/mongos_query_comment.js
index a17500758ea..28fb8cb9c05 100644
--- a/jstests/sharding/mongos_query_comment.js
+++ b/jstests/sharding/mongos_query_comment.js
@@ -41,19 +41,37 @@ const profiler = shardDB.system.profile;
mongosDB.getMongo().forceReadMode("legacy");
shardDB.getMongo().forceReadMode("legacy");
+// TEST CASE: A legacy $comment field embedded in the query predicate (with no $query wrapper) is
+// propagated to the shards via mongos as part of the filter, not treated as a 'comment' field.
+assert.eq(mongosColl.find({a: 1, $comment: "TEST"}).itcount(), 1);
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardDB,
+ filter: {
+ op: "query",
+ ns: collNS,
+ "command.filter": {a: 1, $comment: "TEST"},
+ "command.comment": {$exists: false}
+ }
+});
+
// TEST CASE: A legacy string $comment meta-operator is propagated to the shards via mongos.
assert.eq(mongosColl.find({$query: {a: 1}, $comment: "TEST"}).itcount(), 1);
profilerHasSingleMatchingEntryOrThrow(
{profileDB: shardDB, filter: {op: "query", ns: collNS, "command.comment": "TEST"}});
-// TEST CASE: A legacy BSONObj $comment is converted to a string and propagated via mongos.
+// TEST CASE: A legacy BSONObj $comment is propagated via mongos.
assert.eq(mongosColl.find({$query: {a: 1}, $comment: {c: 2, d: {e: "TEST"}}}).itcount(), 1);
profilerHasSingleMatchingEntryOrThrow({
profileDB: shardDB,
- filter: {op: "query", ns: collNS, "command.comment": "{ c: 2.0, d: { e: \"TEST\" } }"}
+ filter: {
+ op: "query",
+ ns: collNS,
+ "command.comment": {c: 2, d: {e: "TEST"}},
+ "command.filter": {a: 1}
+ }
});
-// TEST CASE: Legacy BSONObj $comment is NOT converted to a string when issued on the mongod.
+// TEST CASE: A legacy BSONObj $comment is propagated when issued directly on the mongod.
assert.eq(shardColl.find({$query: {a: 1}, $comment: {c: 3, d: {e: "TEST"}}}).itcount(), 1);
profilerHasSingleMatchingEntryOrThrow({
profileDB: shardDB,
@@ -66,7 +84,7 @@ profilerHasSingleMatchingEntryOrThrow({
mongosDB.getMongo().forceReadMode("commands");
shardDB.getMongo().forceReadMode("commands");
-// TEST CASE: Verify that string find.comment and non-string find.filter.$comment propagate.
+// TEST CASE: Verify that find.comment and non-string find.filter.$comment propagate.
assert.eq(mongosColl.find({a: 1, $comment: {b: "TEST"}}).comment("TEST").itcount(), 1);
profilerHasSingleMatchingEntryOrThrow({
profileDB: shardDB,
@@ -74,11 +92,14 @@ profilerHasSingleMatchingEntryOrThrow({
{op: "query", ns: collNS, "command.comment": "TEST", "command.filter.$comment": {b: "TEST"}}
});
-// TEST CASE: Verify that find command with a non-string comment parameter is rejected.
-assert.commandFailedWithCode(
- mongosDB.runCommand({"find": mongosColl.getName(), "filter": {a: 1}, "comment": {b: "TEST"}}),
- 9,
- "Non-string find command comment did not return an error.");
+// TEST CASE: Verify that a find command with a non-string 'comment' parameter is propagated.
+assert.commandWorked(mongosDB.runCommand(
+ {"find": mongosColl.getName(), "filter": {a: 1}, "comment": {b: "TEST_BSONOBJ"}}));
+
+profilerHasSingleMatchingEntryOrThrow({
+ profileDB: shardDB,
+ filter: {op: "query", ns: collNS, "command.comment": {b: "TEST_BSONOBJ"}}
+});
st.stop();
})(); \ No newline at end of file
diff --git a/src/mongo/db/command_generic_argument.cpp b/src/mongo/db/command_generic_argument.cpp
index 15f0a7d8227..5969b9e1e0f 100644
--- a/src/mongo/db/command_generic_argument.cpp
+++ b/src/mongo/db/command_generic_argument.cpp
@@ -52,7 +52,7 @@ struct SpecialArgRecord {
// If that changes, it should be added. When you add to this list, consider whether you
// should also change the filterCommandRequestForPassthrough() function.
// clang-format off
-static constexpr std::array<SpecialArgRecord, 26> specials{{
+static constexpr std::array<SpecialArgRecord, 27> specials{{
// /-isGeneric
// | /-stripFromRequest
// | | /-stripFromReply
@@ -81,7 +81,8 @@ static constexpr std::array<SpecialArgRecord, 26> specials{{
{"$gleStats"_sd, 0, 0, 1},
{"operationTime"_sd, 0, 0, 1},
{"lastCommittedOpTime"_sd, 0, 0, 1},
- {"readOnly"_sd, 0, 0, 1}}};
+ {"readOnly"_sd, 0, 0, 1},
+ {"comment"_sd, 1, 0, 0}}};
// clang-format on
template <bool SpecialArgRecord::*pmo>
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index 277fb80b3c7..78fa4d67217 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -150,6 +150,13 @@ std::unique_ptr<CommandInvocation> CmdExplain::parse(OperationContext* opCtx,
"explain command requires a nested object",
cmdObj.firstElement().type() == Object);
auto explainedObj = cmdObj.firstElement().Obj();
+
+ // Extract 'comment' field from the 'explainedObj' only if there is no top-level comment.
+ auto commentField = explainedObj["comment"];
+ if (!opCtx->getComment() && commentField) {
+ opCtx->setComment(commentField.wrap());
+ }
+
if (auto innerDb = explainedObj["$db"]) {
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Mismatched $db in explain command. Expected " << dbname
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 1694fe28f97..4800ed178d9 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -101,9 +101,8 @@ boost::intrusive_ptr<ExpressionContext> makeExpressionContext(OperationContext*
auto expCtx =
make_intrusive<ExpressionContext>(opCtx,
boost::none, // explain
- StringData{queryRequest.getComment()},
- false, // fromMongos
- false, // needsMerge
+ false, // fromMongos
+ false, // needsMerge
queryRequest.allowDiskUse(),
false, // bypassDocumentValidation
queryRequest.nss(),
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index cceb0f15fa8..345ac9f0049 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -200,6 +200,13 @@ void setUpOperationContextStateForGetMore(OperationContext* opCtx,
applyCursorReadConcern(opCtx, cursor.getReadConcernArgs());
opCtx->setWriteConcern(cursor.getWriteConcernOptions());
setUpOperationDeadline(opCtx, cursor, request, disableAwaitDataFailpointActive);
+
+ // If the originating command had a 'comment' field, we extract it and set it on opCtx. Note
+ // that if the 'getMore' command itself has a 'comment' field, we give precedence to it.
+ auto comment = cursor.getOriginatingCommandObj()["comment"];
+ if (!opCtx->getComment() && comment) {
+ opCtx->setComment(comment.wrap());
+ }
}
/**
diff --git a/src/mongo/db/commands/map_reduce_agg.cpp b/src/mongo/db/commands/map_reduce_agg.cpp
index 774237627fa..c8ede42d18d 100644
--- a/src/mongo/db/commands/map_reduce_agg.cpp
+++ b/src/mongo/db/commands/map_reduce_agg.cpp
@@ -211,11 +211,10 @@ auto makeExpressionContext(OperationContext* opCtx, const MapReduce& parsedMr) {
// the $group stage of the translated pipeline to spill to disk.
return make_intrusive<ExpressionContext>(
opCtx,
- boost::none, // explain
- std::string{}, // comment
- false, // fromMongos
- false, // needsmerge
- true, // allowDiskUse
+ boost::none, // explain
+ false, // fromMongos
+ false, // needsmerge
+ true, // allowDiskUse
parsedMr.getBypassDocumentValidation().get_value_or(false),
parsedMr.getNamespace(),
parsedMr.getCollation().get_value_or(BSONObj()),
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 945dbfc9bdb..cf08c38548d 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -302,7 +302,7 @@ void CurOp::reportCurrentOpForClient(OperationContext* opCtx,
lsid->serialize(&lsidBuilder);
}
- CurOp::get(clientOpCtx)->reportState(infoBuilder, truncateOps);
+ CurOp::get(clientOpCtx)->reportState(clientOpCtx, infoBuilder, truncateOps);
}
if (auto diagnostic = DiagnosticInfo::get(*client)) {
@@ -458,11 +458,7 @@ bool CurOp::completeAndLogOperation(OperationContext* opCtx,
// Gets the time spent blocked on prepare conflicts.
_debug.prepareConflictDurationMicros = durationCount<Microseconds>(
PrepareConflictTracker::get(opCtx).getPrepareConflictDuration());
-
- log(component) << _debug.report(client,
- *this,
- (lockerInfo ? &lockerInfo->stats : nullptr),
- opCtx->lockState()->getFlowControlStats());
+ log(component) << _debug.report(opCtx, (lockerInfo ? &lockerInfo->stats : nullptr));
}
// Return 'true' if this operation should also be added to the profiler.
@@ -488,6 +484,11 @@ Command::ReadWriteType CurOp::getReadWriteType() const {
namespace {
+BSONObj appendCommentField(OperationContext* opCtx, const BSONObj& cmdObj) {
+ return opCtx->getComment() && !cmdObj["comment"] ? cmdObj.addField(*opCtx->getComment())
+ : cmdObj;
+}
+
/**
* Appends {<name>: obj} to the provided builder. If obj is greater than maxSize, appends a string
* summary of obj as { <name>: { $truncated: "obj" } }. If a comment parameter is present, add it to
@@ -561,7 +562,7 @@ BSONObj CurOp::truncateAndSerializeGenericCursor(GenericCursor* cursor,
return serialized;
}
-void CurOp::reportState(BSONObjBuilder* builder, bool truncateOps) {
+void CurOp::reportState(OperationContext* opCtx, BSONObjBuilder* builder, bool truncateOps) {
if (_start) {
builder->append("secs_running", durationCount<Seconds>(elapsedTimeTotal()));
builder->append("microsecs_running", durationCount<Microseconds>(elapsedTimeTotal()));
@@ -576,7 +577,9 @@ void CurOp::reportState(BSONObjBuilder* builder, bool truncateOps) {
// is true, limit the size of each op to 1000 bytes. Otherwise, do not truncate.
const boost::optional<size_t> maxQuerySize{truncateOps, 1000};
- appendAsObjOrString("command", _opDescription, maxQuerySize, builder);
+ appendAsObjOrString(
+ "command", appendCommentField(opCtx, _opDescription), maxQuerySize, builder);
+
if (!_planSummary.empty()) {
builder->append("planSummary", _planSummary);
@@ -635,10 +638,10 @@ StringData getProtoString(int op) {
if (y) \
s << " " x ":" << (*y)
-string OpDebug::report(Client* client,
- const CurOp& curop,
- const SingleThreadedLockStats* lockStats,
- FlowControlTicketholder::CurOp flowControlStats) const {
+string OpDebug::report(OperationContext* opCtx, const SingleThreadedLockStats* lockStats) const {
+ Client* client = opCtx->getClient();
+ auto& curop = *CurOp::get(opCtx);
+ auto flowControlStats = opCtx->lockState()->getFlowControlStats();
StringBuilder s;
if (iscommand)
s << "command ";
@@ -655,7 +658,7 @@ string OpDebug::report(Client* client,
}
}
- auto query = curop.opDescription();
+ auto query = appendCommentField(opCtx, curop.opDescription());
if (!query.isEmpty()) {
s << " command: ";
if (iscommand) {
@@ -776,10 +779,11 @@ string OpDebug::report(Client* client,
if (y) \
b.appendNumber(x, (*y))
-void OpDebug::append(const CurOp& curop,
+void OpDebug::append(OperationContext* opCtx,
const SingleThreadedLockStats& lockStats,
FlowControlTicketholder::CurOp flowControlStats,
BSONObjBuilder& b) const {
+ auto& curop = *CurOp::get(opCtx);
const size_t maxElementSize = 50 * 1024;
b.append("op", logicalOpToString(logicalOp));
@@ -787,7 +791,8 @@ void OpDebug::append(const CurOp& curop,
NamespaceString nss = NamespaceString(curop.getNS());
b.append("ns", nss.ns());
- appendAsObjOrString("command", curop.opDescription(), maxElementSize, &b);
+ appendAsObjOrString(
+ "command", appendCommentField(opCtx, curop.opDescription()), maxElementSize, &b);
auto originatingCommand = curop.originatingCommand();
if (!originatingCommand.isEmpty()) {
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index 0019bbb85ad..7cb2395df5f 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -143,10 +143,7 @@ public:
OpDebug() = default;
- std::string report(Client* client,
- const CurOp& curop,
- const SingleThreadedLockStats* lockStats,
- FlowControlTicketholder::CurOp flowControlStats) const;
+ std::string report(OperationContext* opCtx, const SingleThreadedLockStats* lockStats) const;
/**
* Appends information about the current operation to "builder"
@@ -154,7 +151,7 @@ public:
* @param curop reference to the CurOp that owns this OpDebug
* @param lockStats lockStats object containing locking information about the operation
*/
- void append(const CurOp& curop,
+ void append(OperationContext* opCtx,
const SingleThreadedLockStats& lockStats,
FlowControlTicketholder::CurOp flowControlStats,
BSONObjBuilder& builder) const;
@@ -553,7 +550,7 @@ public:
* If called from a thread other than the one executing the operation associated with this
* CurOp, it is necessary to lock the associated Client object before executing this method.
*/
- void reportState(BSONObjBuilder* builder, bool truncateOps = false);
+ void reportState(OperationContext* opCtx, BSONObjBuilder* builder, bool truncateOps = false);
/**
* Sets the message for this CurOp.
diff --git a/src/mongo/db/curop_test.cpp b/src/mongo/db/curop_test.cpp
index abda927f3c8..84d3db43edf 100644
--- a/src/mongo/db/curop_test.cpp
+++ b/src/mongo/db/curop_test.cpp
@@ -201,7 +201,7 @@ TEST(CurOpTest, OptionalAdditiveMetricsNotDisplayedIfUninitialized) {
opCtx.get(), NamespaceString("myDb.coll"), nullptr, command, NetworkOp::dbQuery);
BSONObjBuilder builder;
- od.append(*curop, ls, {}, builder);
+ od.append(opCtx.get(), ls, {}, builder);
auto bs = builder.done();
// Append should always include these basic fields.
@@ -213,7 +213,7 @@ TEST(CurOpTest, OptionalAdditiveMetricsNotDisplayedIfUninitialized) {
ASSERT_EQ(static_cast<size_t>(bs.nFields()), basicFields.size());
// 'reportString' should only contain basic fields.
- std::string reportString = od.report(serviceContext.getClient(), *curop, nullptr, {});
+ std::string reportString = od.report(opCtx.get(), nullptr);
std::string expectedReportString = "query myDb.coll command: { a: 3 } numYields:0 0ms";
ASSERT_EQ(reportString, expectedReportString);
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 52c1435e4a9..114a39305d5 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -93,7 +93,7 @@ void profile(OperationContext* opCtx, NetworkOp op) {
Locker::LockerInfo lockerInfo;
opCtx->lockState()->getLockerInfo(&lockerInfo, CurOp::get(opCtx)->getLockStatsBase());
CurOp::get(opCtx)->debug().append(
- *CurOp::get(opCtx), lockerInfo.stats, opCtx->lockState()->getFlowControlStats(), b);
+ opCtx, lockerInfo.stats, opCtx->lockState()->getFlowControlStats(), b);
}
b.appendDate("ts", jsTime());
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index a834f89ae1d..43553efae9b 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -373,6 +373,15 @@ public:
_inMultiDocumentTransaction = true;
}
+ void setComment(const BSONObj& comment) {
+ _comment = comment.getOwned();
+ }
+
+ boost::optional<BSONElement> getComment() {
+ // The '_comment' object, if present, will only ever have one field.
+ return _comment ? boost::optional<BSONElement>(_comment->firstElement()) : boost::none;
+ }
+
private:
IgnoreInterruptsState pushIgnoreInterrupts() override {
IgnoreInterruptsState iis{_ignoreInterrupts,
@@ -498,6 +507,10 @@ private:
bool _writesAreReplicated = true;
bool _shouldParticipateInFlowControl = true;
bool _inMultiDocumentTransaction = false;
+
+ // If populated, this is an owned singleton BSONObj whose only field, 'comment', is a copy of
+ // the 'comment' field from the input command object.
+ boost::optional<BSONObj> _comment;
};
namespace repl {
diff --git a/src/mongo/db/pipeline/aggregation_request.cpp b/src/mongo/db/pipeline/aggregation_request.cpp
index 04c806bb992..8b0aa1c4cb7 100644
--- a/src/mongo/db/pipeline/aggregation_request.cpp
+++ b/src/mongo/db/pipeline/aggregation_request.cpp
@@ -58,7 +58,6 @@ constexpr StringData AggregationRequest::kCollationName;
constexpr StringData AggregationRequest::kExplainName;
constexpr StringData AggregationRequest::kAllowDiskUseName;
constexpr StringData AggregationRequest::kHintName;
-constexpr StringData AggregationRequest::kCommentName;
constexpr StringData AggregationRequest::kExchangeName;
constexpr long long AggregationRequest::kDefaultBatchSize;
@@ -145,13 +144,6 @@ StatusWith<AggregationRequest> AggregationRequest::parseFromBSON(
<< " must be specified as a string representing an index"
<< " name, or an object representing an index's key pattern");
}
- } else if (kCommentName == fieldName) {
- if (elem.type() != BSONType::String) {
- return {ErrorCodes::TypeMismatch,
- str::stream() << kCommentName << " must be a string, not a "
- << typeName(elem.type())};
- }
- request.setComment(elem.str());
} else if (kExplainName == fieldName) {
if (elem.type() != BSONType::Bool) {
return {ErrorCodes::TypeMismatch,
@@ -308,8 +300,6 @@ Document AggregationRequest::serializeToCommandObj() const {
_explainMode ? Value(Document()) : Value(Document{{kBatchSizeName, _batchSize}})},
// Only serialize a hint if one was specified.
{kHintName, _hint.isEmpty() ? Value() : Value(_hint)},
- // Only serialize a comment if one was specified.
- {kCommentName, _comment.empty() ? Value() : Value(_comment)},
// Only serialize readConcern if specified.
{repl::ReadConcernArgs::kReadConcernFieldName,
_readConcern.isEmpty() ? Value() : Value(_readConcern)},
diff --git a/src/mongo/db/pipeline/aggregation_request.h b/src/mongo/db/pipeline/aggregation_request.h
index 99550df8785..053797eb389 100644
--- a/src/mongo/db/pipeline/aggregation_request.h
+++ b/src/mongo/db/pipeline/aggregation_request.h
@@ -61,7 +61,6 @@ public:
static constexpr StringData kExplainName = "explain"_sd;
static constexpr StringData kAllowDiskUseName = "allowDiskUse"_sd;
static constexpr StringData kHintName = "hint"_sd;
- static constexpr StringData kCommentName = "comment"_sd;
static constexpr StringData kExchangeName = "exchange"_sd;
static constexpr StringData kRuntimeConstants = "runtimeConstants"_sd;
@@ -189,10 +188,6 @@ public:
return _hint;
}
- const std::string& getComment() const {
- return _comment;
- }
-
boost::optional<ExplainOptions::Verbosity> getExplain() const {
return _explainMode;
}
@@ -241,10 +236,6 @@ public:
_hint = hint.getOwned();
}
- void setComment(const std::string& comment) {
- _comment = comment;
- }
-
void setExplain(boost::optional<ExplainOptions::Verbosity> verbosity) {
_explainMode = verbosity;
}
@@ -309,9 +300,6 @@ private:
// {$hint: <String>}, where <String> is the index name hinted.
BSONObj _hint;
- // The comment parameter attached to this aggregation, empty if not set.
- std::string _comment;
-
BSONObj _readConcern;
// The unwrapped readPreference object, if one was given to us by the mongos command processor.
diff --git a/src/mongo/db/pipeline/aggregation_request_test.cpp b/src/mongo/db/pipeline/aggregation_request_test.cpp
index 10da16a2a85..55b0992ec8f 100644
--- a/src/mongo/db/pipeline/aggregation_request_test.cpp
+++ b/src/mongo/db/pipeline/aggregation_request_test.cpp
@@ -60,7 +60,7 @@ TEST(AggregationRequestTest, ShouldParseAllKnownOptions) {
"{pipeline: [{$match: {a: 'abc'}}], explain: false, allowDiskUse: true, fromMongos: true, "
"needsMerge: true, bypassDocumentValidation: true, collation: {locale: 'en_US'}, cursor: "
"{batchSize: 10}, hint: {a: 1}, maxTimeMS: 100, readConcern: {level: 'linearizable'}, "
- "$queryOptions: {$readPreference: 'nearest'}, comment: 'agg_comment', exchange: {policy: "
+ "$queryOptions: {$readPreference: 'nearest'}, exchange: {policy: "
"'roundrobin', consumers:NumberInt(2)}}");
auto request = unittest::assertGet(AggregationRequest::parseFromBSON(nss, inputBson));
ASSERT_FALSE(request.getExplain());
@@ -70,7 +70,6 @@ TEST(AggregationRequestTest, ShouldParseAllKnownOptions) {
ASSERT_TRUE(request.shouldBypassDocumentValidation());
ASSERT_EQ(request.getBatchSize(), 10);
ASSERT_BSONOBJ_EQ(request.getHint(), BSON("a" << 1));
- ASSERT_EQ(request.getComment(), "agg_comment");
ASSERT_BSONOBJ_EQ(request.getCollation(),
BSON("locale"
<< "en_US"));
@@ -157,7 +156,6 @@ TEST(AggregationRequestTest, ShouldNotSerializeOptionalValuesIfEquivalentToDefau
request.setBypassDocumentValidation(false);
request.setCollation(BSONObj());
request.setHint(BSONObj());
- request.setComment("");
request.setMaxTimeMS(0u);
request.setUnwrappedReadPref(BSONObj());
request.setReadConcern(BSONObj());
@@ -180,8 +178,6 @@ TEST(AggregationRequestTest, ShouldSerializeOptionalValuesIfSet) {
request.setMaxTimeMS(10u);
const auto hintObj = BSON("a" << 1);
request.setHint(hintObj);
- const auto comment = std::string("agg_comment");
- request.setComment(comment);
const auto collationObj = BSON("locale"
<< "en_US");
request.setCollation(collationObj);
@@ -203,7 +199,6 @@ TEST(AggregationRequestTest, ShouldSerializeOptionalValuesIfSet) {
{AggregationRequest::kCursorName,
Value(Document({{AggregationRequest::kBatchSizeName, 10}}))},
{AggregationRequest::kHintName, hintObj},
- {AggregationRequest::kCommentName, comment},
{repl::ReadConcernArgs::kReadConcernFieldName, readConcernObj},
{QueryRequest::kUnwrappedReadPrefField, readPrefObj},
{QueryRequest::cmdOptionMaxTimeMS, 10}};
@@ -311,14 +306,6 @@ TEST(AggregationRequestTest, ShouldRejectHintAsArray) {
AggregationRequest::parseFromBSON(NamespaceString("a.collection"), inputBson).getStatus());
}
-TEST(AggregationRequestTest, ShouldRejectNonStringComment) {
- NamespaceString nss("a.collection");
- const BSONObj inputBson =
- fromjson("{pipeline: [{$match: {a: 'abc'}}], cursor: {}, comment: 1}");
- ASSERT_NOT_OK(
- AggregationRequest::parseFromBSON(NamespaceString("a.collection"), inputBson).getStatus());
-}
-
TEST(AggregationRequestTest, ShouldRejectExplainIfNumber) {
NamespaceString nss("a.collection");
const BSONObj inputBson =
diff --git a/src/mongo/db/pipeline/expression_context.cpp b/src/mongo/db/pipeline/expression_context.cpp
index 489caa64725..e281178ab3e 100644
--- a/src/mongo/db/pipeline/expression_context.cpp
+++ b/src/mongo/db/pipeline/expression_context.cpp
@@ -53,7 +53,6 @@ ExpressionContext::ExpressionContext(OperationContext* opCtx,
boost::optional<UUID> collUUID)
: ExpressionContext(opCtx,
request.getExplain(),
- request.getComment(),
request.isFromMongos(),
request.needsMerge(),
request.shouldAllowDiskUse(),
@@ -69,7 +68,6 @@ ExpressionContext::ExpressionContext(OperationContext* opCtx,
ExpressionContext::ExpressionContext(
OperationContext* opCtx,
const boost::optional<ExplainOptions::Verbosity>& explain,
- StringData comment,
bool fromMongos,
bool needsMerge,
bool allowDiskUse,
@@ -82,7 +80,6 @@ ExpressionContext::ExpressionContext(
StringMap<ExpressionContext::ResolvedNamespace> resolvedNamespaces,
boost::optional<UUID> collUUID)
: explain(explain),
- comment(comment),
fromMongos(fromMongos),
needsMerge(needsMerge),
allowDiskUse(allowDiskUse),
@@ -192,7 +189,6 @@ intrusive_ptr<ExpressionContext> ExpressionContext::copyWith(
auto expCtx = make_intrusive<ExpressionContext>(opCtx,
explain,
- comment,
fromMongos,
needsMerge,
allowDiskUse,
diff --git a/src/mongo/db/pipeline/expression_context.h b/src/mongo/db/pipeline/expression_context.h
index 43e9b5a7e37..dbac88c28e1 100644
--- a/src/mongo/db/pipeline/expression_context.h
+++ b/src/mongo/db/pipeline/expression_context.h
@@ -116,7 +116,6 @@ public:
*/
ExpressionContext(OperationContext* opCtx,
const boost::optional<ExplainOptions::Verbosity>& explain,
- StringData comment,
bool fromMongos,
bool needsmerge,
bool allowDiskUse,
@@ -235,9 +234,6 @@ public:
// The explain verbosity requested by the user, or boost::none if no explain was requested.
boost::optional<ExplainOptions::Verbosity> explain;
- // The comment provided by the user, or the empty string if no comment was provided.
- std::string comment;
-
bool fromMongos = false;
bool needsMerge = false;
bool inMongos = false;
diff --git a/src/mongo/db/pipeline/expression_context_for_test.h b/src/mongo/db/pipeline/expression_context_for_test.h
index 8a204fa7039..b9f891b1603 100644
--- a/src/mongo/db/pipeline/expression_context_for_test.h
+++ b/src/mongo/db/pipeline/expression_context_for_test.h
@@ -54,7 +54,6 @@ public:
ExpressionContextForTest(NamespaceString nss)
: ExpressionContext(nullptr, // opCtx, nullptr while base class is constructed.
boost::none, // explain
- "", // comment
false, // fromMongos,
false, // needsMerge,
false, // allowDiskUse,
diff --git a/src/mongo/db/pipeline/mongos_process_interface.cpp b/src/mongo/db/pipeline/mongos_process_interface.cpp
index cec0279587e..742583bc94b 100644
--- a/src/mongo/db/pipeline/mongos_process_interface.cpp
+++ b/src/mongo/db/pipeline/mongos_process_interface.cpp
@@ -163,7 +163,6 @@ boost::optional<Document> MongoSInterface::lookupSingleDocument(
cmdBuilder.append("find", nss.coll());
}
cmdBuilder.append("filter", filterObj);
- cmdBuilder.append("comment", expCtx->comment);
if (readConcern) {
cmdBuilder.append(repl::ReadConcernArgs::kReadConcernFieldName, *readConcern);
}
diff --git a/src/mongo/db/query/count_command_test.cpp b/src/mongo/db/query/count_command_test.cpp
index b7ea431f678..ca1bde54ee0 100644
--- a/src/mongo/db/query/count_command_test.cpp
+++ b/src/mongo/db/query/count_command_test.cpp
@@ -246,28 +246,6 @@ TEST(CountCommandTest, ConvertToAggregationWithQueryOptions) {
SimpleBSONObjComparator::kInstance.makeEqualTo()));
}
-TEST(CountCommandTest, ConvertToAggregationWithComment) {
- auto countCmd = CountCommand::parse(ctxt,
- BSON("count"
- << "TestColl"
- << "$db"
- << "TestDB"
- << "comment"
- << "aComment"));
- auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns));
-
- auto ar = uassertStatusOK(AggregationRequest::parseFromBSON(testns, agg));
- ASSERT_EQ(ar.getComment(), "aComment");
-
- std::vector<BSONObj> expectedPipeline{BSON("$count"
- << "count")};
- ASSERT(std::equal(expectedPipeline.begin(),
- expectedPipeline.end(),
- ar.getPipeline().begin(),
- ar.getPipeline().end(),
- SimpleBSONObjComparator::kInstance.makeEqualTo()));
-}
-
TEST(CountCommandTest, ConvertToAggregationWithReadConcern) {
auto countCmd = CountCommand::parse(ctxt,
BSON("count"
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 2f351c930d3..8cc25ffea1c 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -580,6 +580,12 @@ std::string runQuery(OperationContext* opCtx,
// Set CurOp information.
const auto upconvertedQuery = upconvertQueryEntry(q.query, nss, q.ntoreturn, q.ntoskip);
+
+ // Extract the 'comment' parameter from the upconverted query, if it exists.
+ if (auto commentField = upconvertedQuery["comment"]) {
+ opCtx->setComment(commentField.wrap());
+ }
+
beginQueryOp(opCtx, nss, upconvertedQuery, q.ntoreturn, q.ntoskip);
// Parse the qm into a CanonicalQuery.
diff --git a/src/mongo/db/query/parsed_distinct.cpp b/src/mongo/db/query/parsed_distinct.cpp
index 226754acba4..5cfa30ebdf9 100644
--- a/src/mongo/db/query/parsed_distinct.cpp
+++ b/src/mongo/db/query/parsed_distinct.cpp
@@ -47,7 +47,6 @@ namespace mongo {
const char ParsedDistinct::kKeyField[] = "key";
const char ParsedDistinct::kQueryField[] = "query";
const char ParsedDistinct::kCollationField[] = "collation";
-const char ParsedDistinct::kCommentField[] = "comment";
namespace {
@@ -241,10 +240,6 @@ StatusWith<BSONObj> ParsedDistinct::asAggregationCommand() const {
aggregationBuilder.append(QueryRequest::kUnwrappedReadPrefField, qr.getUnwrappedReadPref());
}
- if (!qr.getComment().empty()) {
- aggregationBuilder.append(kCommentField, qr.getComment());
- }
-
// Specify the 'cursor' option so that aggregation uses the cursor interface.
aggregationBuilder.append("cursor", BSONObj());
@@ -284,10 +279,6 @@ StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* opCtx,
qr->setCollation(collation.get());
}
- if (auto comment = parsedDistinct.getComment()) {
- qr->setComment(comment.get().toString());
- }
-
// The IDL parser above does not handle generic command arguments. Since the underlying query
// request requires the following options, manually parse and verify them here.
if (auto readConcernElt = cmdObj[repl::ReadConcernArgs::kReadConcernFieldName]) {
diff --git a/src/mongo/db/query/parsed_distinct_test.cpp b/src/mongo/db/query/parsed_distinct_test.cpp
index dd6e501ed24..0f0efd7a18a 100644
--- a/src/mongo/db/query/parsed_distinct_test.cpp
+++ b/src/mongo/db/query/parsed_distinct_test.cpp
@@ -67,7 +67,6 @@ TEST(ParsedDistinctTest, ConvertToAggregationNoQuery) {
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
ASSERT(ar.getValue().getReadConcern().isEmpty());
ASSERT(ar.getValue().getUnwrappedReadPref().isEmpty());
- ASSERT(ar.getValue().getComment().empty());
ASSERT_EQUALS(ar.getValue().getMaxTimeMS(), 0u);
std::vector<BSONObj> expectedPipeline{
@@ -107,7 +106,6 @@ TEST(ParsedDistinctTest, ConvertToAggregationDottedPathNoQuery) {
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
ASSERT(ar.getValue().getReadConcern().isEmpty());
ASSERT(ar.getValue().getUnwrappedReadPref().isEmpty());
- ASSERT(ar.getValue().getComment().empty());
ASSERT_EQUALS(ar.getValue().getMaxTimeMS(), 0u);
std::vector<BSONObj> expectedPipeline{
@@ -155,8 +153,6 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithAllOptions) {
<< "$queryOptions"
<< BSON("readPreference"
<< "secondary")
- << "comment"
- << "aComment"
<< "maxTimeMS" << 100 << "$db"
<< "testdb"),
ExtensionsCallbackNoop(),
@@ -180,7 +176,6 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithAllOptions) {
ASSERT_BSONOBJ_EQ(ar.getValue().getUnwrappedReadPref(),
BSON("readPreference"
<< "secondary"));
- ASSERT_EQUALS(ar.getValue().getComment(), "aComment");
ASSERT_EQUALS(ar.getValue().getMaxTimeMS(), 100u);
std::vector<BSONObj> expectedPipeline{
@@ -221,7 +216,6 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) {
ASSERT_BSONOBJ_EQ(ar.getValue().getCollation(), BSONObj());
ASSERT(ar.getValue().getReadConcern().isEmpty());
ASSERT(ar.getValue().getUnwrappedReadPref().isEmpty());
- ASSERT(ar.getValue().getComment().empty());
ASSERT_EQUALS(ar.getValue().getMaxTimeMS(), 0u);
std::vector<BSONObj> expectedPipeline{
diff --git a/src/mongo/db/query/query_request.cpp b/src/mongo/db/query/query_request.cpp
index a4867aca42f..d8ab73b8116 100644
--- a/src/mongo/db/query/query_request.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -90,7 +90,6 @@ const char kLimitField[] = "limit";
const char kBatchSizeField[] = "batchSize";
const char kNToReturnField[] = "ntoreturn";
const char kSingleBatchField[] = "singleBatch";
-const char kCommentField[] = "comment";
const char kMaxField[] = "max";
const char kMinField[] = "min";
const char kReturnKeyField[] = "returnKey";
@@ -274,13 +273,6 @@ StatusWith<unique_ptr<QueryRequest>> QueryRequest::parseFromFindCommand(unique_p
}
qr->_allowDiskUse = el.boolean();
- } else if (fieldName == kCommentField) {
- Status status = checkFieldType(el, String);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_comment = el.str();
} else if (fieldName == cmdOptionMaxTimeMS) {
StatusWith<int> maxTimeMS = parseMaxTimeMS(el);
if (!maxTimeMS.isOK()) {
@@ -517,10 +509,6 @@ void QueryRequest::asFindCommandInternal(BSONObjBuilder* cmdBuilder) const {
cmdBuilder->append(kSingleBatchField, true);
}
- if (!_comment.empty()) {
- cmdBuilder->append(kCommentField, _comment);
- }
-
if (_maxTimeMS > 0) {
cmdBuilder->append(cmdOptionMaxTimeMS, _maxTimeMS);
}
@@ -944,14 +932,6 @@ Status QueryRequest::initFullQuery(const BSONObj& top) {
return maxTimeMS.getStatus();
}
_maxTimeMS = maxTimeMS.getValue();
- } else if (name == "comment") {
- // Legacy $comment can be any BSON element. Convert to string if it isn't
- // already.
- if (e.type() == BSONType::String) {
- _comment = e.str();
- } else {
- _comment = e.toString(false);
- }
}
}
}
@@ -1126,9 +1106,6 @@ StatusWith<BSONObj> QueryRequest::asAggregationCommand() const {
if (!_hint.isEmpty()) {
aggregationBuilder.append("hint", _hint);
}
- if (!_comment.empty()) {
- aggregationBuilder.append("comment", _comment);
- }
if (!_readConcern.isEmpty()) {
aggregationBuilder.append("readConcern", _readConcern);
}
diff --git a/src/mongo/db/query/query_request.h b/src/mongo/db/query/query_request.h
index 17e915ace5c..a204e45c39b 100644
--- a/src/mongo/db/query/query_request.h
+++ b/src/mongo/db/query/query_request.h
@@ -259,14 +259,6 @@ public:
_explain = explain;
}
- const std::string& getComment() const {
- return _comment;
- }
-
- void setComment(const std::string& comment) {
- _comment = comment;
- }
-
const BSONObj& getUnwrappedReadPref() const {
return _unwrappedReadPref;
}
@@ -518,8 +510,6 @@ private:
bool _explain = false;
- std::string _comment;
-
// A user-specified maxTimeMS limit, or a value of '0' if not specified.
int _maxTimeMS = 0;
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index e4f9989b44f..c2624e65ceb 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -430,7 +430,7 @@ TEST(QueryRequestTest, ParseFromCommandReadOnceDefaultsToFalse) {
ASSERT(!qr->isReadOnce());
}
-TEST(QueryRequestTest, ParseFromCommandCommentWithValidMinMax) {
+TEST(QueryRequestTest, ParseFromCommandValidMinMax) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"comment: 'the comment',"
@@ -441,7 +441,6 @@ TEST(QueryRequestTest, ParseFromCommandCommentWithValidMinMax) {
unique_ptr<QueryRequest> qr(
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
- ASSERT_EQUALS("the comment", qr->getComment());
BSONObj expectedMin = BSON("a" << 1);
ASSERT_EQUALS(0, expectedMin.woCompare(qr->getMin()));
BSONObj expectedMax = BSON("a" << 2);
@@ -607,17 +606,6 @@ TEST(QueryRequestTest, ParseFromCommandSingleBatchWrongType) {
ASSERT_NOT_OK(result.getStatus());
}
-TEST(QueryRequestTest, ParseFromCommandCommentWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "comment: 1}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
TEST(QueryRequestTest, ParseFromCommandUnwrappedReadPrefWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
@@ -1272,17 +1260,6 @@ TEST(QueryRequestTest, ConvertToAggregationWithReturnKeyFails) {
ASSERT_NOT_OK(qr.asAggregationCommand());
}
-TEST(QueryRequestTest, ConvertToAggregationWithCommentSucceeds) {
- QueryRequest qr(testns);
- qr.setComment("test");
- const auto aggCmd = qr.asAggregationCommand();
- ASSERT_OK(aggCmd);
-
- auto ar = AggregationRequest::parseFromBSON(testns, aggCmd.getValue());
- ASSERT_OK(ar.getStatus());
- ASSERT_EQ(qr.getComment(), ar.getValue().getComment());
-}
-
TEST(QueryRequestTest, ConvertToAggregationWithShowRecordIdFails) {
QueryRequest qr(testns);
qr.setShowRecordId(true);
@@ -1470,31 +1447,6 @@ TEST(QueryRequestTest, ConvertToFindWithAllowDiskUseFalseSucceeds) {
ASSERT_FALSE(findCmd[QueryRequest::kAllowDiskUseField]);
}
-TEST(QueryRequestTest, ParseFromLegacyObjMetaOpComment) {
- BSONObj queryObj = fromjson(
- "{$query: {a: 1},"
- "$comment: {b: 2, c: {d: 'ParseFromLegacyObjMetaOpComment'}}}");
- const NamespaceString nss("test.testns");
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::fromLegacyQuery(nss, queryObj, BSONObj(), 0, 0, 0)));
-
- // Ensure that legacy comment meta-operator is parsed to a string comment
- ASSERT_EQ(qr->getComment(), "{ b: 2, c: { d: \"ParseFromLegacyObjMetaOpComment\" } }");
- ASSERT_BSONOBJ_EQ(qr->getFilter(), fromjson("{a: 1}"));
-}
-
-TEST(QueryRequestTest, ParseFromLegacyStringMetaOpComment) {
- BSONObj queryObj = fromjson(
- "{$query: {a: 1},"
- "$comment: 'ParseFromLegacyStringMetaOpComment'}");
- const NamespaceString nss("test.testns");
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::fromLegacyQuery(nss, queryObj, BSONObj(), 0, 0, 0)));
-
- ASSERT_EQ(qr->getComment(), "ParseFromLegacyStringMetaOpComment");
- ASSERT_BSONOBJ_EQ(qr->getFilter(), fromjson("{a: 1}"));
-}
-
TEST(QueryRequestTest, ParseFromLegacyQuery) {
const auto kSkip = 1;
const auto kNToReturn = 2;
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index 29a2bb973f5..1f38b66d6d9 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -99,9 +99,9 @@ namespace mongo {
MONGO_FAIL_POINT_DEFINE(rsStopGetMore);
MONGO_FAIL_POINT_DEFINE(respondWithNotPrimaryInCommandDispatch);
MONGO_FAIL_POINT_DEFINE(skipCheckingForNotMasterInCommandDispatch);
-MONGO_FAIL_POINT_DEFINE(waitAfterReadCommandFinishesExecution);
MONGO_FAIL_POINT_DEFINE(sleepMillisAfterCommandExecutionBegins);
MONGO_FAIL_POINT_DEFINE(waitAfterNewStatementBlocksBehindPrepare);
+MONGO_FAIL_POINT_DEFINE(waitAfterCommandFinishesExecution);
// Tracks the number of times a legacy unacknowledged write failed due to
// not master error resulted in network disconnection.
@@ -627,19 +627,24 @@ bool runCommandImpl(OperationContext* opCtx,
}
}
- // This failpoint should affect both getMores and commands which are read-only and thus don't
- // support writeConcern.
- if (!shouldWaitForWriteConcern || command->getLogicalOp() == LogicalOp::opGetMore) {
- waitAfterReadCommandFinishesExecution.execute([&](const BSONObj& data) {
- auto db = data["db"].str();
- if (db.empty() || request.getDatabase() == db) {
- CurOpFailpointHelpers::waitWhileFailPointEnabled(
- &waitAfterReadCommandFinishesExecution,
- opCtx,
- "waitAfterReadCommandFinishesExecution");
- }
- });
- }
+ // This failpoint blocks all commands which are running on the specified namespace, or which
+ // are present in the given list of commands. If no namespace or command list is provided, then
+ // the failpoint will block all commands.
+ waitAfterCommandFinishesExecution.execute([&](const BSONObj& data) {
+ auto ns = data["ns"].valueStringDataSafe();
+ auto commands =
+ data.hasField("commands") ? data["commands"].Array() : std::vector<BSONElement>();
+
+ // If 'ns' or 'commands' is not set, block for all the namespaces or commands respectively.
+ if ((ns.empty() || invocation->ns().ns() == ns) &&
+ (commands.empty() ||
+ std::any_of(commands.begin(), commands.end(), [&request](auto& element) {
+ return element.valueStringDataSafe() == request.getCommandName();
+ }))) {
+ CurOpFailpointHelpers::waitWhileFailPointEnabled(
+ &waitAfterCommandFinishesExecution, opCtx, "waitAfterCommandFinishesExecution");
+ }
+ });
behaviors.waitForLinearizableReadConcern(opCtx);
@@ -754,6 +759,8 @@ void execCommandDatabase(OperationContext* opCtx,
allowImplicitCollectionCreationField = element;
} else if (fieldName == CommandHelpers::kHelpFieldName) {
helpField = element;
+ } else if (fieldName == "comment") {
+ opCtx->setComment(element.wrap());
} else if (fieldName == QueryRequest::queryOptionMaxTimeMS) {
uasserted(ErrorCodes::InvalidOptions,
"no such command option $maxTimeMs; use maxTimeMS instead");
diff --git a/src/mongo/db/views/resolved_view.cpp b/src/mongo/db/views/resolved_view.cpp
index c5780407243..a6ac9d6b4d9 100644
--- a/src/mongo/db/views/resolved_view.cpp
+++ b/src/mongo/db/views/resolved_view.cpp
@@ -106,7 +106,6 @@ AggregationRequest ResolvedView::asExpandedViewAggregation(
}
expandedRequest.setHint(request.getHint());
- expandedRequest.setComment(request.getComment());
expandedRequest.setMaxTimeMS(request.getMaxTimeMS());
expandedRequest.setReadConcern(request.getReadConcern());
expandedRequest.setUnwrappedReadPref(request.getUnwrappedReadPref());
diff --git a/src/mongo/db/views/resolved_view_test.cpp b/src/mongo/db/views/resolved_view_test.cpp
index a4b5111419a..0273c5319c5 100644
--- a/src/mongo/db/views/resolved_view_test.cpp
+++ b/src/mongo/db/views/resolved_view_test.cpp
@@ -173,15 +173,6 @@ TEST(ResolvedViewTest, ExpandingAggRequestPreservesDefaultCollationOfView) {
<< "fr_CA"));
}
-TEST(ResolvedViewTest, ExpandingAggRequestPreservesComment) {
- const ResolvedView resolvedView{backingNss, emptyPipeline, kSimpleCollation};
- AggregationRequest aggRequest(viewNss, {});
- aggRequest.setComment("agg_comment");
-
- auto result = resolvedView.asExpandedViewAggregation(aggRequest);
- ASSERT_EQ(result.getComment(), "agg_comment");
-}
-
TEST(ResolvedViewTest, FromBSONFailsIfMissingResolvedView) {
BSONObj badCmdResponse = BSON("x" << 1);
ASSERT_THROWS_CODE(ResolvedView::fromBSON(badCmdResponse), AssertionException, 40248);
diff --git a/src/mongo/executor/remote_command_request.cpp b/src/mongo/executor/remote_command_request.cpp
index 38b745bebee..d0fa3d6e081 100644
--- a/src/mongo/executor/remote_command_request.cpp
+++ b/src/mongo/executor/remote_command_request.cpp
@@ -59,7 +59,12 @@ RemoteCommandRequestBase::RemoteCommandRequestBase(RequestId requestId,
const BSONObj& metadataObj,
OperationContext* opCtx,
Milliseconds timeoutMillis)
- : id(requestId), dbname(theDbName), metadata(metadataObj), cmdObj(theCmdObj), opCtx(opCtx) {
+ : id(requestId), dbname(theDbName), metadata(metadataObj), opCtx(opCtx) {
+ // If there is a comment associated with the current operation, append it to the command that we
+ // are about to dispatch to the shards.
+ cmdObj = opCtx && opCtx->getComment() && !theCmdObj["comment"]
+ ? theCmdObj.addField(*opCtx->getComment())
+ : theCmdObj;
timeout = opCtx ? std::min<Milliseconds>(opCtx->getRemainingMaxTimeMillis(), timeoutMillis)
: timeoutMillis;
}
diff --git a/src/mongo/s/commands/cluster_explain_cmd.cpp b/src/mongo/s/commands/cluster_explain_cmd.cpp
index 09882eeb642..3736947d113 100644
--- a/src/mongo/s/commands/cluster_explain_cmd.cpp
+++ b/src/mongo/s/commands/cluster_explain_cmd.cpp
@@ -176,6 +176,13 @@ std::unique_ptr<CommandInvocation> ClusterExplainCmd::parse(OperationContext* op
// arguments into the inner command since it is what is passed to the virtual
// CommandInvocation::explain() method.
const BSONObj explainedObj = makeExplainedObj(cmdObj, dbName);
+
+ // Extract 'comment' field from the 'explainedObj' only if there is no top-level comment.
+ auto commentField = explainedObj["comment"];
+ if (!opCtx->getComment() && commentField) {
+ opCtx->setComment(commentField.wrap());
+ }
+
const std::string cmdName = explainedObj.firstElementFieldName();
auto explainedCommand = CommandHelpers::findCommand(cmdName);
uassert(ErrorCodes::CommandNotFound,
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 5949128fe88..802a1d69ab2 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -365,6 +365,11 @@ void runCommand(OperationContext* opCtx,
}
opCtx->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ // If the command includes a 'comment' field, set it on the current OpCtx.
+ if (auto commentField = request.body["comment"]) {
+ opCtx->setComment(commentField.wrap());
+ }
+
auto invocation = command->parse(opCtx, request);
// Set the logical optype, command object and namespace as soon as we identify the command. If
@@ -634,6 +639,12 @@ DbResponse Strategy::queryOp(OperationContext* opCtx, const NamespaceString& nss
Client* const client = opCtx->getClient();
AuthorizationSession* const authSession = AuthorizationSession::get(client);
+ // The legacy '$comment' operator gets converted to 'comment' by upconvertQueryEntry(). We
+ // set the comment in 'opCtx' so that it can be passed on to the respective shards.
+ if (auto commentField = upconvertedQuery["comment"]) {
+ opCtx->setComment(commentField.wrap());
+ }
+
Status status = authSession->checkAuthForFind(nss, false);
audit::logQueryAuthzCheck(client, nss, q.query, status.code());
uassertStatusOK(status);
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index e697d32f150..69fe465d4cb 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -371,6 +371,12 @@ Status setUpOperationContextStateForGetMore(OperationContext* opCtx,
ReadPreferenceSetting::get(opCtx) = *readPref;
}
+ // If the originating command had a 'comment' field, we extract it and set it on opCtx. Note
+ // that if the 'getMore' command itself has a 'comment' field, we give precedence to it.
+ auto comment = cursor->getOriginatingCommand()["comment"];
+ if (!opCtx->getComment() && comment) {
+ opCtx->setComment(comment.wrap());
+ }
if (cursor->isTailableAndAwaitData()) {
// For tailable + awaitData cursors, the request may have indicated a maximum amount of time
// to wait for new data. If not, default it to 1 second. We track the deadline instead via