summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Percy <david.percy@mongodb.com>2020-08-26 21:03:12 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-09-04 04:15:57 +0000
commita08a00b6c55c6bd43df2dd76893a6917f46cebdc (patch)
tree1ba5f373c551794b586609d2f6ab014f9a56684c
parent74756b18f43407e32496b600f2466457c59cb26a (diff)
downloadmongo-a08a00b6c55c6bd43df2dd76893a6917f46cebdc.tar.gz
SERVER-49102 Custom filter for logging/profiling
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml3
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml3
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml2
-rw-r--r--jstests/auth/lib/commands_lib.js17
-rw-r--r--jstests/concurrency/fsm_workloads/profile_command.js90
-rw-r--r--jstests/noPassthrough/profile_filter.js170
-rw-r--r--src/mongo/db/SConscript12
-rw-r--r--src/mongo/db/catalog/SConscript1
-rw-r--r--src/mongo/db/catalog/collection_catalog.cpp24
-rw-r--r--src/mongo/db/catalog/collection_catalog.h61
-rw-r--r--src/mongo/db/catalog/collection_catalog_test.cpp15
-rw-r--r--src/mongo/db/catalog/database_holder_impl.cpp2
-rw-r--r--src/mongo/db/commands/SConscript3
-rw-r--r--src/mongo/db/commands/count_cmd.cpp2
-rw-r--r--src/mongo/db/commands/dbcommands_d.cpp57
-rw-r--r--src/mongo/db/commands/distinct.cpp2
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp4
-rw-r--r--src/mongo/db/commands/getmore_cmd.cpp2
-rw-r--r--src/mongo/db/commands/profile.idl15
-rw-r--r--src/mongo/db/commands/profile_common.cpp66
-rw-r--r--src/mongo/db/commands/profile_common.h26
-rw-r--r--src/mongo/db/curop.cpp439
-rw-r--r--src/mongo/db/curop.h30
-rw-r--r--src/mongo/db/introspect.cpp32
-rw-r--r--src/mongo/db/matcher/matcher.h4
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp8
-rw-r--r--src/mongo/db/profile_filter.cpp44
-rw-r--r--src/mongo/db/profile_filter.h62
-rw-r--r--src/mongo/db/profile_filter_impl.cpp95
-rw-r--r--src/mongo/db/profile_filter_impl.h52
-rw-r--r--src/mongo/db/query/find.cpp4
-rw-r--r--src/mongo/db/server_options.h3
-rw-r--r--src/mongo/db/server_options_helpers.cpp13
-rw-r--r--src/mongo/db/server_options_nongeneral.idl4
-rw-r--r--src/mongo/db/service_entry_point_common.cpp6
-rw-r--r--src/mongo/s/commands/cluster_profile_cmd.cpp42
51 files changed, 1259 insertions, 186 deletions
diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml
index 2cc8a214704..1296e2cf6bb 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml
@@ -31,6 +31,9 @@ selector:
# secondary doesn't make much sense, since there's no guarantee *which* secondary you get results
# from.
- uses_curop_agg_stage
+ # The system.profile collection is not replicated, so these tests are incompatible with
+ # readPreference secondary.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml
index 473184635f7..935603cb138 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml
@@ -31,6 +31,9 @@ selector:
# secondary doesn't make much sense, since there's no guarantee *which* secondary you get results
# from.
- uses_curop_agg_stage
+ # The system.profile collection is not replicated, so these tests are incompatible with
+ # readPreference secondary.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml
index 263b4904675..0f7946d8670 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml
@@ -105,6 +105,8 @@ selector:
# secondary doesn't make much sense, since there's no guarantee *which* secondary you get results
# from.
- uses_curop_agg_stage
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml
index 0b3119c9c06..9bb04c112c5 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml
@@ -114,6 +114,8 @@ selector:
# secondary doesn't make much sense, since there's no guarantee *which* secondary you get results
# from.
- uses_curop_agg_stage
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml
index f7ecf2b979d..a7cd5221b3d 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml
@@ -6,6 +6,8 @@ selector:
exclude_with_any_tags:
- requires_replication
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml
index 42861d3f46f..b6dd60cf5bc 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml
@@ -186,6 +186,8 @@ selector:
- assumes_balancer_off
- requires_replication
- requires_non_retryable_writes
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml
index 13efd8a6bcc..f2f0e5dc637 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml
@@ -145,6 +145,8 @@ selector:
# Tests which expect commands to fail and catch the error can cause transactions to abort and
# retry indefinitely.
- catches_command_failures
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml
index fc0042547e8..33c364f97dd 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml
@@ -148,6 +148,8 @@ selector:
# Tests which expect commands to fail and catch the error can cause transactions to abort and
# retry indefinitely.
- catches_command_failures
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml
index 15b7754df79..3ba770e70a6 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml
@@ -148,6 +148,8 @@ selector:
# Tests which expect commands to fail and catch the error can cause transactions to abort and
# retry indefinitely.
- catches_command_failures
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml
index 503ec1ca39d..f1a689fa316 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml
@@ -217,6 +217,8 @@ selector:
# This tag corresponds to operations which are run long enough to exceed the stepdown interval
# when grouped into transactions.
- operations_longer_than_stepdown_interval_in_txns
+ # mongos has no system.profile collection.
+ - requires_profiling
# Note that "requires_non_retryable_writes" does not need to be blacklisted because some writes
# that are not individually retryable can be retried if they are part of a transaction.
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml
index 0f2cf6603b8..dd6c821c646 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml
@@ -217,6 +217,8 @@ selector:
# This tag corresponds to operations which are run long enough to exceed the stepdown interval
# when grouped into transactions.
- operations_longer_than_stepdown_interval_in_txns
+ # mongos has no system.profile collection.
+ - requires_profiling
# Note that "requires_non_retryable_writes" does not need to be blacklisted because some writes
# that are not individually retryable can be retried if they are part of a transaction.
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml
index 9cecbff459c..874de8baaee 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml
@@ -148,6 +148,8 @@ selector:
# Tests which expect commands to fail and catch the error can cause transactions to abort and
# retry indefinitely.
- catches_command_failures
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml
index f833c4dac5e..d916b3ce65e 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml
@@ -198,6 +198,8 @@ selector:
# This tag corresponds to operations which are run long enough to exceed the stepdown interval
# when grouped into transactions.
- operations_longer_than_stepdown_interval_in_txns
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml
index 546481fb7d1..a4077faed3f 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml
@@ -111,6 +111,8 @@ selector:
exclude_with_any_tags:
- requires_replication
- assumes_balancer_on
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml
index e9b9485f42d..2a53ce35f21 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml
@@ -116,6 +116,8 @@ selector:
exclude_with_any_tags:
- assumes_balancer_off
- requires_replication
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml
index 38c417bcfe7..76fb5a6f270 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml
@@ -186,6 +186,8 @@ selector:
- assumes_balancer_off
- requires_replication
- requires_non_retryable_writes
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml
index 02f0a379920..ce3749db4bd 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml
@@ -175,6 +175,8 @@ selector:
- assumes_balancer_on
# Curop requires readConcern local.
- uses_curop_agg_stage
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml
index 40eb7f6b5a0..a839376f964 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml
@@ -183,6 +183,8 @@ selector:
- requires_non_retryable_writes
# Curop requires readConcern local.
- uses_curop_agg_stage
+ # mongos has no system.profile collection.
+ - requires_profiling
executor:
archive:
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index 46bcf102d26..58ecb710651 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -4832,7 +4832,7 @@ var authCommandsLib = {
},
{
runOnDb: firstDbName,
- roles: {},
+ roles: roles_dbAdmin,
}
]
},
@@ -4858,7 +4858,20 @@ var authCommandsLib = {
},
{
runOnDb: firstDbName,
- roles: {},
+ roles: {
+ backup: 1,
+ dbAdmin: 1,
+ dbAdminAnyDatabase: 1,
+ dbOwner: 1,
+ clusterMonitor: 1,
+ clusterAdmin: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges: [{
+ resource: {db: firstDbName, collection: "system.profile"},
+ actions: ["find"]
+ }]
}
]
},
diff --git a/jstests/concurrency/fsm_workloads/profile_command.js b/jstests/concurrency/fsm_workloads/profile_command.js
new file mode 100644
index 00000000000..e76e6b70e50
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/profile_command.js
@@ -0,0 +1,90 @@
+'use strict';
+
+/**
+ * profile_command.js
+ *
+ * Sets the profile level and filter to different values, and asserts that they take effect
+ * simultaneously:
+ * 1. The profile command never returns an inconsistent combination of {level, filter}.
+ * 2. We never log or profile based on an inconsistent combination of {level, filter}.
+ *
+ * @tags: [requires_profiling, requires_fcv_47]
+ */
+
+var $config = (function() {
+ const data = {
+ numDocs: 1000,
+ checkProfileResult: function(result) {
+ // After init(), these are the only profile settings we set.
+ // For example, we never set { level: 1, filter {millis: {$gt: 1}} }.
+ assert.contains({level: result.was, filter: result.filter}, [
+ {level: 0, filter: {$expr: {$const: false}}},
+ {level: 0, filter: {nreturned: {$gt: 0}}},
+ {level: 1, filter: {nModified: {$gt: 0}}},
+ ]);
+ },
+ };
+ const states = {
+ init: function(db, collName) {},
+
+ logReads: function(db, collName) {
+ const result = db.setProfilingLevel(0, {filter: {nreturned: {$gt: 0}}});
+ this.checkProfileResult(result);
+ },
+ profileUpdates: function(db, collName) {
+ const result = db.setProfilingLevel(1, {filter: {nModified: {$gt: 0}}});
+ this.checkProfileResult(result);
+ },
+
+ readSomething: function(db, collName) {
+ assert.eq(this.numDocs, db[collName].count());
+ const numDocs = Random.randInt(this.numDocs);
+ const n = db[collName].find({_id: {$lt: numDocs}}).comment('readSomething').itcount();
+ assert.eq(n, numDocs);
+ },
+ updateSomething: function(db, collName) {
+ assert.eq(this.numDocs, db[collName].count());
+ const numDocs = Random.randInt(this.numDocs);
+ const resp = assert.commandWorked(db[collName].updateMany(
+ {_id: {$lt: numDocs}}, {$inc: {i: 1}}, {comment: 'updateSomething'}));
+ assert.eq(numDocs, resp.matchedCount);
+ assert.eq(numDocs, resp.modifiedCount);
+ },
+
+ };
+ const setup = function(db, collName) {
+ Random.setRandomSeed();
+ db.setProfilingLevel(0, {filter: {$expr: false}});
+ const docs = Array.from({length: this.numDocs}, (_, i) => ({_id: i}));
+ assert.commandWorked(db[collName].insert(docs));
+ };
+ const teardown = function(db, collName) {
+ db.setProfilingLevel(0, {filter: 'unset'});
+ // We never profiled a read.
+ assert.eq(0, db.system.profile.find({comment: 'readSomething'}).itcount());
+ };
+
+ // Just let every state go to every other state with equal probability.
+ const weights = {
+ logReads: 0.25,
+ profileUpdates: 0.25,
+ readSomething: 0.25,
+ updateSomething: 0.25,
+ };
+ const transitions = {
+ init: weights,
+ logReads: weights,
+ profileUpdates: weights,
+ readSomething: weights,
+ updateSomething: weights,
+ };
+ return {
+ data,
+ threadCount: 4,
+ iterations: 100,
+ states,
+ transitions,
+ setup,
+ teardown,
+ };
+})();
diff --git a/jstests/noPassthrough/profile_filter.js b/jstests/noPassthrough/profile_filter.js
new file mode 100644
index 00000000000..a3eb47e78e9
--- /dev/null
+++ b/jstests/noPassthrough/profile_filter.js
@@ -0,0 +1,170 @@
+/*
+ * Test the usage and behavior of the 'filter' profiler option.
+ */
+(function() {
+
+load("jstests/libs/log.js"); // For findMatchingLogLine.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+function runTest(conn) {
+ const db = conn.getDB("test");
+ const coll = db.profile_filter;
+ coll.drop();
+ // mongoS supports slow-query log lines but not profiling. So if we are talking to mongoS, we
+ // will avoid enabling profiling, or making assertions about profiling.
+ const isMongos = FixtureHelpers.isMongos(db);
+
+ // When you set a filter it appears in getProfilingLevel.
+ const exampleFilter = {millis: {$gt: 100, $lt: 300}};
+ const oldResponse = assert.commandWorked(db.setProfilingLevel(0, {filter: exampleFilter}));
+ const response = assert.commandWorked(db.setProfilingLevel(0));
+ assert.eq(response.filter, {$and: [{millis: {$gt: 100}}, {millis: {$lt: 300}}]});
+ // But, slowms / sampleRate still appears in getProfilingLevel.
+ assert.eq(response.slowms, oldResponse.slowms);
+ assert.eq(response.sampleRate, oldResponse.sampleRate);
+ // Since this may be confusing, a note also appears.
+ assert.eq(
+ oldResponse.note,
+ 'When a filter expression is set, slowms and sampleRate are not used for profiling and slow-query log lines.');
+ assert.eq(
+ response.note,
+ 'When a filter expression is set, slowms and sampleRate are not used for profiling and slow-query log lines.');
+
+ // When you unset the filter it no longer appears in the response.
+ assert.commandWorked(db.setProfilingLevel(0, {filter: 'unset'}));
+ const response2 = assert.commandWorked(db.setProfilingLevel(0));
+ assert(!response2.hasOwnProperty('filter'), response2);
+ // The note is also gone.
+ assert(!response2.hasOwnProperty('note'), response2);
+
+ // Setting the filter to null is an error: we don't want people to assume that {filter: null}
+ // unsets the filter, and we don't want people to assume {filter: null} is equivalent to not
+ // specifying a new filter.
+ assert.throws(() => db.setProfilingLevel(level, {filter: null}),
+ [],
+ "Expected an object, or the string 'unset'.");
+
+ // While the filter is set, slow-query log lines ignore slowms and sampleRate.
+ // For example:
+ // 1. An always-true filter of {} will always log.
+ // This shows we don't AND all the settings together.
+ assert.commandWorked(
+ db.setProfilingLevel(isMongos ? 0 : 1, {filter: {}, slowms: 9999, sampleRate: 0}));
+ const comment1 = 'profile_filter_example_1';
+ assert.eq(0, coll.find().comment(comment1).itcount());
+
+ let log = assert.commandWorked(db.adminCommand({getLog: "global"})).log;
+ assert(findMatchingLogLine(log, {comment: comment1}), `No log line for comment: ${comment1}`);
+ if (!isMongos) {
+ assert(db.system.profile.findOne({'command.comment': comment1}),
+ `No profiler entry for comment: ${comment1}`);
+ }
+
+ // 2. An always-false filter of {$expr: false} will never log.
+ // This shows we don't OR all the settings together.
+ assert.commandWorked(db.setProfilingLevel(isMongos ? 0 : 1,
+ {filter: {$expr: false}, slowms: -1, sampleRate: 1}));
+ const comment2 = 'profile_filter_example_2';
+ assert.eq(0, coll.find().comment(comment2).itcount());
+
+ log = assert.commandWorked(db.adminCommand({getLog: "global"})).log;
+ assert.eq(null, findMatchingLogLine(log, {comment: comment2}));
+ if (!isMongos) {
+ assert.eq(null, db.system.profile.findOne({'command.comment': comment2}));
+ }
+
+ coll.drop();
+ assert.commandWorked(coll.insert(Array.from({length: 100}, (_, i) => ({a: i}))));
+ assert.commandWorked(coll.createIndex({a: 1}));
+ if (!isMongos) {
+ // The filter lets you express interesting predicates, such as SERVER-37308.
+ assert.commandWorked(db.setProfilingLevel(
+ isMongos ? 0 : 1,
+ // This filter looks for queries with a high ratio of scanned to returned,
+ // as an attempt to find query plans that aren't using indexes well.
+ {filter: {$expr: {$lt: [10, {$divide: ["$docsExamined", "$nreturned"]}]}}}));
+ const collscanComment = 'profile_filter_example_collscan';
+ coll.find({a: 42}).hint({$natural: 1}).comment(collscanComment).itcount();
+ const ixscanComment = 'profile_filter_example_ixscan';
+ coll.find({a: 42}).hint({a: 1}).comment(ixscanComment).itcount();
+ db.setProfilingLevel(0, {filter: 'unset'});
+
+ // Only the collscan plan should be logged and profiled. The ixscan plan has a low ratio of
+ // docsExamined/nreturned, so the filter does not select it.
+ log = assert.commandWorked(db.adminCommand({getLog: "global"})).log;
+ assert(findMatchingLogLine(log, {comment: collscanComment}));
+ assert.eq(null, findMatchingLogLine(log, {comment: ixscanComment}));
+ assert(db.system.profile.findOne({'command.comment': collscanComment}));
+ assert.eq(null, db.system.profile.findOne({'command.comment': ixscanComment}));
+ }
+
+ // The input to the filter has the same schema as system.profile, with a few exceptions.
+ if (!isMongos) {
+ const exampleProfileDoc = db.system.profile.findOne();
+ const profilerFields = Object.keys(exampleProfileDoc);
+ const expectedMissingFields = [
+ // None for now!
+ ];
+ for (const field of profilerFields) {
+ if (expectedMissingFields.includes(field))
+ continue;
+
+ // Set a filter that requires `field` to exist.
+ assert.commandWorked(
+ db.setProfilingLevel(isMongos ? 0 : 1, {filter: {[field]: {$exists: true}}}));
+ const comment = 'profile_filter_input_has_field_' + field;
+ assert.eq(1, coll.find().limit(1).comment(comment).itcount());
+ // If the profile filter's input didn't contain `field`, then this operation wouldn't be
+ // profiled.
+ assert(db.system.profile.findOne({'command.comment': comment}),
+ `Profile filter doc was missing field: ${field}`);
+ }
+ }
+ assert.commandWorked(db.setProfilingLevel(0, {filter: 'unset'}));
+ assert.eq(0, db.getProfilingLevel());
+
+ // To catch mistakes, it's not allowed for the filter to reference fields that don't exist.
+ // setProfilingLevel throws an error, and prevents you from setting an invalid filter like this.
+ assert.throws(() => db.setProfilingLevel(isMongos ? 0 : 1, {filter: {no_such_field: 23}}));
+ assert.eq(0, db.getProfilingLevel());
+ const response3 = db.setProfilingLevel(0);
+ let expectedFields = ['was', 'slowms', 'sampleRate', 'ok'];
+ if (isMongos) {
+ expectedFields = [...expectedFields, '$clusterTime', 'operationTime'];
+ }
+ assert.eq(Object.keys(response3), expectedFields);
+
+ // However, we can't catch every run-time error statically. For example, we make no attempt to
+ // catch a type error statically. This kind of invalid filter will fail silently, because
+ // logging the error would be too expensive, since the filter runs so frequently.
+ assert.commandWorked(db.setProfilingLevel(isMongos ? 0 : 1, {filter: {$expr: {$add: [""]}}}));
+ const invalidFilterComment = 'profile_filter_fails_at_runtime';
+ assert.eq(1, coll.find().limit(1).comment(invalidFilterComment).itcount());
+ if (!isMongos) {
+ assert.eq(null, db.system.profile.findOne({'command.comment': invalidFilterComment}));
+ }
+
+ // It is allowed for the filter to depend on the entire document.
+ assert.commandWorked(db.setProfilingLevel(
+ isMongos ? 0 : 1, {filter: {$expr: {$gt: [3, {$size: {$objectToArray: "$$ROOT"}}]}}}));
+ const entireDocFilterComment = 'profile_filter_depends_on_entire_document';
+ assert.eq(1, coll.find().limit(1).comment(entireDocFilterComment).itcount());
+ if (!isMongos) {
+ assert.eq(null, db.system.profile.findOne({'command.comment': entireDocFilterComment}));
+ }
+}
+
+{
+ jsTest.log('Test standalone');
+ const conn = MongoRunner.runMongod({});
+ runTest(conn);
+ MongoRunner.stopMongod(conn);
+}
+
+{
+ jsTest.log('Test mongos');
+ const st = ShardingTest({shards: 1, rs: {nodes: 1}, config: 1});
+ runTest(st);
+ st.stop();
+}
+})();
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 85907307fe7..ff76c2daefb 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -129,9 +129,20 @@ env.Library(
)
env.Library(
+ target='profile_filter',
+ source=[
+ 'profile_filter.cpp',
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
+ ],
+)
+
+env.Library(
target='curop',
source=[
'curop.cpp',
+
],
LIBDEPS=[
'$BUILD_DIR/mongo/bson/mutable/mutable_bson',
@@ -147,6 +158,7 @@ env.Library(
'$BUILD_DIR/mongo/util/net/network',
'$BUILD_DIR/mongo/util/progress_meter',
'generic_cursor',
+ 'profile_filter',
'server_options',
],
LIBDEPS_PRIVATE=[
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index da81d6433b7..0c54bb95887 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -257,6 +257,7 @@ env.Library(
LIBDEPS=[
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/namespace_string',
+ '$BUILD_DIR/mongo/db/profile_filter',
'$BUILD_DIR/mongo/db/service_context',
'collection',
],
diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp
index 8457f524beb..35f307e1e5a 100644
--- a/src/mongo/db/catalog/collection_catalog.cpp
+++ b/src/mongo/db/catalog/collection_catalog.cpp
@@ -463,24 +463,26 @@ std::vector<std::string> CollectionCatalog::getAllDbNames() const {
return ret;
}
-void CollectionCatalog::setDatabaseProfileLevel(StringData dbName, int newProfileLevel) {
- stdx::lock_guard<Latch> lock(_profileLevelsLock);
- _databaseProfileLevels[dbName] = newProfileLevel;
+void CollectionCatalog::setDatabaseProfileSettings(
+ StringData dbName, CollectionCatalog::ProfileSettings newProfileSettings) {
+ stdx::lock_guard<Latch> lock(_profileSettingsLock);
+ _databaseProfileSettings[dbName] = newProfileSettings;
}
-int CollectionCatalog::getDatabaseProfileLevel(StringData dbName) const {
- stdx::lock_guard<Latch> lock(_profileLevelsLock);
- auto it = _databaseProfileLevels.find(dbName);
- if (it != _databaseProfileLevels.end()) {
+CollectionCatalog::ProfileSettings CollectionCatalog::getDatabaseProfileSettings(
+ StringData dbName) const {
+ stdx::lock_guard<Latch> lock(_profileSettingsLock);
+ auto it = _databaseProfileSettings.find(dbName);
+ if (it != _databaseProfileSettings.end()) {
return it->second;
}
- return serverGlobalParams.defaultProfile;
+ return {serverGlobalParams.defaultProfile, ProfileFilter::getDefault()};
}
-void CollectionCatalog::clearDatabaseProfileLevel(StringData dbName) {
- stdx::lock_guard<Latch> lock(_profileLevelsLock);
- _databaseProfileLevels.erase(dbName);
+void CollectionCatalog::clearDatabaseProfileSettings(StringData dbName) {
+ stdx::lock_guard<Latch> lock(_profileSettingsLock);
+ _databaseProfileSettings.erase(dbName);
}
void CollectionCatalog::registerCollection(CollectionUUID uuid, std::shared_ptr<Collection> coll) {
diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h
index c63a960661c..f9ad25938f9 100644
--- a/src/mongo/db/catalog/collection_catalog.h
+++ b/src/mongo/db/catalog/collection_catalog.h
@@ -34,6 +34,7 @@
#include <set>
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/profile_filter.h"
#include "mongo/db/service_context.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/uuid.h"
@@ -96,6 +97,26 @@ public:
static constexpr Collection* _nullCollection = nullptr;
};
+ struct ProfileSettings {
+ int level;
+ std::shared_ptr<ProfileFilter> filter; // nullable
+
+ ProfileSettings(int level, std::shared_ptr<ProfileFilter> filter)
+ : level(level), filter(filter) {
+ // ProfileSettings represents a state, not a request to change the state.
+ // -1 is not a valid profiling level: it is only used in requests, to represent
+ // leaving the state unchanged.
+ invariant(0 <= level && level <= 2,
+ str::stream() << "Invalid profiling level: " << level);
+ }
+
+ ProfileSettings() = default;
+
+ bool operator==(const ProfileSettings& other) {
+ return level == other.level && filter == other.filter;
+ }
+ };
+
static CollectionCatalog& get(ServiceContext* svcCtx);
static CollectionCatalog& get(OperationContext* opCtx);
CollectionCatalog() = default;
@@ -229,21 +250,33 @@ public:
std::vector<std::string> getAllDbNames() const;
/**
- * Sets 'newProfileLevel' as the profiling level for the database 'dbName'.
+ * Sets 'newProfileSettings' as the profiling settings for the database 'dbName'.
*/
- void setDatabaseProfileLevel(StringData dbName, int newProfileLevel);
+ void setDatabaseProfileSettings(StringData dbName, ProfileSettings newProfileSettings);
+
+ /**
+ * Fetches the profiling settings for database 'dbName'.
+ *
+ * Returns the server's default database profile settings if the database does not exist.
+ */
+ ProfileSettings getDatabaseProfileSettings(StringData dbName) const;
/**
* Fetches the profiling level for database 'dbName'.
*
- * Returns the server's default database profile level if the database does not exist.
+ * Returns the server's default database profile settings if the database does not exist.
+ *
+ * There is no corresponding setDatabaseProfileLevel; use setDatabaseProfileSettings instead.
+ * This method only exists as a convenience.
*/
- int getDatabaseProfileLevel(StringData dbName) const;
+ int getDatabaseProfileLevel(StringData dbName) const {
+ return getDatabaseProfileSettings(dbName).level;
+ }
/**
- * Clears the database profile level entry for 'dbName'.
+ * Clears the database profile settings entry for 'dbName'.
*/
- void clearDatabaseProfileLevel(StringData dbName);
+ void clearDatabaseProfileSettings(StringData dbName);
/**
* Puts the catalog in closed state. In this state, the lookupNSSByUUID method will fall back
@@ -316,7 +349,7 @@ private:
std::map<std::pair<std::string, CollectionUUID>, std::shared_ptr<Collection>>;
using NamespaceCollectionMap =
stdx::unordered_map<NamespaceString, std::shared_ptr<Collection>>;
- using DatabaseProfileLevelMap = StringMap<int>;
+ using DatabaseProfileSettingsMap = StringMap<ProfileSettings>;
CollectionCatalogMap _catalog;
OrderedCollectionMap _orderedCollections; // Ordered by <dbName, collUUID> pair
@@ -345,14 +378,16 @@ private:
// Mapping from ResourceId to a set of strings that contains collection and database namespaces.
std::map<ResourceId, std::set<std::string>> _resourceInformation;
- // Protects _databaseProfileLevels.
- mutable Mutex _profileLevelsLock = MONGO_MAKE_LATCH("CollectionCatalog::_profileLevelsLock");
+ // Protects _databaseProfileSettings.
+ mutable Mutex _profileSettingsLock =
+ MONGO_MAKE_LATCH("CollectionCatalog::_profileSettingsLock");
/**
- * Contains non-default database profile levels. New collections, current collections and views
- * must all be able to access the correct profile level for the database in which they reside.
- * Simple database name to integer profile level map. Access protected by the _catalogLock.
+ * Contains non-default database profile settings. New collections, current collections and
+ * views must all be able to access the correct profile settings for the database in which they
+ * reside. Simple database name to struct ProfileSettings map. Access protected by
+ * _profileSettingsLock.
*/
- DatabaseProfileLevelMap _databaseProfileLevels;
+ DatabaseProfileSettingsMap _databaseProfileSettings;
};
} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp
index 94b2156927d..3e5f1e906e9 100644
--- a/src/mongo/db/catalog/collection_catalog_test.cpp
+++ b/src/mongo/db/catalog/collection_catalog_test.cpp
@@ -623,15 +623,18 @@ TEST_F(CollectionCatalogTest, DatabaseProfileLevel) {
// Requesting a profile level that is not in the _databaseProfileLevel map should return the
// default server-wide setting
- ASSERT_EQ(catalog.getDatabaseProfileLevel(testDBNameFirst), serverGlobalParams.defaultProfile);
-
+ ASSERT_EQ(catalog.getDatabaseProfileSettings(testDBNameFirst).level,
+ serverGlobalParams.defaultProfile);
    // Setting the default profile level should not change the result.
- catalog.setDatabaseProfileLevel(testDBNameFirst, serverGlobalParams.defaultProfile);
- ASSERT_EQ(catalog.getDatabaseProfileLevel(testDBNameFirst), serverGlobalParams.defaultProfile);
+ catalog.setDatabaseProfileSettings(testDBNameFirst,
+ {serverGlobalParams.defaultProfile, nullptr});
+ ASSERT_EQ(catalog.getDatabaseProfileSettings(testDBNameFirst).level,
+ serverGlobalParams.defaultProfile);
// Changing the profile level should make fetching it different.
- catalog.setDatabaseProfileLevel(testDBNameSecond, serverGlobalParams.defaultProfile + 1);
- ASSERT_EQ(catalog.getDatabaseProfileLevel(testDBNameSecond),
+ catalog.setDatabaseProfileSettings(testDBNameSecond,
+ {serverGlobalParams.defaultProfile + 1, nullptr});
+ ASSERT_EQ(catalog.getDatabaseProfileSettings(testDBNameSecond).level,
serverGlobalParams.defaultProfile + 1);
}
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index 722ecf6f9ef..0fc3fdeb8dc 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -204,7 +204,7 @@ void DatabaseHolderImpl::dropDb(OperationContext* opCtx, Database* db) {
}
// Clean up the in-memory database state.
- CollectionCatalog::get(opCtx).clearDatabaseProfileLevel(name);
+ CollectionCatalog::get(opCtx).clearDatabaseProfileSettings(name);
close(opCtx, name);
auto const storageEngine = serviceContext->getStorageEngine();
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index b07b9c4ec1b..5e12faebf7b 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -522,6 +522,7 @@ env.Library(
source=[
'profile_common.cpp',
env.Idlc('profile.idl')[0],
+ '$BUILD_DIR/mongo/db/profile_filter_impl.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/commands',
@@ -529,6 +530,8 @@ env.Library(
LIBDEPS_PRIVATE=[
'$BUILD_DIR/mongo/db/auth/auth',
'$BUILD_DIR/mongo/db/auth/authprivilege',
+ '$BUILD_DIR/mongo/db/curop',
+ '$BUILD_DIR/mongo/db/matcher/expressions',
'$BUILD_DIR/mongo/db/server_options_core',
],
)
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 918167e4739..a450e2cbebe 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -272,7 +272,7 @@ public:
}
curOp->debug().setPlanSummaryMetrics(summaryStats);
- if (curOp->shouldDBProfile()) {
+ if (curOp->shouldDBProfile(opCtx)) {
curOp->debug().execStats = exec->getStats();
}
diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp
index 37512fdb9ac..ff50ee21f25 100644
--- a/src/mongo/db/commands/dbcommands_d.cpp
+++ b/src/mongo/db/commands/dbcommands_d.cpp
@@ -74,6 +74,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/ops/insert.h"
+#include "mongo/db/profile_filter_impl.h"
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/internal_plans.h"
@@ -105,24 +106,24 @@ MONGO_FAIL_POINT_DEFINE(waitInFilemd5DuringManualYield);
namespace {
-Status _setProfilingLevel(OperationContext* opCtx, Database* db, StringData dbName, int newLevel) {
+Status _setProfileSettings(OperationContext* opCtx,
+ Database* db,
+ StringData dbName,
+ mongo::CollectionCatalog::ProfileSettings newSettings) {
invariant(db);
- auto currLevel = CollectionCatalog::get(opCtx).getDatabaseProfileLevel(dbName);
+ auto currSettings = CollectionCatalog::get(opCtx).getDatabaseProfileSettings(dbName);
- if (currLevel == newLevel) {
+ if (currSettings == newSettings) {
return Status::OK();
}
- if (newLevel == 0) {
- CollectionCatalog::get(opCtx).setDatabaseProfileLevel(dbName, newLevel);
+ if (newSettings.level == 0) {
+ // No need to create the profile collection.
+ CollectionCatalog::get(opCtx).setDatabaseProfileSettings(dbName, newSettings);
return Status::OK();
}
- if (newLevel < 0 || newLevel > 2) {
- return Status(ErrorCodes::BadValue, "profiling level has to be >=0 and <= 2");
- }
-
// Can't support profiling without supporting capped collections.
if (!opCtx->getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return Status(ErrorCodes::CommandNotSupported,
@@ -134,7 +135,7 @@ Status _setProfilingLevel(OperationContext* opCtx, Database* db, StringData dbNa
return status;
}
- CollectionCatalog::get(opCtx).setDatabaseProfileLevel(dbName, newLevel);
+ CollectionCatalog::get(opCtx).setDatabaseProfileSettings(dbName, newSettings);
return Status::OK();
}
@@ -149,15 +150,21 @@ public:
CmdProfile() = default;
protected:
- int _applyProfilingLevel(OperationContext* opCtx,
- const std::string& dbName,
- int profilingLevel) const final {
+ CollectionCatalog::ProfileSettings _applyProfilingLevel(
+ OperationContext* opCtx,
+ const std::string& dbName,
+ const ProfileCmdRequest& request) const final {
+ const auto profilingLevel = request.getCommandParameter();
// The system.profile collection is non-replicated, so writes to it do not cause
// replication lag. As such, they should be excluded from Flow Control.
opCtx->setShouldParticipateInFlowControl(false);
- const bool readOnly = (profilingLevel < 0 || profilingLevel > 2);
+ // An invalid profiling level (outside the range [0, 2]) represents a request to read the
+ // current profiling level. Similarly, if the request does not include a filter, we only
+ // need to read the current filter, if any. If we're not changing either value, then we can
+ // acquire a shared lock instead of exclusive.
+ const bool readOnly = (profilingLevel < 0 || profilingLevel > 2) && !request.getFilter();
const LockMode dbMode = readOnly ? MODE_S : MODE_X;
// Accessing system.profile collection should not conflict with oplog application.
@@ -166,8 +173,9 @@ protected:
AutoGetDb ctx(opCtx, dbName, dbMode);
Database* db = ctx.getDb();
- // Fetches the database profiling level or the server default if the db does not exist.
- auto oldLevel = CollectionCatalog::get(opCtx).getDatabaseProfileLevel(dbName);
+ // Fetches the database profiling level + filter or the server default if the db does not
+ // exist.
+ auto oldSettings = CollectionCatalog::get(opCtx).getDatabaseProfileSettings(dbName);
if (!readOnly) {
if (!db) {
@@ -177,10 +185,23 @@ protected:
db = databaseHolder->openDb(opCtx, dbName);
}
- uassertStatusOK(_setProfilingLevel(opCtx, db, dbName, profilingLevel));
+ auto newSettings = oldSettings;
+ if (profilingLevel >= 0 && profilingLevel <= 2) {
+ newSettings.level = profilingLevel;
+ }
+ if (auto filterOrUnset = request.getFilter()) {
+ if (auto filter = filterOrUnset->obj) {
+ // filter: <match expression>
+ newSettings.filter = std::make_shared<ProfileFilterImpl>(*filter);
+ } else {
+ // filter: "unset"
+ newSettings.filter = nullptr;
+ }
+ }
+ uassertStatusOK(_setProfileSettings(opCtx, db, dbName, newSettings));
}
- return oldLevel;
+ return oldSettings;
}
} cmdProfile;
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 4d3f78259e6..0b332721625 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -301,7 +301,7 @@ public:
}
curOp->debug().setPlanSummaryMetrics(stats);
- if (curOp->shouldDBProfile()) {
+ if (curOp->shouldDBProfile(opCtx)) {
curOp->debug().execStats = executor.getValue()->getStats();
}
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 62b4ca577bb..6ee903eb59b 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -494,7 +494,7 @@ public:
// Fill out OpDebug with the number of deleted docs.
opDebug->additiveMetrics.ndeleted = docFound ? 1 : 0;
- if (curOp->shouldDBProfile()) {
+ if (curOp->shouldDBProfile(opCtx)) {
curOp->debug().execStats = exec->getStats();
}
recordStatsForTopCommand(opCtx);
@@ -571,7 +571,7 @@ public:
write_ops_exec::recordUpdateResultInOpDebug(exec->getUpdateResult(), opDebug);
opDebug->setPlanSummaryMetrics(summaryStats);
- if (curOp->shouldDBProfile()) {
+ if (curOp->shouldDBProfile(opCtx)) {
curOp->debug().execStats = exec->getStats();
}
recordStatsForTopCommand(opCtx);
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 1b3f43bf16c..bc4fafc8070 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -619,7 +619,7 @@ public:
// generate the stats eagerly for all operations due to cost.
if (cursorPin->getExecutor()->lockPolicy() !=
PlanExecutor::LockPolicy::kLocksInternally &&
- curOp->shouldDBProfile()) {
+ curOp->shouldDBProfile(opCtx)) {
curOp->debug().execStats = exec->getStats();
}
diff --git a/src/mongo/db/commands/profile.idl b/src/mongo/db/commands/profile.idl
index 97e6155f306..91a90567d52 100644
--- a/src/mongo/db/commands/profile.idl
+++ b/src/mongo/db/commands/profile.idl
@@ -28,10 +28,19 @@
global:
cpp_namespace: "mongo"
+ cpp_includes: "mongo/db/commands/profile_common.h"
imports:
- "mongo/idl/basic_types.idl"
+types:
+ ObjectOrUnset:
+ bson_serialization_type: any
+ description: "Either an object or the special string 'unset'."
+ cpp_type: mongo::ObjectOrUnset
+ serializer: "::mongo::serializeObjectOrUnset"
+ deserializer: "::mongo::parseObjectOrUnset"
+
commands:
profile:
@@ -49,3 +58,9 @@ commands:
type: double
description: "The fraction of eligible ops, in the range [0,1), to be logged/profiled."
optional: true
+ filter:
+ type: ObjectOrUnset
+ description: "A query predicate that determines which ops are logged/profiled. This is
+ an alternative to slowms and sampleRate. The special value 'unset' removes
+ the filter."
+ optional: true
diff --git a/src/mongo/db/commands/profile_common.cpp b/src/mongo/db/commands/profile_common.cpp
index 5b41f6a2fa8..8124366a638 100644
--- a/src/mongo/db/commands/profile_common.cpp
+++ b/src/mongo/db/commands/profile_common.cpp
@@ -31,9 +31,12 @@
#include "mongo/platform/basic.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/commands/profile_common.h"
#include "mongo/db/commands/profile_gen.h"
+#include "mongo/db/curop.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/profile_filter_impl.h"
#include "mongo/idl/idl_parser.h"
#include "mongo/logv2/log.h"
@@ -69,42 +72,64 @@ bool ProfileCmdBase::run(OperationContext* opCtx,
auto request = ProfileCmdRequest::parse(IDLParserErrorContext("profile"), cmdObj);
const auto profilingLevel = request.getCommandParameter();
+ // Validate arguments before making changes.
+ if (auto sampleRate = request.getSampleRate()) {
+ uassert(ErrorCodes::BadValue,
+ "'sampleRate' must be between 0.0 and 1.0 inclusive",
+ *sampleRate >= 0.0 && *sampleRate <= 1.0);
+ }
+
// Delegate to _applyProfilingLevel to set the profiling level appropriately whether we are on
// mongoD or mongoS.
- int oldLevel = _applyProfilingLevel(opCtx, dbName, profilingLevel);
+ auto oldSettings = _applyProfilingLevel(opCtx, dbName, request);
auto oldSlowMS = serverGlobalParams.slowMS;
auto oldSampleRate = serverGlobalParams.sampleRate;
- result.append("was", oldLevel);
+ result.append("was", oldSettings.level);
result.append("slowms", oldSlowMS);
result.append("sampleRate", oldSampleRate);
+ if (oldSettings.filter) {
+ result.append("filter", oldSettings.filter->serialize());
+ }
+ if (oldSettings.filter || request.getFilter()) {
+ result.append("note",
+ "When a filter expression is set, slowms and sampleRate are not used for "
+ "profiling and slow-query log lines.");
+ }
if (auto slowms = request.getSlowms()) {
serverGlobalParams.slowMS = *slowms;
}
if (auto sampleRate = request.getSampleRate()) {
- uassert(ErrorCodes::BadValue,
- "'sampleRate' must be between 0.0 and 1.0 inclusive",
- *sampleRate >= 0.0 && *sampleRate <= 1.0);
serverGlobalParams.sampleRate = *sampleRate;
}
- // Log the change made to server's profiling settings, unless the request was to get the current
- // value.
- if (profilingLevel != -1) {
+ // Log the change made to server's profiling settings, if the request asks to change anything.
+ if (profilingLevel != -1 || request.getSlowms() || request.getSampleRate() ||
+ request.getFilter()) {
logv2::DynamicAttributes attrs;
BSONObjBuilder oldState;
BSONObjBuilder newState;
- oldState.append("level"_sd, oldLevel);
+ oldState.append("level"_sd, oldSettings.level);
oldState.append("slowms"_sd, oldSlowMS);
oldState.append("sampleRate"_sd, oldSampleRate);
+ if (oldSettings.filter) {
+ oldState.append("filter"_sd, oldSettings.filter->serialize());
+ }
attrs.add("from", oldState.obj());
- newState.append("level"_sd, profilingLevel);
+ // newSettings.level may differ from profilingLevel: profilingLevel is part of the request,
+ // and if the request specifies {profile: -1, ...} then we want to show the unchanged value
+ // (0, 1, or 2).
+ auto newSettings = CollectionCatalog::get(opCtx).getDatabaseProfileSettings(dbName);
+ newState.append("level"_sd, newSettings.level);
newState.append("slowms"_sd, serverGlobalParams.slowMS);
newState.append("sampleRate"_sd, serverGlobalParams.sampleRate);
+ if (newSettings.filter) {
+ newState.append("filter"_sd, newSettings.filter->serialize());
+ }
attrs.add("to", newState.obj());
LOGV2(48742, "Profiler settings changed", attrs);
@@ -112,4 +137,25 @@ bool ProfileCmdBase::run(OperationContext* opCtx,
return true;
}
+
+ObjectOrUnset parseObjectOrUnset(const BSONElement& element) {
+ if (element.type() == BSONType::Object) {
+ return {{element.Obj()}};
+ } else if (element.type() == BSONType::String && element.String() == "unset"_sd) {
+ return {{}};
+ } else {
+ uasserted(ErrorCodes::BadValue, "Expected an object, or the string 'unset'.");
+ }
+}
+
+void serializeObjectOrUnset(const ObjectOrUnset& obj,
+ StringData fieldName,
+ BSONObjBuilder* builder) {
+ if (obj.obj) {
+ builder->append(fieldName, *obj.obj);
+ } else {
+ builder->append(fieldName, "unset"_sd);
+ }
+}
+
} // namespace mongo
diff --git a/src/mongo/db/commands/profile_common.h b/src/mongo/db/commands/profile_common.h
index 0e6269745d1..b8ffd40783e 100644
--- a/src/mongo/db/commands/profile_common.h
+++ b/src/mongo/db/commands/profile_common.h
@@ -29,10 +29,16 @@
#pragma once
+#include <boost/optional.hpp>
+
#include "mongo/base/status.h"
+#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/commands.h"
namespace mongo {
+
+class ProfileCmdRequest;
+
/**
* An abstract base class which implements all functionality common to the mongoD and mongoS
* 'profile' command, and defines a number of virtual functions through which it delegates any
@@ -68,10 +74,20 @@ public:
BSONObjBuilder& result) final;
protected:
- // Applies the given profiling level, or throws if the profiling level could not be set. On
- // success, returns an integer indicating the previous profiling level.
- virtual int _applyProfilingLevel(OperationContext* opCtx,
- const std::string& dbName,
- int profilingLevel) const = 0;
+ // Applies the given profiling level and filter, or throws if the profiling level could not be
+ // set. On success, returns a struct indicating the previous profiling level and filter.
+ virtual CollectionCatalog::ProfileSettings _applyProfilingLevel(
+ OperationContext* opCtx,
+ const std::string& dbName,
+ const ProfileCmdRequest& request) const = 0;
};
+
+struct ObjectOrUnset {
+ boost::optional<BSONObj> obj;
+};
+ObjectOrUnset parseObjectOrUnset(const BSONElement& element);
+void serializeObjectOrUnset(const ObjectOrUnset& obj,
+ StringData fieldName,
+ BSONObjBuilder* builder);
+
} // namespace mongo
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 670449fce89..7144d2a1e65 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -39,7 +39,7 @@
#include "mongo/bson/mutable/document.h"
#include "mongo/config.h"
-#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/server_status_metric.h"
@@ -47,6 +47,7 @@
#include "mongo/db/concurrency/locker.h"
#include "mongo/db/json.h"
#include "mongo/db/prepare_conflict_tracker.h"
+#include "mongo/db/profile_filter.h"
#include "mongo/db/query/getmore_request.h"
#include "mongo/db/query/plan_summary_stats.h"
#include "mongo/logv2/log.h"
@@ -475,6 +476,8 @@ void CurOp::raiseDbProfileLevel(int dbProfileLevel) {
_dbprofile = std::max(dbProfileLevel, _dbprofile);
}
+static constexpr size_t appendMaxElementSize = 50 * 1024;
+
bool CurOp::completeAndLogOperation(OperationContext* opCtx,
logv2::LogComponent component,
boost::optional<size_t> responseLength,
@@ -497,11 +500,24 @@ bool CurOp::completeAndLogOperation(OperationContext* opCtx,
oplogGetMoreStats.recordMillis(executionTimeMillis);
}
- bool shouldLogSlowOp, shouldSample;
+ bool shouldLogSlowOp, shouldProfileAtLevel1;
+
+ if (auto filter =
+ CollectionCatalog::get(opCtx).getDatabaseProfileSettings(getNSS().db()).filter) {
+ bool passesFilter = filter->matches(opCtx, _debug, *this);
- // Log the operation if it is eligible according to the current slowMS and sampleRate settings.
- std::tie(shouldLogSlowOp, shouldSample) = shouldLogSlowOpWithSampling(
- opCtx, component, Milliseconds(executionTimeMillis), Milliseconds(slowMs));
+ shouldLogSlowOp = passesFilter;
+ shouldProfileAtLevel1 = passesFilter;
+
+ } else {
+ // Log the operation if it is eligible according to the current slowMS and sampleRate
+ // settings.
+ bool shouldSample;
+ std::tie(shouldLogSlowOp, shouldSample) = shouldLogSlowOpWithSampling(
+ opCtx, component, Milliseconds(executionTimeMillis), Milliseconds(slowMs));
+
+ shouldProfileAtLevel1 = shouldLogSlowOp && shouldSample;
+ }
if (forceLog || shouldLogSlowOp) {
auto lockerInfo = opCtx->lockState()->getLockerInfo(_lockStatsBase);
@@ -556,7 +572,11 @@ bool CurOp::completeAndLogOperation(OperationContext* opCtx,
}
// Return 'true' if this operation should also be added to the profiler.
- return shouldDBProfile(shouldSample);
+ if (_dbprofile >= 2)
+ return true;
+ if (_dbprofile <= 0)
+ return false;
+ return shouldProfileAtLevel1;
}
Command::ReadWriteType CurOp::getReadWriteType() const {
@@ -1071,26 +1091,28 @@ void OpDebug::report(OperationContext* opCtx,
pAttrs->add("durationMillis", durationCount<Milliseconds>(executionTime));
}
+#define OPDEBUG_APPEND_NUMBER2(b, x, y) \
+ if (y != -1) \
+ (b).appendNumber(x, (y))
+#define OPDEBUG_APPEND_NUMBER(b, x) OPDEBUG_APPEND_NUMBER2(b, #x, x)
-#define OPDEBUG_APPEND_NUMBER(x) \
- if (x != -1) \
- b.appendNumber(#x, (x))
-#define OPDEBUG_APPEND_BOOL(x) \
- if (x) \
- b.appendBool(#x, (x))
-#define OPDEBUG_APPEND_ATOMIC(x, y) \
- if (auto __y = y.load(); __y > 0) \
- b.appendNumber(x, __y)
-#define OPDEBUG_APPEND_OPTIONAL(x, y) \
+#define OPDEBUG_APPEND_BOOL2(b, x, y) \
if (y) \
- b.appendNumber(x, (*y))
+ (b).appendBool(x, (y))
+#define OPDEBUG_APPEND_BOOL(b, x) OPDEBUG_APPEND_BOOL2(b, #x, x)
+
+#define OPDEBUG_APPEND_ATOMIC(b, x, y) \
+ if (auto __y = y.load(); __y > 0) \
+ (b).appendNumber(x, __y)
+#define OPDEBUG_APPEND_OPTIONAL(b, x, y) \
+ if (y) \
+ (b).appendNumber(x, (*y))
void OpDebug::append(OperationContext* opCtx,
const SingleThreadedLockStats& lockStats,
FlowControlTicketholder::CurOp flowControlStats,
BSONObjBuilder& b) const {
auto& curop = *CurOp::get(opCtx);
- const size_t maxElementSize = 50 * 1024;
b.append("op", logicalOpToString(logicalOp));
@@ -1098,47 +1120,46 @@ void OpDebug::append(OperationContext* opCtx,
b.append("ns", nss.ns());
appendAsObjOrString(
- "command", appendCommentField(opCtx, curop.opDescription()), maxElementSize, &b);
+ "command", appendCommentField(opCtx, curop.opDescription()), appendMaxElementSize, &b);
auto originatingCommand = curop.originatingCommand();
if (!originatingCommand.isEmpty()) {
- appendAsObjOrString("originatingCommand", originatingCommand, maxElementSize, &b);
+ appendAsObjOrString("originatingCommand", originatingCommand, appendMaxElementSize, &b);
}
- OPDEBUG_APPEND_NUMBER(nShards);
- OPDEBUG_APPEND_NUMBER(cursorid);
+ OPDEBUG_APPEND_NUMBER(b, nShards);
+ OPDEBUG_APPEND_NUMBER(b, cursorid);
if (mongotCursorId) {
b.append("mongot", makeMongotDebugStatsObject());
}
- OPDEBUG_APPEND_BOOL(exhaust);
+ OPDEBUG_APPEND_BOOL(b, exhaust);
- OPDEBUG_APPEND_OPTIONAL("keysExamined", additiveMetrics.keysExamined);
- OPDEBUG_APPEND_OPTIONAL("docsExamined", additiveMetrics.docsExamined);
- OPDEBUG_APPEND_BOOL(hasSortStage);
- OPDEBUG_APPEND_BOOL(usedDisk);
- OPDEBUG_APPEND_BOOL(fromMultiPlanner);
+ OPDEBUG_APPEND_OPTIONAL(b, "keysExamined", additiveMetrics.keysExamined);
+ OPDEBUG_APPEND_OPTIONAL(b, "docsExamined", additiveMetrics.docsExamined);
+ OPDEBUG_APPEND_BOOL(b, hasSortStage);
+ OPDEBUG_APPEND_BOOL(b, usedDisk);
+ OPDEBUG_APPEND_BOOL(b, fromMultiPlanner);
if (replanReason) {
bool replanned = true;
- OPDEBUG_APPEND_BOOL(replanned);
+ OPDEBUG_APPEND_BOOL(b, replanned);
b.append("replanReason", *replanReason);
}
- OPDEBUG_APPEND_OPTIONAL("nMatched", additiveMetrics.nMatched);
- OPDEBUG_APPEND_OPTIONAL("nModified", additiveMetrics.nModified);
- OPDEBUG_APPEND_OPTIONAL("ninserted", additiveMetrics.ninserted);
- OPDEBUG_APPEND_OPTIONAL("ndeleted", additiveMetrics.ndeleted);
- OPDEBUG_APPEND_OPTIONAL("nUpserted", additiveMetrics.nUpserted);
- OPDEBUG_APPEND_BOOL(cursorExhausted);
+ OPDEBUG_APPEND_OPTIONAL(b, "nMatched", additiveMetrics.nMatched);
+ OPDEBUG_APPEND_OPTIONAL(b, "nModified", additiveMetrics.nModified);
+ OPDEBUG_APPEND_OPTIONAL(b, "ninserted", additiveMetrics.ninserted);
+ OPDEBUG_APPEND_OPTIONAL(b, "ndeleted", additiveMetrics.ndeleted);
+ OPDEBUG_APPEND_BOOL(b, cursorExhausted);
- OPDEBUG_APPEND_OPTIONAL("keysInserted", additiveMetrics.keysInserted);
- OPDEBUG_APPEND_OPTIONAL("keysDeleted", additiveMetrics.keysDeleted);
- OPDEBUG_APPEND_ATOMIC("prepareReadConflicts", additiveMetrics.prepareReadConflicts);
- OPDEBUG_APPEND_ATOMIC("writeConflicts", additiveMetrics.writeConflicts);
+ OPDEBUG_APPEND_OPTIONAL(b, "keysInserted", additiveMetrics.keysInserted);
+ OPDEBUG_APPEND_OPTIONAL(b, "keysDeleted", additiveMetrics.keysDeleted);
+ OPDEBUG_APPEND_ATOMIC(b, "prepareReadConflicts", additiveMetrics.prepareReadConflicts);
+ OPDEBUG_APPEND_ATOMIC(b, "writeConflicts", additiveMetrics.writeConflicts);
- OPDEBUG_APPEND_OPTIONAL("dataThroughputLastSecond", dataThroughputLastSecond);
- OPDEBUG_APPEND_OPTIONAL("dataThroughputAverage", dataThroughputAverage);
+ OPDEBUG_APPEND_OPTIONAL(b, "dataThroughputLastSecond", dataThroughputLastSecond);
+ OPDEBUG_APPEND_OPTIONAL(b, "dataThroughputAverage", dataThroughputAverage);
b.appendNumber("numYield", curop.numYields());
- OPDEBUG_APPEND_NUMBER(nreturned);
+ OPDEBUG_APPEND_NUMBER(b, nreturned);
if (queryHash) {
b.append("queryHash", unsignedIntToFixedLengthHex(*queryHash));
@@ -1181,7 +1202,7 @@ void OpDebug::append(OperationContext* opCtx,
b.append("errCode", errInfo.code());
}
- OPDEBUG_APPEND_NUMBER(responseLength);
+ OPDEBUG_APPEND_NUMBER(b, responseLength);
if (iscommand) {
b.append("protocol", getProtoString(networkOp));
}
@@ -1197,8 +1218,314 @@ void OpDebug::append(OperationContext* opCtx,
}
if (!execStats.isEmpty()) {
- b.append("execStats", execStats);
+ b.append("execStats", std::move(execStats));
+ }
+}
+
+void OpDebug::appendUserInfo(const CurOp& c,
+ BSONObjBuilder& builder,
+ AuthorizationSession* authSession) {
+ UserNameIterator nameIter = authSession->getAuthenticatedUserNames();
+
+ UserName bestUser;
+ if (nameIter.more())
+ bestUser = *nameIter;
+
+ std::string opdb(nsToDatabase(c.getNS()));
+
+ BSONArrayBuilder allUsers(builder.subarrayStart("allUsers"));
+ for (; nameIter.more(); nameIter.next()) {
+ BSONObjBuilder nextUser(allUsers.subobjStart());
+ nextUser.append(AuthorizationManager::USER_NAME_FIELD_NAME, nameIter->getUser());
+ nextUser.append(AuthorizationManager::USER_DB_FIELD_NAME, nameIter->getDB());
+ nextUser.doneFast();
+
+ if (nameIter->getDB() == opdb) {
+ bestUser = *nameIter;
+ }
}
+ allUsers.doneFast();
+
+ builder.append("user", bestUser.getUser().empty() ? "" : bestUser.getFullName());
+}
+
+std::function<BSONObj(ProfileFilter::Args)> OpDebug::appendStaged(StringSet requestedFields,
+ bool needWholeDocument) {
+ // This function is analogous to OpDebug::append. The main difference is that append() does
+ // the work of building BSON right away, while appendStaged() stages the work to be done later.
+ // It returns a std::function that builds BSON when called.
+
+ // The other difference is that appendStaged can avoid building BSON for unneeded fields.
+ // requestedFields is a set of top-level field names; any fields beyond this list may be
+ // omitted. This also lets us uassert if the caller asks for an unsupported field.
+
+ // Each piece of the result is a function that appends to a BSONObjBuilder.
+ // Before returning, we encapsulate the result in a simpler function that returns a BSONObj.
+ using Piece = std::function<void(ProfileFilter::Args, BSONObjBuilder&)>;
+ std::vector<Piece> pieces;
+
+ // For convenience, the callback that handles each field gets the fieldName as an extra arg.
+ using Callback = std::function<void(const char*, ProfileFilter::Args, BSONObjBuilder&)>;
+
+ // Helper to check for the presence of a field in the StringSet, and remove it.
+ // At the end of this method, anything left in the StringSet is a field we don't know
+ // how to handle.
+ auto needs = [&](const char* fieldName) {
+ bool val = needWholeDocument || requestedFields.count(fieldName) > 0;
+ requestedFields.erase(fieldName);
+ return val;
+ };
+ auto addIfNeeded = [&](const char* fieldName, Callback cb) {
+ if (needs(fieldName)) {
+ pieces.push_back([fieldName = fieldName, cb = std::move(cb)](auto args, auto& b) {
+ cb(fieldName, args, b);
+ });
+ }
+ };
+
+ addIfNeeded("ts", [](auto field, auto args, auto& b) { b.append(field, jsTime()); });
+ addIfNeeded("client", [](auto field, auto args, auto& b) {
+ b.append(field, args.opCtx->getClient()->clientAddress());
+ });
+ addIfNeeded("appName", [](auto field, auto args, auto& b) {
+ const auto& clientMetadata =
+ ClientMetadataIsMasterState::get(args.opCtx->getClient()).getClientMetadata();
+ if (clientMetadata) {
+ auto appName = clientMetadata.get().getApplicationName();
+ if (!appName.empty()) {
+ b.append(field, appName);
+ }
+ }
+ });
+ bool needsAllUsers = needs("allUsers");
+ bool needsUser = needs("user");
+ if (needsAllUsers || needsUser) {
+ pieces.push_back([](auto args, auto& b) {
+ AuthorizationSession* authSession = AuthorizationSession::get(args.opCtx->getClient());
+ appendUserInfo(args.curop, b, authSession);
+ });
+ }
+
+ addIfNeeded("op", [](auto field, auto args, auto& b) {
+ b.append(field, logicalOpToString(args.op.logicalOp));
+ });
+ addIfNeeded("ns", [](auto field, auto args, auto& b) {
+ b.append(field, NamespaceString(args.curop.getNS()).ns());
+ });
+
+ addIfNeeded("command", [](auto field, auto args, auto& b) {
+ appendAsObjOrString(field,
+ appendCommentField(args.opCtx, args.curop.opDescription()),
+ appendMaxElementSize,
+ &b);
+ });
+
+ addIfNeeded("originatingCommand", [](auto field, auto args, auto& b) {
+ auto originatingCommand = args.curop.originatingCommand();
+ if (!originatingCommand.isEmpty()) {
+ appendAsObjOrString(field, originatingCommand, appendMaxElementSize, &b);
+ }
+ });
+
+ addIfNeeded("nShards", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_NUMBER2(b, field, args.op.nShards);
+ });
+ addIfNeeded("cursorid", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_NUMBER2(b, field, args.op.cursorid);
+ });
+ addIfNeeded("mongot", [](auto field, auto args, auto& b) {
+ if (args.op.mongotCursorId) {
+ b.append(field, args.op.makeMongotDebugStatsObject());
+ }
+ });
+ addIfNeeded("exhaust", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_BOOL2(b, field, args.op.exhaust);
+ });
+
+ addIfNeeded("keysExamined", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.keysExamined);
+ });
+ addIfNeeded("docsExamined", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.docsExamined);
+ });
+ addIfNeeded("hasSortStage", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_BOOL2(b, field, args.op.hasSortStage);
+ });
+ addIfNeeded("usedDisk", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_BOOL2(b, field, args.op.usedDisk);
+ });
+ addIfNeeded("fromMultiPlanner", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_BOOL2(b, field, args.op.fromMultiPlanner);
+ });
+ addIfNeeded("replanned", [](auto field, auto args, auto& b) {
+ if (args.op.replanReason) {
+ OPDEBUG_APPEND_BOOL2(b, field, true);
+ }
+ });
+ addIfNeeded("replanReason", [](auto field, auto args, auto& b) {
+ if (args.op.replanReason) {
+ b.append(field, *args.op.replanReason);
+ }
+ });
+ addIfNeeded("nMatched", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.nMatched);
+ });
+ addIfNeeded("nModified", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.nModified);
+ });
+ addIfNeeded("ninserted", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.ninserted);
+ });
+ addIfNeeded("ndeleted", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.ndeleted);
+ });
+ addIfNeeded("nUpserted", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.nUpserted);
+ });
+ addIfNeeded("cursorExhausted", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_BOOL2(b, field, args.op.cursorExhausted);
+ });
+
+ addIfNeeded("keysInserted", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.keysInserted);
+ });
+ addIfNeeded("keysDeleted", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.keysDeleted);
+ });
+ addIfNeeded("prepareReadConflicts", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_ATOMIC(b, field, args.op.additiveMetrics.prepareReadConflicts);
+ });
+ addIfNeeded("writeConflicts", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_ATOMIC(b, field, args.op.additiveMetrics.writeConflicts);
+ });
+
+ addIfNeeded("dataThroughputLastSecond", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_OPTIONAL(b, field, args.op.dataThroughputLastSecond);
+ });
+ addIfNeeded("dataThroughputAverage", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_OPTIONAL(b, field, args.op.dataThroughputAverage);
+ });
+
+ addIfNeeded("numYield", [](auto field, auto args, auto& b) {
+ b.appendNumber(field, args.curop.numYields());
+ });
+ addIfNeeded("nreturned", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_NUMBER2(b, field, args.op.nreturned);
+ });
+
+ addIfNeeded("queryHash", [](auto field, auto args, auto& b) {
+ if (args.op.queryHash) {
+ b.append(field, unsignedIntToFixedLengthHex(*args.op.queryHash));
+ }
+ });
+ addIfNeeded("planCacheKey", [](auto field, auto args, auto& b) {
+ if (args.op.planCacheKey) {
+ b.append(field, unsignedIntToFixedLengthHex(*args.op.planCacheKey));
+ }
+ });
+
+ addIfNeeded("locks", [](auto field, auto args, auto& b) {
+ if (auto lockerInfo =
+ args.opCtx->lockState()->getLockerInfo(args.curop.getLockStatsBase())) {
+ BSONObjBuilder locks(b.subobjStart(field));
+ lockerInfo->stats.report(&locks);
+ }
+ });
+
+ addIfNeeded("flowControl", [](auto field, auto args, auto& b) {
+ BSONObj flowControlMetrics =
+ makeFlowControlObject(args.opCtx->lockState()->getFlowControlStats());
+ BSONObjBuilder flowControlBuilder(b.subobjStart(field));
+ flowControlBuilder.appendElements(flowControlMetrics);
+ });
+
+ addIfNeeded("writeConcern", [](auto field, auto args, auto& b) {
+ if (args.op.writeConcern && !args.op.writeConcern->usedDefault) {
+ b.append(field, args.op.writeConcern->toBSON());
+ }
+ });
+
+ addIfNeeded("storage", [](auto field, auto args, auto& b) {
+ if (args.op.storageStats) {
+ b.append(field, args.op.storageStats->toBSON());
+ }
+ });
+
+ // Don't short-circuit: call needs() for every supported field, so that at the end we can
+ // uassert that no unsupported fields were requested.
+ bool needsOk = needs("ok");
+ bool needsErrMsg = needs("errMsg");
+ bool needsErrName = needs("errName");
+ bool needsErrCode = needs("errCode");
+ if (needsOk || needsErrMsg || needsErrName || needsErrCode) {
+ pieces.push_back([](auto args, auto& b) {
+ if (!args.op.errInfo.isOK()) {
+ b.appendNumber("ok", 0.0);
+ if (!args.op.errInfo.reason().empty()) {
+ b.append("errMsg", args.op.errInfo.reason());
+ }
+ b.append("errName", ErrorCodes::errorString(args.op.errInfo.code()));
+ b.append("errCode", args.op.errInfo.code());
+ }
+ });
+ }
+
+ addIfNeeded("responseLength", [](auto field, auto args, auto& b) {
+ OPDEBUG_APPEND_NUMBER2(b, field, args.op.responseLength);
+ });
+
+ addIfNeeded("protocol", [](auto field, auto args, auto& b) {
+ if (args.op.iscommand) {
+ b.append(field, getProtoString(args.op.networkOp));
+ }
+ });
+
+ addIfNeeded("remoteOpWaitMillis", [](auto field, auto args, auto& b) {
+ if (args.op.remoteOpWaitTime) {
+ b.append(field, durationCount<Milliseconds>(*args.op.remoteOpWaitTime));
+ }
+ });
+
+ // millis and durationMillis are the same thing. This is one of the few inconsistencies between
+ // the profiler (OpDebug::append) and the log file (OpDebug::report), so for the profile filter
+ // we support both names.
+ addIfNeeded("millis", [](auto field, auto args, auto& b) {
+ b.appendIntOrLL(field, durationCount<Milliseconds>(args.op.executionTime));
+ });
+ addIfNeeded("durationMillis", [](auto field, auto args, auto& b) {
+ b.appendIntOrLL(field, durationCount<Milliseconds>(args.op.executionTime));
+ });
+
+ addIfNeeded("planSummary", [](auto field, auto args, auto& b) {
+ if (!args.curop.getPlanSummary().empty()) {
+ b.append(field, args.curop.getPlanSummary());
+ }
+ });
+
+ addIfNeeded("execStats", [](auto field, auto args, auto& b) {
+ if (!args.op.execStats.isEmpty()) {
+ b.append(field, args.op.execStats);
+ }
+ });
+
+ if (!requestedFields.empty()) {
+ std::stringstream ss;
+ ss << "No such field (or fields) available for profile filter";
+ auto sep = ": ";
+ for (auto&& s : requestedFields) {
+ ss << sep << s;
+ sep = ", ";
+ }
+ uasserted(4910200, ss.str());
+ }
+
+ return [pieces = std::move(pieces)](ProfileFilter::Args args) {
+ BSONObjBuilder bob;
+        for (const auto& piece : pieces) {
+ piece(args, bob);
+ }
+ return bob.obj();
+ };
}
void OpDebug::setPlanSummaryMetrics(const PlanSummaryStats& planSummaryStats) {
@@ -1210,7 +1537,7 @@ void OpDebug::setPlanSummaryMetrics(const PlanSummaryStats& planSummaryStats) {
replanReason = planSummaryStats.replanReason;
}
-BSONObj OpDebug::makeFlowControlObject(FlowControlTicketholder::CurOp stats) const {
+BSONObj OpDebug::makeFlowControlObject(FlowControlTicketholder::CurOp stats) {
BSONObjBuilder builder;
if (stats.ticketsAcquired > 0) {
builder.append("acquireCount", stats.ticketsAcquired);
@@ -1362,17 +1689,17 @@ void OpDebug::AdditiveMetrics::report(logv2::DynamicAttributes* pAttrs) const {
BSONObj OpDebug::AdditiveMetrics::reportBSON() const {
BSONObjBuilder b;
- OPDEBUG_APPEND_OPTIONAL("keysExamined", keysExamined);
- OPDEBUG_APPEND_OPTIONAL("docsExamined", docsExamined);
- OPDEBUG_APPEND_OPTIONAL("nMatched", nMatched);
- OPDEBUG_APPEND_OPTIONAL("nModified", nModified);
- OPDEBUG_APPEND_OPTIONAL("ninserted", ninserted);
- OPDEBUG_APPEND_OPTIONAL("ndeleted", ndeleted);
- OPDEBUG_APPEND_OPTIONAL("nUpserted", nUpserted);
- OPDEBUG_APPEND_OPTIONAL("keysInserted", keysInserted);
- OPDEBUG_APPEND_OPTIONAL("keysDeleted", keysDeleted);
- OPDEBUG_APPEND_ATOMIC("prepareReadConflicts", prepareReadConflicts);
- OPDEBUG_APPEND_ATOMIC("writeConflicts", writeConflicts);
+ OPDEBUG_APPEND_OPTIONAL(b, "keysExamined", keysExamined);
+ OPDEBUG_APPEND_OPTIONAL(b, "docsExamined", docsExamined);
+ OPDEBUG_APPEND_OPTIONAL(b, "nMatched", nMatched);
+ OPDEBUG_APPEND_OPTIONAL(b, "nModified", nModified);
+ OPDEBUG_APPEND_OPTIONAL(b, "ninserted", ninserted);
+ OPDEBUG_APPEND_OPTIONAL(b, "ndeleted", ndeleted);
+ OPDEBUG_APPEND_OPTIONAL(b, "nUpserted", nUpserted);
+ OPDEBUG_APPEND_OPTIONAL(b, "keysInserted", keysInserted);
+ OPDEBUG_APPEND_OPTIONAL(b, "keysDeleted", keysDeleted);
+ OPDEBUG_APPEND_ATOMIC(b, "prepareReadConflicts", prepareReadConflicts);
+ OPDEBUG_APPEND_ATOMIC(b, "writeConflicts", writeConflicts);
return b.obj();
}
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index 7fe4794b984..6ed03de3040 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -31,10 +31,13 @@
#pragma once
#include "mongo/config.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands.h"
#include "mongo/db/cursor_id.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/profile_filter.h"
#include "mongo/db/server_options.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/logv2/attribute_storage.h"
@@ -176,6 +179,10 @@ public:
FlowControlTicketholder::CurOp flowControlStats,
BSONObjBuilder& builder) const;
+ static std::function<BSONObj(ProfileFilter::Args args)> appendStaged(StringSet requestedFields,
+ bool needWholeDocument);
+ static void appendUserInfo(const CurOp&, BSONObjBuilder&, AuthorizationSession*);
+
/**
* Copies relevant plan summary metrics to this OpDebug instance.
*/
@@ -184,7 +191,7 @@ public:
/**
* The resulting object has zeros omitted. As is typical in this file.
*/
- BSONObj makeFlowControlObject(FlowControlTicketholder::CurOp flowControlStats) const;
+ static BSONObj makeFlowControlObject(FlowControlTicketholder::CurOp flowControlStats);
/**
* Make object from $search stats with non-populated values omitted.
@@ -411,19 +418,30 @@ public:
}
/**
+ * Gets the name of the namespace on which the current operation operates.
+ */
+ NamespaceString getNSS() const {
+ return NamespaceString{_ns};
+ }
+
+ /**
* Returns true if the elapsed time of this operation is such that it should be profiled or
* profile level is set to 2. Uses total time if the operation is done, current elapsed time
- * otherwise. The argument shouldSample prevents slow diagnostic logging at profile 1
- * when set to false.
+ * otherwise.
+ *
+ * When a custom filter is set, we conservatively assume it would match this operation.
*/
- bool shouldDBProfile(bool shouldSample = true) {
+ bool shouldDBProfile(OperationContext* opCtx) {
// Profile level 2 should override any sample rate or slowms settings.
if (_dbprofile >= 2)
return true;
- if (!shouldSample || _dbprofile <= 0)
+ if (_dbprofile <= 0)
return false;
+ if (CollectionCatalog::get(opCtx).getDatabaseProfileSettings(getNSS().db()).filter)
+ return true;
+
return elapsedTimeExcludingPauses() >= Milliseconds{serverGlobalParams.slowMS};
}
@@ -720,7 +738,7 @@ public:
void setGenericCursor_inlock(GenericCursor gc);
- const boost::optional<SingleThreadedLockStats> getLockStatsBase() {
+ boost::optional<SingleThreadedLockStats> getLockStatsBase() const {
return _lockStatsBase;
}
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 31749c46c96..95e8f4843fd 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -54,36 +54,6 @@ using std::endl;
using std::string;
using std::unique_ptr;
-namespace {
-
-void _appendUserInfo(const CurOp& c, BSONObjBuilder& builder, AuthorizationSession* authSession) {
- UserNameIterator nameIter = authSession->getAuthenticatedUserNames();
-
- UserName bestUser;
- if (nameIter.more())
- bestUser = *nameIter;
-
- std::string opdb(nsToDatabase(c.getNS()));
-
- BSONArrayBuilder allUsers(builder.subarrayStart("allUsers"));
- for (; nameIter.more(); nameIter.next()) {
- BSONObjBuilder nextUser(allUsers.subobjStart());
- nextUser.append(AuthorizationManager::USER_NAME_FIELD_NAME, nameIter->getUser());
- nextUser.append(AuthorizationManager::USER_DB_FIELD_NAME, nameIter->getDB());
- nextUser.doneFast();
-
- if (nameIter->getDB() == opdb) {
- bestUser = *nameIter;
- }
- }
- allUsers.doneFast();
-
- builder.append("user", bestUser.getUser().empty() ? "" : bestUser.getFullName());
-}
-
-} // namespace
-
-
void profile(OperationContext* opCtx, NetworkOp op) {
// Initialize with 1kb at start in order to avoid realloc later
BufBuilder profileBufBuilder(1024);
@@ -110,7 +80,7 @@ void profile(OperationContext* opCtx, NetworkOp op) {
}
AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient());
- _appendUserInfo(*CurOp::get(opCtx), b, authSession);
+ OpDebug::appendUserInfo(*CurOp::get(opCtx), b, authSession);
const BSONObj p = b.done();
diff --git a/src/mongo/db/matcher/matcher.h b/src/mongo/db/matcher/matcher.h
index ff7aeaa60dc..8410cd74fb9 100644
--- a/src/mongo/db/matcher/matcher.h
+++ b/src/mongo/db/matcher/matcher.h
@@ -73,6 +73,10 @@ public:
return _expression.get();
}
+ const MatchExpression* getMatchExpression() const {
+ return _expression.get();
+ }
+
private:
BSONObj _pattern;
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 2bd2bfc3865..29d07007956 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -137,10 +137,10 @@ void finishCurOp(OperationContext* opCtx, CurOp* curOp) {
// Mark the op as complete, and log it if appropriate. Returns a boolean indicating whether
// this op should be sampled for profiling.
- const bool shouldSample =
+ const bool shouldProfile =
curOp->completeAndLogOperation(opCtx, MONGO_LOGV2_DEFAULT_COMPONENT);
- if (curOp->shouldDBProfile(shouldSample)) {
+ if (shouldProfile) {
// Stash the current transaction so that writes to the profile collection are not
// done as part of the transaction.
TransactionParticipant::SideTransactionBlock sideTxn(opCtx);
@@ -681,7 +681,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
CollectionQueryInfo::get(coll).notifyOfQuery(opCtx, coll, summary);
}
- if (curOp.shouldDBProfile()) {
+ if (curOp.shouldDBProfile(opCtx)) {
curOp.debug().execStats = exec->getStats();
}
@@ -920,7 +920,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
}
curOp.debug().setPlanSummaryMetrics(summary);
- if (curOp.shouldDBProfile()) {
+ if (curOp.shouldDBProfile(opCtx)) {
curOp.debug().execStats = exec->getStats();
}
diff --git a/src/mongo/db/profile_filter.cpp b/src/mongo/db/profile_filter.cpp
new file mode 100644
index 00000000000..30ae469e704
--- /dev/null
+++ b/src/mongo/db/profile_filter.cpp
@@ -0,0 +1,44 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/profile_filter.h"
+
+namespace mongo {
+
+static std::shared_ptr<ProfileFilter> defaultProfileFilter;
+
+std::shared_ptr<ProfileFilter> ProfileFilter::getDefault() {
+ return defaultProfileFilter;
+}
+
+void ProfileFilter::setDefault(std::shared_ptr<ProfileFilter> filter) {
+ defaultProfileFilter = std::move(filter);
+}
+
+}  // namespace mongo
diff --git a/src/mongo/db/profile_filter.h b/src/mongo/db/profile_filter.h
new file mode 100644
index 00000000000..1702f24a734
--- /dev/null
+++ b/src/mongo/db/profile_filter.h
@@ -0,0 +1,62 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+
+#pragma once
+
+#include "mongo/bson/bsonobj.h"
+
+namespace mongo {
+
+class CurOp;
+class OpDebug;
+class OperationContext;
+
+class ProfileFilter {
+public:
+ struct Args {
+ Args(OperationContext* opCtx, const OpDebug& op, const CurOp& curop)
+ : opCtx(opCtx), op(op), curop(curop) {}
+
+ OperationContext* opCtx;
+ const OpDebug& op;
+ const CurOp& curop;
+ };
+
+ virtual bool matches(OperationContext*, const OpDebug&, const CurOp&) const = 0;
+ virtual BSONObj serialize() const = 0;
+ virtual ~ProfileFilter() = default;
+
+ static std::shared_ptr<ProfileFilter> getDefault();
+
+ // Not thread-safe: should only be called during initialization.
+ static void setDefault(std::shared_ptr<ProfileFilter>);
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/profile_filter_impl.cpp b/src/mongo/db/profile_filter_impl.cpp
new file mode 100644
index 00000000000..92c44a916a4
--- /dev/null
+++ b/src/mongo/db/profile_filter_impl.cpp
@@ -0,0 +1,95 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/logv2/log.h"
+
+#include "mongo/db/pipeline/expression_context.h"
+#include "mongo/db/profile_filter_impl.h"
+
+namespace mongo {
+
+namespace {
+boost::intrusive_ptr<ExpressionContext> makeExpCtx() {
+ // The namespace string can't be database-specific, because the profile filter applies to all
+ // databases in the process.
+ // Similar to collection validators, it's not safe to share an opCtx in a stored query.
+ return make_intrusive<ExpressionContext>(nullptr, nullptr, NamespaceString{});
+}
+} // namespace
+
+ProfileFilterImpl::ProfileFilterImpl(BSONObj expr) : _matcher(expr.getOwned(), makeExpCtx()) {
+ DepsTracker deps;
+ _matcher.getMatchExpression()->addDependencies(&deps);
+ uassert(4910201,
+ "Profile filter is not allowed to depend on metadata",
+ !deps.getNeedsAnyMetadata());
+
+ // Reduce the DepsTracker down to a set of top-level fields.
+ StringSet toplevelFields;
+ for (auto&& field : deps.fields) {
+ toplevelFields.emplace(FieldPath(std::move(field)).front());
+ }
+
+ // Remember a list of functions we'll call whenever we need to build BSON from CurOp.
+ _makeBSON = OpDebug::appendStaged(toplevelFields, deps.needWholeDocument);
+}
+
+bool ProfileFilterImpl::matches(OperationContext* opCtx,
+ const OpDebug& op,
+ const CurOp& curop) const {
+ try {
+ return _matcher.matches(_makeBSON({opCtx, op, curop}));
+ } catch (const DBException& e) {
+ LOGV2_DEBUG(4910202, 5, "Profile filter threw an exception", "exception"_attr = e);
+ return false;
+ }
+}
+
+// PathlessOperatorMap is required for parsing a MatchExpression.
+MONGO_INITIALIZER_GENERAL(ProfileFilterDefault,
+ ("PathlessOperatorMap", "MatchExpressionParser"),
+ ())
+(InitializerContext*) {
+ try {
+ if (auto expr = serverGlobalParams.defaultProfileFilter) {
+ ProfileFilter::setDefault(std::make_shared<ProfileFilterImpl>(*expr));
+ }
+ return Status::OK();
+ } catch (AssertionException& e) {
+ // Add more context to the error
+ uasserted(ErrorCodes::FailedToParse,
+ str::stream() << "Failed to parse option operationProfiling.filter: "
+ << e.reason());
+ }
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/profile_filter_impl.h b/src/mongo/db/profile_filter_impl.h
new file mode 100644
index 00000000000..b37108af155
--- /dev/null
+++ b/src/mongo/db/profile_filter_impl.h
@@ -0,0 +1,52 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/profile_filter.h"
+
+#include "mongo/db/curop.h"
+#include "mongo/db/matcher/matcher.h"
+
+namespace mongo {
+
+class ProfileFilterImpl final : public ProfileFilter {
+public:
+ ProfileFilterImpl(BSONObj expr);
+ bool matches(OperationContext* opCtx, const OpDebug& op, const CurOp& curop) const override;
+ BSONObj serialize() const override {
+ return _matcher.getMatchExpression()->serialize();
+ }
+
+private:
+ Matcher _matcher;
+ std::function<BSONObj(ProfileFilter::Args)> _makeBSON;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index d16460eb3b9..38c82127276 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -140,7 +140,7 @@ void endQueryOp(OperationContext* opCtx,
CollectionQueryInfo::get(collection).notifyOfQuery(opCtx, collection, summaryStats);
}
- if (curOp->shouldDBProfile()) {
+ if (curOp->shouldDBProfile(opCtx)) {
curOp->debug().execStats = exec.getStats();
}
}
@@ -488,7 +488,7 @@ Message getMore(OperationContext* opCtx,
// need 'execStats' and we do not want to generate the stats eagerly for all operations due to
// cost.
if (cursorPin->getExecutor()->lockPolicy() != PlanExecutor::LockPolicy::kLocksInternally &&
- curOp.shouldDBProfile()) {
+ curOp.shouldDBProfile(opCtx)) {
curOp.debug().execStats = exec->getStats();
}
diff --git a/src/mongo/db/server_options.h b/src/mongo/db/server_options.h
index 057133d2124..dcdc09967bb 100644
--- a/src/mongo/db/server_options.h
+++ b/src/mongo/db/server_options.h
@@ -71,7 +71,8 @@ struct ServerGlobalParams {
bool objcheck = true; // --objcheck
- int defaultProfile = 0; // --profile
+ int defaultProfile = 0; // --profile
+ boost::optional<BSONObj> defaultProfileFilter;
int slowMS = 100; // --time in ms that is "slow"
double sampleRate = 1.0; // --samplerate rate at which to sample slow queries
int defaultLocalThresholdMillis = 15; // --localThreshold in ms to consider a node local
diff --git a/src/mongo/db/server_options_helpers.cpp b/src/mongo/db/server_options_helpers.cpp
index 5a6ea0d7917..4dcffc0357f 100644
--- a/src/mongo/db/server_options_helpers.cpp
+++ b/src/mongo/db/server_options_helpers.cpp
@@ -43,6 +43,7 @@
#include <iostream>
#include "mongo/base/status.h"
+#include "mongo/bson/json.h"
#include "mongo/bson/util/builder.h"
#include "mongo/config.h"
#include "mongo/db/server_options.h"
@@ -447,6 +448,18 @@ Status storeBaseOptions(const moe::Environment& params) {
serverGlobalParams.sampleRate = params["operationProfiling.slowOpSampleRate"].as<double>();
}
+ if (params.count("operationProfiling.filter")) {
+ try {
+ serverGlobalParams.defaultProfileFilter =
+ fromjson(params["operationProfiling.filter"].as<std::string>()).getOwned();
+ } catch (AssertionException& e) {
+ // Rethrow with added context identifying the option that failed to parse.
+ uasserted(ErrorCodes::FailedToParse,
+ str::stream()
+ << "Failed to parse option operationProfiling.filter: " << e.reason());
+ }
+ }
+
return Status::OK();
}
diff --git a/src/mongo/db/server_options_nongeneral.idl b/src/mongo/db/server_options_nongeneral.idl
index 72d4108aa89..ffbd0432cda 100644
--- a/src/mongo/db/server_options_nongeneral.idl
+++ b/src/mongo/db/server_options_nongeneral.idl
@@ -77,3 +77,7 @@ configs:
short_name: slowOpSampleRate
arg_vartype: Double
default: 1.0
+ 'operationProfiling.filter':
+ description: 'Query predicate to control which operations are logged and profiled'
+ short_name: profileFilter
+ arg_vartype: String
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index b4ff930e95c..6e15fb29bff 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -1794,8 +1794,8 @@ Future<DbResponse> ServiceEntryPointCommon::handleRequest(OperationContext* opCt
}
// Mark the op as complete, and log it if appropriate. Returns a boolean indicating whether
- // this op should be sampled for profiling.
- const bool shouldSample = currentOp.completeAndLogOperation(
+ // this op should be written to the profiler.
+ const bool shouldProfile = currentOp.completeAndLogOperation(
opCtx, MONGO_LOGV2_DEFAULT_COMPONENT, dbresponse.response.size(), slowMsOverride, forceLog);
Top::get(opCtx->getServiceContext())
@@ -1804,7 +1804,7 @@ Future<DbResponse> ServiceEntryPointCommon::handleRequest(OperationContext* opCt
durationCount<Microseconds>(currentOp.elapsedTimeExcludingPauses()),
currentOp.getReadWriteType());
- if (currentOp.shouldDBProfile(shouldSample)) {
+ if (shouldProfile) {
// Performance profiling is on
if (opCtx->lockState()->isReadLocked()) {
LOGV2_DEBUG(21970, 1, "Note: not profiling because of recursive read lock");
diff --git a/src/mongo/s/commands/cluster_profile_cmd.cpp b/src/mongo/s/commands/cluster_profile_cmd.cpp
index 42524e6a3ca..299e4af3817 100644
--- a/src/mongo/s/commands/cluster_profile_cmd.cpp
+++ b/src/mongo/s/commands/cluster_profile_cmd.cpp
@@ -29,8 +29,11 @@
#include "mongo/platform/basic.h"
+#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/profile_common.h"
+#include "mongo/db/commands/profile_gen.h"
+#include "mongo/db/profile_filter_impl.h"
namespace mongo {
namespace {
@@ -39,26 +42,41 @@ class ProfileCmd : public ProfileCmdBase {
public:
ProfileCmd() = default;
- // On mongoS, the 'profile' command is only used to change the global 'slowms' and 'sampleRate'
- // parameters. Since it does not apply to any specific database but rather the mongoS as a
- // whole, we require that it be run on the 'admin' database.
+ // Although mongoS does not have a system.profile collection, the profile command can change the
+ // per-database profile filter, which applies to slow-query log lines just like on mongoD.
bool adminOnly() const final {
- return true;
+ return false;
}
protected:
- int _applyProfilingLevel(OperationContext* opCtx,
- const std::string& dbName,
- int profilingLevel) const final {
- // Because mongoS does not allow profiling, but only uses the 'profile' command to change
- // 'slowms' and 'sampleRate' for logging purposes, we do not apply the profiling level here.
- // Instead, we validate that the user is not attempting to set a "real" profiling level.
+ CollectionCatalog::ProfileSettings _applyProfilingLevel(
+ OperationContext* opCtx,
+ const std::string& dbName,
+ const ProfileCmdRequest& request) const final {
+ const auto profilingLevel = request.getCommandParameter();
+
+ // The only valid profiling level on mongoS is 0: mongoS has no system.profile
+ // collection in which to record profiling data, since it has no collections
+ // at all.
uassert(ErrorCodes::BadValue,
"Profiling is not permitted on mongoS: the 'profile' field should be 0 to change "
- "'slowms' and 'sampleRate' settings for logging, or -1 to view current values",
+ "'slowms', 'sampleRate', or 'filter' settings for logging, or -1 to view current "
+ "values",
profilingLevel == -1 || profilingLevel == 0);
- return 0;
+ const auto oldSettings = CollectionCatalog::get(opCtx).getDatabaseProfileSettings(dbName);
+
+ if (auto filterOrUnset = request.getFilter()) {
+ auto newSettings = oldSettings;
+ if (auto filter = filterOrUnset->obj) {
+ newSettings.filter = std::make_shared<ProfileFilterImpl>(*filter);
+ } else {
+ newSettings.filter = nullptr;
+ }
+ CollectionCatalog::get(opCtx).setDatabaseProfileSettings(dbName, newSettings);
+ }
+
+ return oldSettings;
}
} profileCmd;