author    Tess Avitabile <tess.avitabile@mongodb.com>    2019-01-18 13:35:37 -0500
committer Tess Avitabile <tess.avitabile@mongodb.com>    2019-01-22 12:55:51 -0500
commit    0d47ebf63cabee3c2ef84ab83dfefe597ca626ec (patch)
tree      868cd3ed0baa46773cee57ff3e63fad91cf34368
parent    cb25e79a848592d55677afca854561e1bcebda39 (diff)
download  mongo-0d47ebf63cabee3c2ef84ab83dfefe597ca626ec.tar.gz
SERVER-38998 Create serverStatus metrics for writeConcern
(cherry picked from commit 9de1d61550232f370afa1b4f98bfe6aa7e2cf60f)
-rw-r--r--  jstests/noPassthrough/server_write_concern_metrics.js | 212
-rw-r--r--  src/mongo/db/SConscript | 3
-rw-r--r--  src/mongo/db/commands/SConscript | 1
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 2
-rw-r--r--  src/mongo/db/ops/SConscript | 1
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp | 9
-rw-r--r--  src/mongo/db/repl/SConscript | 1
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 28
-rw-r--r--  src/mongo/db/service_entry_point_common.cpp | 2
-rw-r--r--  src/mongo/db/stats/SConscript | 16
-rw-r--r--  src/mongo/db/stats/read_concern_stats.idl (renamed from src/mongo/db/read_concern_stats.idl) | 0
-rw-r--r--  src/mongo/db/stats/server_read_concern_metrics.cpp (renamed from src/mongo/db/server_read_concern_metrics.cpp) | 3
-rw-r--r--  src/mongo/db/stats/server_read_concern_metrics.h (renamed from src/mongo/db/server_read_concern_metrics.h) | 2
-rw-r--r--  src/mongo/db/stats/server_write_concern_metrics.cpp | 131
-rw-r--r--  src/mongo/db/stats/server_write_concern_metrics.h | 111
-rw-r--r--  src/mongo/db/write_concern_options.cpp | 3
-rw-r--r--  src/mongo/db/write_concern_options.h | 3
17 files changed, 517 insertions(+), 11 deletions(-)
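
For orientation before the diffs: the patch adds an 'opWriteConcernCounters' section to serverStatus with one sub-document per write operation type. A sketch of its shape, read from a mongo shell connected to a primary (field names come from the new test and from ServerWriteConcernMetrics::toBSON() below; the counter values are made up):

    // Returns the new section; counter values here are illustrative only.
    const counters = db.adminCommand({serverStatus: 1}).opWriteConcernCounters;
    printjson(counters);
    // {
    //     "insert": {"wmajority": 1, "wnum": {"1": 3}, "wtag": {"myTag": 2}, "none": 4},
    //     "update": {"wmajority": 0, "wnum": {}, "wtag": {}, "none": 0},
    //     "delete": {"wmajority": 0, "wnum": {}, "wtag": {}, "none": 0}
    // }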
diff --git a/jstests/noPassthrough/server_write_concern_metrics.js b/jstests/noPassthrough/server_write_concern_metrics.js
new file mode 100644
index 00000000000..83ab7e8dd38
--- /dev/null
+++ b/jstests/noPassthrough/server_write_concern_metrics.js
@@ -0,0 +1,212 @@
+// Tests writeConcern metrics in the serverStatus output.
+// @tags: [requires_persistence, requires_replication]
+(function() {
+ "use strict";
+
+ // Verifies that the server status response has the fields that we expect.
+ function verifyServerStatusFields(serverStatusResponse) {
+ assert(serverStatusResponse.hasOwnProperty("opWriteConcernCounters"),
+ "Expected the serverStatus response to have a 'opWriteConcernCounters' field\n" +
+ tojson(serverStatusResponse));
+ assert(
+ serverStatusResponse.opWriteConcernCounters.hasOwnProperty("insert"),
+ "The 'opWriteConcernCounters' field in serverStatus did not have the 'insert' field\n" +
+ tojson(serverStatusResponse.opWriteConcernCounters));
+ assert(
+ serverStatusResponse.opWriteConcernCounters.hasOwnProperty("update"),
+ "The 'opWriteConcernCounters' field in serverStatus did not have the 'update' field\n" +
+ tojson(serverStatusResponse.opWriteConcernCounters));
+ assert(
+ serverStatusResponse.opWriteConcernCounters.hasOwnProperty("delete"),
+ "The 'opWriteConcernCounters' field in serverStatus did not have the 'delete' field\n" +
+ tojson(serverStatusResponse.opWriteConcernCounters));
+ }
+
+ // Verifies that the given path of the server status response is incremented in the way we
+ // expect, and no other changes occurred. This function modifies its inputs.
+ function verifyServerStatusChange(initialStats, newStats, path, expectedIncrement) {
+ // Traverse to the parent of the changed element.
+ let pathComponents = path.split(".");
+ let initialParent = initialStats;
+ let newParent = newStats;
+ for (let i = 0; i < pathComponents.length - 1; i++) {
+ assert(initialParent.hasOwnProperty(pathComponents[i]),
+ "initialStats did not contain component " + i + " of path " + path +
+ ", initialStats: " + tojson(initialStats));
+ initialParent = initialParent[pathComponents[i]];
+
+ assert(newParent.hasOwnProperty(pathComponents[i]),
+ "newStats did not contain component " + i + " of path " + path + ", newStats: " +
+ tojson(newStats));
+ newParent = newParent[pathComponents[i]];
+ }
+
+ // Test the expected increment of the changed element. The element may not exist in the
+ // initial stats, in which case it is treated as 0.
+ let lastPathComponent = pathComponents[pathComponents.length - 1];
+ let initialValue = 0;
+ if (initialParent.hasOwnProperty(lastPathComponent)) {
+ initialValue = initialParent[lastPathComponent];
+ }
+ assert(newParent.hasOwnProperty(lastPathComponent),
+ "newStats did not contain last component of path " + path + ", newStats: " +
+ tojson(newStats));
+ assert.eq(initialValue + expectedIncrement,
+ newParent[lastPathComponent],
+ "expected " + path + " to increase by " + expectedIncrement + ", initialStats: " +
+ tojson(initialStats) + ", newStats: " + tojson(newStats));
+
+ // Delete the changed element.
+ delete initialParent[lastPathComponent];
+ delete newParent[lastPathComponent];
+
+ // The stats objects should be equal without the changed element.
+ assert.eq(0,
+ bsonWoCompare(initialStats, newStats),
+ "expected initialStats and newStats to be equal after removing " + path +
+ ", initialStats: " + tojson(initialStats) + ", newStats: " +
+ tojson(newStats));
+ }
+
+ const rst = new ReplSetTest({nodes: 2});
+ rst.startSet();
+ let config = rst.getReplSetConfig();
+ config.members[1].priority = 0;
+ config.members[0].tags = {dc_va: "rack1"};
+ config.settings = {getLastErrorModes: {myTag: {dc_va: 1}}};
+ rst.initiate(config);
+ const primary = rst.getPrimary();
+ const secondary = rst.getSecondary();
+ const dbName = "test";
+ const collName = "server_write_concern_metrics";
+ const testDB = primary.getDB(dbName);
+ const testColl = testDB[collName];
+
+ function resetCollection() {
+ testColl.drop();
+ assert.commandWorked(testDB.createCollection(collName));
+ }
+
+ function testWriteConcernMetrics(cmd, opName, inc) {
+ // Run command with no writeConcern.
+ resetCollection();
+ let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(testDB.runCommand(cmd));
+ let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".none",
+ inc);
+
+ // Run command with writeConcern {j: true}. This should be counted as having no 'w' value.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {j: true}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".none",
+ inc);
+
+ // Run command with writeConcern {w: "majority"}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(testDB.runCommand(
+ Object.assign(Object.assign({}, cmd), {writeConcern: {w: "majority"}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wmajority",
+ inc);
+
+ // Run command with writeConcern {w: 0}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 0}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wnum.0",
+ inc);
+
+ // Run command with writeConcern {w: 1}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 1}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wnum.1",
+ inc);
+
+ // Run command with writeConcern {w: 2}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 2}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wnum.2",
+ inc);
+
+ // Run command with writeConcern {w: "myTag"}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: "myTag"}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wtag.myTag",
+ inc);
+
+ // writeConcern metrics are not tracked on the secondary.
+ resetCollection();
+ serverStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(testDB.runCommand(cmd));
+ newStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
+ assert.eq(
+ 0,
+ bsonWoCompare(serverStatus.opWriteConcernCounters, newStatus.opWriteConcernCounters),
+ "expected no change in secondary writeConcern metrics, before: " +
+ tojson(serverStatus) + ", after: " + tojson(newStatus));
+ }
+
+ // Test single insert/update/delete.
+ testWriteConcernMetrics({insert: collName, documents: [{}]}, "insert", 1);
+ testWriteConcernMetrics({update: collName, updates: [{q: {}, u: {$set: {a: 1}}}]}, "update", 1);
+ testWriteConcernMetrics({delete: collName, deletes: [{q: {}, limit: 1}]}, "delete", 1);
+
+ // Test batch writes.
+ testWriteConcernMetrics({insert: collName, documents: [{}, {}]}, "insert", 2);
+ testWriteConcernMetrics(
+ {update: collName, updates: [{q: {}, u: {$set: {a: 1}}}, {q: {}, u: {$set: {a: 1}}}]},
+ "update",
+ 2);
+ testWriteConcernMetrics(
+ {delete: collName, deletes: [{q: {}, limit: 1}, {q: {}, limit: 1}]}, "delete", 2);
+
+ // Test applyOps.
+ testWriteConcernMetrics(
+ {applyOps: [{op: "i", ns: testColl.getFullName(), o: {_id: 0}}]}, "insert", 1);
+ testWriteConcernMetrics(
+ {applyOps: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 1}}}]},
+ "update",
+ 1);
+ testWriteConcernMetrics(
+ {applyOps: [{op: "d", ns: testColl.getFullName(), o: {_id: 0}}]}, "delete", 1);
+
+ rst.stopSet();
+}());
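
The path strings asserted by this test ('insert.none', 'insert.wnum.0', 'insert.wtag.myTag', and so on) follow the bucketing implemented in ServerWriteConcernMetrics::recordWriteConcern() later in this patch. A stand-alone shell sketch of that mapping (the helper name is made up and not part of the change):

    // Hypothetical helper: which opWriteConcernCounters path a given writeConcern increments.
    function counterPathFor(opName, wc) {
        if (!wc || !wc.hasOwnProperty("w"))
            return opName + ".none";              // no 'w' value, e.g. {} or {j: true}
        if (wc.w === "majority")
            return opName + ".wmajority";
        if (typeof wc.w === "string")
            return opName + ".wtag." + wc.w;      // replica set tag, e.g. "myTag"
        return opName + ".wnum." + wc.w;          // numeric w: 0, 1, 2, ...
    }

    assert.eq("insert.none", counterPathFor("insert", {j: true}));
    assert.eq("insert.wnum.0", counterPathFor("insert", {w: 0}));
    assert.eq("update.wtag.myTag", counterPathFor("update", {w: "myTag"}));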
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 27f6d1a47ba..e2c297186d5 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -621,14 +621,12 @@ env.Library(
source=[
'catalog_raii.cpp',
'retryable_writes_stats.cpp',
- 'server_read_concern_metrics.cpp',
'server_transactions_metrics.cpp',
'session.cpp',
'session_catalog.cpp',
'single_transaction_stats.cpp',
'transaction_history_iterator.cpp',
env.Idlc('session_txn_record.idl')[0],
- env.Idlc('read_concern_stats.idl')[0],
env.Idlc('transactions_stats.idl')[0],
],
LIBDEPS=[
@@ -793,6 +791,7 @@ env.Library(
'$BUILD_DIR/mongo/db/rw_concern_d',
'$BUILD_DIR/mongo/db/s/sharding_api_d',
'$BUILD_DIR/mongo/db/stats/counters',
+ '$BUILD_DIR/mongo/db/stats/server_read_concern_write_concern_metrics',
'$BUILD_DIR/mongo/db/stats/top',
'$BUILD_DIR/mongo/db/storage/storage_engine_lock_file',
'$BUILD_DIR/mongo/db/storage/storage_engine_metadata',
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index d6c6f145113..0d8dac60721 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -258,6 +258,7 @@ env.Library(
'$BUILD_DIR/mongo/db/repair_database',
'$BUILD_DIR/mongo/db/rw_concern_d',
'$BUILD_DIR/mongo/db/stats/counters',
+ '$BUILD_DIR/mongo/db/stats/server_read_concern_write_concern_metrics',
'$BUILD_DIR/mongo/db/storage/storage_engine_common',
'$BUILD_DIR/mongo/db/views/views_mongod',
'core',
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 8e97c42ebbd..ce03670ef12 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -49,10 +49,10 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/server_parameters.h"
-#include "mongo/db/server_read_concern_metrics.h"
#include "mongo/db/service_context.h"
#include "mongo/db/session_catalog.h"
#include "mongo/db/stats/counters.h"
+#include "mongo/db/stats/server_read_concern_metrics.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/ops/SConscript b/src/mongo/db/ops/SConscript
index b7428b32608..d87720f3047 100644
--- a/src/mongo/db/ops/SConscript
+++ b/src/mongo/db/ops/SConscript
@@ -17,6 +17,7 @@ env.Library(
'$BUILD_DIR/mongo/db/repl/oplog',
'$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
'$BUILD_DIR/mongo/db/stats/counters',
+ '$BUILD_DIR/mongo/db/stats/server_read_concern_write_concern_metrics',
'$BUILD_DIR/mongo/db/write_ops',
'$BUILD_DIR/mongo/util/fail_point',
],
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index a5c7e24cebf..d4599a280b0 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -69,6 +69,7 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/session_catalog.h"
#include "mongo/db/stats/counters.h"
+#include "mongo/db/stats/server_write_concern_metrics.h"
#include "mongo/db/stats/top.h"
#include "mongo/db/write_concern.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -412,6 +413,8 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
opCtx, collection->getCollection(), batch.begin(), batch.end(), fromMigrate);
lastOpFixer->finishedOpSuccessfully();
globalOpCounters.gotInserts(batch.size());
+ ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForInserts(
+ opCtx->getWriteConcern(), batch.size());
SingleWriteResult result;
result.setN(1);
@@ -436,6 +439,8 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
// for batches that failed all-at-once inserting.
for (auto it = batch.begin(); it != batch.end(); ++it) {
globalOpCounters.gotInsert();
+ ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForInsert(
+ opCtx->getWriteConcern());
try {
writeConflictRetry(opCtx, "insert", wholeOp.getNamespace().ns(), [&] {
try {
@@ -573,6 +578,8 @@ WriteResult performInserts(OperationContext* opCtx,
if (canContinue && !fixedDoc.isOK()) {
globalOpCounters.gotInsert();
+ ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForInsert(
+ opCtx->getWriteConcern());
try {
uassertStatusOK(fixedDoc.getStatus());
MONGO_UNREACHABLE;
@@ -600,6 +607,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
!opCtx->getTxnNumber() || !op.getMulti());
globalOpCounters.gotUpdate();
+ ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForUpdate(opCtx->getWriteConcern());
auto& curOp = *CurOp::get(opCtx);
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
@@ -761,6 +769,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
!opCtx->getTxnNumber() || !op.getMulti());
globalOpCounters.gotDelete();
+ ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForDelete(opCtx->getWriteConcern());
auto& curOp = *CurOp::get(opCtx);
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
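
These calls sit next to the existing globalOpCounters updates, and the batched insert path passes batch.size(), so a single insert command carrying several documents bumps its counter once per document. A quick shell check of that behavior (illustrative; assumes a primary or standalone with no concurrent writes, and the collection name 'c' is arbitrary):

    const before = db.adminCommand({serverStatus: 1}).opWriteConcernCounters.insert.wnum["1"] || 0;
    assert.commandWorked(db.runCommand({insert: "c", documents: [{}, {}], writeConcern: {w: 1}}));
    const after = db.adminCommand({serverStatus: 1}).opWriteConcernCounters.insert.wnum["1"];
    assert.eq(before + 2, after);  // two documents -> counter grows by 2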
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 87fbc277dab..6a569f6dfce 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -26,6 +26,7 @@ env.Library(
'$BUILD_DIR/mongo/db/index_d',
'$BUILD_DIR/mongo/db/op_observer',
'$BUILD_DIR/mongo/db/stats/counters',
+ '$BUILD_DIR/mongo/db/stats/server_read_concern_write_concern_metrics',
'$BUILD_DIR/mongo/idl/idl_parser',
],
)
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 9f90ab6e83c..736bc460dec 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -87,6 +87,7 @@
#include "mongo/db/service_context.h"
#include "mongo/db/session_catalog.h"
#include "mongo/db/stats/counters.h"
+#include "mongo/db/stats/server_write_concern_metrics.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/platform/random.h"
@@ -246,6 +247,10 @@ void createIndexForApplyOps(OperationContext* opCtx,
OpCounters* opCounters = opCtx->writesAreReplicated() ? &globalOpCounters : &replOpCounters;
opCounters->gotInsert();
+ if (opCtx->writesAreReplicated()) {
+ ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForInsert(
+ opCtx->getWriteConcern());
+ }
const IndexBuilder::IndexConstraints constraints =
ReplicationCoordinator::get(opCtx)->shouldRelaxIndexConstraints(opCtx, indexNss)
@@ -1065,10 +1070,9 @@ Status applyOperation_inlock(OperationContext* opCtx,
// Choose opCounters based on running on standalone/primary or secondary by checking
// whether writes are replicated. Atomic applyOps command is an exception, which runs
// on primary/standalone but disables write replication.
- OpCounters* opCounters =
- (mode == repl::OplogApplication::Mode::kApplyOpsCmd || opCtx->writesAreReplicated())
- ? &globalOpCounters
- : &replOpCounters;
+ const bool shouldUseGlobalOpCounters =
+ mode == repl::OplogApplication::Mode::kApplyOpsCmd || opCtx->writesAreReplicated();
+ OpCounters* opCounters = shouldUseGlobalOpCounters ? &globalOpCounters : &replOpCounters;
std::array<StringData, 8> names = {"ts", "t", "o", "ui", "ns", "op", "b", "o2"};
std::array<BSONElement, 8> fields;
@@ -1289,6 +1293,10 @@ Status applyOperation_inlock(OperationContext* opCtx,
wuow.commit();
for (auto entry : insertObjs) {
opCounters->gotInsert();
+ if (shouldUseGlobalOpCounters) {
+ ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForInsert(
+ opCtx->getWriteConcern());
+ }
if (incrementOpsAppliedStats) {
incrementOpsAppliedStats();
}
@@ -1296,6 +1304,10 @@ Status applyOperation_inlock(OperationContext* opCtx,
} else {
// Single insert.
opCounters->gotInsert();
+ if (shouldUseGlobalOpCounters) {
+ ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForInsert(
+ opCtx->getWriteConcern());
+ }
// No _id.
// This indicates an issue with the upstream server:
@@ -1395,6 +1407,10 @@ Status applyOperation_inlock(OperationContext* opCtx,
}
} else if (*opType == 'u') {
opCounters->gotUpdate();
+ if (shouldUseGlobalOpCounters) {
+ ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForUpdate(
+ opCtx->getWriteConcern());
+ }
auto idField = o2["_id"];
uassert(ErrorCodes::NoSuchKey,
@@ -1480,6 +1496,10 @@ Status applyOperation_inlock(OperationContext* opCtx,
}
} else if (*opType == 'd') {
opCounters->gotDelete();
+ if (shouldUseGlobalOpCounters) {
+ ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForDelete(
+ opCtx->getWriteConcern());
+ }
auto idField = o["_id"];
uassert(ErrorCodes::NoSuchKey,
diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp
index e6b2df157bb..c0e0541ae17 100644
--- a/src/mongo/db/service_entry_point_common.cpp
+++ b/src/mongo/db/service_entry_point_common.cpp
@@ -68,10 +68,10 @@
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/db/s/sharding_state.h"
-#include "mongo/db/server_read_concern_metrics.h"
#include "mongo/db/service_entry_point_common.h"
#include "mongo/db/session_catalog.h"
#include "mongo/db/stats/counters.h"
+#include "mongo/db/stats/server_read_concern_metrics.h"
#include "mongo/db/stats/top.h"
#include "mongo/rpc/factory.h"
#include "mongo/rpc/get_status_from_command_result.h"
diff --git a/src/mongo/db/stats/SConscript b/src/mongo/db/stats/SConscript
index e8bce249241..6d2c333bf7e 100644
--- a/src/mongo/db/stats/SConscript
+++ b/src/mongo/db/stats/SConscript
@@ -68,6 +68,22 @@ env.Library(
)
env.Library(
+ target='server_read_concern_write_concern_metrics',
+ source=[
+ 'server_read_concern_metrics.cpp',
+ 'server_write_concern_metrics.cpp',
+ env.Idlc('read_concern_stats.idl')[0],
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
+ '$BUILD_DIR/mongo/db/commands/server_status',
+ '$BUILD_DIR/mongo/db/repl/read_concern_args',
+ '$BUILD_DIR/mongo/db/write_concern_options',
+ '$BUILD_DIR/mongo/idl/idl_parser',
+ ],
+)
+
+env.Library(
target='fill_locker_info',
source=[
'fill_locker_info.cpp',
diff --git a/src/mongo/db/read_concern_stats.idl b/src/mongo/db/stats/read_concern_stats.idl
index 60636cf0c08..60636cf0c08 100644
--- a/src/mongo/db/read_concern_stats.idl
+++ b/src/mongo/db/stats/read_concern_stats.idl
diff --git a/src/mongo/db/server_read_concern_metrics.cpp b/src/mongo/db/stats/server_read_concern_metrics.cpp
index 76b2a987442..97e77079140 100644
--- a/src/mongo/db/server_read_concern_metrics.cpp
+++ b/src/mongo/db/stats/server_read_concern_metrics.cpp
@@ -29,12 +29,11 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/server_read_concern_metrics.h"
+#include "mongo/db/stats/server_read_concern_metrics.h"
#include "mongo/db/commands/server_status.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/read_concern_stats_gen.h"
#include "mongo/db/service_context.h"
namespace mongo {
diff --git a/src/mongo/db/server_read_concern_metrics.h b/src/mongo/db/stats/server_read_concern_metrics.h
index 53644e08d78..a79a7cea49e 100644
--- a/src/mongo/db/server_read_concern_metrics.h
+++ b/src/mongo/db/stats/server_read_concern_metrics.h
@@ -31,9 +31,9 @@
#pragma once
#include "mongo/db/operation_context.h"
-#include "mongo/db/read_concern_stats_gen.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/stats/read_concern_stats_gen.h"
namespace mongo {
diff --git a/src/mongo/db/stats/server_write_concern_metrics.cpp b/src/mongo/db/stats/server_write_concern_metrics.cpp
new file mode 100644
index 00000000000..3bcf1fe99af
--- /dev/null
+++ b/src/mongo/db/stats/server_write_concern_metrics.cpp
@@ -0,0 +1,131 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/stats/server_write_concern_metrics.h"
+
+#include "mongo/db/commands/server_status.h"
+#include "mongo/db/jsobj.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
+
+namespace mongo {
+namespace {
+const auto ServerWriteConcernMetricsDecoration =
+ ServiceContext::declareDecoration<ServerWriteConcernMetrics>();
+} // namespace
+
+ServerWriteConcernMetrics* ServerWriteConcernMetrics::get(ServiceContext* service) {
+ return &ServerWriteConcernMetricsDecoration(service);
+}
+
+ServerWriteConcernMetrics* ServerWriteConcernMetrics::get(OperationContext* opCtx) {
+ return get(opCtx->getServiceContext());
+}
+
+BSONObj ServerWriteConcernMetrics::toBSON() const {
+ stdx::lock_guard<stdx::mutex> lg(_mutex);
+
+ BSONObjBuilder builder;
+
+ BSONObjBuilder insertBuilder(builder.subobjStart("insert"));
+ _insertMetrics.toBSON(&insertBuilder);
+ insertBuilder.done();
+
+ BSONObjBuilder updateBuilder(builder.subobjStart("update"));
+ _updateMetrics.toBSON(&updateBuilder);
+ updateBuilder.done();
+
+ BSONObjBuilder deleteBuilder(builder.subobjStart("delete"));
+ _deleteMetrics.toBSON(&deleteBuilder);
+ deleteBuilder.done();
+
+ return builder.obj();
+}
+
+void ServerWriteConcernMetrics::WriteConcernMetricsForOperationType::recordWriteConcern(
+ const WriteConcernOptions& writeConcernOptions, size_t numOps) {
+ if (writeConcernOptions.usedDefaultW) {
+ noWCount += numOps;
+ return;
+ }
+
+ if (!writeConcernOptions.wMode.empty()) {
+ if (writeConcernOptions.wMode == WriteConcernOptions::kMajority) {
+ wMajorityCount += numOps;
+ return;
+ }
+
+ wTagCounts[writeConcernOptions.wMode] += numOps;
+ return;
+ }
+
+ wNumCounts[writeConcernOptions.wNumNodes] += numOps;
+}
+
+void ServerWriteConcernMetrics::WriteConcernMetricsForOperationType::toBSON(
+ BSONObjBuilder* builder) const {
+ builder->append("wmajority", wMajorityCount);
+
+ BSONObjBuilder wNumBuilder(builder->subobjStart("wnum"));
+ for (auto const& pair : wNumCounts) {
+ wNumBuilder.append(std::to_string(pair.first), pair.second);
+ }
+ wNumBuilder.done();
+
+ BSONObjBuilder wTagBuilder(builder->subobjStart("wtag"));
+ for (auto const& pair : wTagCounts) {
+ wTagBuilder.append(pair.first, pair.second);
+ }
+ wTagBuilder.done();
+
+ builder->append("none", noWCount);
+}
+
+namespace {
+class OpWriteConcernCountersSSS : public ServerStatusSection {
+public:
+ OpWriteConcernCountersSSS() : ServerStatusSection("opWriteConcernCounters") {}
+
+ ~OpWriteConcernCountersSSS() override = default;
+
+ bool includeByDefault() const override {
+ return true;
+ }
+
+ BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const override {
+ return ServerWriteConcernMetrics::get(opCtx)->toBSON();
+ }
+
+} opWriteConcernCountersSSS;
+} // namespace
+
+} // namespace mongo
diff --git a/src/mongo/db/stats/server_write_concern_metrics.h b/src/mongo/db/stats/server_write_concern_metrics.h
new file mode 100644
index 00000000000..355a63bff90
--- /dev/null
+++ b/src/mongo/db/stats/server_write_concern_metrics.h
@@ -0,0 +1,111 @@
+
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/write_concern_options.h"
+
+namespace mongo {
+
+/**
+ * Container for server-wide statistics on writeConcern levels used by operations.
+ */
+class ServerWriteConcernMetrics {
+ MONGO_DISALLOW_COPYING(ServerWriteConcernMetrics);
+
+public:
+ ServerWriteConcernMetrics() = default;
+
+ static ServerWriteConcernMetrics* get(ServiceContext* service);
+ static ServerWriteConcernMetrics* get(OperationContext* opCtx);
+
+ /**
+ * Updates the insert metrics 'numInserts' times according to the 'w' value of
+ * 'writeConcernOptions'.
+ */
+ void recordWriteConcernForInserts(const WriteConcernOptions& writeConcernOptions,
+ size_t numInserts) {
+ _insertMetrics.recordWriteConcern(writeConcernOptions, numInserts);
+ }
+
+ /**
+ * Updates the insert metrics according to the 'w' value of 'writeConcernOptions'.
+ */
+ void recordWriteConcernForInsert(const WriteConcernOptions& writeConcernOptions) {
+ recordWriteConcernForInserts(writeConcernOptions, 1);
+ }
+
+ /**
+ * Updates the update metrics according to the 'w' value of 'writeConcernOptions'.
+ */
+ void recordWriteConcernForUpdate(const WriteConcernOptions& writeConcernOptions) {
+ _updateMetrics.recordWriteConcern(writeConcernOptions);
+ }
+
+ /**
+ * Updates the delete metrics according to the 'w' value of 'writeConcernOptions'.
+ */
+ void recordWriteConcernForDelete(const WriteConcernOptions& writeConcernOptions) {
+ _deleteMetrics.recordWriteConcern(writeConcernOptions);
+ }
+
+ BSONObj toBSON() const;
+
+private:
+ struct WriteConcernMetricsForOperationType {
+ /**
+ * Updates counter for the 'w' value of 'writeConcernOptions'.
+ */
+ void recordWriteConcern(const WriteConcernOptions& writeConcernOptions, size_t numOps = 1);
+
+ void toBSON(BSONObjBuilder* builder) const;
+
+ // Count of operations with writeConcern w:"majority".
+ long long wMajorityCount = 0;
+
+ // Count of operations without a writeConcern "w" value.
+ long long noWCount = 0;
+
+ // Counts of operations with writeConcern w:<num>.
+ std::map<int, long long> wNumCounts;
+
+ // Counts of operations with writeConcern w:"tag".
+ StringMap<long long> wTagCounts;
+ };
+
+ mutable stdx::mutex _mutex;
+ WriteConcernMetricsForOperationType _insertMetrics;
+ WriteConcernMetricsForOperationType _updateMetrics;
+ WriteConcernMetricsForOperationType _deleteMetrics;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/write_concern_options.cpp b/src/mongo/db/write_concern_options.cpp
index 060e50e84ac..9a8d7200c55 100644
--- a/src/mongo/db/write_concern_options.cpp
+++ b/src/mongo/db/write_concern_options.cpp
@@ -147,8 +147,10 @@ Status WriteConcernOptions::parse(const BSONObj& obj) {
if (wEl.isNumber()) {
wNumNodes = wEl.numberInt();
+ usedDefaultW = false;
} else if (wEl.type() == String) {
wMode = wEl.valuestrsafe();
+ usedDefaultW = false;
} else if (wEl.eoo() || wEl.type() == jstNULL || wEl.type() == Undefined) {
wNumNodes = 1;
} else {
@@ -162,6 +164,7 @@ StatusWith<WriteConcernOptions> WriteConcernOptions::extractWCFromCommand(
const BSONObj& cmdObj, const WriteConcernOptions& defaultWC) {
WriteConcernOptions writeConcern = defaultWC;
writeConcern.usedDefault = true;
+ writeConcern.usedDefaultW = true;
if (writeConcern.wNumNodes == 0 && writeConcern.wMode.empty()) {
writeConcern.wNumNodes = 1;
}
diff --git a/src/mongo/db/write_concern_options.h b/src/mongo/db/write_concern_options.h
index 80d85d305dc..f4b7a1278cb 100644
--- a/src/mongo/db/write_concern_options.h
+++ b/src/mongo/db/write_concern_options.h
@@ -111,6 +111,9 @@ public:
// True if the default write concern was used.
bool usedDefault = false;
+
+ // True if the default 'w' value of w:1 was used.
+ bool usedDefaultW = false;
};
} // namespace mongo
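
The new 'usedDefaultW' flag is intentionally separate from the existing 'usedDefault': extractWCFromCommand() starts with both set to true, but parse() only clears 'usedDefaultW' when the client actually supplied a 'w' value, so a writeConcern like {j: true} still counts as having no 'w'. That is exactly what the test above expects when it files {j: true} under the 'none' bucket. Illustration (collection name arbitrary, run against a primary):

    // Counted under opWriteConcernCounters.insert.none: no 'w' field was sent.
    assert.commandWorked(db.runCommand({insert: "c", documents: [{}]}));
    assert.commandWorked(db.runCommand({insert: "c", documents: [{}], writeConcern: {j: true}}));
    // Counted under opWriteConcernCounters.insert.wnum.1: an explicit 'w' was sent.
    assert.commandWorked(db.runCommand({insert: "c", documents: [{}], writeConcern: {w: 1}}));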