 jstests/noPassthrough/libs/user_write_blocking.js       |  46
 jstests/noPassthrough/set_user_write_block_mode.js      | 206
 jstests/noPassthrough/user_write_blocking_ttl_index.js  |   7
 jstests/sharding/set_user_write_block_mode.js           |   2
 src/mongo/base/error_codes.yml                          |   2
 src/mongo/db/SConscript                                 |   1
 src/mongo/db/index_builds_coordinator_mongod.cpp        |  12
 src/mongo/db/s/SConscript                               |   3
 src/mongo/db/s/global_user_write_block_state.cpp        |   4
 src/mongo/db/user_write_block_mode_op_observer.cpp      | 107
 src/mongo/db/user_write_block_mode_op_observer.h        | 110
 src/mongo/db/user_write_block_mode_op_observer_test.cpp | 119
 12 files changed, 491 insertions(+), 128 deletions(-)
diff --git a/jstests/noPassthrough/libs/user_write_blocking.js b/jstests/noPassthrough/libs/user_write_blocking.js
index 3eb2322f38f..8078e1a1b8d 100644
--- a/jstests/noPassthrough/libs/user_write_blocking.js
+++ b/jstests/noPassthrough/libs/user_write_blocking.js
@@ -36,7 +36,14 @@ const UserWriteBlockHelpers = (function() {
assert.commandWorked(admin.runCommand({
createUser: noBypassUser,
pwd: password,
- roles: [{role: "readWriteAnyDatabase", db: "admin"}]
+ roles: [
+ // Needed for CUD operations
+ {role: "readWriteAnyDatabase", db: "admin"},
+ // Needed for DDL operations
+ {role: "dbAdminAnyDatabase", db: "admin"},
+ // Needed for importCollection
+ {role: "clusterAdmin", db: "admin"}
+ ]
}));
this.haveCreatedUsers = true;
}
@@ -62,6 +69,14 @@ const UserWriteBlockHelpers = (function() {
return fun({conn: this.adminConn, db: db, admin: db.getSiblingDB("admin"), coll: coll});
}
+ runInParallelShell(asAdmin, funString) {
+ const userName = asAdmin ? bypassUser : noBypassUser;
+ return startParallelShell(`{
+ db.getSiblingDB('admin').auth('${userName}', '${password}');
+ (` + funString + `)({conn: db.getMongo()});
+ }`, this.conn.port);
+ }
+
enableWriteBlockMode() {
assert.commandWorked(
this.adminConn.getDB("admin").runCommand({setUserWriteBlockMode: 1, global: true}));
@@ -98,6 +113,10 @@ const UserWriteBlockHelpers = (function() {
return {waiter: awaitShell, failpoint: hangWhileSetingUserWriteBlockModeFailPoint};
}
+ setFailPoint(failpointName) {
+ throw "UNIMPLEMENTED";
+ }
+
restart() {
throw "UNIMPLEMENTED";
}
@@ -109,8 +128,15 @@ const UserWriteBlockHelpers = (function() {
class ReplicaFixture extends Fixture {
constructor() {
- const rst = new ReplSetTest(
- {nodes: 3, nodeOptions: {auth: "", bind_ip_all: ""}, keyFile: keyfile});
+ const rst = new ReplSetTest({
+ nodes: 3,
+ nodeOptions: {auth: "", bind_ip_all: ""},
+ keyFile: keyfile,
+ setParameter: {
+ // Set the history window to zero to explicitly control the oldest timestamp.
+ minSnapshotHistoryWindowInSeconds: 0
+ }
+ });
rst.startSet();
rst.initiate();
@@ -165,6 +191,14 @@ const UserWriteBlockHelpers = (function() {
return new LockHolder(this, parallelShell, opId);
}
+ setFailPoint(failpointName) {
+ return configureFailPoint(this.adminConn, failpointName);
+ }
+
+ getAllDbPaths() {
+ return this.rst.nodes.map(node => this.rst.getDbPath(node));
+ }
+
restart() {
this.rst.stopSet(undefined, /* restart */ true);
this.rst.startSet({}, /* restart */ true);
@@ -197,6 +231,12 @@ const UserWriteBlockHelpers = (function() {
return this._hangTransition(this.st.shard0, "hangInShardsvrSetUserWriteBlockMode");
}
+ setFailPoint(failpointName) {
+ const backend = this.st.rs0.getPrimary();
+ return authutil.asCluster(
+ backend, keyfile, () => configureFailPoint(backend, failpointName));
+ }
+
restart() {
this.st.restartShardRS(0);
this.st.restartConfigServer(0);
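Note on the new runInParallelShell helper: it returns the join handle produced by startParallelShell, and the function string is evaluated only after the parallel shell authenticates as the chosen user. A minimal usage sketch (hypothetical, not part of this commit; assumes ReplicaFixture is exported from UserWriteBlockHelpers as in the tests below):

load("jstests/noPassthrough/libs/user_write_blocking.js");
const fixture = new UserWriteBlockHelpers.ReplicaFixture();
// Run a function as the non-bypass user in a parallel shell.
const join = fixture.runInParallelShell(false /* asAdmin */, `({conn}) => {
    assert.commandWorked(conn.getDB("test").coll.insert({x: 1}));
}`);
join();  // Wait for the parallel shell to exit.
fixture.stop();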
diff --git a/jstests/noPassthrough/set_user_write_block_mode.js b/jstests/noPassthrough/set_user_write_block_mode.js
index b8974bc6eb9..e2e83bdd095 100644
--- a/jstests/noPassthrough/set_user_write_block_mode.js
+++ b/jstests/noPassthrough/set_user_write_block_mode.js
@@ -5,15 +5,17 @@
// requires_auth,
// requires_fcv_60,
// requires_non_retryable_commands,
+// requires_persistence,
// requires_replication,
// featureFlagUserWriteBlocking,
// ]
-load("jstests/noPassthrough/libs/user_write_blocking.js");
-
(function() {
'use strict';
+load("jstests/noPassthrough/libs/user_write_blocking.js");
+load("jstests/libs/fail_point_util.js"); // For configureFailPoint
+
const {
WriteBlockState,
ShardingFixture,
@@ -24,79 +26,189 @@ const {
keyfile
} = UserWriteBlockHelpers;
-function runTest(fixture) {
- // For this test to work, we expect the state of the collection passed to be a single {a: 2}
- // document. This test is expected to maintain that state.
- function testCUD(coll, shouldSucceed, expectedFailure) {
- // Ensure we successfully maintained state from last run.
- assert.eq(0, coll.find({a: 1}).count());
- assert.eq(1, coll.find({a: 2}).count());
-
- if (shouldSucceed) {
- assert.commandWorked(coll.insert({a: 1}));
- assert.eq(1, coll.find({a: 1}).count());
- assert.commandWorked(coll.update({a: 1}, {a: 1, b: 2}));
- assert.eq(1, coll.find({a: 1, b: 2}).count());
- assert.commandWorked(coll.remove({a: 1}));
- } else {
- assert.commandFailedWithCode(coll.insert({a: 1}), expectedFailure);
- assert.commandFailedWithCode(coll.update({a: 2}, {a: 2, b: 2}), expectedFailure);
- assert.eq(0, coll.find({a: 2, b: 2}).count());
- assert.commandFailedWithCode(coll.remove({a: 2}), expectedFailure);
+// For this test to work, we expect the following state to be maintained across runs:
+// One database: "testSetUserWriteBlockMode1"
+// Two collections: db.coll1, db.coll2
+// One index: "index" with key pattern {"b": 1} on db.coll1
+// One document: {a: 0, b: 0} in db.coll1
+const dbName = "testSetUserWriteBlockMode1";
+const coll1Name = "coll1";
+const coll2Name = "coll2";
+const indexName = "index";
+
+function setupForTesting(conn) {
+ const db = conn.getDB(dbName);
+ assert.commandWorked(db.createCollection(coll1Name));
+ assert.commandWorked(db.createCollection(coll2Name));
+ const coll1 = db[coll1Name];
+ assert.commandWorked(coll1.insert({a: 0, b: 0}));
+ assert.commandWorked(coll1.createIndex({"b": 1}, {"name": indexName}));
+}
+
+function testCheckedOps(conn, shouldSucceed, expectedFailure) {
+ const transientDbName = "transientDB";
+ const transientCollNames = ["tc0", "tc1", "tc2"];
+ const transientIndexName = "transientIndex";
+
+ const db = conn.getDB(dbName);
+ const coll1 = db[coll1Name];
+ const coll2 = db[coll2Name];
+
+ // Ensure we successfully maintained state from last run.
+ function assertState() {
+ assert(Array.contains(conn.getDBNames(), db.getName()));
+ assert(!Array.contains(conn.getDBNames(), transientDbName));
+
+ assert(Array.contains(db.getCollectionNames(), coll1.getName()));
+ assert(Array.contains(db.getCollectionNames(), coll2.getName()));
+ for (let tName of transientCollNames) {
+ assert(!Array.contains(db.getCollectionNames(), tName));
}
- // Ensure we successfully maintained state on this run.
- assert.eq(0, coll.find({a: 1}).count());
- assert.eq(1, coll.find({a: 2}).count());
+ const indexes = coll1.getIndexes();
+ assert.eq(undefined, indexes.find(i => i.name === transientIndexName));
+ assert.neq(undefined, indexes.find(i => i.name === indexName));
+
+ assert.eq(1, coll1.find({a: 0, b: 0}).count());
+ assert.eq(0, coll1.find({a: 1}).count());
+ }
+ assertState();
+
+ if (shouldSucceed) {
+ // Test CUD
+ assert.commandWorked(coll1.insert({a: 1}));
+ assert.eq(1, coll1.find({a: 1}).count());
+ assert.commandWorked(coll1.update({a: 1}, {a: 1, b: 2}));
+ assert.eq(1, coll1.find({a: 1, b: 2}).count());
+ assert.commandWorked(coll1.remove({a: 1}));
+
+ // Test create index on empty and non-empty colls, collMod, drop index.
+ assert.commandWorked(coll1.createIndex({"a": 1}, {"name": transientIndexName}));
+ assert.commandWorked(db.runCommand(
+ {collMod: coll1Name, "index": {"keyPattern": {"a": 1}, expireAfterSeconds: 200}}));
+ assert.commandWorked(coll1.dropIndex({"a": 1}));
+ assert.commandWorked(coll2.createIndex({"a": 1}, {"name": transientIndexName}));
+ assert.commandWorked(coll2.dropIndex({"a": 1}));
+
+ // Test create, rename (both to a non-existent and an existing target), drop collection.
+ assert.commandWorked(db.createCollection(transientCollNames[0]));
+ assert.commandWorked(db.createCollection(transientCollNames[1]));
+ assert.commandWorked(db[transientCollNames[0]].renameCollection(transientCollNames[2]));
+ assert.commandWorked(
+ db[transientCollNames[2]].renameCollection(transientCollNames[1], true));
+ assert(db[transientCollNames[1]].drop());
+
+ // Test dropping a (non-empty) database.
+ const transientDb = conn.getDB(transientDbName);
+ assert.commandWorked(transientDb.createCollection("coll"));
+ assert.commandWorked(transientDb.dropDatabase());
+ } else {
+ // Test CUD
+ assert.commandFailedWithCode(coll1.insert({a: 1}), expectedFailure);
+ assert.commandFailedWithCode(coll1.update({a: 0, b: 0}, {a: 1}), expectedFailure);
+ assert.commandFailedWithCode(coll1.remove({a: 0, b: 0}), expectedFailure);
+
+ // Test create, collMod, drop index.
+ assert.commandFailedWithCode(coll1.createIndex({"a": 1}, {"name": transientIndexName}),
+ expectedFailure);
+ assert.commandFailedWithCode(
+ db.runCommand(
+ {collMod: coll1Name, "index": {"keyPattern": {"b": 1}, expireAfterSeconds: 200}}),
+ expectedFailure);
+ assert.commandFailedWithCode(coll1.dropIndex({"b": 1}), expectedFailure);
+ assert.commandFailedWithCode(coll2.createIndex({"a": 1}, {"name": transientIndexName}),
+ expectedFailure);
+
+ // Test create, rename (both to a non-existent and an existing target), drop collection.
+ assert.commandFailedWithCode(db.createCollection(transientCollNames[0]), expectedFailure);
+ assert.commandFailedWithCode(coll2.renameCollection(transientCollNames[1]),
+ expectedFailure);
+ assert.commandFailedWithCode(coll2.renameCollection(coll1Name, true), expectedFailure);
+ assert.commandFailedWithCode(db.runCommand({drop: coll2Name}), expectedFailure);
+
+ // Test dropping a database.
+ assert.commandFailedWithCode(db.dropDatabase(), expectedFailure);
}
- // Set up backing collections
- fixture.asUser(({coll}) => assert.commandWorked(coll.insert({a: 2})));
+ // Ensure we successfully maintained state on this run.
+ assertState();
+}
+
+function runTest(fixture) {
+ fixture.asAdmin(({conn}) => setupForTesting(conn));
fixture.assertWriteBlockMode(WriteBlockState.DISABLED);
// Ensure that without setUserWriteBlockMode, both users are privileged for CUD ops
- fixture.asUser(({coll}) => testCUD(coll, true));
- fixture.asAdmin(({coll}) => testCUD(coll, true));
+ fixture.asAdmin(({conn}) => testCheckedOps(conn, true));
- fixture.enableWriteBlockMode();
+ fixture.asUser(({conn}) => {
+ testCheckedOps(conn, true);
+
+ // Ensure that the non-privileged user cannot run setUserWriteBlockMode
+ assert.commandFailedWithCode(
+ conn.getDB('admin').runCommand({setUserWriteBlockMode: 1, global: true}),
+ ErrorCodes.Unauthorized);
+ });
+ fixture.assertWriteBlockMode(WriteBlockState.DISABLED);
+ fixture.enableWriteBlockMode();
fixture.assertWriteBlockMode(WriteBlockState.ENABLED);
// Now with setUserWriteBlockMode enabled, ensure that only the bypassUser can CUD
- fixture.asAdmin(({coll}) => {
- testCUD(coll, true);
- });
- fixture.asUser(({coll}) => {
- testCUD(coll, false, ErrorCodes.OperationFailed);
- });
+ fixture.asAdmin(({conn}) => testCheckedOps(conn, true));
+ fixture.asUser(({conn}) => testCheckedOps(conn, false, ErrorCodes.UserWritesBlocked));
// Restarting the cluster has no impact, as write block state is durable
fixture.restart();
fixture.assertWriteBlockMode(WriteBlockState.ENABLED);
- fixture.asAdmin(({coll}) => {
- testCUD(coll, true);
+ fixture.asAdmin(({conn}) => {
+ testCheckedOps(conn, true);
});
- fixture.asUser(({coll}) => {
- testCUD(coll, false, ErrorCodes.OperationFailed);
+ fixture.asUser(({conn}) => {
+ testCheckedOps(conn, false, ErrorCodes.UserWritesBlocked);
});
// Now disable userWriteBlockMode and ensure both users can CUD again
-
fixture.disableWriteBlockMode();
-
fixture.assertWriteBlockMode(WriteBlockState.DISABLED);
- fixture.asUser(({coll}) => {
- testCUD(coll, true);
- });
- fixture.asAdmin(({coll}) => {
- testCUD(coll, true);
+ fixture.asAdmin(({conn}) => testCheckedOps(conn, true));
+ fixture.asUser(({conn}) => testCheckedOps(conn, true));
+
+ // Test that enabling write blocking while there is an active index build will cause the index
+ // build to fail.
+ fixture.asUser(({conn}) => {
+ const db = conn.getDB(jsTestName());
+ assert.commandWorked(db.createCollection("test"));
+ assert.commandWorked(db.test.insert({"a": 2}));
});
+ const fp = fixture.setFailPoint('hangAfterInitializingIndexBuild');
+ // This createIndex should hang at setup, and when it resumes, userWriteBlockMode will be
+ // enabled and it should eventually fail.
+ const parallelShell = fixture.runInParallelShell(false /* asAdmin */,
+ `({conn}) => {
+ assert.commandFailedWithCode(
+ conn.getDB(jsTestName()).test.createIndex({"a": 1}, {"name": "index"}),
+ ErrorCodes.UserWritesBlocked);
+ }`);
+
+ // Let index build progress to the point where it hits the failpoint.
+ fp.wait();
+ fixture.enableWriteBlockMode();
+ fp.off();
+ parallelShell();
+
+ // Ensure index was not created.
+ fixture.asAdmin(
+ ({conn}) => assert.eq(
+ undefined, conn.getDB(jsTestName()).test.getIndexes().find(i => i.name === "index")));
+
if (fixture.takeGlobalLock) {
+ // Test that serverStatus will produce WriteBlockState.UNKNOWN when the global lock is held.
let globalLock = fixture.takeGlobalLock();
try {
fixture.assertWriteBlockMode(WriteBlockState.UNKNOWN);
@@ -119,7 +231,7 @@ function runTest(fixture) {
MongoRunner.stopMongod(conn);
}
-// Test on replset primary
+// Test on a replset
const rst = new ReplicaFixture();
runTest(rst);
rst.stop();
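Behaviorally, the test above reduces to the following round-trip (a condensed sketch; adminDB and userColl are illustrative names for a bypass-privileged admin connection and an ordinary user's collection handle):

assert.commandWorked(adminDB.runCommand({setUserWriteBlockMode: 1, global: true}));
// Both CUD and DDL from an ordinary user now fail with the dedicated code.
assert.commandFailedWithCode(userColl.insert({a: 1}), ErrorCodes.UserWritesBlocked);
assert.commandFailedWithCode(userColl.createIndex({a: 1}), ErrorCodes.UserWritesBlocked);
// Disabling the mode restores normal behavior.
assert.commandWorked(adminDB.runCommand({setUserWriteBlockMode: 1, global: false}));
assert.commandWorked(userColl.insert({a: 1}));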
diff --git a/jstests/noPassthrough/user_write_blocking_ttl_index.js b/jstests/noPassthrough/user_write_blocking_ttl_index.js
index e7a11bffd3f..dc885a198cb 100644
--- a/jstests/noPassthrough/user_write_blocking_ttl_index.js
+++ b/jstests/noPassthrough/user_write_blocking_ttl_index.js
@@ -65,8 +65,11 @@ function runTest(conn, testCase) {
} else {
assert.eq(1, target.col.count());
checkLog.containsJson(conn, 5400703, {
- "error":
- {"code": 96, "codeName": "OperationFailed", "errmsg": "User writes blocked"}
+ "error": {
+ "code": ErrorCodes.UserWritesBlocked,
+ "codeName": "UserWritesBlocked",
+ "errmsg": "User writes blocked"
+ }
});
}
}
diff --git a/jstests/sharding/set_user_write_block_mode.js b/jstests/sharding/set_user_write_block_mode.js
index e2dfc0ff2a2..94c5ed8087c 100644
--- a/jstests/sharding/set_user_write_block_mode.js
+++ b/jstests/sharding/set_user_write_block_mode.js
@@ -64,7 +64,7 @@ newShard.initiate();
assert.commandWorked(st.s.adminCommand({addShard: newShard.getURL(), name: newShardName}));
// Check that we cannot write on the new shard.
- assert.commandFailedWithCode(newShardCollMongos.insert({x: 2}), ErrorCodes.OperationFailed);
+ assert.commandFailedWithCode(newShardCollMongos.insert({x: 2}), ErrorCodes.UserWritesBlocked);
// Now unblock and check we can write to the new shard.
assert.commandWorked(st.s.adminCommand({setUserWriteBlockMode: 1, global: false}));
diff --git a/src/mongo/base/error_codes.yml b/src/mongo/base/error_codes.yml
index 809a76b89b9..23fbd301d72 100644
--- a/src/mongo/base/error_codes.yml
+++ b/src/mongo/base/error_codes.yml
@@ -482,6 +482,8 @@ error_codes:
- {code: 369, name: FLETransactionAbort}
- {code: 370, name: CannotDropShardKeyIndex}
+ - {code: 371, name: UserWritesBlocked}
+
# Error codes 4000-8999 are reserved.
# Non-sequential error codes for compatibility only)
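With a dedicated code, callers can distinguish blocked writes from generic failures that previously shared OperationFailed. A hypothetical shell-side check (userColl is an assumed collection handle; the numeric value 371 matches the table above):

const res = userColl.insert({a: 1});
if (res.hasWriteError() && res.getWriteError().code === ErrorCodes.UserWritesBlocked) {
    // Code 371: rejected because user write blocking is enabled, not an
    // arbitrary OperationFailed condition.
    print("user writes are currently blocked");
}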
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 53d0a9067e9..043d2a0d512 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -1182,6 +1182,7 @@ env.Library(
'$BUILD_DIR/mongo/db/catalog/collection_catalog',
'$BUILD_DIR/mongo/db/catalog/index_build_entry_idl',
'$BUILD_DIR/mongo/db/repl/tenant_migration_access_blocker',
+ '$BUILD_DIR/mongo/db/s/forwardable_operation_metadata',
'$BUILD_DIR/mongo/db/storage/two_phase_index_build_knobs_idl',
'$BUILD_DIR/mongo/executor/task_executor_interface',
'curop',
diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp
index 233e9df2fa1..1b98e9c5f67 100644
--- a/src/mongo/db/index_builds_coordinator_mongod.cpp
+++ b/src/mongo/db/index_builds_coordinator_mongod.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/index_build_entry_helpers.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/tenant_migration_access_blocker_util.h"
+#include "mongo/db/s/forwardable_operation_metadata.h"
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/service_context.h"
#include "mongo/db/stats/resource_consumption_metrics.h"
@@ -64,6 +65,7 @@ namespace {
MONGO_FAIL_POINT_DEFINE(hangAfterAcquiringIndexBuildSlot);
MONGO_FAIL_POINT_DEFINE(hangBeforeInitializingIndexBuild);
MONGO_FAIL_POINT_DEFINE(hangIndexBuildAfterSignalPrimaryForCommitReadiness);
+MONGO_FAIL_POINT_DEFINE(hangBeforeRunningIndexBuild);
const StringData kMaxNumActiveUserIndexBuildsServerParameterName = "maxNumActiveUserIndexBuilds"_sd;
@@ -306,6 +308,7 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx,
// Since index builds occur in a separate thread, client attributes that are audited must be
// extracted from the client object and passed into the thread separately.
audit::ImpersonatedClientAttrs impersonatedClientAttrs(opCtx->getClient());
+ ForwardableOperationMetadata forwardableOpMetadata(opCtx);
// The thread pool task will be responsible for signalling the condition variable when the index
// build thread is done running.
@@ -324,7 +327,8 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx,
shardVersion = oss.getShardVersion(nss),
dbVersion = oss.getDbVersion(dbName),
resumeInfo,
- impersonatedClientAttrs = std::move(impersonatedClientAttrs)
+ impersonatedClientAttrs = std::move(impersonatedClientAttrs),
+ forwardableOpMetadata = std::move(forwardableOpMetadata)
](auto status) mutable noexcept {
ScopeGuard onScopeExitGuard([&] {
stdx::unique_lock<Latch> lk(_throttlingMutex);
@@ -341,6 +345,10 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx,
auto opCtx = Client::getCurrent()->makeOperationContext();
+ // Forward the forwardable operation metadata from the external client to this thread's
+ // client.
+ forwardableOpMetadata.setOn(opCtx.get());
+
// Load the external client's attributes into this thread's client for auditing.
auto authSession = AuthorizationSession::get(opCtx->getClient());
if (authSession) {
@@ -385,6 +393,8 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx,
// Signal that the index build started successfully.
startPromise.setWith([] {});
+ hangBeforeRunningIndexBuild.pauseWhileSet(opCtx.get());
+
// Runs the remainder of the index build. Sets the promise result and cleans up the index
// build.
_runIndexBuild(opCtx.get(), buildUUID, indexBuildOptions, resumeInfo);
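The new hangBeforeRunningIndexBuild failpoint parks the builder thread right after the start promise is fulfilled, which lets a test change state before the build proceeds. A sketch of driving it from a jstest (hypothetical; primary is an assumed connection, and configureFailPoint comes from the existing fail_point_util.js helpers):

load("jstests/libs/fail_point_util.js");
const fp = configureFailPoint(primary, "hangBeforeRunningIndexBuild");
// Kick off the build in a parallel shell so this shell keeps control.
const awaitBuild = startParallelShell(
    () => db.getSiblingDB("test").coll.createIndex({a: 1}), primary.port);
fp.wait();  // The index build thread has started and is parked.
// ... mutate whatever state the test is probing, e.g. enable write blocking ...
fp.off();
awaitBuild();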
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index d16459c2c44..bf63a589862 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -231,6 +231,9 @@ env.Library(
LIBDEPS=[
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/s/grid',
+ ],
+ LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/db/write_block_bypass',
]
)
diff --git a/src/mongo/db/s/global_user_write_block_state.cpp b/src/mongo/db/s/global_user_write_block_state.cpp
index 5bdd8e944c8..0b579d1fcf3 100644
--- a/src/mongo/db/s/global_user_write_block_state.cpp
+++ b/src/mongo/db/s/global_user_write_block_state.cpp
@@ -59,7 +59,7 @@ void GlobalUserWriteBlockState::disableUserWriteBlocking(OperationContext* opCtx
void GlobalUserWriteBlockState::checkUserWritesAllowed(OperationContext* opCtx,
const NamespaceString& nss) const {
invariant(opCtx->lockState()->isLocked());
- uassert(ErrorCodes::OperationFailed,
+ uassert(ErrorCodes::UserWritesBlocked,
"User writes blocked",
!_globalUserWritesBlocked || WriteBlockBypass::get(opCtx).isWriteBlockBypassEnabled() ||
nss.isOnInternalDb() || nss.isTemporaryReshardingCollection());
@@ -81,7 +81,7 @@ void GlobalUserWriteBlockState::disableUserShardedDDLBlocking(OperationContext*
void GlobalUserWriteBlockState::checkShardedDDLAllowedToStart(OperationContext* opCtx,
const NamespaceString& nss) const {
invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
- uassert(ErrorCodes::OperationFailed,
+ uassert(ErrorCodes::UserWritesBlocked,
"User writes blocked",
!_userShardedDDLBlocked.load() ||
WriteBlockBypass::get(opCtx).isWriteBlockBypassEnabled() || nss.isOnInternalDb());
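The uasserts above encode the allow predicate: a write passes if blocking is off, the bypass is engaged, or the namespace is internal (or a temporary resharding collection). Reusing the fixture helpers from the jstests above, the end-to-end effect looks roughly like:

// With blocking enabled, a bypass-privileged connection still writes, while
// a plain user connection fails with the dedicated code (illustrative names).
fixture.enableWriteBlockMode();
fixture.asAdmin(({conn}) => assert.commandWorked(conn.getDB("test").c.insert({a: 1})));
fixture.asUser(({conn}) => assert.commandFailedWithCode(
    conn.getDB("test").c.insert({a: 1}), ErrorCodes.UserWritesBlocked));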
diff --git a/src/mongo/db/user_write_block_mode_op_observer.cpp b/src/mongo/db/user_write_block_mode_op_observer.cpp
index 93a7fb2749c..2ef15e1ef20 100644
--- a/src/mongo/db/user_write_block_mode_op_observer.cpp
+++ b/src/mongo/db/user_write_block_mode_op_observer.cpp
@@ -175,6 +175,113 @@ void UserWriteBlockModeOpObserver::_onReplicationRollback(OperationContext* opCt
}
}
+void UserWriteBlockModeOpObserver::onCreateIndex(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const UUID& uuid,
+ BSONObj indexDoc,
+ bool fromMigrate) {
+ _checkWriteAllowed(opCtx, nss);
+}
+
+void UserWriteBlockModeOpObserver::onStartIndexBuild(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const UUID& collUUID,
+ const UUID& indexBuildUUID,
+ const std::vector<BSONObj>& indexes,
+ bool fromMigrate) {
+ _checkWriteAllowed(opCtx, nss);
+}
+
+void UserWriteBlockModeOpObserver::onCommitIndexBuild(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const UUID& collUUID,
+ const UUID& indexBuildUUID,
+ const std::vector<BSONObj>& indexes,
+ bool fromMigrate) {
+ _checkWriteAllowed(opCtx, nss);
+}
+
+void UserWriteBlockModeOpObserver::onStartIndexBuildSinglePhase(OperationContext* opCtx,
+ const NamespaceString& nss) {
+ _checkWriteAllowed(opCtx, nss);
+}
+
+void UserWriteBlockModeOpObserver::onCreateCollection(OperationContext* opCtx,
+ const CollectionPtr& coll,
+ const NamespaceString& collectionName,
+ const CollectionOptions& options,
+ const BSONObj& idIndex,
+ const OplogSlot& createOpTime,
+ bool fromMigrate) {
+ _checkWriteAllowed(opCtx, collectionName);
+}
+
+void UserWriteBlockModeOpObserver::onCollMod(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const UUID& uuid,
+ const BSONObj& collModCmd,
+ const CollectionOptions& oldCollOptions,
+ boost::optional<IndexCollModInfo> indexInfo) {
+ _checkWriteAllowed(opCtx, nss);
+}
+
+void UserWriteBlockModeOpObserver::onDropDatabase(OperationContext* opCtx,
+ const std::string& dbName) {
+ _checkWriteAllowed(opCtx, NamespaceString(dbName));
+}
+
+repl::OpTime UserWriteBlockModeOpObserver::onDropCollection(OperationContext* opCtx,
+ const NamespaceString& collectionName,
+ const UUID& uuid,
+ std::uint64_t numRecords,
+ CollectionDropType dropType) {
+ _checkWriteAllowed(opCtx, collectionName);
+ return repl::OpTime();
+}
+
+void UserWriteBlockModeOpObserver::onDropIndex(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const UUID& uuid,
+ const std::string& indexName,
+ const BSONObj& indexInfo) {
+ _checkWriteAllowed(opCtx, nss);
+}
+
+repl::OpTime UserWriteBlockModeOpObserver::preRenameCollection(
+ OperationContext* opCtx,
+ const NamespaceString& fromCollection,
+ const NamespaceString& toCollection,
+ const UUID& uuid,
+ const boost::optional<UUID>& dropTargetUUID,
+ std::uint64_t numRecords,
+ bool stayTemp) {
+ _checkWriteAllowed(opCtx, fromCollection);
+ _checkWriteAllowed(opCtx, toCollection);
+ return repl::OpTime();
+}
+
+void UserWriteBlockModeOpObserver::onRenameCollection(OperationContext* opCtx,
+ const NamespaceString& fromCollection,
+ const NamespaceString& toCollection,
+ const UUID& uuid,
+ const boost::optional<UUID>& dropTargetUUID,
+ std::uint64_t numRecords,
+ bool stayTemp) {
+ _checkWriteAllowed(opCtx, fromCollection);
+ _checkWriteAllowed(opCtx, toCollection);
+}
+
+void UserWriteBlockModeOpObserver::onImportCollection(OperationContext* opCtx,
+ const UUID& importUUID,
+ const NamespaceString& nss,
+ long long numRecords,
+ long long dataSize,
+ const BSONObj& catalogEntry,
+ const BSONObj& storageMetadata,
+ bool isDryRun) {
+ _checkWriteAllowed(opCtx, nss);
+}
+
void UserWriteBlockModeOpObserver::_checkWriteAllowed(OperationContext* opCtx,
const NamespaceString& nss) {
// Evaluate write blocking only on replica set primaries.
diff --git a/src/mongo/db/user_write_block_mode_op_observer.h b/src/mongo/db/user_write_block_mode_op_observer.h
index 38f61233176..5c3e21f12b9 100644
--- a/src/mongo/db/user_write_block_mode_op_observer.h
+++ b/src/mongo/db/user_write_block_mode_op_observer.h
@@ -47,6 +47,7 @@ public:
// Operations to check for allowed writes.
+ // CUD operations
void onInserts(OperationContext* opCtx,
const NamespaceString& nss,
const UUID& uuid,
@@ -62,55 +63,30 @@ public:
StmtId stmtId,
const OplogDeleteEntryArgs& args) final;
- // Noop operations.
-
+ // DDL operations
void onCreateIndex(OperationContext* opCtx,
const NamespaceString& nss,
const UUID& uuid,
BSONObj indexDoc,
- bool fromMigrate) final {}
+ bool fromMigrate) final;
+ // We need to check the startIndexBuild ops because onCreateIndex is only called for empty
+ // collections.
void onStartIndexBuild(OperationContext* opCtx,
const NamespaceString& nss,
const UUID& collUUID,
const UUID& indexBuildUUID,
const std::vector<BSONObj>& indexes,
- bool fromMigrate) final {}
-
- void onStartIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {}
-
- void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {}
+ bool fromMigrate) final;
void onCommitIndexBuild(OperationContext* opCtx,
const NamespaceString& nss,
const UUID& collUUID,
const UUID& indexBuildUUID,
const std::vector<BSONObj>& indexes,
- bool fromMigrate) final {}
-
- void onAbortIndexBuild(OperationContext* opCtx,
- const NamespaceString& nss,
- const UUID& collUUID,
- const UUID& indexBuildUUID,
- const std::vector<BSONObj>& indexes,
- const Status& cause,
- bool fromMigrate) final {}
-
+ bool fromMigrate) final;
- void aboutToDelete(OperationContext* opCtx,
- const NamespaceString& nss,
- const UUID& uuid,
- const BSONObj& doc) final;
-
- void onInternalOpMessage(OperationContext* opCtx,
- const NamespaceString& nss,
- const boost::optional<UUID>& uuid,
- const BSONObj& msgObj,
- const boost::optional<BSONObj> o2MsgObj,
- const boost::optional<repl::OpTime> preImageOpTime,
- const boost::optional<repl::OpTime> postImageOpTime,
- const boost::optional<repl::OpTime> prevWriteOpTimeInTransaction,
- const boost::optional<OplogSlot> slot) final {}
+ void onStartIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final;
void onCreateCollection(OperationContext* opCtx,
const CollectionPtr& coll,
@@ -118,31 +94,40 @@ public:
const CollectionOptions& options,
const BSONObj& idIndex,
const OplogSlot& createOpTime,
- bool fromMigrate) final {}
+ bool fromMigrate) final;
void onCollMod(OperationContext* opCtx,
const NamespaceString& nss,
const UUID& uuid,
const BSONObj& collModCmd,
const CollectionOptions& oldCollOptions,
- boost::optional<IndexCollModInfo> indexInfo) final {}
+ boost::optional<IndexCollModInfo> indexInfo) final;
- void onDropDatabase(OperationContext* opCtx, const std::string& dbName) final {}
+ void onDropDatabase(OperationContext* opCtx, const std::string& dbName) final;
using OpObserver::onDropCollection;
repl::OpTime onDropCollection(OperationContext* opCtx,
const NamespaceString& collectionName,
const UUID& uuid,
std::uint64_t numRecords,
- CollectionDropType dropType) final {
- return repl::OpTime();
- }
+ CollectionDropType dropType) final;
void onDropIndex(OperationContext* opCtx,
const NamespaceString& nss,
const UUID& uuid,
const std::string& indexName,
- const BSONObj& indexInfo) final {}
+ const BSONObj& indexInfo) final;
+
+ // onRenameCollection is only for renaming to a nonexistent target NS, so we need
+ // preRenameCollection too.
+ using OpObserver::preRenameCollection;
+ repl::OpTime preRenameCollection(OperationContext* opCtx,
+ const NamespaceString& fromCollection,
+ const NamespaceString& toCollection,
+ const UUID& uuid,
+ const boost::optional<UUID>& dropTargetUUID,
+ std::uint64_t numRecords,
+ bool stayTemp) final;
using OpObserver::onRenameCollection;
void onRenameCollection(OperationContext* opCtx,
@@ -151,7 +136,7 @@ public:
const UUID& uuid,
const boost::optional<UUID>& dropTargetUUID,
std::uint64_t numRecords,
- bool stayTemp) final {}
+ bool stayTemp) final;
void onImportCollection(OperationContext* opCtx,
const UUID& importUUID,
@@ -160,19 +145,40 @@ public:
long long dataSize,
const BSONObj& catalogEntry,
const BSONObj& storageMetadata,
- bool isDryRun) final {}
+ bool isDryRun) final;
- using OpObserver::preRenameCollection;
- repl::OpTime preRenameCollection(OperationContext* opCtx,
- const NamespaceString& fromCollection,
- const NamespaceString& toCollection,
- const UUID& uuid,
- const boost::optional<UUID>& dropTargetUUID,
- std::uint64_t numRecords,
- bool stayTemp) final {
- return repl::OpTime();
- }
+ // Note aboutToDelete is unchecked, but defined.
+ void aboutToDelete(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const UUID& uuid,
+ const BSONObj& doc) final;
+
+ // Noop operations (don't perform any check).
+
+ // For now we leave the onAbortIndexBuild hooks unchecked: they can be called from both
+ // user and internal codepaths, and we don't want to risk throwing an assert on the
+ // internal paths.
+ void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {}
+
+ void onAbortIndexBuild(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const UUID& collUUID,
+ const UUID& indexBuildUUID,
+ const std::vector<BSONObj>& indexes,
+ const Status& cause,
+ bool fromMigrate) final {}
+
+ void onInternalOpMessage(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const boost::optional<UUID>& uuid,
+ const BSONObj& msgObj,
+ const boost::optional<BSONObj> o2MsgObj,
+ const boost::optional<repl::OpTime> preImageOpTime,
+ const boost::optional<repl::OpTime> postImageOpTime,
+ const boost::optional<repl::OpTime> prevWriteOpTimeInTransaction,
+ const boost::optional<OplogSlot> slot) final {}
+ // This doesn't need its own check: it executes in the same WUOW as preRenameCollection,
+ // which is already checked.
void postRenameCollection(OperationContext* opCtx,
const NamespaceString& fromCollection,
const NamespaceString& toCollection,
@@ -180,6 +186,8 @@ public:
const boost::optional<UUID>& dropTargetUUID,
bool stayTemp) final {}
+ // The transaction commit related hooks don't need to be checked, because all of the operations
+ // inside the transaction are checked and they all execute in one WUOW.
void onApplyOps(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& applyOpCmd) final {}
diff --git a/src/mongo/db/user_write_block_mode_op_observer_test.cpp b/src/mongo/db/user_write_block_mode_op_observer_test.cpp
index 1f1f1b9a9b5..512f42b7003 100644
--- a/src/mongo/db/user_write_block_mode_op_observer_test.cpp
+++ b/src/mongo/db/user_write_block_mode_op_observer_test.cpp
@@ -62,12 +62,12 @@ public:
}
protected:
- // Ensure that inserts, updates, and deletes with the given opCtx on the given namespace will
- // succeed or fail depending on the value of shouldSucceed.
+ // Ensure that CUD ops with the given opCtx on the given namespace will succeed or fail
+ // depending on the value of shouldSucceed.
void runCUD(OperationContext* opCtx,
const NamespaceString& nss,
bool shouldSucceed,
- bool fromMigrate = false) {
+ bool fromMigrate) {
UserWriteBlockModeOpObserver opObserver;
std::vector<InsertStatement> inserts;
CollectionUpdateArgs collectionUpdateArgs;
@@ -78,7 +78,6 @@ protected:
updateArgs.nss = nss;
OplogDeleteEntryArgs deleteArgs;
deleteArgs.fromMigrate = fromMigrate;
-
if (shouldSucceed) {
try {
opObserver.onInserts(opCtx, nss, uuid, inserts.begin(), inserts.end(), fromMigrate);
@@ -98,6 +97,83 @@ protected:
}
}
+ // Ensure that all checked ops with the given opCtx on the given namespace will
+ // succeed or fail depending on the value of shouldSucceed.
+ void runCheckedOps(OperationContext* opCtx,
+ const NamespaceString& nss,
+ bool shouldSucceed,
+ bool fromMigrate = false) {
+ runCUD(opCtx, nss, shouldSucceed, fromMigrate);
+ UserWriteBlockModeOpObserver opObserver;
+ auto uuid = UUID::gen();
+ NamespaceString adminNss = NamespaceString("admin");
+
+ if (shouldSucceed) {
+ try {
+ opObserver.onCreateIndex(opCtx, nss, uuid, BSONObj(), false);
+ opObserver.onStartIndexBuild(opCtx, nss, uuid, uuid, {}, false);
+ opObserver.onStartIndexBuildSinglePhase(opCtx, nss);
+ opObserver.onCreateCollection(
+ opCtx, nullptr, nss, {}, BSONObj(), OplogSlot(), false);
+ opObserver.onCollMod(opCtx, nss, uuid, BSONObj(), {}, boost::none);
+ opObserver.onDropDatabase(opCtx, std::string(nss.db()));
+ opObserver.onDropCollection(
+ opCtx,
+ nss,
+ uuid,
+ 0,
+ UserWriteBlockModeOpObserver::CollectionDropType::kOnePhase);
+ opObserver.onDropIndex(opCtx, nss, uuid, "", BSONObj());
+ // For renames, make sure we check both from and to for the given namespace
+ opObserver.preRenameCollection(opCtx, nss, adminNss, uuid, boost::none, 0, false);
+ opObserver.preRenameCollection(opCtx, adminNss, nss, uuid, boost::none, 0, false);
+ opObserver.onRenameCollection(opCtx, nss, adminNss, uuid, boost::none, 0, false);
+ opObserver.onRenameCollection(opCtx, adminNss, nss, uuid, boost::none, 0, false);
+ opObserver.onImportCollection(opCtx, uuid, nss, 0, 0, BSONObj(), BSONObj(), false);
+ } catch (...) {
+ // Make it easier to see that this is where we failed.
+ ASSERT_OK(exceptionToStatus());
+ }
+ } else {
+ ASSERT_THROWS(opObserver.onCreateIndex(opCtx, nss, uuid, BSONObj(), false),
+ AssertionException);
+ ASSERT_THROWS(opObserver.onStartIndexBuild(opCtx, nss, uuid, uuid, {}, false),
+ AssertionException);
+ ASSERT_THROWS(opObserver.onStartIndexBuildSinglePhase(opCtx, nss), AssertionException);
+ ASSERT_THROWS(opObserver.onCreateCollection(
+ opCtx, nullptr, nss, {}, BSONObj(), OplogSlot(), false),
+ AssertionException);
+ ASSERT_THROWS(opObserver.onCollMod(opCtx, nss, uuid, BSONObj(), {}, boost::none),
+ AssertionException);
+ ASSERT_THROWS(opObserver.onDropDatabase(opCtx, std::string(nss.db())),
+ AssertionException);
+ ASSERT_THROWS(opObserver.onDropCollection(
+ opCtx,
+ nss,
+ uuid,
+ 0,
+ UserWriteBlockModeOpObserver::CollectionDropType::kOnePhase),
+ AssertionException);
+ ASSERT_THROWS(opObserver.onDropIndex(opCtx, nss, uuid, "", BSONObj()),
+ AssertionException);
+ ASSERT_THROWS(
+ opObserver.preRenameCollection(opCtx, nss, adminNss, uuid, boost::none, 0, false),
+ AssertionException);
+ ASSERT_THROWS(
+ opObserver.preRenameCollection(opCtx, adminNss, nss, uuid, boost::none, 0, false),
+ AssertionException);
+ ASSERT_THROWS(
+ opObserver.onRenameCollection(opCtx, nss, adminNss, uuid, boost::none, 0, false),
+ AssertionException);
+ ASSERT_THROWS(
+ opObserver.onRenameCollection(opCtx, adminNss, nss, uuid, boost::none, 0, false),
+ AssertionException);
+ ASSERT_THROWS(
+ opObserver.onImportCollection(opCtx, uuid, nss, 0, 0, BSONObj(), BSONObj(), false),
+ AssertionException);
+ }
+ }
+
private:
// Creates a reasonable set of ReplSettings for most tests.
repl::ReplSettings createReplSettings() {
@@ -117,10 +193,10 @@ TEST_F(UserWriteBlockModeOpObserverTest, WriteBlockingDisabledNoBypass) {
ASSERT(!WriteBlockBypass::get(opCtx.get()).isWriteBlockBypassEnabled());
// Ensure writes succeed
- runCUD(opCtx.get(), NamespaceString("a.b"), true);
- runCUD(opCtx.get(), NamespaceString("admin"), true);
- runCUD(opCtx.get(), NamespaceString("local"), true);
- runCUD(opCtx.get(), NamespaceString("config"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("a.b"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("admin"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("local"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("config"), true);
}
TEST_F(UserWriteBlockModeOpObserverTest, WriteBlockingDisabledWithBypass) {
@@ -137,10 +213,10 @@ TEST_F(UserWriteBlockModeOpObserverTest, WriteBlockingDisabledWithBypass) {
ASSERT(WriteBlockBypass::get(opCtx.get()).isWriteBlockBypassEnabled());
// Ensure writes succeed
- runCUD(opCtx.get(), NamespaceString("a.b"), true);
- runCUD(opCtx.get(), NamespaceString("admin"), true);
- runCUD(opCtx.get(), NamespaceString("local"), true);
- runCUD(opCtx.get(), NamespaceString("config"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("a.b"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("admin"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("local"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("config"), true);
}
TEST_F(UserWriteBlockModeOpObserverTest, WriteBlockingEnabledNoBypass) {
@@ -152,12 +228,12 @@ TEST_F(UserWriteBlockModeOpObserverTest, WriteBlockingEnabledNoBypass) {
ASSERT(!WriteBlockBypass::get(opCtx.get()).isWriteBlockBypassEnabled());
// Ensure user writes now fail, while non-user writes still succeed
- runCUD(opCtx.get(), NamespaceString("a.b"), false);
- runCUD(opCtx.get(), NamespaceString("admin"), true);
- runCUD(opCtx.get(), NamespaceString("local"), true);
- runCUD(opCtx.get(), NamespaceString("config"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("a.b"), false);
+ runCheckedOps(opCtx.get(), NamespaceString("admin"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("local"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("config"), true);
- // Ensure that writes from migrations succeed
+ // Ensure that CUD ops from migrations succeed
runCUD(opCtx.get(), NamespaceString("a.b"), true, true /* fromMigrate */);
}
@@ -175,10 +251,11 @@ TEST_F(UserWriteBlockModeOpObserverTest, WriteBlockingEnabledWithBypass) {
ASSERT(WriteBlockBypass::get(opCtx.get()).isWriteBlockBypassEnabled());
// Ensure user writes succeed
- runCUD(opCtx.get(), NamespaceString("a.b"), true);
- runCUD(opCtx.get(), NamespaceString("admin"), true);
- runCUD(opCtx.get(), NamespaceString("local"), true);
- runCUD(opCtx.get(), NamespaceString("config"), true);
+
+ runCheckedOps(opCtx.get(), NamespaceString("a.b"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("admin"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("local"), true);
+ runCheckedOps(opCtx.get(), NamespaceString("config"), true);
}
} // namespace