author     Matthew Russotto <matthew.russotto@10gen.com>  2017-12-20 13:37:47 -0500
committer  Matthew Russotto <matthew.russotto@10gen.com>  2017-12-20 13:37:47 -0500
commit     55be4faf62882b1b6b7313aa87acf393ef9fddcd
tree       4f197336e11b547b7c58e7381da1b4c983152105
parent     b3033d477f2356d17a0e5263775a0e2dfce75ede
download   mongo-55be4faf62882b1b6b7313aa87acf393ef9fddcd.tar.gz
SERVER-32162 Create a doTxn command
-rw-r--r--  buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml       1
-rw-r--r--  buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml  1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml  1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml       1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml         1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml                  1
-rw-r--r--  jstests/auth/lib/commands_lib.js                                                  484
-rw-r--r--  jstests/core/bypass_doc_validation.js                                              13
-rw-r--r--  jstests/core/collation.js                                                          37
-rw-r--r--  jstests/core/do_txn_atomicity.js                                                   79
-rw-r--r--  jstests/core/do_txn_basic.js                                                      555
-rw-r--r--  jstests/core/json_schema/misc_validation.js                                        18
-rw-r--r--  jstests/core/views/views_all_commands.js                                            5
-rw-r--r--  jstests/libs/override_methods/set_read_and_write_concerns.js                        1
-rw-r--r--  src/mongo/db/commands/SConscript                                                    2
-rw-r--r--  src/mongo/db/commands/apply_ops_cmd.cpp                                           275
-rw-r--r--  src/mongo/db/commands/do_txn_cmd.cpp                                              281
-rw-r--r--  src/mongo/db/commands/oplog_application_checks.cpp                                266
-rw-r--r--  src/mongo/db/commands/oplog_application_checks.h                                   93
-rw-r--r--  src/mongo/db/repl/SConscript                                                       17
-rw-r--r--  src/mongo/db/repl/do_txn.cpp                                                      498
-rw-r--r--  src/mongo/db/repl/do_txn.h                                                         58
-rw-r--r--  src/mongo/db/repl/do_txn_test.cpp                                                 328
23 files changed, 2772 insertions, 244 deletions
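
The patch introduces a doTxn command that applies a batch of oplog-style CRUD operations, atomically when the batch contains only CRUD ops (see jstests/core/do_txn_atomicity.js below). A minimal invocation sketch from the mongo shell, modeled on jstests/core/do_txn_basic.js; the collection name is illustrative and the collection must already exist, since doTxn does not implicitly create collections for inserts:

    // Assumes a mongod built with this patch; "test.coll" is an illustrative name.
    db.getSiblingDB("test").createCollection("coll");
    db.adminCommand({
        doTxn: [
            {op: "i", ns: "test.coll", o: {_id: 1, x: 1}},
            {op: "u", ns: "test.coll", o2: {_id: 1}, o: {$set: {x: 2}}}
        ]
    });
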
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
index f0f8cccf6bd..5bf7953fd11 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
@@ -21,6 +21,7 @@ selector:
- jstests/core/dbhash.js # dbhash.
- jstests/core/dbhash2.js # dbhash.
- jstests/core/diagdata.js # Command not supported in mongos
+ - jstests/core/do_txn*.js # doTxn
- jstests/core/dropdb_race.js # syncdelay.
- jstests/core/evalb.js # db.eval() and profiling.
- jstests/core/fsync.js # uses fsync.
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
index 7b05805ac14..21ec58ccca9 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
@@ -36,6 +36,7 @@ selector:
- jstests/core/dbhash.js # dbhash.
- jstests/core/dbhash2.js # dbhash.
- jstests/core/diagdata.js # Command not supported in mongos
+ - jstests/core/do_txn*.js # doTxn
- jstests/core/dropdb_race.js # syncdelay.
- jstests/core/evalb.js # db.eval() and profiling.
- jstests/core/fsync.js # uses fsync.
diff --git a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
index e1e324bb334..20eee43dadc 100644
--- a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
@@ -24,6 +24,7 @@ selector:
- jstests/core/dbhash.js # dbhash.
- jstests/core/dbhash2.js # dbhash.
- jstests/core/diagdata.js # Command not supported in mongos
+ - jstests/core/do_txn*.js # doTxn
- jstests/core/dropdb_race.js # syncdelay.
- jstests/core/evalb.js # profiling.
- jstests/core/fsync.js # uses fsync.
diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
index 6a5469b4a0e..3173d85a7d5 100644
--- a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
@@ -22,6 +22,7 @@ selector:
- jstests/core/dbhash.js # dbhash.
- jstests/core/dbhash2.js # dbhash.
- jstests/core/diagdata.js # Command not supported in mongos
+ - jstests/core/do_txn*.js # doTxn
- jstests/core/dropdb_race.js # syncdelay.
- jstests/core/evalb.js # profiling.
- jstests/core/fsync.js # uses fsync.
diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml
index 2995e55376c..237d82b4ef1 100644
--- a/buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_jscore_op_query_passthrough.yml
@@ -21,6 +21,7 @@ selector:
- jstests/core/dbhash.js # dbhash.
- jstests/core/dbhash2.js # dbhash.
- jstests/core/diagdata.js # Command not supported in mongos
+ - jstests/core/do_txn*.js # doTxn
- jstests/core/dropdb_race.js # syncdelay.
- jstests/core/evalb.js # profiling.
- jstests/core/fsync.js # uses fsync.
diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
index 7b44978a5ae..7aa64233576 100644
--- a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
@@ -21,6 +21,7 @@ selector:
- jstests/core/dbhash.js # dbhash.
- jstests/core/dbhash2.js # dbhash.
- jstests/core/diagdata.js # Command not supported in mongos
+ - jstests/core/do_txn*.js # doTxn
- jstests/core/dropdb_race.js # syncdelay.
- jstests/core/evalb.js # profiling.
- jstests/core/fsync.js # uses fsync.
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index 2363f3100a5..3e99d96cac9 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -2956,6 +2956,490 @@ var authCommandsLib = {
]
},
{
+ testname: "doTxn_empty",
+ command: {doTxn: []},
+ skipSharded: true,
+ testcases: [
+ {
+ roles: {__system: 1},
+ runOnDb: adminDbName,
+ },
+ {
+ roles: {__system: 1},
+ runOnDb: firstDbName,
+ }
+ ]
+ },
+ {
+ testname: "doTxn_precondition",
+ command: {
+ doTxn: [{
+ "ts": Timestamp(1473353037, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "n",
+ "ns": "",
+ "o": {}
+ }],
+ preCondition: [{ns: firstDbName + ".x", q: {x: 5}, res: []}]
+ },
+ skipSharded: true,
+ setup: function(db) {
+ db.getSisterDB(firstDbName).x.save({});
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["find"]},
+ {
+ resource: {cluster: true},
+ actions: ["appendOplogNote"],
+ removeWhenTestingAuthzFailure: false
+ },
+ ],
+ },
+ ]
+ },
+ {
+ testname: "doTxn_noop",
+ command: {
+ doTxn: [{
+ "ts": Timestamp(1473353037, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "n",
+ "ns": "",
+ "o": {}
+ }]
+ },
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ privileges: [
+ {resource: {cluster: true}, actions: ["appendOplogNote"]},
+ ],
+ },
+ {
+ runOnDb: firstDbName,
+ privileges: [
+ {resource: {cluster: true}, actions: ["appendOplogNote"]},
+ ],
+ expectFailure: true
+ }
+ ]
+ },
+ {
+ testname: "doTxn_insert",
+ command: {
+ doTxn: [{
+ "ts": Timestamp(1474051453, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "i",
+ "ns": firstDbName + ".x",
+ "o": {"_id": ObjectId("57dc3d7da4fce4358afa85b8"), "data": 5}
+ }]
+ },
+ skipSharded: true,
+ setup: function(db) {
+ db.getSisterDB(firstDbName).x.save({});
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_write,
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["insert"]},
+ ],
+ },
+ ]
+ },
+ {
+ testname: "doTxn_insert_UUID",
+ command: function(state) {
+ return {
+ doTxn: [{
+ "ts": Timestamp(1474051453, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "i",
+ "ns": state.collName,
+ "ui": state.uuid,
+ "o": {"_id": ObjectId("57dc3d7da4fce4358afa85b8"), "data": 5}
+ }]
+ };
+ },
+ skipSharded: true,
+ setup: function(db) {
+ var sibling = db.getSisterDB(firstDbName);
+ sibling.runCommand({create: "x"});
+
+ return {
+ collName: sibling.x.getFullName(),
+ uuid: getUUIDFromListCollections(sibling, sibling.x.getName())
+ };
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {root: 1, restore: 1, __system: 1},
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["insert"]},
+ {resource: {cluster: true}, actions: ["useUUID"]}
+ ],
+ },
+ ]
+ },
+ {
+ testname: "doTxn_insert_with_nonexistent_UUID",
+ command: function(state) {
+ return {
+ doTxn: [{
+ "ts": Timestamp(1474051453, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "i",
+ "ns": state.collName,
+ // Given a nonexistent UUID, the command should fail.
+ "ui": UUID("71f1d1d7-68ca-493e-a7e9-f03c94e2e960"),
+ "o": {"_id": ObjectId("57dc3d7da4fce4358afa85b8"), "data": 5}
+ }]
+ };
+ },
+ skipSharded: true,
+ setup: function(db) {
+ var sibling = db.getSisterDB(firstDbName);
+ sibling.runCommand({create: "x"});
+
+ return {
+ collName: sibling.x.getFullName(),
+ uuid: getUUIDFromListCollections(sibling, sibling.x.getName())
+ };
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ },
+ testcases: [
+ {
+ // It would be a sanity check failure rather than an auth check
+ // failure.
+ expectFail: true,
+ runOnDb: adminDbName,
+ roles: {root: 1, restore: 1, __system: 1},
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["insert"]},
+ {resource: {cluster: true}, actions: ["useUUID"]}
+ ],
+ },
+ ]
+ },
+ {
+ testname: "doTxn_insert_UUID_failure",
+ command: function(state) {
+ return {
+ doTxn: [{
+ "ts": Timestamp(1474051453, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "i",
+ "ns": state.collName,
+ "ui": state.uuid,
+ "o": {"_id": ObjectId("57dc3d7da4fce4358afa85b8"), "data": 5}
+ }]
+ };
+ },
+ skipSharded: true,
+ setup: function(db) {
+ var sibling = db.getSisterDB(firstDbName);
+ sibling.runCommand({create: "x"});
+
+ return {
+ collName: sibling.x.getFullName(),
+ uuid: getUUIDFromListCollections(sibling, sibling.x.getName())
+ };
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ },
+ testcases: [
+ {
+ expectAuthzFailure: true,
+ runOnDb: adminDbName,
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["insert"]},
+ // Don't have useUUID privilege.
+ ],
+ },
+ ]
+ },
+ {
+ testname: "doTxn_insert_UUID_with_wrong_ns",
+ command: function(state) {
+ return {
+ doTxn: [{
+ "ts": Timestamp(1474051453, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "i",
+ "ns":
+ firstDbName + ".y", // Specify wrong name but correct UUID; should work.
+ "ui": state.x_uuid, // The insert should go to x.
+ "o": {"_id": ObjectId("57dc3d7da4fce4358afa85b8"), "data": 5}
+ }]
+ };
+ },
+ skipSharded: true,
+ setup: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ db.getSisterDB(firstDbName).y.drop();
+ var sibling = db.getSisterDB(firstDbName);
+ sibling.runCommand({create: "x"});
+ sibling.runCommand({create: "y"});
+ return {x_uuid: getUUIDFromListCollections(sibling, sibling.x.getName())};
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ privileges: [
+ {
+ resource: {db: firstDbName, collection: "x"},
+ actions: ["createCollection", "insert"]
+ },
+ {resource: {db: firstDbName, collection: "y"}, actions: ["createCollection"]},
+ {resource: {cluster: true}, actions: ["useUUID", "forceUUID"]}
+ ],
+ },
+ ]
+ },
+ {
+ testname: "doTxn_insert_UUID_with_wrong_ns_failure",
+ command: function(state) {
+ return {
+ doTxn: [{
+ "ts": Timestamp(1474051453, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "i",
+ "ns":
+ firstDbName + ".y", // Specify wrong name but correct UUID; should work.
+ "ui": state.x_uuid, // The insert should go to x.
+ "o": {"_id": ObjectId("57dc3d7da4fce4358afa85b8"), "data": 5}
+ }]
+ };
+ },
+ skipSharded: true,
+ setup: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ db.getSisterDB(firstDbName).y.drop();
+ var sibling = db.getSisterDB(firstDbName);
+ sibling.runCommand({create: "x"});
+ sibling.runCommand({create: "y"});
+ return {x_uuid: getUUIDFromListCollections(sibling, sibling.x.getName())};
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ },
+ testcases: [
+ {
+ expectAuthzFailure: true,
+ runOnDb: adminDbName,
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["createCollection"]},
+ {
+ resource: {db: firstDbName, collection: "y"},
+ actions: ["createCollection", "insert"]
+ },
+ {resource: {cluster: true}, actions: ["useUUID", "forceUUID"]}
+ ],
+ },
+ ]
+ },
+ {
+ testname: "doTxn_upsert",
+ command: {
+ doTxn: [{
+ "ts": Timestamp(1474053682, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "u",
+ "ns": firstDbName + ".x",
+ "o2": {"_id": 1},
+ "o": {"_id": 1, "data": 8}
+ }]
+ },
+ skipSharded: true,
+ setup: function(db) {
+ db.getSisterDB(firstDbName).x.save({_id: 1, data: 1});
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: Object.merge(roles_write, {restore: 0}, true),
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["update", "insert"]},
+ ],
+ },
+ ]
+ },
+ {
+ testname: "doTxn_update",
+ command: {
+ doTxn: [{
+ "ts": Timestamp(1474053682, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "u",
+ "ns": firstDbName + ".x",
+ "o2": {"_id": 1},
+ "o": {"_id": 1, "data": 8}
+ }],
+ alwaysUpsert: false
+ },
+ skipSharded: true,
+ setup: function(db) {
+ db.getSisterDB(firstDbName).x.save({_id: 1, data: 1});
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: Object.merge(roles_write, {restore: 0}, true),
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["update"]},
+ ],
+ },
+ ]
+ },
+ {
+ testname: "doTxn_update_UUID",
+ command: function(state) {
+ return {
+ doTxn: [{
+ "ts": Timestamp(1474053682, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "u",
+ "ns": state.collName,
+ "ui": state.uuid,
+ "o2": {"_id": 1},
+ "o": {"_id": 1, "data": 8}
+ }],
+ alwaysUpsert: false
+ };
+ },
+ skipSharded: true,
+ setup: function(db) {
+ var sibling = db.getSisterDB(firstDbName);
+ sibling.x.save({_id: 1, data: 1});
+
+ return {
+ collName: sibling.x.getFullName(),
+ uuid: getUUIDFromListCollections(sibling, sibling.x.getName())
+ };
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {root: 1, __system: 1},
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["update"]},
+ {resource: {cluster: true}, actions: ["useUUID"]}
+ ],
+ },
+ ]
+ },
+ {
+ testname: "doTxn_update_UUID_failure",
+ command: function(state) {
+ return {
+ doTxn: [{
+ "ts": Timestamp(1474053682, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "u",
+ "ns": state.collName,
+ "ui": state.uuid,
+ "o2": {"_id": 1},
+ "o": {"_id": 1, "data": 8}
+ }],
+ alwaysUpsert: false
+ };
+ },
+ skipSharded: true,
+ setup: function(db) {
+ var sibling = db.getSisterDB(firstDbName);
+ sibling.x.save({_id: 1, data: 1});
+
+ return {
+ collName: sibling.x.getFullName(),
+ uuid: getUUIDFromListCollections(sibling, sibling.x.getName())
+ };
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ },
+ testcases: [
+ {
+ expectAuthzFailure: true,
+ runOnDb: adminDbName,
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["update"]},
+ ],
+ },
+ ]
+ },
+ {
+ testname: "doTxn_delete",
+ command: {
+ doTxn: [{
+ "ts": Timestamp(1474056194, 1),
+ "h": NumberLong(0),
+ "v": 2,
+ "op": "d",
+ "ns": firstDbName + ".x",
+ "o": {"_id": 1}
+ }]
+ },
+ skipSharded: true,
+ setup: function(db) {
+ db.getSisterDB(firstDbName).x.save({_id: 1, data: 1});
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: Object.merge(roles_write, {restore: 0}, true),
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["remove"]},
+ ],
+ },
+ ]
+ },
+ {
testname: "drop",
command: {drop: "x"},
setup: function(db) {
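
The doTxn test cases added above exercise the command's authorization requirements: the usual CRUD actions (insert, update, remove) on the target collection, find for preCondition checks, appendOplogNote for "n" ops, and the useUUID/forceUUID cluster actions when operations carry a "ui" field. A hedged sketch of a custom role granting just enough privilege for the doTxn_insert case; the role name and target database are illustrative:

    db.getSiblingDB("admin").createRole({
        role: "doTxnInserter",  // illustrative name
        privileges: [
            {resource: {db: "test", collection: "x"}, actions: ["insert"]}
        ],
        roles: []
    });
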
diff --git a/jstests/core/bypass_doc_validation.js b/jstests/core/bypass_doc_validation.js
index 6a9bbde8ffc..74bddec627d 100644
--- a/jstests/core/bypass_doc_validation.js
+++ b/jstests/core/bypass_doc_validation.js
@@ -6,6 +6,7 @@
* - aggregation with $out
* - applyOps (when not sharded)
* - copyDb
+ * - doTxn (when not sharded)
* - findAndModify
* - insert
* - mapReduce
@@ -39,8 +40,9 @@
assert.writeOK(coll.insert({_id: 2}));
assert.commandWorked(myDb.runCommand({collMod: collName, validator: validator}));
+ const isMongos = db.runCommand({isdbgrid: 1}).isdbgrid;
// Test applyOps with a simple insert if not on mongos.
- if (!db.runCommand({isdbgrid: 1}).isdbgrid) {
+ if (!isMongos) {
const op = [{h: 1, v: 2, op: 'i', ns: coll.getFullName(), o: {_id: 9}}];
assertFailsValidation(myDb.runCommand({applyOps: op, bypassDocumentValidation: false}));
assert.eq(0, coll.count({_id: 9}));
@@ -48,6 +50,15 @@
assert.eq(1, coll.count({_id: 9}));
}
+ // Test doTxn with a simple insert if not on mongos.
+ if (!isMongos) {
+ const op = [{h: 1, v: 2, op: 'i', ns: coll.getFullName(), o: {_id: 10}}];
+ assertFailsValidation(myDb.runCommand({doTxn: op, bypassDocumentValidation: false}));
+ assert.eq(0, coll.count({_id: 10}));
+ assert.commandWorked(myDb.runCommand({doTxn: op, bypassDocumentValidation: true}));
+ assert.eq(1, coll.count({_id: 10}));
+ }
+
// Test the aggregation command with a $out stage.
const outputCollName = 'bypass_output_coll';
const outputColl = myDb[outputCollName];
diff --git a/jstests/core/collation.js b/jstests/core/collation.js
index 1a871d97c22..0e6b4974af4 100644
--- a/jstests/core/collation.js
+++ b/jstests/core/collation.js
@@ -1970,6 +1970,43 @@
assert.eq(8, coll.findOne({_id: "foo"}).x);
}
+ // doTxn
+ if (!isMongos) {
+ coll.drop();
+ assert.commandWorked(
+ db.createCollection("collation", {collation: {locale: "en_US", strength: 2}}));
+ assert.writeOK(coll.insert({_id: "foo", x: 5, str: "bar"}));
+
+ // preCondition.q respects collection default collation.
+ assert.commandFailed(db.runCommand({
+ doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}],
+ preCondition: [{ns: coll.getFullName(), q: {_id: "not foo"}, res: {str: "bar"}}]
+ }));
+ assert.eq(5, coll.findOne({_id: "foo"}).x);
+ assert.commandWorked(db.runCommand({
+ doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}],
+ preCondition: [{ns: coll.getFullName(), q: {_id: "FOO"}, res: {str: "bar"}}]
+ }));
+ assert.eq(6, coll.findOne({_id: "foo"}).x);
+
+ // preCondition.res respects collection default collation.
+ assert.commandFailed(db.runCommand({
+ doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}],
+ preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "not bar"}}]
+ }));
+ assert.eq(6, coll.findOne({_id: "foo"}).x);
+ assert.commandWorked(db.runCommand({
+ doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}],
+ preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "BAR"}}]
+ }));
+ assert.eq(7, coll.findOne({_id: "foo"}).x);
+
+ // <operation>.o2 respects collection default collation.
+ assert.commandWorked(db.runCommand(
+ {doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: "FOO"}, o: {$set: {x: 8}}}]}));
+ assert.eq(8, coll.findOne({_id: "foo"}).x);
+ }
+
// Test that the collections created with the "copydb" command inherit the default collation of
// the corresponding collection.
{
diff --git a/jstests/core/do_txn_atomicity.js b/jstests/core/do_txn_atomicity.js
new file mode 100644
index 00000000000..35f81139ef0
--- /dev/null
+++ b/jstests/core/do_txn_atomicity.js
@@ -0,0 +1,79 @@
+// @tags: [requires_non_retryable_commands]
+
+// Tests that doTxn is atomic for CRUD operations
+(function() {
+ 'use strict';
+
+ var t = db.doTxn;
+ t.drop();
+ assert.writeOK(t.insert({_id: 1}));
+
+ // Operations including commands should not be atomic, so the insert will succeed.
+ assert.commandFailed(db.adminCommand({
+ doTxn: [
+ {op: 'i', ns: t.getFullName(), o: {_id: ObjectId(), x: 1}},
+ {op: 'c', ns: "invalid", o: {create: "t"}},
+ ]
+ }));
+ assert.eq(t.count({x: 1}), 1);
+
+ // Operations only including CRUD commands should be atomic, so the next insert will fail.
+ var tooLong = Array(2000).join("hello");
+ assert.commandFailed(db.adminCommand({
+ doTxn: [
+ {op: 'i', ns: t.getFullName(), o: {_id: ObjectId(), x: 1}},
+ {op: 'i', ns: t.getFullName(), o: {_id: tooLong, x: 1}},
+ ]
+ }));
+ assert.eq(t.count({x: 1}), 1);
+
+ // Operations on non-existent databases cannot be atomic.
+ var newDBName = "do_txn_atomicity";
+ var newDB = db.getSiblingDB(newDBName);
+ assert.commandWorked(newDB.dropDatabase());
+ // Updates on a non-existent database no longer implicitly create collections and will fail with
+ // a NamespaceNotFound error.
+ assert.commandFailedWithCode(
+ newDB.runCommand(
+ {doTxn: [{op: "u", ns: newDBName + ".foo", o: {_id: 5, x: 17}, o2: {_id: 5, x: 16}}]}),
+ ErrorCodes.NamespaceNotFound);
+
+ var sawTooManyLocksError = false;
+
+ function applyWithManyLocks(n) {
+ let cappedOps = [];
+ let multiOps = [];
+
+ for (let i = 0; i < n; i++) {
+ // Write to a capped collection, as that may require a lock for serialization.
+ let cappedName = "capped" + n + "-" + i;
+ assert.commandWorked(newDB.createCollection(cappedName, {capped: true, size: 100}));
+ cappedOps.push({op: 'i', ns: newDBName + "." + cappedName, o: {_id: 0}});
+
+ // Make an index multi-key, as that may require a lock for updating the catalog.
+ let multiName = "multi" + n + "-" + i;
+ assert.commandWorked(newDB[multiName].createIndex({x: 1}));
+ multiOps.push({op: 'i', ns: newDBName + "." + multiName, o: {_id: 0, x: [0, 1]}});
+ }
+
+ let res = [cappedOps, multiOps].map((doTxn) => newDB.runCommand({doTxn}));
+ sawTooManyLocksError |= res.some((res) => res.code === ErrorCodes.TooManyLocks);
+ // Transactions involving just two collections should succeed.
+ if (n <= 2)
+ assert(res.every((res) => res.ok));
+ // All transactions should either completely succeed or completely fail.
+ assert(res.every((res) => res.results.every((result) => result == res.ok)));
+ assert(res.every((res) => !res.ok || res.applied == n));
+ }
+
+ // Try requiring different numbers of collection accesses in a single operation to cover
+ // all edge cases, so we run out of available locks in different code paths such as during
+ // oplog application.
+ applyWithManyLocks(1);
+ applyWithManyLocks(2);
+
+ for (let i = 9; i < 16; i++) {
+ applyWithManyLocks(i);
+ }
+ assert(sawTooManyLocksError, "test no longer exhausts the max number of locks held at once");
+})();
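
A condensed sketch of the atomicity guarantee the new test exercises: if any CRUD operation in a CRUD-only batch fails (here an _id too large to index), none of the operations are applied. The collection name is illustrative:

    var coll = db.atomic_demo;
    coll.drop();
    assert.writeOK(coll.insert({_id: 0}));
    var res = db.adminCommand({
        doTxn: [
            {op: 'i', ns: coll.getFullName(), o: {_id: 1}},
            {op: 'i', ns: coll.getFullName(), o: {_id: Array(2000).join("x")}}  // fails: index key too long
        ]
    });
    assert.commandFailed(res);
    assert.eq(1, coll.count());  // only the pre-existing document remains; the first insert was rolled back
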
diff --git a/jstests/core/do_txn_basic.js b/jstests/core/do_txn_basic.js
new file mode 100644
index 00000000000..c9b1b75ef81
--- /dev/null
+++ b/jstests/core/do_txn_basic.js
@@ -0,0 +1,555 @@
+// @tags: [requires_non_retryable_commands]
+
+(function() {
+ "use strict";
+
+ load("jstests/libs/get_index_helpers.js");
+
+ var t = db.do_txn1;
+ t.drop();
+
+ //
+ // Input validation tests
+ //
+
+ // Empty array of operations.
+ assert.commandWorked(db.adminCommand({doTxn: []}),
+ 'doTxn should not fail on empty array of operations');
+
+ // Non-array type for operations.
+ assert.commandFailed(db.adminCommand({doTxn: "not an array"}),
+ 'doTxn should fail on non-array type for operations');
+
+ // Missing 'op' field in an operation.
+ assert.commandFailed(db.adminCommand({doTxn: [{ns: t.getFullName()}]}),
+ 'doTxn should fail on operation without "op" field');
+
+ // Non-string 'op' field in an operation.
+ assert.commandFailed(db.adminCommand({doTxn: [{op: 12345, ns: t.getFullName()}]}),
+ 'doTxn should fail on operation with non-string "op" field');
+
+ // Empty 'op' field value in an operation.
+ assert.commandFailed(db.adminCommand({doTxn: [{op: '', ns: t.getFullName()}]}),
+ 'doTxn should fail on operation with empty "op" field value');
+
+ // Missing 'ns' field in an operation.
+ assert.commandFailed(db.adminCommand({doTxn: [{op: 'c'}]}),
+ 'doTxn should fail on operation without "ns" field');
+
+ // Non-string 'ns' field in an operation.
+ assert.commandFailed(db.adminCommand({doTxn: [{op: 'c', ns: 12345}]}),
+ 'doTxn should fail on operation with non-string "ns" field');
+
+ // Empty 'ns' field value in an operation of type 'n' (noop).
+ assert.commandWorked(db.adminCommand({doTxn: [{op: 'n', ns: ''}]}),
+ 'doTxn should work on no op operation with empty "ns" field value');
+
+ // Missing dbname in 'ns' field.
+ assert.commandFailed(db.adminCommand({doTxn: [{op: 'd', ns: t.getName(), o: {_id: 1}}]}));
+
+ // Missing 'o' field value in an operation of type 'c' (command).
+ assert.commandFailed(db.adminCommand({doTxn: [{op: 'c', ns: t.getFullName()}]}),
+ 'doTxn should fail on command operation without "o" field');
+
+ // Non-object 'o' field value in an operation of type 'c' (command).
+ assert.commandFailed(db.adminCommand({doTxn: [{op: 'c', ns: t.getFullName(), o: 'bar'}]}),
+ 'doTxn should fail on command operation with non-object "o" field');
+
+ // Empty object 'o' field value in an operation of type 'c' (command).
+ assert.commandFailed(db.adminCommand({doTxn: [{op: 'c', ns: t.getFullName(), o: {}}]}),
+ 'doTxn should fail on command operation with empty object "o" field');
+
+ // Unknown key in 'o' field value in an operation of type 'c' (command).
+ assert.commandFailed(db.adminCommand({doTxn: [{op: 'c', ns: t.getFullName(), o: {a: 1}}]}),
+ 'doTxn should fail on command operation on unknown key in "o" field');
+
+ // Empty 'ns' field value in operation type other than 'n'.
+ assert.commandFailed(db.adminCommand({doTxn: [{op: 'c', ns: ''}]}),
+ 'doTxn should fail on non-"n" operation type with empty "ns" field value');
+
+ // Excessively nested doTxn commands gracefully fail.
+ assert.commandFailed(db.adminCommand({
+ "doTxn": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "doTxn": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "doTxn": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "doTxn": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "doTxn": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "doTxn": [{
+ "ts": {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "doTxn": [{
+ "ts":
+ {"$timestamp": {"t": 1, "i": 100}},
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "doTxn": [{
+ "ts": {
+ "$timestamp":
+ {"t": 1, "i": 100}
+ },
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "doTxn": [{
+ "ts": {
+ "$timestamp": {
+ "t": 1,
+ "i": 100
+ }
+ },
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns": "test.$cmd",
+ "o": {
+ "doTxn": [{
+ "ts": {
+ "$timestamp":
+ {
+ "t":
+ 1,
+ "i":
+ 100
+ }
+ },
+ "h": 0,
+ "v": 2,
+ "op": "c",
+ "ns":
+ "test.$cmd",
+ "o": {
+ "doTxn": [{
+ "ts": {
+ "$timestamp": {
+ "t":
+ 1,
+ "i":
+ 100
+ }
+ },
+ "h": 0,
+ "v": 2,
+ "op":
+ "c",
+ "ns":
+ "test.$cmd",
+ "o": {
+ "doTxn":
+ [
+ ]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }
+ }]
+ }),
+ "Excessively nested doTxn should be rejected");
+
+ // Missing 'o' field value in an operation of type 'i' on 'system.indexes' collection.
+ assert.commandFailedWithCode(
+ db.adminCommand({doTxn: [{op: 'i', ns: db.getName() + '.system.indexes'}]}),
+ ErrorCodes.NoSuchKey,
+ 'doTxn should fail on system.indexes insert operation without "o" field');
+
+ // Non-object 'o' field value in an operation of type 'i' on 'system.indexes' collection.
+ assert.commandFailedWithCode(
+ db.adminCommand({doTxn: [{op: 'i', ns: db.getName() + '.system.indexes', o: 'bar'}]}),
+ ErrorCodes.TypeMismatch,
+ 'doTxn should fail on system.indexes insert operation with non-object "o" field');
+
+ // Missing 'ns' field in index spec.
+ assert.commandFailedWithCode(
+ db.adminCommand({
+ doTxn: [{
+ op: 'i',
+ ns: db.getName() + '.system.indexes',
+ o: {
+ key: {a: 1},
+ name: 'a_1',
+ }
+ }]
+ }),
+ ErrorCodes.NoSuchKey,
+ 'doTxn should fail on system.indexes insert operation with missing index namespace');
+
+ // Non-string 'ns' field in index spec.
+ assert.commandFailedWithCode(
+ db.adminCommand({
+ doTxn: [{
+ op: 'i',
+ ns: db.getName() + '.system.indexes',
+ o: {
+ ns: 12345,
+ key: {a: 1},
+ name: 'a_1',
+ }
+ }]
+ }),
+ ErrorCodes.TypeMismatch,
+ 'doTxn should fail on system.indexes insert operation with non-string index namespace');
+
+ // Invalid 'ns' field in index spec.
+ assert.commandFailedWithCode(
+ db.adminCommand({
+ doTxn: [{
+ op: 'i',
+ ns: db.getName() + '.system.indexes',
+ o: {
+ ns: 'invalid_namespace',
+ key: {a: 1},
+ name: 'a_1',
+ }
+ }]
+ }),
+ ErrorCodes.InvalidNamespace,
+ 'doTxn should fail on system.indexes insert operation with invalid index namespace');
+
+ // Inconsistent database name in index spec namespace.
+ assert.commandFailedWithCode(
+ db.adminCommand({
+ doTxn: [{
+ op: 'i',
+ ns: db.getName() + '.system.indexes',
+ o: {
+ ns: 'baddbprefix' + t.getFullName(),
+ key: {a: 1},
+ name: 'a_1',
+ }
+ }]
+ }),
+ ErrorCodes.InvalidNamespace,
+ 'doTxn should fail on system.indexes insert operation with index namespace containing ' +
+ 'inconsistent database name');
+
+ // Valid 'ns' field value in unknown operation type 'x'.
+ assert.commandFailed(db.adminCommand({doTxn: [{op: 'x', ns: t.getFullName()}]}),
+ 'doTxn should fail on unknown operation type "x" with valid "ns" value');
+
+ assert.eq(0, t.find().count(), "Non-zero number of documents in collection at start");
+
+ /**
+ * Test function for running CRUD operations on non-existent namespaces using various
+ * combinations of invalid namespaces (collection/database), allowAtomic and alwaysUpsert.
+ *
+ * Leave 'expectedErrorCode' undefined if this command is expected to run successfully.
+ */
+ function testCrudOperationOnNonExistentNamespace(optype, o, o2, expectedErrorCode) {
+ expectedErrorCode = expectedErrorCode || ErrorCodes.OK;
+ const t2 = db.getSiblingDB('do_txn1_no_such_db').getCollection('t');
+ [t, t2].forEach(coll => {
+ const op = {op: optype, ns: coll.getFullName(), o: o, o2: o2};
+ [false, true].forEach(allowAtomic => {
+ [false, true].forEach(alwaysUpsert => {
+ const cmd = {doTxn: [op], allowAtomic: allowAtomic, alwaysUpsert: alwaysUpsert};
+ jsTestLog('Testing doTxn on non-existent namespace: ' + tojson(cmd));
+ if (expectedErrorCode === ErrorCodes.OK) {
+ assert.commandWorked(db.adminCommand(cmd));
+ } else {
+ assert.commandFailedWithCode(db.adminCommand(cmd), expectedErrorCode);
+ }
+ });
+ });
+ });
+ }
+
+ // Insert and update operations on non-existent collections/databases should return
+ // NamespaceNotFound.
+ testCrudOperationOnNonExistentNamespace('i', {_id: 0}, {}, ErrorCodes.NamespaceNotFound);
+ testCrudOperationOnNonExistentNamespace('u', {x: 0}, {_id: 0}, ErrorCodes.NamespaceNotFound);
+
+ // Delete operations on non-existent collections/databases should return OK for idempotency
+ // reasons.
+ testCrudOperationOnNonExistentNamespace('d', {_id: 0}, {});
+
+ assert.commandWorked(db.createCollection(t.getName()));
+ var a = assert.commandWorked(
+ db.adminCommand({doTxn: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}));
+ assert.eq(1, t.find().count(), "Valid insert failed");
+ assert.eq(true, a.results[0], "Bad result value for valid insert");
+
+ a = assert.commandWorked(
+ db.adminCommand({doTxn: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}));
+ assert.eq(1, t.find().count(), "Duplicate insert failed");
+ assert.eq(true, a.results[0], "Bad result value for duplicate insert");
+
+ var o = {_id: 5, x: 17};
+ assert.eq(o, t.findOne(), "Mismatching document inserted.");
+
+ // 'o' field is an empty array.
+ assert.commandFailed(db.adminCommand({doTxn: [{op: 'i', ns: t.getFullName(), o: []}]}),
+ 'doTxn should fail on insert of object with empty array element');
+
+ var res = assert.commandWorked(db.runCommand({
+ doTxn: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 18}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 19}}}
+ ]
+ }));
+
+ o.x++;
+ o.x++;
+
+ assert.eq(1, t.find().count(), "Updates increased number of documents");
+ assert.eq(o, t.findOne(), "Document doesn't match expected");
+ assert.eq(true, res.results[0], "Bad result value for valid update");
+ assert.eq(true, res.results[1], "Bad result value for valid update");
+
+ // preCondition fully matches
+ res = db.runCommand({
+ doTxn: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 20}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 21}}}
+ ],
+ preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}]
+ });
+
+ // The use of preCondition requires doTxn to run atomically. Therefore, it is incompatible
+ // with {allowAtomic: false}.
+ assert.commandFailedWithCode(
+ db.runCommand({
+ doTxn: [{op: 'u', ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}}],
+ preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 21}}],
+ allowAtomic: false,
+ }),
+ ErrorCodes.InvalidOptions,
+ 'doTxn should fail when preCondition is present and allowAtomic is false.');
+
+ // The use of preCondition is also incompatible with operations that include commands.
+ assert.commandFailedWithCode(
+ db.runCommand({
+ doTxn: [{op: 'c', ns: t.getCollection('$cmd').getFullName(), o: {doTxn: []}}],
+ preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 21}}],
+ }),
+ ErrorCodes.InvalidOptions,
+ 'doTxn should fail when preCondition is present and operations include commands.');
+
+ o.x++;
+ o.x++;
+
+ assert.eq(1, t.find().count(), "Updates increased number of documents");
+ assert.eq(o, t.findOne(), "Document doesn't match expected");
+ assert.eq(true, res.results[0], "Bad result value for valid update");
+ assert.eq(true, res.results[1], "Bad result value for valid update");
+
+ // preCondition doesn't match ns
+ res = db.runCommand({
+ doTxn: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}}
+ ],
+ preCondition: [{ns: "foo.otherName", q: {_id: 5}, res: {x: 21}}]
+ });
+
+ assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied");
+
+ // preCondition doesn't match query
+ res = db.runCommand({
+ doTxn: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}}
+ ],
+ preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}]
+ });
+
+ assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied");
+
+ res = db.runCommand({
+ doTxn: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 6}, o: {$set: {x: 23}}}
+ ]
+ });
+
+ assert.eq(true, res.results[0], "Valid update failed");
+ assert.eq(true, res.results[1], "Valid update failed");
+
+ // Ops with transaction numbers are valid.
+ var lsid = {id: UUID()};
+ res = db.runCommand({
+ doTxn: [
+ {
+ op: "i",
+ ns: t.getFullName(),
+ o: {_id: 7, x: 24},
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ stmdId: 0
+ },
+ {
+ op: "u",
+ ns: t.getFullName(),
+ o2: {_id: 8},
+ o: {$set: {x: 25}},
+ lsid: lsid,
+ txnNumber: NumberLong(1),
+ stmdId: 1
+ },
+ {
+ op: "d",
+ ns: t.getFullName(),
+ o: {_id: 7},
+ lsid: lsid,
+ txnNumber: NumberLong(2),
+ stmdId: 0
+ },
+ ]
+ });
+
+ assert.eq(true, res.results[0], "Valid insert with transaction number failed");
+ assert.eq(true, res.results[1], "Valid update with transaction number failed");
+ assert.eq(true, res.results[2], "Valid delete with transaction number failed");
+
+ // Foreground index build.
+ res = assert.commandWorked(db.adminCommand({
+ doTxn: [{
+ "op": "i",
+ "ns": db.getName() + ".system.indexes",
+ "o": {
+ ns: t.getFullName(),
+ key: {a: 1},
+ name: "a_1",
+ }
+ }]
+ }));
+ assert.eq(1, res.applied, "Incorrect number of operations applied");
+ assert.eq(true, res.results[0], "Foreground index creation failed");
+ var allIndexes = t.getIndexes();
+ var spec = GetIndexHelpers.findByName(allIndexes, "a_1");
+ assert.neq(null, spec, "Foreground index 'a_1' not found: " + tojson(allIndexes));
+ assert.eq(1, spec.v, "Expected v=1 index to be built since 'v' field was omitted");
+
+ // Background indexes are created in the foreground when processed by doTxn.
+ res = assert.commandWorked(db.adminCommand({
+ doTxn: [{
+ "op": "i",
+ "ns": db.getName() + ".system.indexes",
+ "o": {
+ ns: t.getFullName(),
+ key: {b: 1},
+ name: "b_1",
+ background: true,
+ }
+ }]
+ }));
+ assert.eq(1, res.applied, "Incorrect number of operations applied");
+ assert.eq(true, res.results[0], "Background index creation failed");
+ allIndexes = t.getIndexes();
+ spec = GetIndexHelpers.findByName(allIndexes, "b_1");
+ assert.neq(null, spec, "Background index 'b_1' not found: " + tojson(allIndexes));
+ assert.eq(1, spec.v, "Expected v=1 index to be built since 'v' field was omitted");
+
+ // Foreground v=2 index build.
+ res = assert.commandWorked(db.adminCommand({
+ doTxn: [{
+ "op": "i",
+ "ns": db.getName() + ".system.indexes",
+ "o": {
+ ns: t.getFullName(),
+ key: {c: 1},
+ name: "c_1",
+ v: 2,
+ }
+ }]
+ }));
+ assert.eq(1, res.applied, "Incorrect number of operations applied");
+ assert.eq(true, res.results[0], "Foreground v=2 index creation failed");
+ allIndexes = t.getIndexes();
+ spec = GetIndexHelpers.findByName(allIndexes, "c_1");
+ assert.neq(null, spec, "Foreground index 'c_1' not found: " + tojson(allIndexes));
+ assert.eq(2, spec.v, "Expected v=2 index to be built");
+
+ // When applying a "u" (update) op, we default to 'UpdateNode' update semantics, and $set
+ // operations add new fields in lexicographic order.
+ res = assert.commandWorked(db.adminCommand({
+ doTxn: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 6}},
+ {"op": "u", "ns": t.getFullName(), "o2": {_id: 6}, "o": {$set: {z: 1, a: 2}}}
+ ]
+ }));
+ assert.eq(t.findOne({_id: 6}), {_id: 6, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted.
+
+ // 'ModifierInterface' semantics are not supported, so an update with {$v: 0} should fail.
+ res = assert.commandFailed(db.adminCommand({
+ doTxn: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 7}},
+ {
+ "op": "u",
+ "ns": t.getFullName(),
+ "o2": {_id: 7},
+ "o": {$v: NumberLong(0), $set: {z: 1, a: 2}}
+ }
+ ]
+ }));
+ assert.eq(res.code, 50659);
+
+ // When we explicitly specify {$v: 1}, we should get 'UpdateNode' update semantics, and $set
+ // operations get performed in lexicographic order.
+ res = assert.commandWorked(db.adminCommand({
+ doTxn: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 8}},
+ {
+ "op": "u",
+ "ns": t.getFullName(),
+ "o2": {_id: 8},
+ "o": {$v: NumberLong(1), $set: {z: 1, a: 2}}
+ }
+ ]
+ }));
+ assert.eq(t.findOne({_id: 8}), {_id: 8, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted.
+})();
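
For reference, a standalone sketch of the preCondition gating tested above: each precondition runs the query in "q" against "ns", and the batch is applied only if the result matches "res". The collection name and field values are illustrative:

    var coll = db.precond_demo;
    coll.drop();
    assert.writeOK(coll.insert({_id: 1, state: "ready"}));
    assert.commandWorked(db.runCommand({
        doTxn: [{op: "u", ns: coll.getFullName(), o2: {_id: 1}, o: {$set: {state: "done"}}}],
        preCondition: [{ns: coll.getFullName(), q: {_id: 1}, res: {state: "ready"}}]
    }));
    assert.eq("done", coll.findOne({_id: 1}).state);
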
diff --git a/jstests/core/json_schema/misc_validation.js b/jstests/core/json_schema/misc_validation.js
index ddb7092a44a..2e6a0eed6b0 100644
--- a/jstests/core/json_schema/misc_validation.js
+++ b/jstests/core/json_schema/misc_validation.js
@@ -10,6 +10,7 @@
* - update
* - findAndModify
* - applyOps
+ * - doTxn
* - $elemMatch projection
*
* @tags: [assumes_no_implicit_collection_creation_after_drop, requires_non_retryable_commands,
@@ -308,6 +309,23 @@
assert.commandWorked(res);
assert.eq(1, res.applied);
+ coll.drop();
+ assert.writeOK(coll.insert({_id: 1, a: true}));
+
+ // Test $jsonSchema in the precondition checking for doTxn.
+ res = testDB.adminCommand({
+ doTxn: [
+ {op: "u", ns: coll.getFullName(), o2: {_id: 1}, o: {$set: {a: false}}},
+ ],
+ preCondition: [{
+ ns: coll.getFullName(),
+ q: {$jsonSchema: {properties: {a: {type: "boolean"}}}},
+ res: {a: true}
+ }]
+ });
+ assert.commandWorked(res);
+ assert.eq(1, res.applied);
+
// Test $jsonSchema in an eval function.
assert.eq(1,
testDB.eval(
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index b3cd4d9b050..62769d2e0ec 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -185,6 +185,11 @@
dbStats: {skip: "TODO(SERVER-25948)"},
delete: {command: {delete: "view", deletes: [{q: {x: 1}, limit: 1}]}, expectFailure: true},
distinct: {command: {distinct: "view", key: "_id"}},
+ doTxn: {
+ command: {doTxn: [{op: "i", o: {_id: 1}, ns: "test.view"}]},
+ expectFailure: true,
+ skipSharded: true,
+ },
driverOIDTest: {skip: isUnrelated},
drop: {command: {drop: "view"}},
dropAllRolesFromDatabase: {skip: isUnrelated},
diff --git a/jstests/libs/override_methods/set_read_and_write_concerns.js b/jstests/libs/override_methods/set_read_and_write_concerns.js
index e213ea934d1..a14efa41a7f 100644
--- a/jstests/libs/override_methods/set_read_and_write_concerns.js
+++ b/jstests/libs/override_methods/set_read_and_write_concerns.js
@@ -92,6 +92,7 @@
"createRole",
"createUser",
"delete",
+ "doTxn",
"drop",
"dropAllRolesFromDatabase",
"dropAllUsersFromDatabase",
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index 2535f8e05cd..eeb21f938f9 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -163,6 +163,7 @@ env.Library(
"dbcommands.cpp",
"dbhash.cpp",
"distinct.cpp",
+ "do_txn_cmd.cpp",
"driverHelpers.cpp",
"drop_indexes.cpp",
"eval.cpp",
@@ -182,6 +183,7 @@ env.Library(
"list_indexes.cpp",
"lock_info.cpp",
"mr.cpp",
+ "oplog_application_checks.cpp",
"oplog_note.cpp",
"parallel_collection_scan.cpp",
"pipeline_command.cpp",
diff --git a/src/mongo/db/commands/apply_ops_cmd.cpp b/src/mongo/db/commands/apply_ops_cmd.cpp
index 2f12d0871f8..4d51368a9e4 100644
--- a/src/mongo/db/commands/apply_ops_cmd.cpp
+++ b/src/mongo/db/commands/apply_ops_cmd.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/catalog/uuid_catalog.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
+#include "mongo/db/commands/oplog_application_checks.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
@@ -75,109 +76,20 @@ bool checkCOperationType(const BSONObj& opObj, const StringData opName) {
return false;
};
-UUID getUUIDFromOplogEntry(const BSONObj& oplogEntry) {
- BSONElement uiElem = oplogEntry["ui"];
- return uassertStatusOK(UUID::parse(uiElem));
-};
-
-Status checkOperationAuthorization(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& oplogEntry,
- bool alwaysUpsert) {
- AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient());
-
- BSONElement opTypeElem = oplogEntry["op"];
- checkBSONType(BSONType::String, opTypeElem);
- const StringData opType = opTypeElem.checkAndGetStringData();
-
- if (opType == "n"_sd) {
- // oplog notes require cluster permissions, and may not have a ns
- if (!authSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
- ActionType::appendOplogNote)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
-
- BSONElement nsElem = oplogEntry["ns"];
- checkBSONType(BSONType::String, nsElem);
- NamespaceString ns(oplogEntry["ns"].checkAndGetStringData());
-
- if (oplogEntry.hasField("ui"_sd)) {
- // ns by UUID overrides the ns specified if they are different.
- auto& uuidCatalog = UUIDCatalog::get(opCtx);
- NamespaceString uuidCollNS = uuidCatalog.lookupNSSByUUID(getUUIDFromOplogEntry(oplogEntry));
- if (!uuidCollNS.isEmpty() && uuidCollNS != ns)
- ns = uuidCollNS;
- }
-
- BSONElement oElem = oplogEntry["o"];
- checkBSONType(BSONType::Object, oElem);
- BSONObj o = oElem.Obj();
-
- if (opType == "c"_sd) {
- StringData commandName = o.firstElement().fieldNameStringData();
- Command* command = Command::findCommand(commandName);
- if (!command) {
- return Status(ErrorCodes::FailedToParse, "Unrecognized command in op");
- }
-
- std::string dbNameForAuthCheck = ns.db().toString();
- if (commandName == "renameCollection") {
- // renameCollection commands must be run on the 'admin' database. Its arguments are
- // fully qualified namespaces. Catalog internals don't know the op produced by running
- // renameCollection was originally run on 'admin', so we must restore this.
- dbNameForAuthCheck = "admin";
- }
-
- return Command::checkAuthorization(
- command, opCtx, OpMsgRequest::fromDBAndBody(dbNameForAuthCheck, o));
- }
-
- if (opType == "i"_sd) {
- return authSession->checkAuthForInsert(opCtx, ns, o);
- } else if (opType == "u"_sd) {
- BSONElement o2Elem = oplogEntry["o2"];
- checkBSONType(BSONType::Object, o2Elem);
- BSONObj o2 = o2Elem.Obj();
-
- BSONElement bElem = oplogEntry["b"];
- if (!bElem.eoo()) {
- checkBSONType(BSONType::Bool, bElem);
- }
- bool b = bElem.trueValue();
-
- const bool upsert = b || alwaysUpsert;
-
- return authSession->checkAuthForUpdate(opCtx, ns, o, o2, upsert);
- } else if (opType == "d"_sd) {
-
- return authSession->checkAuthForDelete(opCtx, ns, o);
- } else if (opType == "db"_sd) {
- // It seems that 'db' isn't used anymore. Require all actions to prevent casual use.
- ActionSet allActions;
- allActions.addAllActions();
- if (!authSession->isAuthorizedForActionsOnResource(ResourcePattern::forAnyResource(),
- allActions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
-
- return Status(ErrorCodes::FailedToParse, "Unrecognized opType");
-}
-
-enum class ApplyOpsValidity { kOk, kNeedsUseUUID, kNeedsForceAndUseUUID, kNeedsSuperuser };
-
/**
- * Returns kNeedsSuperuser, if the provided applyOps command contains
- * an empty applyOps command or createCollection/renameCollection commands are mixed in applyOps
- * batch. Returns kNeedForceAndUseUUID if an operation contains a UUID, and will create a collection
- * with the user-specified UUID. Returns
- * kNeedsUseUUID if the operation contains a UUID. Returns kOk if no conditions
- * which must be specially handled are detected. May throw exceptions if the input is malformed.
+ * Returns kNeedsSuperuser, if the provided applyOps command contains an empty applyOps command or
+ * createCollection/renameCollection commands are mixed in applyOps batch.
+ *
+ * Returns kNeedForceAndUseUUID if an operation contains a UUID, and will create a collection with
+ * the user-specified UUID.
+ *
+ * Returns kNeedsUseUUID if the operation contains a UUID.
+ *
+ * Returns kOk if no conditions which must be specially handled are detected.
+ *
+ * May throw exceptions if the input is malformed.
*/
-ApplyOpsValidity validateApplyOpsCommand(const BSONObj& cmdObj) {
+OplogApplicationValidity validateApplyOpsCommand(const BSONObj& cmdObj) {
const size_t maxApplyOpsDepth = 10;
std::stack<std::pair<size_t, BSONObj>> toCheck;
@@ -212,7 +124,7 @@ ApplyOpsValidity validateApplyOpsCommand(const BSONObj& cmdObj) {
return false;
};
- ApplyOpsValidity ret = ApplyOpsValidity::kOk;
+ OplogApplicationValidity ret = OplogApplicationValidity::kOk;
// Insert the top level applyOps command into the stack.
toCheck.emplace(std::make_pair(0, cmdObj));
@@ -227,7 +139,7 @@ ApplyOpsValidity validateApplyOpsCommand(const BSONObj& cmdObj) {
// Check if the applyOps command is empty. This is probably not something that should
// happen, so require a superuser to do this.
if (applyOpsObj.firstElement().Array().empty()) {
- return ApplyOpsValidity::kNeedsSuperuser;
+ return OplogApplicationValidity::kNeedsSuperuser;
}
// createCollection and renameCollection are only allowed to be applied
@@ -239,7 +151,7 @@ ApplyOpsValidity validateApplyOpsCommand(const BSONObj& cmdObj) {
auto oplogEntry = e.Obj();
if (checkCOperationType(oplogEntry, "create"_sd) ||
checkCOperationType(oplogEntry, "renameCollection"_sd)) {
- return ApplyOpsValidity::kNeedsSuperuser;
+ return OplogApplicationValidity::kNeedsSuperuser;
}
}
}
@@ -259,12 +171,12 @@ ApplyOpsValidity validateApplyOpsCommand(const BSONObj& cmdObj) {
}
// If the op uses any UUIDs at all then the user must possess extra privileges.
- if (opHasUUIDs && ret == ApplyOpsValidity::kOk)
- ret = ApplyOpsValidity::kNeedsUseUUID;
+ if (opHasUUIDs && ret == OplogApplicationValidity::kOk)
+ ret = OplogApplicationValidity::kNeedsUseUUID;
if (opHasUUIDs && checkCOperationType(opObj, "create"_sd)) {
// If the op is 'c' and forces the server to ingest a collection
// with a specific, user defined UUID.
- ret = ApplyOpsValidity::kNeedsForceAndUseUUID;
+ ret = OplogApplicationValidity::kNeedsForceAndUseUUID;
}
// If the op contains a nested applyOps...
@@ -286,9 +198,9 @@ ApplyOpsValidity validateApplyOpsCommand(const BSONObj& cmdObj) {
return ret;
}
-class ApplyOpsCmd : public ErrmsgCommandDeprecated {
+class ApplyOpsCmd : public BasicCommand {
public:
- ApplyOpsCmd() : ErrmsgCommandDeprecated("applyOps") {}
+ ApplyOpsCmd() : BasicCommand("applyOps") {}
bool slaveOk() const override {
return false;
@@ -306,95 +218,24 @@ public:
Status checkAuthForOperation(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) override {
- AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient());
-
- ApplyOpsValidity validity = validateApplyOpsCommand(cmdObj);
- if (validity == ApplyOpsValidity::kNeedsSuperuser) {
- std::vector<Privilege> universalPrivileges;
- RoleGraph::generateUniversalPrivileges(&universalPrivileges);
- if (!authSession->isAuthorizedForPrivileges(universalPrivileges)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
- if (validity == ApplyOpsValidity::kNeedsForceAndUseUUID) {
- if (!authSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(),
- {ActionType::forceUUID, ActionType::useUUID})) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- validity = ApplyOpsValidity::kOk;
- }
- if (validity == ApplyOpsValidity::kNeedsUseUUID) {
- if (!authSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::useUUID)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- validity = ApplyOpsValidity::kOk;
- }
- fassert(40314, validity == ApplyOpsValidity::kOk);
-
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(opCtx);
-
- const bool alwaysUpsert =
- cmdObj.hasField("alwaysUpsert") ? cmdObj["alwaysUpsert"].trueValue() : true;
-
- checkBSONType(BSONType::Array, cmdObj.firstElement());
- for (const BSONElement& e : cmdObj.firstElement().Array()) {
- checkBSONType(BSONType::Object, e);
- Status status = checkOperationAuthorization(opCtx, dbname, e.Obj(), alwaysUpsert);
- if (!status.isOK()) {
- return status;
- }
- }
-
- BSONElement preconditions = cmdObj["preCondition"];
- if (!preconditions.eoo()) {
- for (const BSONElement& precondition : preconditions.Array()) {
- checkBSONType(BSONType::Object, precondition);
- BSONElement nsElem = precondition.Obj()["ns"];
- checkBSONType(BSONType::String, nsElem);
- NamespaceString nss(nsElem.checkAndGetStringData());
-
- if (!authSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(nss), ActionType::find)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized to check precondition");
- }
- }
- }
-
- return Status::OK();
+ OplogApplicationValidity validity = validateApplyOpsCommand(cmdObj);
+ return OplogApplicationChecks::checkAuthForCommand(
+ opCtx, dbname, cmdObj, validity, OplogApplicationCommand::kApplyOpsCmd);
}
- bool errmsgRun(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj,
- std::string& errmsg,
- BSONObjBuilder& result) override {
+ bool run(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) override {
validateApplyOpsCommand(cmdObj);
boost::optional<DisableDocumentValidation> maybeDisableValidation;
if (shouldBypassDocumentValidationForCommand(cmdObj))
maybeDisableValidation.emplace(opCtx);
- if (cmdObj.firstElement().type() != Array) {
- errmsg = "ops has to be an array";
- return false;
- }
-
- BSONObj ops = cmdObj.firstElement().Obj();
-
- {
- // check input
- BSONObjIterator i(ops);
- while (i.more()) {
- BSONElement e = i.next();
- if (!_checkOperation(e, errmsg)) {
- return false;
- }
- }
+ auto status = OplogApplicationChecks::checkOperationArray(cmdObj.firstElement());
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
// TODO (SERVER-30217): When a write concern is provided to the applyOps command, we
@@ -412,7 +253,7 @@ public:
repl::OplogApplication::Mode oplogApplicationMode =
repl::OplogApplication::Mode::kApplyOpsCmd; // the default mode.
std::string oplogApplicationModeString;
- auto status = bsonExtractStringField(
+ status = bsonExtractStringField(
cmdObj, ApplyOps::kOplogApplicationModeFieldName, &oplogApplicationModeString);
if (status.isOK()) {
@@ -442,58 +283,6 @@ public:
return applyOpsStatus;
}
-private:
- /**
- * Returns true if 'e' contains a valid operation.
- */
- static bool _checkOperation(const BSONElement& e, std::string& errmsg) {
- if (e.type() != Object) {
- errmsg = str::stream() << "op not an object: " << e.fieldName();
- return false;
- }
- BSONObj obj = e.Obj();
- // op - operation type
- BSONElement opElement = obj.getField("op");
- if (opElement.eoo()) {
- errmsg = str::stream() << "op does not contain required \"op\" field: "
- << e.fieldName();
- return false;
- }
- if (opElement.type() != mongo::String) {
- errmsg = str::stream() << "\"op\" field is not a string: " << e.fieldName();
- return false;
- }
- // operation type -- see logOp() comments for types
- const char* opType = opElement.valuestrsafe();
- if (*opType == '\0') {
- errmsg = str::stream() << "\"op\" field value cannot be empty: " << e.fieldName();
- return false;
- }
-
- // ns - namespace
- // Only operations of type 'n' are allowed to have an empty namespace.
- BSONElement nsElement = obj.getField("ns");
- if (nsElement.eoo()) {
- errmsg = str::stream() << "op does not contain required \"ns\" field: "
- << e.fieldName();
- return false;
- }
- if (nsElement.type() != mongo::String) {
- errmsg = str::stream() << "\"ns\" field is not a string: " << e.fieldName();
- return false;
- }
- if (nsElement.String().find('\0') != std::string::npos) {
- errmsg = str::stream() << "namespaces cannot have embedded null characters";
- return false;
- }
- if (*opType != 'n' && nsElement.String().empty()) {
- errmsg = str::stream() << "\"ns\" field value cannot be empty when op type is not 'n': "
- << e.fieldName();
- return false;
- }
- return true;
- }
-
} applyOpsCmd;
} // namespace
diff --git a/src/mongo/db/commands/do_txn_cmd.cpp b/src/mongo/db/commands/do_txn_cmd.cpp
new file mode 100644
index 00000000000..3ed00c1e736
--- /dev/null
+++ b/src/mongo/db/commands/do_txn_cmd.cpp
@@ -0,0 +1,281 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+
+#include "mongo/platform/basic.h"
+
+#include <vector>
+
+#include "mongo/bson/util/bson_check.h"
+#include "mongo/bson/util/bson_extract.h"
+#include "mongo/db/auth/authorization_manager.h"
+#include "mongo/db/auth/authorization_manager_global.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/catalog/document_validation.h"
+#include "mongo/db/catalog/uuid_catalog.h"
+#include "mongo/db/client.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/commands/oplog_application_checks.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/jsobj.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/repl/do_txn.h"
+#include "mongo/db/repl/oplog.h"
+#include "mongo/db/repl/repl_client_info.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
+#include "mongo/util/log.h"
+#include "mongo/util/scopeguard.h"
+#include "mongo/util/uuid.h"
+
+namespace mongo {
+namespace {
+
+bool checkCOperationType(const BSONObj& opObj, const StringData opName) {
+ BSONElement opTypeElem = opObj["op"];
+ checkBSONType(BSONType::String, opTypeElem);
+ const StringData opType = opTypeElem.checkAndGetStringData();
+
+ if (opType == "c"_sd) {
+ BSONElement oElem = opObj["o"];
+ checkBSONType(BSONType::Object, oElem);
+ BSONObj o = oElem.Obj();
+
+ if (o.firstElement().fieldNameStringData() == opName) {
+ return true;
+ }
+ }
+ return false;
+};
+
+/**
+ * Returns kNeedsSuperuser if the provided doTxn command is empty, contains an empty nested
+ * doTxn command, or mixes createCollection/renameCollection commands into a batch of more than
+ * one operation. Returns kNeedsForceAndUseUUID if an operation contains a UUID and will create
+ * a collection with the user-specified UUID. Returns kNeedsUseUUID if an operation contains a
+ * UUID. Returns kOk if no conditions which must be specially handled are detected. May throw
+ * exceptions if the input is malformed.
+ */
+OplogApplicationValidity validateDoTxnCommand(const BSONObj& cmdObj) {
+ const size_t maxDoTxnDepth = 10;
+ std::stack<std::pair<size_t, BSONObj>> toCheck;
+
+ auto operationContainsUUID = [](const BSONObj& opObj) {
+ auto anyTopLevelElementIsUUID = [](const BSONObj& opObj) {
+ for (const BSONElement opElement : opObj) {
+ if (opElement.type() == BSONType::BinData &&
+ opElement.binDataType() == BinDataType::newUUID) {
+ return true;
+ }
+ }
+ return false;
+ };
+ if (anyTopLevelElementIsUUID(opObj)) {
+ return true;
+ }
+
+ BSONElement opTypeElem = opObj["op"];
+ checkBSONType(BSONType::String, opTypeElem);
+ const StringData opType = opTypeElem.checkAndGetStringData();
+
+ if (opType == "c"_sd) {
+ BSONElement oElem = opObj["o"];
+ checkBSONType(BSONType::Object, oElem);
+ BSONObj o = oElem.Obj();
+
+ if (anyTopLevelElementIsUUID(o)) {
+ return true;
+ }
+ }
+
+ return false;
+ };
+
+ OplogApplicationValidity ret = OplogApplicationValidity::kOk;
+
+ // Insert the top level doTxn command into the stack.
+ toCheck.emplace(std::make_pair(0, cmdObj));
+
+ while (!toCheck.empty()) {
+ size_t depth;
+ BSONObj doTxnObj;
+ std::tie(depth, doTxnObj) = toCheck.top();
+ toCheck.pop();
+
+ checkBSONType(BSONType::Array, doTxnObj.firstElement());
+ // Check if the doTxn command is empty. This is probably not something that should
+ // happen, so require a superuser to do this.
+ if (doTxnObj.firstElement().Array().empty()) {
+ return OplogApplicationValidity::kNeedsSuperuser;
+ }
+
+ // createCollection and renameCollection are only allowed to be applied
+ // individually. Ensure there is no create/renameCollection in a batch
+ // of size greater than 1.
+ if (doTxnObj.firstElement().Array().size() > 1) {
+ for (const BSONElement& e : doTxnObj.firstElement().Array()) {
+ checkBSONType(BSONType::Object, e);
+ auto oplogEntry = e.Obj();
+ if (checkCOperationType(oplogEntry, "create"_sd) ||
+ checkCOperationType(oplogEntry, "renameCollection"_sd)) {
+ return OplogApplicationValidity::kNeedsSuperuser;
+ }
+ }
+ }
+
+ // For each doTxn command, iterate the ops.
+ for (BSONElement element : doTxnObj.firstElement().Array()) {
+ checkBSONType(BSONType::Object, element);
+ BSONObj opObj = element.Obj();
+
+ bool opHasUUIDs = operationContainsUUID(opObj);
+
+ if (serverGlobalParams.featureCompatibility.getVersion() ==
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo34) {
+ uassert(ErrorCodes::OplogOperationUnsupported,
+ "doTxn with UUID requires upgrading to FeatureCompatibilityVersion 3.6",
+ !opHasUUIDs);
+ }
+
+ // If the op uses any UUIDs at all then the user must possess extra privileges.
+ if (opHasUUIDs && ret == OplogApplicationValidity::kOk)
+ ret = OplogApplicationValidity::kNeedsUseUUID;
+ if (opHasUUIDs && checkCOperationType(opObj, "create"_sd)) {
+            // The op is a 'c' op that forces the server to ingest a collection
+            // with a specific, user-defined UUID.
+ ret = OplogApplicationValidity::kNeedsForceAndUseUUID;
+ }
+
+ // If the op contains a nested doTxn...
+ if (checkCOperationType(opObj, "doTxn"_sd)) {
+ // And we've recursed too far, then bail out.
+ uassert(ErrorCodes::FailedToParse, "Too many nested doTxn", depth < maxDoTxnDepth);
+
+            // Otherwise, if the op contains a doTxn but we haven't recursed too far:
+            // extract the nested doTxn command and insert it into the stack.
+ checkBSONType(BSONType::Object, opObj["o"]);
+ BSONObj oObj = opObj["o"].Obj();
+ toCheck.emplace(std::make_pair(depth + 1, std::move(oObj)));
+ }
+ }
+ }
+
+ return ret;
+}
+
+class DoTxnCmd : public BasicCommand {
+public:
+ DoTxnCmd() : BasicCommand("doTxn") {}
+
+ bool slaveOk() const override {
+ return false;
+ }
+
+ bool supportsWriteConcern(const BSONObj& cmd) const override {
+ return true;
+ }
+
+ void help(std::stringstream& help) const override {
+ help << "internal (sharding)\n{ doTxn : [ ] , preCondition : [ { ns : ... , q : ... , "
+ "res : ... } ] }";
+ }
+
+ Status checkAuthForOperation(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj) override {
+ OplogApplicationValidity validity = validateDoTxnCommand(cmdObj);
+ return OplogApplicationChecks::checkAuthForCommand(
+ opCtx, dbname, cmdObj, validity, OplogApplicationCommand::kDoTxnCmd);
+ }
+
+ bool run(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) override {
+ validateDoTxnCommand(cmdObj);
+
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj))
+ maybeDisableValidation.emplace(opCtx);
+
+ auto status = OplogApplicationChecks::checkOperationArray(cmdObj.firstElement());
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
+ // TODO (SERVER-30217): When a write concern is provided to the doTxn command, we
+ // normally wait on the OpTime of whichever operation successfully completed last. This is
+ // erroneous, however, if the last operation in the array happens to be a write no-op and
+ // thus isn’t assigned an OpTime. Let the second to last operation in the doTxn be write
+ // A, the last operation in doTxn be write B. Let B do a no-op write and let the
+ // operation that caused B to be a no-op be C. If C has an OpTime after A but before B,
+ // then we won’t wait for C to be replicated and it could be rolled back, even though B
+ // was acknowledged. To fix this, we should wait for replication of the node’s last applied
+ // OpTime if the last write operation was a no-op write.
+
+ // We set the OplogApplication::Mode argument based on the mode argument given in the
+ // command object. If no mode is given, default to the 'kApplyOpsCmd' mode.
+ repl::OplogApplication::Mode oplogApplicationMode =
+ repl::OplogApplication::Mode::kApplyOpsCmd; // the default mode.
+ std::string oplogApplicationModeString;
+ status = bsonExtractStringField(
+ cmdObj, DoTxn::kOplogApplicationModeFieldName, &oplogApplicationModeString);
+
+ if (status.isOK()) {
+ auto modeSW = repl::OplogApplication::parseMode(oplogApplicationModeString);
+ if (!modeSW.isOK()) {
+ // Unable to parse the mode argument.
+ return appendCommandStatus(
+ result,
+ modeSW.getStatus().withContext(str::stream() << "Could not parse " +
+ DoTxn::kOplogApplicationModeFieldName));
+ }
+ oplogApplicationMode = modeSW.getValue();
+ } else if (status != ErrorCodes::NoSuchKey) {
+ // NoSuchKey means the user did not supply a mode.
+ return appendCommandStatus(result,
+ Status(status.code(),
+ str::stream() << "Could not parse out "
+ << DoTxn::kOplogApplicationModeFieldName
+ << ": "
+ << status.reason()));
+ }
+
+ auto doTxnStatus = appendCommandStatus(
+ result, doTxn(opCtx, dbname, cmdObj, oplogApplicationMode, &result));
+
+ return doTxnStatus;
+ }
+
+} doTxnCmd;
+
+} // namespace
+} // namespace mongo
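
For reference, a minimal caller-side sketch (illustrative only, not from this patch) of the
command shape that validateDoTxnCommand() and DoTxnCmd::run() expect: the command's first
element holds the array of op objects, and the optional "allowAtomic" flag is consumed later in
do_txn.cpp. The namespace and document below are made up for illustration.

    // Assumes the BSON/BSON_ARRAY macros from "mongo/db/jsobj.h".
    BSONObj cmdObj = BSON("doTxn" << BSON_ARRAY(BSON("op"
                                                     << "i"
                                                     << "ns"
                                                     << "test.t"
                                                     << "o"
                                                     << BSON("_id" << 0)))
                                  << "allowAtomic"
                                  << true);
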
diff --git a/src/mongo/db/commands/oplog_application_checks.cpp b/src/mongo/db/commands/oplog_application_checks.cpp
new file mode 100644
index 00000000000..88b7296d6d9
--- /dev/null
+++ b/src/mongo/db/commands/oplog_application_checks.cpp
@@ -0,0 +1,266 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+#include "mongo/platform/basic.h"
+
+#include "mongo/bson/util/bson_check.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/catalog/document_validation.h"
+#include "mongo/db/catalog/uuid_catalog.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/commands/oplog_application_checks.h"
+
+namespace mongo {
+UUID OplogApplicationChecks::getUUIDFromOplogEntry(const BSONObj& oplogEntry) {
+ BSONElement uiElem = oplogEntry["ui"];
+ return uassertStatusOK(UUID::parse(uiElem));
+};
+
+Status OplogApplicationChecks::checkOperationAuthorization(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& oplogEntry,
+ AuthorizationSession* authSession,
+ OplogApplicationCommand command,
+ bool alwaysUpsert) {
+ BSONElement opTypeElem = oplogEntry["op"];
+ checkBSONType(BSONType::String, opTypeElem);
+ const StringData opType = opTypeElem.checkAndGetStringData();
+
+ if (opType == "n"_sd) {
+ // oplog notes require cluster permissions, and may not have a ns
+ if (!authSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::appendOplogNote)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ return Status::OK();
+ }
+
+ BSONElement nsElem = oplogEntry["ns"];
+ checkBSONType(BSONType::String, nsElem);
+ NamespaceString ns(oplogEntry["ns"].checkAndGetStringData());
+
+ if (oplogEntry.hasField("ui"_sd)) {
+ // ns by UUID overrides the ns specified if they are different.
+ auto& uuidCatalog = UUIDCatalog::get(opCtx);
+ NamespaceString uuidCollNS = uuidCatalog.lookupNSSByUUID(getUUIDFromOplogEntry(oplogEntry));
+ if (!uuidCollNS.isEmpty() && uuidCollNS != ns)
+ ns = uuidCollNS;
+ }
+
+ BSONElement oElem = oplogEntry["o"];
+ checkBSONType(BSONType::Object, oElem);
+ BSONObj o = oElem.Obj();
+
+ if (opType == "c"_sd) {
+ if (command == OplogApplicationCommand::kDoTxnCmd) {
+ return Status(ErrorCodes::IllegalOperation, "Commands cannot be applied via doTxn.");
+ }
+ StringData commandName = o.firstElement().fieldNameStringData();
+        Command* commandInOplogEntry = Command::findCommand(commandName);
+        if (!commandInOplogEntry) {
+ return Status(ErrorCodes::FailedToParse, "Unrecognized command in op");
+ }
+
+ std::string dbNameForAuthCheck = ns.db().toString();
+ if (commandName == "renameCollection") {
+ // renameCollection commands must be run on the 'admin' database. Its arguments are
+ // fully qualified namespaces. Catalog internals don't know the op produced by running
+ // renameCollection was originally run on 'admin', so we must restore this.
+ dbNameForAuthCheck = "admin";
+ }
+
+ return Command::checkAuthorization(
+            commandInOplogEntry, opCtx, OpMsgRequest::fromDBAndBody(dbNameForAuthCheck, o));
+ }
+
+ if (opType == "i"_sd) {
+ return authSession->checkAuthForInsert(opCtx, ns, o);
+ } else if (opType == "u"_sd) {
+ BSONElement o2Elem = oplogEntry["o2"];
+ checkBSONType(BSONType::Object, o2Elem);
+ BSONObj o2 = o2Elem.Obj();
+
+ BSONElement bElem = oplogEntry["b"];
+ if (!bElem.eoo()) {
+ checkBSONType(BSONType::Bool, bElem);
+ }
+ bool b = bElem.trueValue();
+
+ const bool upsert = b || alwaysUpsert;
+
+ return authSession->checkAuthForUpdate(opCtx, ns, o, o2, upsert);
+ } else if (opType == "d"_sd) {
+
+ return authSession->checkAuthForDelete(opCtx, ns, o);
+ } else if (opType == "db"_sd) {
+ // It seems that 'db' isn't used anymore. Require all actions to prevent casual use.
+ ActionSet allActions;
+ allActions.addAllActions();
+ if (!authSession->isAuthorizedForActionsOnResource(ResourcePattern::forAnyResource(),
+ allActions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ return Status::OK();
+ }
+
+ return Status(ErrorCodes::FailedToParse, "Unrecognized opType");
+}
+
+Status OplogApplicationChecks::checkOperationArray(const BSONElement& opsElement) {
+ if (opsElement.type() != Array) {
+ return {ErrorCodes::FailedToParse, "ops has to be an array"};
+ }
+ const auto& ops = opsElement.Obj();
+ // check input
+ BSONObjIterator i(ops);
+ while (i.more()) {
+ BSONElement e = i.next();
+ auto status = checkOperation(e);
+ if (!status.isOK()) {
+ return status;
+ }
+ }
+ return Status::OK();
+}
+
+Status OplogApplicationChecks::checkOperation(const BSONElement& e) {
+ if (e.type() != Object) {
+ return {ErrorCodes::FailedToParse, str::stream() << "op not an object: " << e.fieldName()};
+ }
+ BSONObj obj = e.Obj();
+ // op - operation type
+ BSONElement opElement = obj.getField("op");
+ if (opElement.eoo()) {
+ return {ErrorCodes::IllegalOperation,
+ str::stream() << "op does not contain required \"op\" field: " << e.fieldName()};
+ }
+ if (opElement.type() != mongo::String) {
+ return {ErrorCodes::IllegalOperation,
+ str::stream() << "\"op\" field is not a string: " << e.fieldName()};
+ }
+ // operation type -- see logOp() comments for types
+ const char* opType = opElement.valuestrsafe();
+ if (*opType == '\0') {
+ return {ErrorCodes::IllegalOperation,
+ str::stream() << "\"op\" field value cannot be empty: " << e.fieldName()};
+ }
+
+ // ns - namespace
+ // Only operations of type 'n' are allowed to have an empty namespace.
+ BSONElement nsElement = obj.getField("ns");
+ if (nsElement.eoo()) {
+ return {ErrorCodes::IllegalOperation,
+ str::stream() << "op does not contain required \"ns\" field: " << e.fieldName()};
+ }
+ if (nsElement.type() != mongo::String) {
+ return {ErrorCodes::IllegalOperation,
+ str::stream() << "\"ns\" field is not a string: " << e.fieldName()};
+ }
+ if (nsElement.String().find('\0') != std::string::npos) {
+ return {ErrorCodes::IllegalOperation,
+ str::stream() << "namespaces cannot have embedded null characters"};
+ }
+ if (*opType != 'n' && nsElement.String().empty()) {
+ return {ErrorCodes::IllegalOperation,
+ str::stream() << "\"ns\" field value cannot be empty when op type is not 'n': "
+ << e.fieldName()};
+ }
+ return Status::OK();
+}
+
+Status OplogApplicationChecks::checkAuthForCommand(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ OplogApplicationValidity validity,
+ OplogApplicationCommand command) {
+ AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient());
+ if (validity == OplogApplicationValidity::kNeedsSuperuser) {
+ std::vector<Privilege> universalPrivileges;
+ RoleGraph::generateUniversalPrivileges(&universalPrivileges);
+ if (!authSession->isAuthorizedForPrivileges(universalPrivileges)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ return Status::OK();
+ }
+ if (validity == OplogApplicationValidity::kNeedsForceAndUseUUID) {
+ if (!authSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(),
+ {ActionType::forceUUID, ActionType::useUUID})) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ validity = OplogApplicationValidity::kOk;
+ }
+ if (validity == OplogApplicationValidity::kNeedsUseUUID) {
+ if (!authSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::useUUID)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ validity = OplogApplicationValidity::kOk;
+ }
+ fassert(40314, validity == OplogApplicationValidity::kOk);
+
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj))
+ maybeDisableValidation.emplace(opCtx);
+
+ const bool alwaysUpsert =
+ cmdObj.hasField("alwaysUpsert") ? cmdObj["alwaysUpsert"].trueValue() : true;
+
+ checkBSONType(BSONType::Array, cmdObj.firstElement());
+ for (const BSONElement& e : cmdObj.firstElement().Array()) {
+ checkBSONType(BSONType::Object, e);
+ Status status = OplogApplicationChecks::checkOperationAuthorization(
+ opCtx,
+ dbname,
+ e.Obj(),
+ authSession,
+            command,
+ alwaysUpsert);
+ if (!status.isOK()) {
+ return status;
+ }
+ }
+
+ BSONElement preconditions = cmdObj["preCondition"];
+ if (!preconditions.eoo()) {
+ for (const BSONElement& precondition : preconditions.Array()) {
+ checkBSONType(BSONType::Object, precondition);
+ BSONElement nsElem = precondition.Obj()["ns"];
+ checkBSONType(BSONType::String, nsElem);
+ NamespaceString nss(nsElem.checkAndGetStringData());
+
+ if (!authSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(nss), ActionType::find)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized to check precondition");
+ }
+ }
+ }
+
+ return Status::OK();
+}
+
+} // namespace mongo
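
As an illustrative sketch (not from this patch) of what checkOperation() above accepts: an 'n'
no-op may carry an empty "ns", while any other op type with an empty "ns" is rejected. The
namespaces and documents below are made up.

    // Accepted: only 'n' ops may have an empty "ns".
    BSONObj noop = BSON("op"
                        << "n"
                        << "ns"
                        << ""
                        << "o"
                        << BSONObj());
    // Rejected: empty "ns" with an op type other than 'n'.
    BSONObj badInsert = BSON("op"
                             << "i"
                             << "ns"
                             << ""
                             << "o"
                             << BSON("_id" << 1));
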
diff --git a/src/mongo/db/commands/oplog_application_checks.h b/src/mongo/db/commands/oplog_application_checks.h
new file mode 100644
index 00000000000..ec63c6ab505
--- /dev/null
+++ b/src/mongo/db/commands/oplog_application_checks.h
@@ -0,0 +1,93 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+#pragma once
+#include <string>
+
+#include "mongo/base/status.h"
+#include "mongo/util/uuid.h"
+
+namespace mongo {
+class BSONElement;
+class BSONObj;
+class OperationContext;
+
+// OplogApplicationValidity represents special conditions relevant to authorization for
+// oplog application.
+//
+// kNeedsSuperuser means the oplog application command is empty, contains an empty nested
+// applyOps command oplog entry, or mixes a createCollection or renameCollection command into a
+// multi-operation batch.
+//
+// kNeedsUseUUID means some oplog entry in the command contains a UUID, so the useUUID action
+// must be authorized.
+//
+// kNeedsForceAndUseUUID means the command contains at least one oplog entry that creates a
+// collection with a user-specified UUID, so both the forceUUID and useUUID actions must be
+// authorized.
+//
+// kOk means no special conditions apply.
+//
+// Only kOk and kNeedsUseUUID are valid for 'doTxn'. All are valid for 'applyOps'.
+enum class OplogApplicationValidity { kOk, kNeedsUseUUID, kNeedsForceAndUseUUID, kNeedsSuperuser };
+enum class OplogApplicationCommand { kApplyOpsCmd, kDoTxnCmd };
+
+// OplogApplicationChecks contains helper functions for checking the applyOps and doTxn commands.
+class OplogApplicationChecks {
+public:
+ /**
+ * Checks the authorization for an entire oplog application command.
+ */
+ static Status checkAuthForCommand(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ OplogApplicationValidity validity,
+ OplogApplicationCommand command);
+
+ /**
+ * Checks that 'opsElement' is an array and all elements of the array are valid operations.
+ */
+ static Status checkOperationArray(const BSONElement& opsElement);
+
+private:
+ static UUID getUUIDFromOplogEntry(const BSONObj& oplogEntry);
+
+ /**
+ * Checks the authorization for a single operation contained within an oplog application
+ * command.
+ */
+ static Status checkOperationAuthorization(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& oplogEntry,
+ AuthorizationSession* authSession,
+ OplogApplicationCommand command,
+ bool alwaysUpsert);
+ /**
+ * Returns OK if 'e' contains a valid operation.
+ */
+ static Status checkOperation(const BSONElement& e);
+};
+
+} // namespace mongo
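
In short, checkAuthForCommand() in oplog_application_checks.cpp above maps these values to
privilege checks roughly as follows: kNeedsSuperuser requires the full set of universal
privileges, kNeedsForceAndUseUUID requires the forceUUID and useUUID cluster actions,
kNeedsUseUUID requires the useUUID cluster action, and kOk falls through to the per-operation
authorization checks.
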
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 75d416aef2f..0934da02f22 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -8,6 +8,7 @@ env.Library(
target='oplog',
source=[
'apply_ops.cpp',
+ 'do_txn.cpp',
'oplog.cpp',
],
LIBDEPS_PRIVATE=[
@@ -69,6 +70,22 @@ env.CppUnitTest(
],
)
+env.CppUnitTest(
+ target='do_txn_test',
+ source=[
+ 'do_txn_test.cpp',
+ ],
+ LIBDEPS=[
+ 'oplog',
+ 'oplog_entry',
+ 'replmocks',
+ 'storage_interface_impl',
+ '$BUILD_DIR/mongo/db/op_observer_noop',
+ '$BUILD_DIR/mongo/db/service_context_d_test_fixture',
+ '$BUILD_DIR/mongo/rpc/command_status',
+ ],
+)
+
env.Library(
target='rollback_source_impl',
source=[
diff --git a/src/mongo/db/repl/do_txn.cpp b/src/mongo/db/repl/do_txn.cpp
new file mode 100644
index 00000000000..1bd9d13d8a1
--- /dev/null
+++ b/src/mongo/db/repl/do_txn.cpp
@@ -0,0 +1,498 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/repl/do_txn.h"
+
+#include "mongo/bson/util/bson_extract.h"
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/database_holder.h"
+#include "mongo/db/catalog/document_validation.h"
+#include "mongo/db/client.h"
+#include "mongo/db/concurrency/lock_state.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/curop.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/matcher/matcher.h"
+#include "mongo/db/op_observer.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/query/collation/collation_spec.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
+#include "mongo/rpc/get_status_from_command_result.h"
+#include "mongo/util/fail_point_service.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+
+constexpr StringData DoTxn::kPreconditionFieldName;
+constexpr StringData DoTxn::kOplogApplicationModeFieldName;
+
+namespace {
+
+// If enabled, causes loop in _doTxn() to hang after applying current operation.
+MONGO_FP_DECLARE(doTxnPauseBetweenOperations);
+
+/**
+ * Return true iff the doTxnCmd can be executed in a single WriteUnitOfWork.
+ */
+bool _areOpsCrudOnly(const BSONObj& doTxnCmd) {
+ for (const auto& elem : doTxnCmd.firstElement().Obj()) {
+ const char* names[] = {"ns", "op"};
+ BSONElement fields[2];
+ elem.Obj().getFields(2, names, fields);
+ BSONElement& fieldNs = fields[0];
+ BSONElement& fieldOp = fields[1];
+
+ const char* opType = fieldOp.valuestrsafe();
+ const StringData ns = fieldNs.valueStringData();
+
+ // All atomic ops have an opType of length 1.
+ if (opType[0] == '\0' || opType[1] != '\0')
+ return false;
+
+ // Only consider CRUD operations.
+ switch (*opType) {
+ case 'd':
+ case 'n':
+ case 'u':
+ break;
+ case 'i':
+ if (nsToCollectionSubstring(ns) != "system.indexes")
+ break;
+ // Fallthrough.
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+Status _doTxn(OperationContext* opCtx,
+ const std::string& dbName,
+ const BSONObj& doTxnCmd,
+ repl::OplogApplication::Mode oplogApplicationMode,
+ BSONObjBuilder* result,
+ int* numApplied,
+ BSONArrayBuilder* opsBuilder) {
+ BSONObj ops = doTxnCmd.firstElement().Obj();
+ // apply
+ *numApplied = 0;
+ int errors = 0;
+
+ BSONObjIterator i(ops);
+ BSONArrayBuilder ab;
+ const bool alwaysUpsert =
+ doTxnCmd.hasField("alwaysUpsert") ? doTxnCmd["alwaysUpsert"].trueValue() : true;
+ const bool haveWrappingWUOW = opCtx->lockState()->inAWriteUnitOfWork();
+
+ // Apply each op in the given 'doTxn' command object.
+ while (i.more()) {
+ BSONElement e = i.next();
+ const BSONObj& opObj = e.Obj();
+
+ // Ignore 'n' operations.
+ const char* opType = opObj["op"].valuestrsafe();
+ if (*opType == 'n')
+ continue;
+
+ const NamespaceString nss(opObj["ns"].String());
+
+ // Need to check this here, or OldClientContext may fail an invariant.
+ if (*opType != 'c' && !nss.isValid())
+ return {ErrorCodes::InvalidNamespace, "invalid ns: " + nss.ns()};
+
+ Status status(ErrorCodes::InternalError, "");
+
+ if (haveWrappingWUOW) {
+ invariant(opCtx->lockState()->isW());
+ invariant(*opType != 'c');
+
+ auto db = dbHolder().get(opCtx, nss.ns());
+ if (!db) {
+ // Retry in non-atomic mode, since MMAP cannot implicitly create a new database
+ // within an active WriteUnitOfWork.
+ uasserted(ErrorCodes::AtomicityFailure,
+ "cannot create a database in atomic doTxn mode; will retry without "
+ "atomicity");
+ }
+
+ // When processing an update on a non-existent collection, applyOperation_inlock()
+ // returns UpdateOperationFailed on updates and allows the collection to be
+ // implicitly created on upserts. We detect both cases here and fail early with
+ // NamespaceNotFound.
+ // Additionally for inserts, we fail early on non-existent collections.
+ auto collection = db->getCollection(opCtx, nss);
+ if (!collection && !nss.isSystemDotIndexes() && (*opType == 'i' || *opType == 'u')) {
+ uasserted(
+ ErrorCodes::AtomicityFailure,
+ str::stream()
+ << "cannot apply insert or update operation on a non-existent namespace "
+ << nss.ns()
+ << " in atomic doTxn mode: "
+ << redact(opObj));
+ }
+
+ // Cannot specify timestamp values in an atomic doTxn.
+ if (opObj.hasField("ts")) {
+ uasserted(ErrorCodes::AtomicityFailure,
+ "cannot apply an op with a timestamp in atomic doTxn mode; "
+ "will retry without atomicity");
+ }
+
+ OldClientContext ctx(opCtx, nss.ns());
+
+ status = repl::applyOperation_inlock(
+ opCtx, ctx.db(), opObj, alwaysUpsert, oplogApplicationMode);
+ if (!status.isOK())
+ return status;
+
+ // Append completed op, including UUID if available, to 'opsBuilder'.
+ if (opsBuilder) {
+ if (opObj.hasField("ui") || nss.isSystemDotIndexes() ||
+ !(collection && collection->uuid())) {
+ // No changes needed to operation document.
+ opsBuilder->append(opObj);
+ } else {
+ // Operation document has no "ui" field and collection has a UUID.
+ auto uuid = collection->uuid();
+ BSONObjBuilder opBuilder;
+ opBuilder.appendElements(opObj);
+ uuid->appendToBuilder(&opBuilder, "ui");
+ opsBuilder->append(opBuilder.obj());
+ }
+ }
+ } else {
+ try {
+ status = writeConflictRetry(
+ opCtx,
+ "doTxn",
+ nss.ns(),
+ [opCtx, nss, opObj, opType, alwaysUpsert, oplogApplicationMode] {
+ if (*opType == 'c') {
+ invariant(opCtx->lockState()->isW());
+ return repl::applyCommand_inlock(opCtx, opObj, oplogApplicationMode);
+ }
+
+ AutoGetCollection autoColl(opCtx, nss, MODE_IX);
+ if (!autoColl.getCollection() && !nss.isSystemDotIndexes()) {
+ // For idempotency reasons, return success on delete operations.
+ if (*opType == 'd') {
+ return Status::OK();
+ }
+ uasserted(ErrorCodes::NamespaceNotFound,
+ str::stream()
+ << "cannot apply insert or update operation on a "
+ "non-existent namespace "
+ << nss.ns()
+ << ": "
+ << mongo::redact(opObj));
+ }
+
+ OldClientContext ctx(opCtx, nss.ns());
+
+ if (!nss.isSystemDotIndexes()) {
+ return repl::applyOperation_inlock(
+ opCtx, ctx.db(), opObj, alwaysUpsert, oplogApplicationMode);
+ }
+
+ auto fieldO = opObj["o"];
+ BSONObj indexSpec;
+ NamespaceString indexNss;
+ std::tie(indexSpec, indexNss) =
+ repl::prepForApplyOpsIndexInsert(fieldO, opObj, nss);
+ if (!indexSpec["collation"]) {
+ // If the index spec does not include a collation, explicitly specify
+ // the simple collation, so the index does not inherit the collection
+ // default collation.
+ auto indexVersion = indexSpec["v"];
+ // The index version is populated by prepForApplyOpsIndexInsert().
+ invariant(indexVersion);
+ if (indexVersion.isNumber() &&
+ (indexVersion.numberInt() >=
+ static_cast<int>(IndexDescriptor::IndexVersion::kV2))) {
+ BSONObjBuilder bob;
+ bob.append("collation", CollationSpec::kSimpleSpec);
+ bob.appendElements(indexSpec);
+ indexSpec = bob.obj();
+ }
+ }
+ BSONObjBuilder command;
+ command.append("createIndexes", indexNss.coll());
+ {
+ BSONArrayBuilder indexes(command.subarrayStart("indexes"));
+ indexes.append(indexSpec);
+ indexes.doneFast();
+ }
+ const BSONObj commandObj = command.done();
+
+ DBDirectClient client(opCtx);
+ BSONObj infoObj;
+ client.runCommand(nss.db().toString(), commandObj, infoObj);
+
+                        // Uassert to stop doTxn only when the index build fails; errors from
+                        // CRUD ops are recorded in the results array and do not abort doTxn.
+ uassertStatusOK(getStatusFromCommandResult(infoObj));
+
+ return Status::OK();
+ });
+ } catch (const DBException& ex) {
+ ab.append(false);
+ result->append("applied", ++(*numApplied));
+ result->append("code", ex.code());
+ result->append("codeName", ErrorCodes::errorString(ex.code()));
+ result->append("errmsg", ex.what());
+ result->append("results", ab.arr());
+ return Status(ErrorCodes::UnknownError, ex.what());
+ }
+ }
+
+ ab.append(status.isOK());
+ if (!status.isOK()) {
+ log() << "doTxn error applying: " << status;
+ errors++;
+ }
+
+ (*numApplied)++;
+
+ if (MONGO_FAIL_POINT(doTxnPauseBetweenOperations)) {
+ // While holding a database lock under MMAPv1, we would be implicitly holding the
+ // flush lock here. This would prevent other threads from acquiring the global
+ // lock or any database locks. We release all locks temporarily while the fail
+ // point is enabled to allow other threads to make progress.
+ boost::optional<Lock::TempRelease> release;
+ auto storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
+ if (storageEngine->isMmapV1() && !opCtx->lockState()->isW()) {
+ release.emplace(opCtx->lockState());
+ }
+ MONGO_FAIL_POINT_PAUSE_WHILE_SET(doTxnPauseBetweenOperations);
+ }
+ }
+
+ result->append("applied", *numApplied);
+ result->append("results", ab.arr());
+
+ if (errors != 0) {
+ return Status(ErrorCodes::UnknownError, "doTxn had one or more errors applying ops");
+ }
+
+ return Status::OK();
+}
+
+bool _hasPrecondition(const BSONObj& doTxnCmd) {
+ return doTxnCmd[DoTxn::kPreconditionFieldName].type() == Array;
+}
+
+Status _checkPrecondition(OperationContext* opCtx,
+ const BSONObj& doTxnCmd,
+ BSONObjBuilder* result) {
+ invariant(opCtx->lockState()->isW());
+ invariant(_hasPrecondition(doTxnCmd));
+
+ for (auto elem : doTxnCmd[DoTxn::kPreconditionFieldName].Obj()) {
+ auto preCondition = elem.Obj();
+ if (preCondition["ns"].type() != BSONType::String) {
+ return {ErrorCodes::InvalidNamespace,
+ str::stream() << "ns in preCondition must be a string, but found type: "
+ << typeName(preCondition["ns"].type())};
+ }
+ const NamespaceString nss(preCondition["ns"].valueStringData());
+ if (!nss.isValid()) {
+ return {ErrorCodes::InvalidNamespace, "invalid ns: " + nss.ns()};
+ }
+
+ DBDirectClient db(opCtx);
+ BSONObj realres = db.findOne(nss.ns(), preCondition["q"].Obj());
+
+ // Get collection default collation.
+ Database* database = dbHolder().get(opCtx, nss.db());
+ if (!database) {
+ return {ErrorCodes::NamespaceNotFound, "database in ns does not exist: " + nss.ns()};
+ }
+ Collection* collection = database->getCollection(opCtx, nss);
+ if (!collection) {
+ return {ErrorCodes::NamespaceNotFound, "collection in ns does not exist: " + nss.ns()};
+ }
+ const CollatorInterface* collator = collection->getDefaultCollator();
+
+ // doTxn does not allow any extensions, such as $text, $where, $geoNear, $near,
+ // $nearSphere, or $expr.
+ boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, collator));
+ Matcher matcher(preCondition["res"].Obj(), std::move(expCtx));
+ if (!matcher.matches(realres)) {
+ result->append("got", realres);
+ result->append("whatFailed", preCondition);
+ return {ErrorCodes::BadValue, "preCondition failed"};
+ }
+ }
+
+ return Status::OK();
+}
+} // namespace
+
+Status doTxn(OperationContext* opCtx,
+ const std::string& dbName,
+ const BSONObj& doTxnCmd,
+ repl::OplogApplication::Mode oplogApplicationMode,
+ BSONObjBuilder* result) {
+ bool allowAtomic = false;
+ uassertStatusOK(
+ bsonExtractBooleanFieldWithDefault(doTxnCmd, "allowAtomic", true, &allowAtomic));
+ auto areOpsCrudOnly = _areOpsCrudOnly(doTxnCmd);
+ auto isAtomic = allowAtomic && areOpsCrudOnly;
+ auto hasPrecondition = _hasPrecondition(doTxnCmd);
+
+ if (hasPrecondition) {
+ uassert(ErrorCodes::InvalidOptions,
+ "Cannot use preCondition with {allowAtomic: false}.",
+ allowAtomic);
+ uassert(ErrorCodes::InvalidOptions,
+ "Cannot use preCondition when operations include commands.",
+ areOpsCrudOnly);
+ }
+
+ boost::optional<Lock::GlobalWrite> globalWriteLock;
+ boost::optional<Lock::DBLock> dbWriteLock;
+
+ // There's only one case where we are allowed to take the database lock instead of the global
+ // lock - no preconditions; only CRUD ops; and non-atomic mode.
+ if (!hasPrecondition && areOpsCrudOnly && !allowAtomic) {
+ dbWriteLock.emplace(opCtx, dbName, MODE_IX);
+ } else {
+ globalWriteLock.emplace(opCtx);
+ }
+
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ bool userInitiatedWritesAndNotPrimary =
+ opCtx->writesAreReplicated() && !replCoord->canAcceptWritesForDatabase(opCtx, dbName);
+
+ if (userInitiatedWritesAndNotPrimary)
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while applying ops to database " << dbName);
+
+ if (hasPrecondition) {
+ invariant(isAtomic);
+ auto status = _checkPrecondition(opCtx, doTxnCmd, result);
+ if (!status.isOK()) {
+ return status;
+ }
+ }
+
+ int numApplied = 0;
+ if (!isAtomic) {
+ return _doTxn(opCtx, dbName, doTxnCmd, oplogApplicationMode, result, &numApplied, nullptr);
+ }
+
+ // Perform write ops atomically
+ invariant(globalWriteLock);
+
+ try {
+ writeConflictRetry(opCtx, "doTxn", dbName, [&] {
+ BSONObjBuilder intermediateResult;
+ std::unique_ptr<BSONArrayBuilder> opsBuilder;
+ if (opCtx->writesAreReplicated() &&
+ repl::ReplicationCoordinator::modeMasterSlave != replCoord->getReplicationMode()) {
+ opsBuilder = stdx::make_unique<BSONArrayBuilder>();
+ }
+ WriteUnitOfWork wunit(opCtx);
+ numApplied = 0;
+ {
+ // Suppress replication for atomic operations until end of doTxn.
+ repl::UnreplicatedWritesBlock uwb(opCtx);
+ uassertStatusOK(_doTxn(opCtx,
+ dbName,
+ doTxnCmd,
+ oplogApplicationMode,
+ &intermediateResult,
+ &numApplied,
+ opsBuilder.get()));
+ }
+ // Generate oplog entry for all atomic ops collectively.
+ if (opCtx->writesAreReplicated()) {
+ // We want this applied atomically on slaves so we rewrite the oplog entry without
+ // the pre-condition for speed.
+
+ BSONObjBuilder cmdBuilder;
+
+ auto opsFieldName = doTxnCmd.firstElement().fieldNameStringData();
+ for (auto elem : doTxnCmd) {
+ auto name = elem.fieldNameStringData();
+ if (name == opsFieldName) {
+ // This should be written as applyOps, not doTxn.
+ invariant(opsFieldName == "doTxn"_sd);
+ if (opsBuilder) {
+ cmdBuilder.append("applyOps"_sd, opsBuilder->arr());
+ } else {
+ cmdBuilder.appendAs(elem, "applyOps"_sd);
+ }
+ continue;
+ }
+ if (name == DoTxn::kPreconditionFieldName)
+ continue;
+ if (name == bypassDocumentValidationCommandOption())
+ continue;
+ cmdBuilder.append(elem);
+ }
+
+ const BSONObj cmdRewritten = cmdBuilder.done();
+
+ auto opObserver = getGlobalServiceContext()->getOpObserver();
+ invariant(opObserver);
+ opObserver->onApplyOps(opCtx, dbName, cmdRewritten);
+ }
+ wunit.commit();
+ result->appendElements(intermediateResult.obj());
+ });
+ } catch (const DBException& ex) {
+ if (ex.code() == ErrorCodes::AtomicityFailure) {
+ // Retry in non-atomic mode.
+ return _doTxn(
+ opCtx, dbName, doTxnCmd, oplogApplicationMode, result, &numApplied, nullptr);
+ }
+ BSONArrayBuilder ab;
+ ++numApplied;
+ for (int j = 0; j < numApplied; j++)
+ ab.append(false);
+ result->append("applied", numApplied);
+ result->append("code", ex.code());
+ result->append("codeName", ErrorCodes::errorString(ex.code()));
+ result->append("errmsg", ex.what());
+ result->append("results", ab.arr());
+ return Status(ErrorCodes::UnknownError, ex.what());
+ }
+
+ return Status::OK();
+}
+
+} // namespace mongo
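
A hedged sketch (not from this patch) of the preCondition shape that _checkPrecondition() reads:
each array entry names a namespace ("ns"), a query ("q"), and a match expression ("res") that the
queried document must satisfy. Note that doTxn() above only permits preCondition together with
allowAtomic and CRUD-only ops. The namespace, query, and update below are made up.

    BSONObj cmdObj =
        BSON("doTxn" << BSON_ARRAY(BSON("op"
                                        << "u"
                                        << "ns"
                                        << "test.t"
                                        << "o2"
                                        << BSON("_id" << 0)
                                        << "o"
                                        << BSON("$set" << BSON("x" << 1))))
                     << "preCondition"
                     << BSON_ARRAY(BSON("ns"
                                        << "test.t"
                                        << "q"
                                        << BSON("_id" << 0)
                                        << "res"
                                        << BSON("x" << 0))));
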
diff --git a/src/mongo/db/repl/do_txn.h b/src/mongo/db/repl/do_txn.h
new file mode 100644
index 00000000000..022474bcbf1
--- /dev/null
+++ b/src/mongo/db/repl/do_txn.h
@@ -0,0 +1,58 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+#include "mongo/base/status.h"
+#include "mongo/db/repl/oplog.h"
+
+namespace mongo {
+class BSONObj;
+class BSONObjBuilder;
+class OperationContext;
+
+class DoTxn {
+public:
+ static constexpr StringData kPreconditionFieldName = "preCondition"_sd;
+ static constexpr StringData kOplogApplicationModeFieldName = "oplogApplicationMode"_sd;
+};
+
+/**
+ * Applies ops contained in 'doTxnCmd' and populates fields in 'result' to be returned to the
+ * caller. The information contained in 'result' can be returned to the user if called as part
+ * of the execution of a 'doTxn' command.
+ *
+ * The 'oplogApplicationMode' argument determines the semantics of the operations contained within
+ * the given command object. This function may be called as part of a direct user invocation of the
+ * 'doTxn' command, or as part of the application of a 'doTxn' oplog operation. In either
+ * case, the mode can be set to determine how the internal ops are executed.
+ */
+Status doTxn(OperationContext* opCtx,
+ const std::string& dbName,
+ const BSONObj& doTxnCmd,
+ repl::OplogApplication::Mode oplogApplicationMode,
+ BSONObjBuilder* result);
+
+} // namespace mongo
diff --git a/src/mongo/db/repl/do_txn_test.cpp b/src/mongo/db/repl/do_txn_test.cpp
new file mode 100644
index 00000000000..cb50d3a1d57
--- /dev/null
+++ b/src/mongo/db/repl/do_txn_test.cpp
@@ -0,0 +1,328 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/collection_options.h"
+#include "mongo/db/catalog/database_holder.h"
+#include "mongo/db/client.h"
+#include "mongo/db/op_observer_noop.h"
+#include "mongo/db/repl/do_txn.h"
+#include "mongo/db/repl/repl_client_info.h"
+#include "mongo/db/repl/replication_coordinator_mock.h"
+#include "mongo/db/repl/storage_interface_impl.h"
+#include "mongo/db/service_context_d_test_fixture.h"
+#include "mongo/logger/logger.h"
+#include "mongo/rpc/get_status_from_command_result.h"
+#include "mongo/stdx/memory.h"
+
+namespace mongo {
+namespace repl {
+namespace {
+
+/**
+ * Mock OpObserver that tracks doTxn events. doTxn internally applies its arguments using applyOps.
+ */
+class OpObserverMock : public OpObserverNoop {
+public:
+ /**
+ * Called by doTxn() when ops are applied atomically.
+ */
+ void onApplyOps(OperationContext* opCtx,
+ const std::string& dbName,
+ const BSONObj& doTxnCmd) override;
+
+    // If not empty, holds the command object passed to the last invocation of onApplyOps().
+ BSONObj onApplyOpsCmdObj;
+};
+
+void OpObserverMock::onApplyOps(OperationContext* opCtx,
+ const std::string& dbName,
+ const BSONObj& doTxnCmd) {
+ ASSERT_FALSE(doTxnCmd.isEmpty());
+ // Get owned copy because 'doTxnCmd' may be a temporary BSONObj created by doTxn().
+ onApplyOpsCmdObj = doTxnCmd.getOwned();
+}
+
+/**
+ * Test fixture for doTxn().
+ */
+class DoTxnTest : public ServiceContextMongoDTest {
+private:
+ void setUp() override;
+ void tearDown() override;
+
+protected:
+ OpObserverMock* _opObserver = nullptr;
+ std::unique_ptr<StorageInterface> _storage;
+};
+
+void DoTxnTest::setUp() {
+ // Set up mongod.
+ ServiceContextMongoDTest::setUp();
+
+ auto service = getServiceContext();
+ auto opCtx = cc().makeOperationContext();
+
+ // Set up ReplicationCoordinator and create oplog.
+ ReplicationCoordinator::set(service, stdx::make_unique<ReplicationCoordinatorMock>(service));
+ setOplogCollectionName();
+ createOplog(opCtx.get());
+
+ // Ensure that we are primary.
+ auto replCoord = ReplicationCoordinator::get(opCtx.get());
+ ASSERT_OK(replCoord->setFollowerMode(MemberState::RS_PRIMARY));
+
+ // Use OpObserverMock to track notifications for doTxn().
+ auto opObserver = stdx::make_unique<OpObserverMock>();
+ _opObserver = opObserver.get();
+ service->setOpObserver(std::move(opObserver));
+
+ // This test uses StorageInterface to create collections and inspect documents inside
+ // collections.
+ _storage = stdx::make_unique<StorageInterfaceImpl>();
+}
+
+void DoTxnTest::tearDown() {
+ _storage = {};
+ _opObserver = nullptr;
+
+ // Reset default log level in case it was changed.
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogComponent::kReplication,
+ logger::LogSeverity::Debug(0));
+
+ ServiceContextMongoDTest::tearDown();
+}
+
+/**
+ * Fixes up result document returned by doTxn and converts to Status.
+ */
+Status getStatusFromDoTxnResult(const BSONObj& result) {
+ if (result["ok"]) {
+ return getStatusFromCommandResult(result);
+ }
+
+ BSONObjBuilder builder;
+ builder.appendElements(result);
+ auto code = result.getIntField("code");
+ builder.appendIntOrLL("ok", code == 0);
+ auto newResult = builder.obj();
+ return getStatusFromCommandResult(newResult);
+}
+
+TEST_F(DoTxnTest, AtomicDoTxnWithNoOpsReturnsSuccess) {
+ auto opCtx = cc().makeOperationContext();
+ auto mode = OplogApplication::Mode::kApplyOpsCmd;
+ BSONObjBuilder resultBuilder;
+ auto cmdObj = BSON("doTxn" << BSONArray());
+ auto expectedCmdObj = BSON("applyOps" << BSONArray());
+ ASSERT_OK(doTxn(opCtx.get(), "test", cmdObj, mode, &resultBuilder));
+ ASSERT_BSONOBJ_EQ(expectedCmdObj, _opObserver->onApplyOpsCmdObj);
+}
+
+BSONObj makeInsertOperation(const NamespaceString& nss,
+ const OptionalCollectionUUID& uuid,
+ const BSONObj& documentToInsert) {
+ return uuid ? BSON("op"
+ << "i"
+ << "ns"
+ << nss.ns()
+ << "o"
+ << documentToInsert
+ << "ui"
+ << *uuid)
+ : BSON("op"
+ << "i"
+ << "ns"
+ << nss.ns()
+ << "o"
+ << documentToInsert);
+}
+
+/**
+ * Creates a doTxn command object with a single insert operation.
+ */
+BSONObj makeDoTxnWithInsertOperation(const NamespaceString& nss,
+ const OptionalCollectionUUID& uuid,
+ const BSONObj& documentToInsert) {
+ auto insertOp = makeInsertOperation(nss, uuid, documentToInsert);
+ return BSON("doTxn" << BSON_ARRAY(insertOp));
+}
+
+/**
+ * Creates an applyOps command object with a single insert operation.
+ */
+BSONObj makeApplyOpsWithInsertOperation(const NamespaceString& nss,
+ const OptionalCollectionUUID& uuid,
+ const BSONObj& documentToInsert) {
+ auto insertOp = makeInsertOperation(nss, uuid, documentToInsert);
+ return BSON("applyOps" << BSON_ARRAY(insertOp));
+}
+
+TEST_F(DoTxnTest, AtomicDoTxnInsertIntoNonexistentCollectionReturnsNamespaceNotFoundInResult) {
+ auto opCtx = cc().makeOperationContext();
+ auto mode = OplogApplication::Mode::kApplyOpsCmd;
+ NamespaceString nss("test.t");
+ auto documentToInsert = BSON("_id" << 0);
+ auto cmdObj = makeDoTxnWithInsertOperation(nss, boost::none, documentToInsert);
+ BSONObjBuilder resultBuilder;
+ ASSERT_EQUALS(ErrorCodes::UnknownError,
+ doTxn(opCtx.get(), "test", cmdObj, mode, &resultBuilder));
+ auto result = resultBuilder.obj();
+ auto status = getStatusFromDoTxnResult(result);
+ ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, status);
+}
+
+TEST_F(DoTxnTest, AtomicDoTxnInsertIntoCollectionWithoutUuid) {
+ auto opCtx = cc().makeOperationContext();
+ auto mode = OplogApplication::Mode::kApplyOpsCmd;
+ NamespaceString nss("test.t");
+
+ // Collection has no uuid.
+ CollectionOptions collectionOptions;
+ ASSERT_OK(_storage->createCollection(opCtx.get(), nss, collectionOptions));
+
+ auto documentToInsert = BSON("_id" << 0);
+ auto cmdObj = makeDoTxnWithInsertOperation(nss, boost::none, documentToInsert);
+ auto expectedCmdObj = makeApplyOpsWithInsertOperation(nss, boost::none, documentToInsert);
+ BSONObjBuilder resultBuilder;
+ ASSERT_OK(doTxn(opCtx.get(), "test", cmdObj, mode, &resultBuilder));
+ ASSERT_BSONOBJ_EQ(expectedCmdObj, _opObserver->onApplyOpsCmdObj);
+}
+
+TEST_F(DoTxnTest, AtomicDoTxnInsertWithUuidIntoCollectionWithUuid) {
+ auto opCtx = cc().makeOperationContext();
+ auto mode = OplogApplication::Mode::kApplyOpsCmd;
+ NamespaceString nss("test.t");
+
+ auto uuid = UUID::gen();
+
+ CollectionOptions collectionOptions;
+ collectionOptions.uuid = uuid;
+ ASSERT_OK(_storage->createCollection(opCtx.get(), nss, collectionOptions));
+
+ auto documentToInsert = BSON("_id" << 0);
+ auto cmdObj = makeDoTxnWithInsertOperation(nss, uuid, documentToInsert);
+ auto expectedCmdObj = makeApplyOpsWithInsertOperation(nss, uuid, documentToInsert);
+ BSONObjBuilder resultBuilder;
+ ASSERT_OK(doTxn(opCtx.get(), "test", cmdObj, mode, &resultBuilder));
+ ASSERT_BSONOBJ_EQ(expectedCmdObj, _opObserver->onApplyOpsCmdObj);
+}
+
+TEST_F(DoTxnTest, AtomicDoTxnInsertWithUuidIntoCollectionWithoutUuid) {
+ auto opCtx = cc().makeOperationContext();
+ auto mode = OplogApplication::Mode::kApplyOpsCmd;
+ NamespaceString nss("test.t");
+
+ auto uuid = UUID::gen();
+
+ // Collection has no uuid.
+ CollectionOptions collectionOptions;
+ ASSERT_OK(_storage->createCollection(opCtx.get(), nss, collectionOptions));
+
+ // The doTxn returns a NamespaceNotFound error because of the failed UUID lookup
+ // even though a collection exists with the same namespace as the insert operation.
+ auto documentToInsert = BSON("_id" << 0);
+ auto cmdObj = makeDoTxnWithInsertOperation(nss, uuid, documentToInsert);
+ BSONObjBuilder resultBuilder;
+ ASSERT_EQUALS(ErrorCodes::UnknownError,
+ doTxn(opCtx.get(), "test", cmdObj, mode, &resultBuilder));
+ auto result = resultBuilder.obj();
+ auto status = getStatusFromDoTxnResult(result);
+ ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, status);
+}
+
+TEST_F(DoTxnTest, AtomicDoTxnInsertWithoutUuidIntoCollectionWithUuid) {
+ auto opCtx = cc().makeOperationContext();
+ auto mode = OplogApplication::Mode::kApplyOpsCmd;
+ NamespaceString nss("test.t");
+
+ auto uuid = UUID::gen();
+
+ CollectionOptions collectionOptions;
+ collectionOptions.uuid = uuid;
+ ASSERT_OK(_storage->createCollection(opCtx.get(), nss, collectionOptions));
+
+ auto documentToInsert = BSON("_id" << 0);
+ auto cmdObj = makeDoTxnWithInsertOperation(nss, boost::none, documentToInsert);
+ BSONObjBuilder resultBuilder;
+ ASSERT_OK(doTxn(opCtx.get(), "test", cmdObj, mode, &resultBuilder));
+
+    // The insert operation provided by the caller did not contain a collection UUID, but
+    // doTxn() should add the UUID to the oplog entry.
+ auto expectedCmdObj = makeApplyOpsWithInsertOperation(nss, uuid, documentToInsert);
+ ASSERT_BSONOBJ_EQ(expectedCmdObj, _opObserver->onApplyOpsCmdObj);
+}
+
+TEST_F(DoTxnTest, DoTxnPropagatesOplogApplicationMode) {
+ auto opCtx = cc().makeOperationContext();
+
+ // Increase log component verbosity to check for op application messages.
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogComponent::kReplication,
+ logger::LogSeverity::Debug(3));
+
+ // Test that the 'doTxn' function passes the oplog application mode through correctly to the
+ // underlying op application functions.
+ NamespaceString nss("test.coll");
+ auto uuid = UUID::gen();
+
+ // Create a collection for us to insert documents into.
+ CollectionOptions collectionOptions;
+ collectionOptions.uuid = uuid;
+ ASSERT_OK(_storage->createCollection(opCtx.get(), nss, collectionOptions));
+
+ BSONObjBuilder resultBuilder;
+
+ // Make sure the oplog application mode is passed through via 'doTxn' correctly.
+ startCapturingLogMessages();
+
+ auto docToInsert0 = BSON("_id" << 0);
+ auto cmdObj = makeDoTxnWithInsertOperation(nss, uuid, docToInsert0);
+
+ ASSERT_OK(doTxn(opCtx.get(),
+                    nss.db().toString(),
+ cmdObj,
+ OplogApplication::Mode::kInitialSync,
+ &resultBuilder));
+ ASSERT_EQUALS(1, countLogLinesContaining("oplog application mode: InitialSync"));
+
+ auto docToInsert1 = BSON("_id" << 1);
+ cmdObj = makeDoTxnWithInsertOperation(nss, uuid, docToInsert1);
+
+ ASSERT_OK(doTxn(opCtx.get(),
+                    nss.db().toString(),
+ cmdObj,
+ OplogApplication::Mode::kSecondary,
+ &resultBuilder));
+ ASSERT_EQUALS(1, countLogLinesContaining("oplog application mode: Secondary"));
+
+ stopCapturingLogMessages();
+}
+
+} // namespace
+} // namespace repl
+} // namespace mongo
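
A further test-style sketch (hypothetical, following the fixtures above) that would exercise the
non-atomic path: with "allowAtomic" set to false and CRUD-only ops, doTxn() takes only a database
IX lock and applies each op individually rather than in a single WriteUnitOfWork.

    // Assumes an opCtx and that the "test.t" collection was created via _storage,
    // as in the tests above.
    auto cmdObj = BSON("doTxn" << BSON_ARRAY(BSON("op"
                                                  << "i"
                                                  << "ns"
                                                  << "test.t"
                                                  << "o"
                                                  << BSON("_id" << 2)))
                               << "allowAtomic"
                               << false);
    BSONObjBuilder resultBuilder;
    ASSERT_OK(doTxn(opCtx.get(),
                    "test",
                    cmdObj,
                    OplogApplication::Mode::kApplyOpsCmd,
                    &resultBuilder));
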