author    seanzimm <sean.zimmerman@mongodb.com>    2023-04-17 13:37:24 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>    2023-04-17 14:42:54 +0000
commit    408cbcd9802208741d5a4d96e2b52aedd97ce50f (patch)
tree      4559c07e89f731b4574ed920b6a6037bafc726fe
parent    da6aad367c29106aefb8cce21299dc46c6cdfdcd (diff)
SERVER-72764: Only allow one retryable findAndModify on bulkWrite
-rw-r--r--    jstests/core/write/bulk/bulk_write_non_transaction.js    71
-rw-r--r--    src/mongo/db/commands/bulk_write.cpp    2
-rw-r--r--    src/mongo/db/commands/bulk_write_common.cpp    33
-rw-r--r--    src/mongo/db/commands/bulk_write_common.h    2
-rw-r--r--    src/mongo/s/commands/cluster_bulk_write_cmd.cpp    2
5 files changed, 106 insertions(+), 4 deletions(-)
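
For context, here is a rough mongo shell sketch of the restriction this change enforces. It is a sketch only: it assumes featureFlagBulkWriteCommand is enabled and uses an explicit txnNumber on a logical session to mark the command as a retryable write; the test.coll namespace simply mirrors the test below.

// Issued as a retryable write (lsid attached by the session, txnNumber supplied explicitly),
// a bulkWrite with more than one op that requests a return document is now rejected up front.
const session = db.getMongo().startSession();
const sessionAdmin = session.getDatabase("admin");
let res = sessionAdmin.runCommand({
    bulkWrite: 1,
    ops: [
        {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, return: "pre"},
        {delete: 0, filter: {_id: 2}, return: true},
    ],
    nsInfo: [{ns: "test.coll"}],
    txnNumber: NumberLong(0),  // marks this command as a retryable write
});
assert.commandFailedWithCode(res, ErrorCodes.BadValue);

// The same ops outside a retryable write (no txnNumber) are still accepted.
assert.commandWorked(db.adminCommand({
    bulkWrite: 1,
    ops: [
        {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, return: "pre"},
        {delete: 0, filter: {_id: 2}, return: true},
    ],
    nsInfo: [{ns: "test.coll"}],
}));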
diff --git a/jstests/core/write/bulk/bulk_write_non_transaction.js b/jstests/core/write/bulk/bulk_write_non_transaction.js
index d48e392797d..24e83301ef3 100644
--- a/jstests/core/write/bulk/bulk_write_non_transaction.js
+++ b/jstests/core/write/bulk/bulk_write_non_transaction.js
@@ -480,6 +480,77 @@ assert.eq(coll.findOne().skey, "MongoDB");
coll.drop();
+// Test running multiple findAndModify ops in a single bulkWrite command.
+// For normal commands this should succeed; for retryable writes the top-level command should fail.
+
+// We want to make sure both update and delete handle this correctly, so test both orderings of
+// ops: update + delete and delete + update. This proves that both op types set and check the flag
+// correctly, which makes also testing update + update and delete + delete redundant.
+
+// update + delete
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, return: "pre"},
+ {delete: 0, filter: {_id: 1}, return: true},
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+let processCursor = true;
+try {
+ assert.commandWorked(res);
+} catch {
+ processCursor = false;
+ assert.commandFailedWithCode(res, [ErrorCodes.BadValue]);
+ assert.eq(res.errmsg, "BulkWrite can only support 1 op with a return for a retryable write");
+}
+
+if (processCursor) {
+ cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+ cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1});
+ assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB"});
+ cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1});
+ assert.docEq(res.cursor.firstBatch[2].value, {_id: 1, skey: "MongoDB2"});
+ assert(!res.cursor.firstBatch[3]);
+}
+
+coll.drop();
+
+// delete + update
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {insert: 0, document: {_id: 2, skey: "MongoDB"}},
+ {delete: 0, filter: {_id: 2}, return: true},
+ {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, return: "pre"},
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+processCursor = true;
+try {
+ assert.commandWorked(res);
+} catch {
+ processCursor = false;
+ assert.commandFailedWithCode(res, [ErrorCodes.BadValue]);
+ assert.eq(res.errmsg, "BulkWrite can only support 1 op with a return for a retryable write");
+}
+
+if (processCursor) {
+ cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+ cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
+ cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1});
+ assert.docEq(res.cursor.firstBatch[2].value, {_id: 2, skey: "MongoDB"});
+ cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, nModified: 1});
+ assert.docEq(res.cursor.firstBatch[3].value, {_id: 1, skey: "MongoDB"});
+ assert(!res.cursor.firstBatch[4]);
+}
+
+coll.drop();
+
// Test BypassDocumentValidator
assert.commandWorked(coll.insert({_id: 1}));
assert.commandWorked(db.runCommand({collMod: "coll", validator: {a: {$exists: true}}}));
diff --git a/src/mongo/db/commands/bulk_write.cpp b/src/mongo/db/commands/bulk_write.cpp
index 6b64b7773a0..e37a5dc04fe 100644
--- a/src/mongo/db/commands/bulk_write.cpp
+++ b/src/mongo/db/commands/bulk_write.cpp
@@ -821,7 +821,7 @@ public:
auto& req = request();
- bulk_write_common::validateRequest(req);
+ bulk_write_common::validateRequest(req, opCtx->isRetryableWrite());
// Apply all of the write operations.
auto [replies, retriedStmtIds] = bulk_write::performWrites(opCtx, req);
diff --git a/src/mongo/db/commands/bulk_write_common.cpp b/src/mongo/db/commands/bulk_write_common.cpp
index e3d649ea309..691932dfc17 100644
--- a/src/mongo/db/commands/bulk_write_common.cpp
+++ b/src/mongo/db/commands/bulk_write_common.cpp
@@ -34,7 +34,7 @@
namespace mongo {
namespace bulk_write_common {
-void validateRequest(const BulkWriteCommandRequest& req) {
+void validateRequest(const BulkWriteCommandRequest& req, bool isRetryableWrite) {
const auto& ops = req.getOps();
const auto& nsInfo = req.getNsInfo();
@@ -55,6 +55,8 @@ void validateRequest(const BulkWriteCommandRequest& req) {
}
// Validate that every ops entry has a valid nsInfo index.
+ // Also validate that we only have one findAndModify for retryable writes.
+ bool seenFindAndModify = false;
for (const auto& op : ops) {
const auto& bulkWriteOp = BulkWriteCRUDOp(op);
unsigned int nsInfoIdx = bulkWriteOp.getNsInfoIdx();
@@ -62,6 +64,35 @@ void validateRequest(const BulkWriteCommandRequest& req) {
str::stream() << "BulkWrite ops entry " << bulkWriteOp.toBSON()
<< " has an invalid nsInfo index.",
nsInfoIdx < nsInfo.size());
+
+ if (isRetryableWrite) {
+ switch (bulkWriteOp.getType()) {
+ case BulkWriteCRUDOp::kInsert:
+ break;
+ case BulkWriteCRUDOp::kUpdate: {
+ auto update = bulkWriteOp.getUpdate();
+ if (update->getReturn()) {
+ uassert(
+ ErrorCodes::BadValue,
+ "BulkWrite can only support 1 op with a return for a retryable write",
+ !seenFindAndModify);
+ seenFindAndModify = true;
+ }
+ break;
+ }
+ case BulkWriteCRUDOp::kDelete: {
+ auto deleteOp = bulkWriteOp.getDelete();
+ if (deleteOp->getReturn()) {
+ uassert(
+ ErrorCodes::BadValue,
+ "BulkWrite can only support 1 op with a return for a retryable write",
+ !seenFindAndModify);
+ seenFindAndModify = true;
+ }
+ break;
+ }
+ }
+ }
}
}
diff --git a/src/mongo/db/commands/bulk_write_common.h b/src/mongo/db/commands/bulk_write_common.h
index 9beb9956092..754a644083f 100644
--- a/src/mongo/db/commands/bulk_write_common.h
+++ b/src/mongo/db/commands/bulk_write_common.h
@@ -42,7 +42,7 @@ namespace bulk_write_common {
/**
* Validates the given bulkWrite command request and throws if the request is malformed.
*/
-void validateRequest(const BulkWriteCommandRequest& req);
+void validateRequest(const BulkWriteCommandRequest& req, bool isRetryableWrite);
/**
* Get the privileges needed to perform the given bulkWrite command.
diff --git a/src/mongo/s/commands/cluster_bulk_write_cmd.cpp b/src/mongo/s/commands/cluster_bulk_write_cmd.cpp
index 324a49c6117..edf181fd7e7 100644
--- a/src/mongo/s/commands/cluster_bulk_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_bulk_write_cmd.cpp
@@ -104,7 +104,7 @@ public:
"BulkWrite may not be run without featureFlagBulkWriteCommand enabled",
gFeatureFlagBulkWriteCommand.isEnabled(serverGlobalParams.featureCompatibility));
- bulk_write_common::validateRequest(request());
+ bulk_write_common::validateRequest(request(), opCtx->isRetryableWrite());
auto replyItems = cluster::bulkWrite(opCtx, request());