summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBobby Morck <bobby.morck@mongodb.com>2021-09-25 17:34:13 -0400
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-12-22 04:18:28 +0000
commitc52c37bb2ac332cde41047ce1f8c16447b893361 (patch)
treea6c9c0ee184352f51e704b5cc7ebcda0600edf81
parentbde95276bd632ad5e54a64dab191a6f5f8812f0b (diff)
downloadmongo-c52c37bb2ac332cde41047ce1f8c16447b893361.tar.gz
SERVER-56127 Fixing retryable writes on update and delete commands to not execute more than once
(cherry picked from commit 6d8290297b563121037f8e9a9f2d37ec45ddb4bf)
-rw-r--r--etc/backports_required_for_multiversion_tests.yml4
-rw-r--r--jstests/sharding/retryable_writes_nested_shard_key.js83
-rw-r--r--src/mongo/db/repl/oplog_entry.cpp17
-rw-r--r--src/mongo/db/repl/oplog_entry.h6
-rw-r--r--src/mongo/db/s/session_catalog_migration_source.cpp3
-rw-r--r--src/mongo/s/shard_key_pattern.cpp14
-rw-r--r--src/mongo/s/shard_key_pattern.h21
-rw-r--r--src/mongo/s/shard_key_pattern_test.cpp104
8 files changed, 250 insertions, 2 deletions
diff --git a/etc/backports_required_for_multiversion_tests.yml b/etc/backports_required_for_multiversion_tests.yml
index e98bfccc1bb..1c40d3e529f 100644
--- a/etc/backports_required_for_multiversion_tests.yml
+++ b/etc/backports_required_for_multiversion_tests.yml
@@ -80,6 +80,8 @@ last-continuous:
test_file: jstests/replsets/dbcheck.js
- ticket: SERVER-61955
test_file: jstests/auth/dbcheck.js
+ - ticket: SERVER-56127
+ test_file: jstests/sharding/retryable_writes_nested_shard_key.js
# Tests that should only be excluded from particular suites should be listed under that suite.
suites:
@@ -314,6 +316,8 @@ last-lts:
test_file: jstests/replsets/dbcheck.js
- ticket: SERVER-61955
test_file: jstests/auth/dbcheck.js
+ - ticket: SERVER-56127
+ test_file: jstests/sharding/retryable_writes_nested_shard_key.js
# Tests that should only be excluded from particular suites should be listed under that suite.
suites:
diff --git a/jstests/sharding/retryable_writes_nested_shard_key.js b/jstests/sharding/retryable_writes_nested_shard_key.js
new file mode 100644
index 00000000000..38ff71dc2d0
--- /dev/null
+++ b/jstests/sharding/retryable_writes_nested_shard_key.js
@@ -0,0 +1,83 @@
+/**
+ * Tests retryable insert, update and delete operations on a sharded collection with a nested shard
+ * key to ensure that each operation is not re-executed when run after chunk migration.
+ */
+
+(function() {
+"use strict";
+
+load("jstests/sharding/libs/create_sharded_collection_util.js");
+
+const st = new ShardingTest({mongos: 1, config: 1, shards: 2, rs: {nodes: 1}});
+
+const db = st.s.getDB("test");
+const collection = db.getCollection("mycoll");
+CreateShardedCollectionUtil.shardCollectionWithChunks(collection, {"x.y": 1}, [
+ {min: {"x.y": MinKey}, max: {"x.y": 0}, shard: st.shard0.shardName},
+ {min: {"x.y": 0}, max: {"x.y": 10}, shard: st.shard0.shardName},
+ {min: {"x.y": 10}, max: {"x.y": 20}, shard: st.shard1.shardName},
+ {min: {"x.y": 20}, max: {"x.y": MaxKey}, shard: st.shard1.shardName},
+]);
+
+assert.commandWorked(collection.insert({_id: 0, x: {y: 5}, counter: 0}));
+assert.commandWorked(collection.insert({_id: 1, x: {y: 15}}));
+
+const session = st.s.startSession({causalConsistency: false, retryWrites: false});
+const sessionCollection = session.getDatabase(db.getName()).getCollection(collection.getName());
+
+const updateCmd = {
+ updates: [{q: {"x.y": 5, _id: 0}, u: {$inc: {counter: 1}}}],
+ txnNumber: NumberLong(0),
+};
+
+const deleteCmd = {
+ deletes: [{q: {"x.y": 15, _id: 1}, limit: 1}],
+ txnNumber: NumberLong(1),
+};
+
+const insertCmd = {
+ documents: [{_id: 2, x: {y: 25}}],
+ txnNumber: NumberLong(2),
+};
+
+// Test that updateCmd is only executed a single time by verifying that counter has only been
+// incremented once.
+const firstRes = assert.commandWorked(sessionCollection.runCommand("update", updateCmd));
+assert.eq({n: firstRes.n, nModified: firstRes.nModified}, {n: 1, nModified: 1});
+
+assert.commandWorked(db.adminCommand(
+ {moveChunk: collection.getFullName(), find: {"x.y": 5}, to: st.shard1.shardName}));
+
+const secondRes = assert.commandWorked(sessionCollection.runCommand("update", updateCmd));
+print(`secondRes: ${tojsononeline(secondRes)}`);
+assert.eq(collection.findOne({_id: 0}), {_id: 0, x: {y: 5}, counter: 1});
+
+// Tests that deleteCmd is only executed a single time by verifying that the command is able to
+// run a second time and that the response to the second command is equivalent to the first.
+const firstResDelete = assert.commandWorked(sessionCollection.runCommand("delete", deleteCmd));
+assert.eq({n: firstResDelete.n}, {n: 1});
+assert.eq(collection.findOne({_id: 1}), null);
+
+assert.commandWorked(db.adminCommand(
+ {moveChunk: collection.getFullName(), find: {"x.y": 15}, to: st.shard0.shardName}));
+
+const secondResDelete = assert.commandWorked(sessionCollection.runCommand("delete", deleteCmd));
+print(`secondResDelete: ${tojsononeline(secondResDelete)}`);
+assert.eq(secondResDelete.n, firstResDelete.n);
+
+// Tests insertCmd is only executed a single time by verifying that the command is able to
+// run a second time and that the response to the second command is equivalent to the first.
+// - If the command were to execute a second time, we would receive a duplicate key error.
+const firstResInsert = assert.commandWorked(sessionCollection.runCommand("insert", insertCmd));
+assert.eq({n: firstResInsert.n}, {n: 1});
+
+assert.commandWorked(db.adminCommand(
+ {moveChunk: collection.getFullName(), find: {"x.y": 25}, to: st.shard0.shardName}));
+
+const secondResInsert = assert.commandWorked(sessionCollection.runCommand("insert", insertCmd));
+print(`secondResInsert: ${tojsononeline(secondResInsert)}`);
+assert.eq(secondResInsert.n, firstResInsert.n);
+assert.eq(collection.findOne({_id: 2}), {_id: 2, x: {y: 25}});
+
+st.stop();
+})();
diff --git a/src/mongo/db/repl/oplog_entry.cpp b/src/mongo/db/repl/oplog_entry.cpp
index 9e15d205d02..3f3c88cd359 100644
--- a/src/mongo/db/repl/oplog_entry.cpp
+++ b/src/mongo/db/repl/oplog_entry.cpp
@@ -369,6 +369,20 @@ bool DurableOplogEntry::isCrudOpType() const {
return isCrudOpType(getOpType());
}
+bool DurableOplogEntry::isUpdateOrDelete() const {
+ auto opType = getOpType();
+ switch (opType) {
+ case OpTypeEnum::kDelete:
+ case OpTypeEnum::kUpdate:
+ return true;
+ case OpTypeEnum::kInsert:
+ case OpTypeEnum::kCommand:
+ case OpTypeEnum::kNoop:
+ return false;
+ }
+ MONGO_UNREACHABLE;
+}
+
bool DurableOplogEntry::shouldPrepare() const {
return getCommandType() == CommandType::kApplyOps &&
getObject()[ApplyOpsCommandInfoBase::kPrepareFieldName].booleanSafe();
@@ -711,6 +725,9 @@ bool OplogEntry::isSingleOplogEntryTransactionWithCommand() const {
bool OplogEntry::isCrudOpType() const {
return _entry.isCrudOpType();
}
+bool OplogEntry::isUpdateOrDelete() const {
+ return _entry.isUpdateOrDelete();
+}
bool OplogEntry::isIndexCommandType() const {
return _entry.isIndexCommandType();
diff --git a/src/mongo/db/repl/oplog_entry.h b/src/mongo/db/repl/oplog_entry.h
index 7c39c7618c2..9d196e2c85d 100644
--- a/src/mongo/db/repl/oplog_entry.h
+++ b/src/mongo/db/repl/oplog_entry.h
@@ -386,6 +386,11 @@ public:
bool isCrudOpType() const;
/**
+ * Returns true if the oplog entry is for an Update or Delete operation.
+ */
+ bool isUpdateOrDelete() const;
+
+ /**
* Returns true if the oplog entry is for a command related to indexes.
* i.e createIndexes, dropIndexes, startIndexBuild, commitIndexBuild, abortIndexBuild.
*/
@@ -554,6 +559,7 @@ public:
bool isSingleOplogEntryTransaction() const;
bool isSingleOplogEntryTransactionWithCommand() const;
bool isCrudOpType() const;
+ bool isUpdateOrDelete() const;
bool isIndexCommandType() const;
bool shouldPrepare() const;
BSONElement getIdElement() const;
diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp
index 38c1723adab..0aa548795d7 100644
--- a/src/mongo/db/s/session_catalog_migration_source.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source.cpp
@@ -335,8 +335,7 @@ bool SessionCatalogMigrationSource::_handleWriteHistory(WithLock, OperationConte
}
if (nextOplog->isCrudOpType()) {
- auto shardKey =
- _keyPattern.extractShardKeyFromDoc(nextOplog->getObjectContainingDocumentKey());
+ auto shardKey = _keyPattern.extractShardKeyFromOplogEntry(*nextOplog);
if (!_chunkRange.containsKey(shardKey)) {
continue;
}
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index 27ddb80e6ad..db90c4a53f0 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -429,6 +429,20 @@ BSONObj ShardKeyPattern::extractShardKeyFromDocThrows(const BSONObj& doc) const
return shardKey;
}
+BSONObj ShardKeyPattern::extractShardKeyFromOplogEntry(const repl::OplogEntry& entry) const {
+ if (!entry.isCrudOpType()) {
+ return BSONObj();
+ }
+
+ auto objWithDocumentKey = entry.getObjectContainingDocumentKey();
+
+ if (!entry.isUpdateOrDelete()) {
+ return extractShardKeyFromDoc(objWithDocumentKey);
+ }
+
+ return extractShardKeyFromDocumentKey(objWithDocumentKey);
+}
+
BSONObj ShardKeyPattern::emplaceMissingShardKeyValuesForDocument(const BSONObj doc) const {
BSONObjBuilder fullDocBuilder(doc);
for (const auto& skField : _keyPattern.toBSON()) {
diff --git a/src/mongo/s/shard_key_pattern.h b/src/mongo/s/shard_key_pattern.h
index aed37e46889..4dd79692c71 100644
--- a/src/mongo/s/shard_key_pattern.h
+++ b/src/mongo/s/shard_key_pattern.h
@@ -38,6 +38,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/keypattern.h"
#include "mongo/db/query/index_bounds.h"
+#include "mongo/db/repl/oplog_entry.h"
namespace mongo {
@@ -213,6 +214,26 @@ public:
BSONObj extractShardKeyFromDocThrows(const BSONObj& doc) const;
/**
+ * Given an Oplog entry, extracts the shard key corresponding to the key pattern for insert,
+ * update, and delete op types. If the op type is not a CRUD operation, an empty BSONObj()
+ * will be returned.
+ *
+ * For update and delete operations, the Oplog entry will contain an object with the document
+ * key.
+ *
+ * For insert operations, the Oplog entry will contain the original document from which the
+ * document key must be extracted.
+ *
+ * Examples:
+ * For KeyPattern {'a.b': 1}
+ * If the oplog entry contains field op='i'
+ * oplog contains: { a : { b : "1" } }
+ * If the oplog entry contains field op='u' or op='d'
+ * oplog contains: { 'a.b': "1" }
+ */
+ BSONObj extractShardKeyFromOplogEntry(const repl::OplogEntry& entry) const;
+
+ /**
* Returns the document with missing shard key values set to null.
*/
BSONObj emplaceMissingShardKeyValuesForDocument(const BSONObj doc) const;
diff --git a/src/mongo/s/shard_key_pattern_test.cpp b/src/mongo/s/shard_key_pattern_test.cpp
index e2cd0a88a83..fde7ad3a592 100644
--- a/src/mongo/s/shard_key_pattern_test.cpp
+++ b/src/mongo/s/shard_key_pattern_test.cpp
@@ -54,6 +54,36 @@ protected:
OperationContext* const _opCtx{_opCtxHolder.get()};
};
+/**
+ * Creates OplogEntry with given field values.
+ */
+repl::OplogEntry makeOplogEntry(repl::OpTime opTime,
+ repl::OpTypeEnum opType,
+ NamespaceString nss,
+ BSONObj oField,
+ boost::optional<BSONObj> o2Field = boost::none) {
+ return {
+ repl::DurableOplogEntry(opTime, // optime
+ boost::none, // hash
+ opType, // opType
+ nss, // namespace
+ boost::none, // uuid
+ boost::none, // fromMigrate
+ repl::OplogEntry::kOplogVersion, // version
+ oField, // o
+ o2Field, // o2
+ {}, // sessionInfo
+ boost::none, // upsert
+ Date_t(), // wall clock time
+ {}, // statement ids
+ boost::none, // optime of previous write within same transaction
+ boost::none, // pre-image optime
+ boost::none, // post-image optime
+ boost::none, // ShardId of resharding recipient
+ boost::none, // _id
+ boost::none)}; // needsRetryImage
+}
+
TEST_F(ShardKeyPatternTest, SingleFieldShardKeyPatternsValidityCheck) {
ShardKeyPattern s1(BSON("a" << 1));
ShardKeyPattern s2(BSON("a" << 1.0f));
@@ -137,6 +167,10 @@ static BSONObj docKey(const ShardKeyPattern& pattern, const BSONObj& doc) {
return pattern.extractShardKeyFromDoc(doc);
}
+static BSONObj docKeyFromOplog(const ShardKeyPattern& pattern, const repl::OplogEntry& entry) {
+ return pattern.extractShardKeyFromOplogEntry(entry);
+}
+
TEST_F(ShardKeyPatternTest, ExtractDocShardKeySingle) {
//
// Single field ShardKeyPatterns
@@ -228,6 +262,76 @@ TEST_F(ShardKeyPatternTest, ExtractDocShardKeyNested) {
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{b:[10, 20]}, c:30}")), BSONObj());
}
+TEST_F(ShardKeyPatternTest, ExtractShardKeyFromOplogUnnested) {
+ //
+ // Unnested ShardKeyPatterns from oplog entries with CRUD operation
+ //
+
+ ShardKeyPattern pattern(BSON("a" << 1));
+ auto deleteOplog = makeOplogEntry(repl::OpTime(Timestamp(50, 10), 1), // optime
+ repl::OpTypeEnum::kDelete, // op type
+ NamespaceString("a"), // namespace
+ BSON("_id" << 1 << "a" << 5)); // o
+ auto insertOplog = makeOplogEntry(repl::OpTime(Timestamp(60, 10), 1), // optime
+ repl::OpTypeEnum::kInsert, // op type
+ NamespaceString("a"), // namespace
+ BSON("_id" << 2 << "a" << 6)); // o
+ auto updateOplog = makeOplogEntry(repl::OpTime(Timestamp(70, 10), 1), // optime
+ repl::OpTypeEnum::kUpdate, // op type
+ NamespaceString("a"), // namespace
+ BSON("_id" << 3), // o
+ BSON("_id" << 3 << "a" << 7)); // o2
+
+ ASSERT_BSONOBJ_EQ(docKeyFromOplog(pattern, deleteOplog), fromjson("{a: 5}"));
+ ASSERT_BSONOBJ_EQ(docKeyFromOplog(pattern, insertOplog), fromjson("{a: 6}"));
+ ASSERT_BSONOBJ_EQ(docKeyFromOplog(pattern, updateOplog), fromjson("{a: 7}"));
+}
+
+TEST_F(ShardKeyPatternTest, ExtractShardKeyFromOplogNested) {
+ //
+ // Nested ShardKeyPatterns from oplog entries with CRUD operation
+ //
+
+ ShardKeyPattern pattern(BSON("a.b" << 1));
+ auto deleteOplog = makeOplogEntry(repl::OpTime(Timestamp(50, 10), 1), // optime
+ repl::OpTypeEnum::kDelete, // op type
+ NamespaceString("a.b"), // namespace
+ BSON("_id" << 1 << "a.b" << 5)); // o
+ auto insertOplog = makeOplogEntry(repl::OpTime(Timestamp(60, 10), 1), // optime
+ repl::OpTypeEnum::kInsert, // op type
+ NamespaceString("a.b"), // namespace
+ BSON("_id" << 2 << "a" << BSON("b" << 6))); // o
+ auto updateOplog = makeOplogEntry(repl::OpTime(Timestamp(70, 10), 1), // optime
+ repl::OpTypeEnum::kUpdate, // op type
+ NamespaceString("a.b"), // namespace
+ BSON("_id" << 3), // o
+ BSON("_id" << 3 << "a.b" << 7)); // o2
+
+ ASSERT_BSONOBJ_EQ(docKeyFromOplog(pattern, deleteOplog), fromjson("{'a.b': 5}"));
+ ASSERT_BSONOBJ_EQ(docKeyFromOplog(pattern, insertOplog), fromjson("{'a.b': 6}"));
+ ASSERT_BSONOBJ_EQ(docKeyFromOplog(pattern, updateOplog), fromjson("{'a.b': 7}"));
+}
+
+TEST_F(ShardKeyPatternTest, ExtractShardKeyFromOplogNonCRUD) {
+ //
+ // Oplogs with non-CRUD op types
+ //
+
+ ShardKeyPattern pattern(BSON("a.b" << 1));
+ auto noopOplog = makeOplogEntry(repl::OpTime(Timestamp(50, 10), 1), // optime
+ repl::OpTypeEnum::kNoop, // op type
+ NamespaceString("a.b"), // namespace
+ BSON("_id" << 1 << "a.b" << 5)); // o
+ auto commandOplog = makeOplogEntry(repl::OpTime(Timestamp(60, 10), 1), // optime
+ repl::OpTypeEnum::kCommand, // op type
+ NamespaceString("a.b"), // namespace
+ BSON("create"
+ << "c")); // o
+
+ ASSERT_BSONOBJ_EQ(docKeyFromOplog(pattern, noopOplog), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKeyFromOplog(pattern, commandOplog), BSONObj());
+}
+
TEST_F(ShardKeyPatternTest, ExtractDocShardKeyDeepNested) {
//
// Deeply nested ShardKeyPatterns