author     Randolph Tan <randolph@10gen.com>                 2022-11-01 19:52:33 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-11-14 21:08:23 +0000
commit     d7ae6e64f16f2b9951b2310c4ae10eaf8e594ba4
tree       343f520848e6db729292805a18a25d4bf8ec79b1
parent     dd817bddaea7c947ed565180df6913cec00b9304
download   mongo-d7ae6e64f16f2b9951b2310c4ae10eaf8e594ba4.tar.gz
SERVER-68361 Make migration properly handle cases where a shard key value modification also results in a change in chunk membership
(cherry picked from commit 2061d2244caf64e2fee1b42418cd3a557d028e8c)
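To make the scenario in the commit message concrete, here is a minimal standalone C++ sketch (illustrative only, not MongoDB code: shard key values and chunk bounds are plain ints, and chunkIndexFor is an invented helper). It shows how an update that modifies the shard key can move a document from one chunk range into another, including into a range that is concurrently being migrated.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Simplified model: chunks are half-open shard key ranges [min, max),
    // sorted and non-overlapping.
    struct Chunk {
        int min;
        int max;
    };

    // Returns the index of the chunk owning the given shard key value.
    std::size_t chunkIndexFor(const std::vector<Chunk>& chunks, int shardKey) {
        for (std::size_t i = 0; i < chunks.size(); ++i) {
            if (shardKey >= chunks[i].min && shardKey < chunks[i].max)
                return i;
        }
        return chunks.size();  // Not owned by any chunk in this simplified model.
    }

    int main() {
        // Mirrors the ranges used by the updated test:
        // [MinKey, 0), [0, 1000), [1000, MaxKey), with MinKey/MaxKey approximated.
        std::vector<Chunk> chunks{{-1000000, 0}, {0, 1000}, {1000, 1000000}};

        // An update {x: -1} -> {x: 5} moves the document from chunk 0 into
        // chunk 1, i.e. into the range that is concurrently being migrated.
        assert(chunkIndexFor(chunks, -1) == 0);
        assert(chunkIndexFor(chunks, 5) == 1);
        return 0;
    }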
-rw-r--r--  etc/backports_required_for_multiversion_tests.yml        |   4
-rw-r--r--  jstests/sharding/prepare_transaction_then_migrate.js     | 190
-rw-r--r--  src/mongo/db/op_observer/op_observer_impl.cpp            |   8
-rw-r--r--  src/mongo/db/repl/oplog_entry.h                          |  13
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp  | 100
5 files changed, 233 insertions(+), 82 deletions(-)
diff --git a/etc/backports_required_for_multiversion_tests.yml b/etc/backports_required_for_multiversion_tests.yml
index 273317cef72..e9f9dd4b5b4 100644
--- a/etc/backports_required_for_multiversion_tests.yml
+++ b/etc/backports_required_for_multiversion_tests.yml
@@ -260,6 +260,8 @@ last-continuous:
ticket: SERVER-70190
- test_file: jstests/core/cover_null_queries.js
ticket: SERVER-70436
+ - test_file: jstests/sharding/prepare_transaction_then_migrate.js
+ ticket: SERVER-68361
suites: null
last-lts:
all:
@@ -595,4 +597,6 @@ last-lts:
ticket: SERVER-70190
- test_file: jstests/core/cover_null_queries.js
ticket: SERVER-70436
+ - test_file: jstests/sharding/prepare_transaction_then_migrate.js
+ ticket: SERVER-68361
suites: null
diff --git a/jstests/sharding/prepare_transaction_then_migrate.js b/jstests/sharding/prepare_transaction_then_migrate.js
index 034259d02be..36a9581752d 100644
--- a/jstests/sharding/prepare_transaction_then_migrate.js
+++ b/jstests/sharding/prepare_transaction_then_migrate.js
@@ -9,6 +9,7 @@
(function() {
"use strict";
load('jstests/libs/chunk_manipulation_util.js');
+load('jstests/sharding/libs/create_sharded_collection_util.js');
load('jstests/sharding/libs/sharded_transactions_helpers.js');
const dbName = "test";
@@ -16,58 +17,149 @@ const collName = "user";
const staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-const st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}});
-st.adminCommand({enableSharding: 'test'});
-st.ensurePrimaryShard('test', st.shard0.shardName);
-st.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
+let runTest = function(withStepUp) {
+ const st = new ShardingTest({shards: {rs0: {nodes: withStepUp ? 2 : 1}, rs1: {nodes: 1}}});
+ const collection = st.s.getDB(dbName).getCollection(collName);
-const session = st.s.startSession({causalConsistency: false});
-const sessionDB = session.getDatabase(dbName);
-const sessionColl = sessionDB.getCollection(collName);
+ CreateShardedCollectionUtil.shardCollectionWithChunks(collection, {x: 1}, [
+ {min: {x: MinKey}, max: {x: 0}, shard: st.shard0.shardName},
+ {min: {x: 0}, max: {x: 1000}, shard: st.shard0.shardName},
+ {min: {x: 1000}, max: {x: MaxKey}, shard: st.shard1.shardName},
+ ]);
-assert.commandWorked(sessionColl.insert({_id: 1}));
+ assert.commandWorked(collection.insert([
+ {_id: 1, x: -1, note: "move into chunk range being migrated"},
+ {_id: 2, x: -2, note: "keep out of chunk range being migrated"},
+ {_id: 3, x: 50, note: "move out of chunk range being migrated"},
+ {_id: 4, x: 100, note: "keep in chunk range being migrated"},
+ ]));
-const lsid = {
- id: UUID()
+ const lsid = {id: UUID()};
+ const txnNumber = 0;
+ let stmtId = 0;
+
+ assert.commandWorked(st.s0.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [
+ {_id: 5, x: -1.01, note: "move into chunk range being migrated"},
+ {_id: 6, x: -2.01, note: "keep out of chunk range being migrated"},
+ {_id: 7, x: 50.01, note: "move out of chunk range being migrated"},
+ {_id: 8, x: 100.01, note: "keep in chunk range being migrated"},
+ ],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ startTransaction: true,
+ autocommit: false,
+ }));
+
+ assert.commandWorked(st.s.getDB(dbName).runCommand({
+ update: collName,
+ updates: [
+ {q: {x: -1}, u: {$set: {x: 5}}},
+ {q: {x: -2}, u: {$set: {x: -10}}},
+ {q: {x: 50}, u: {$set: {x: -20}}},
+ {q: {x: 100}, u: {$set: {x: 500}}},
+ {q: {x: -1.01}, u: {$set: {x: 5.01}}},
+ {q: {x: -2.01}, u: {$set: {x: -10.01}}},
+ {q: {x: 50.01}, u: {$set: {x: -20.01}}},
+ {q: {x: 100.01}, u: {$set: {x: 500.01}}},
+ ],
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false,
+ }));
+
+ const res = assert.commandWorked(st.shard0.getDB(dbName).adminCommand({
+ prepareTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: "majority"},
+ }));
+
+ let prepareTimestamp = res.prepareTimestamp;
+
+ if (withStepUp) {
+ st.rs0.stepUp(st.rs0.getSecondary());
+ }
+
+ const joinMoveChunk =
+ moveChunkParallel(staticMongod, st.s.host, {x: 1}, null, 'test.user', st.shard1.shardName);
+
+ pauseMigrateAtStep(st.shard1, migrateStepNames.catchup);
+
+    // The donor shard only ignores prepare conflicts while scanning over the shard key index. We
+    // wait for the donor shard to finish buffering the RecordIds into memory from that index scan
+    // before committing the transaction. Notably, the donor shard doesn't ignore prepare conflicts
+    // when fetching the full contents of the documents during calls to _migrateClone.
+ //
+ // TODO: SERVER-71028 Remove comment after making changes.
+
+ waitForMoveChunkStep(st.shard0, moveChunkStepNames.startedMoveChunk);
+
+ assert.commandWorked(
+ st.shard0.getDB(dbName).adminCommand(Object.assign({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ },
+ {commitTimestamp: prepareTimestamp})));
+
+ unpauseMigrateAtStep(st.shard1, migrateStepNames.catchup);
+
+ joinMoveChunk();
+
+ class ArrayCursor {
+ constructor(arr) {
+ this.i = 0;
+ this.arr = arr;
+ }
+
+ hasNext() {
+ return this.i < this.arr.length;
+ }
+
+ next() {
+ return this.arr[this.i++];
+ }
+ }
+
+ const expected = new ArrayCursor([
+ {_id: 1, x: 5, note: "move into chunk range being migrated"},
+ {_id: 2, x: -10, note: "keep out of chunk range being migrated"},
+ {_id: 3, x: -20, note: "move out of chunk range being migrated"},
+ {_id: 4, x: 500, note: "keep in chunk range being migrated"},
+ {_id: 5, x: 5.01, note: "move into chunk range being migrated"},
+ {_id: 6, x: -10.01, note: "keep out of chunk range being migrated"},
+ {_id: 7, x: -20.01, note: "move out of chunk range being migrated"},
+ {_id: 8, x: 500.01, note: "keep in chunk range being migrated"},
+ ]);
+
+ const diff = ((diff) => {
+ return {
+ docsWithDifferentContents: diff.docsWithDifferentContents.map(
+ ({first, second}) => ({expected: first, actual: second})),
+ docsExtraAfterMigration: diff.docsMissingOnFirst,
+ docsMissingAfterMigration: diff.docsMissingOnSecond,
+ };
+ })(DataConsistencyChecker.getDiff(expected, collection.find().sort({_id: 1, x: 1})));
+
+ assert.eq(diff, {
+ docsWithDifferentContents: [],
+ docsExtraAfterMigration: [],
+ docsMissingAfterMigration: [],
+ });
+
+ st.stop();
};
-const txnNumber = 0;
-const stmtId = 0;
-
-assert.commandWorked(st.s0.getDB(dbName).runCommand({
- insert: collName,
- documents: [{_id: 2}, {_id: 5}, {_id: 15}],
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- startTransaction: true,
- autocommit: false,
-}));
-
-const res = assert.commandWorked(st.shard0.getDB(dbName).adminCommand({
- prepareTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
-}));
-
-const joinMoveChunk =
- moveChunkParallel(staticMongod, st.s.host, {_id: 1}, null, 'test.user', st.shard1.shardName);
-
-// Wait for catchup to verify that the migration has exited the clone phase.
-waitForMigrateStep(st.shard1, migrateStepNames.catchup);
-
-assert.commandWorked(st.shard0.getDB(dbName).adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- commitTimestamp: res.prepareTimestamp,
-}));
-
-joinMoveChunk();
-
-assert.eq(sessionColl.find({_id: 2}).count(), 1);
-
-st.stop();
+
+runTest(false);
+// TODO: SERVER-71219 Enable test after fixing.
+// runTest(true);
+
MongoRunner.stopMongod(staticMongod);
})();
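The final assertion in the test compares the expected documents against the collection contents via DataConsistencyChecker.getDiff and then relabels the result. A rough standalone C++ sketch of that verification step, assuming both snapshots are keyed by _id (Doc, diffById, and the field names are invented for this sketch):

    #include <cassert>
    #include <map>
    #include <utility>
    #include <vector>

    // Simplified document: _id plus the shard key value x.
    struct Doc {
        int id;
        double x;
    };

    struct Diff {
        std::vector<std::pair<Doc, Doc>> docsWithDifferentContents;  // {expected, actual}
        std::vector<Doc> docsExtraAfterMigration;
        std::vector<Doc> docsMissingAfterMigration;
    };

    // Compares two snapshots keyed by _id, mirroring the shape of the diff the
    // test asserts on after the migration completes.
    Diff diffById(const std::vector<Doc>& expected, const std::vector<Doc>& actual) {
        std::map<int, Doc> exp, act;
        for (const Doc& d : expected) exp[d.id] = d;
        for (const Doc& d : actual) act[d.id] = d;

        Diff out;
        for (const auto& [id, e] : exp) {
            auto it = act.find(id);
            if (it == act.end())
                out.docsMissingAfterMigration.push_back(e);
            else if (it->second.x != e.x)
                out.docsWithDifferentContents.push_back({e, it->second});
        }
        for (const auto& [id, a] : act) {
            if (!exp.count(id)) out.docsExtraAfterMigration.push_back(a);
        }
        return out;
    }

    int main() {
        std::vector<Doc> expected{{1, 5}, {2, -10}};
        std::vector<Doc> actual{{1, 5}, {2, -10}};
        Diff d = diffById(expected, actual);
        assert(d.docsWithDifferentContents.empty() && d.docsExtraAfterMigration.empty() &&
               d.docsMissingAfterMigration.empty());
        return 0;
    }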
diff --git a/src/mongo/db/op_observer/op_observer_impl.cpp b/src/mongo/db/op_observer/op_observer_impl.cpp
index 721189c13d2..a3a892fa516 100644
--- a/src/mongo/db/op_observer/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer/op_observer_impl.cpp
@@ -813,6 +813,14 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
operation.setChangeStreamPreImageRecordingMode(
ChangeStreamPreImageRecordingMode::kPreImagesCollection);
}
+
+ auto collectionDescription =
+ CollectionShardingState::get(opCtx, args.nss)->getCollectionDescription(opCtx);
+ if (collectionDescription.isSharded()) {
+ operation.setPostImageDocumentKey(
+ collectionDescription.extractDocumentKey(args.updateArgs->updatedDoc).getOwned());
+ }
+
operation.setDestinedRecipient(
shardingWriteRouter.getReshardingDestinedRecipient(args.updateArgs->updatedDoc));
operation.setFromMigrateIfTrue(args.updateArgs->source == OperationSource::kFromMigrate);
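The new op observer code records a document key, i.e. the _id plus the shard key fields, taken from the update's post-image. A simplified standalone C++ sketch of that idea, with std::map standing in for BSON and makeDocumentKey as an invented stand-in for extractDocumentKey (it mirrors the intent, not the real implementation):

    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    // Stand-in for a BSON document: field name -> value.
    using Doc = std::map<std::string, int>;

    // Builds the document key for a sharded collection: the _id field plus the
    // shard key fields, taken from the post-image of the update.
    Doc makeDocumentKey(const Doc& postImage, const std::vector<std::string>& shardKeyFields) {
        Doc key;
        key["_id"] = postImage.at("_id");
        for (const std::string& field : shardKeyFields)
            key[field] = postImage.at(field);
        return key;
    }

    int main() {
        Doc postImage{{"_id", 1}, {"x", 5}, {"note", 0}};
        Doc key = makeDocumentKey(postImage, {"x"});
        assert(key.size() == 2 && key.at("_id") == 1 && key.at("x") == 5);
        return 0;
    }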
diff --git a/src/mongo/db/repl/oplog_entry.h b/src/mongo/db/repl/oplog_entry.h
index ef96c66e39c..c97c1f5b031 100644
--- a/src/mongo/db/repl/oplog_entry.h
+++ b/src/mongo/db/repl/oplog_entry.h
@@ -88,11 +88,13 @@ public:
o.parseProtected(ctxt, bsonObject);
return o;
}
- const BSONObj& getPreImageDocumentKey() const {
- return _preImageDocumentKey;
+
+ const BSONObj& getPostImageDocumentKey() const {
+ return _postImageDocumentKey;
}
- void setPreImageDocumentKey(BSONObj value) {
- _preImageDocumentKey = std::move(value);
+
+ void setPostImageDocumentKey(BSONObj value) {
+ _postImageDocumentKey = std::move(value);
}
const BSONObj& getPreImage() const {
@@ -207,7 +209,8 @@ public:
}
private:
- BSONObj _preImageDocumentKey;
+ // Stores the post image _id + shard key values.
+ BSONObj _postImageDocumentKey;
// Used for storing the pre-image and post-image for the operation in-memory regardless of where
// the images should be persisted.
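The accessor pair added to ReplOperation follows a common C++ pattern: the setter takes its argument by value and moves it into the member, so callers can pass an lvalue (one copy) or an rvalue (no copy at all). A small standalone sketch of the same idiom, with std::string standing in for BSONObj and ReplOperationSketch as an invented class name:

    #include <cassert>
    #include <string>
    #include <utility>

    class ReplOperationSketch {
    public:
        // By-value parameter plus std::move: cheap for rvalues, one copy for lvalues.
        void setPostImageDocumentKey(std::string value) {
            _postImageDocumentKey = std::move(value);
        }
        const std::string& getPostImageDocumentKey() const {
            return _postImageDocumentKey;
        }

    private:
        std::string _postImageDocumentKey;  // post image _id + shard key values
    };

    int main() {
        ReplOperationSketch op;
        op.setPostImageDocumentKey("{_id: 1, x: 5}");
        assert(op.getPostImageDocumentKey() == "{_id: 1, x: 5}");
        return 0;
    }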
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 8d6b5050f1e..6989c313ed6 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -79,12 +79,22 @@ const Hours kMaxWaitToCommitCloneForJumboChunk(6);
MONGO_FAIL_POINT_DEFINE(failTooMuchMemoryUsed);
-bool isInRange(const BSONObj& obj,
- const BSONObj& min,
- const BSONObj& max,
- const ShardKeyPattern& shardKeyPattern) {
- BSONObj k = shardKeyPattern.extractShardKeyFromDoc(obj);
- return k.woCompare(min) >= 0 && k.woCompare(max) < 0;
+/**
+ * Returns true if the given BSON object, which must be in the shard key value pair format, is
+ * within the given range.
+ */
+bool isShardKeyValueInRange(const BSONObj& shardKeyValue, const BSONObj& min, const BSONObj& max) {
+ return shardKeyValue.woCompare(min) >= 0 && shardKeyValue.woCompare(max) < 0;
+}
+
+/**
+ * Returns true if the given BSON document is within the given chunk range.
+ */
+bool isDocInRange(const BSONObj& obj,
+ const BSONObj& min,
+ const BSONObj& max,
+ const ShardKeyPattern& shardKeyPattern) {
+ return isShardKeyValueInRange(shardKeyPattern.extractShardKeyFromDoc(obj), min, max);
}
BSONObj createRequestWithSessionId(StringData commandName,
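The refactor above splits the old isInRange helper in two: a check over an already-extracted shard key value, and a wrapper that extracts the key from a full document first. This matters for the transaction path below, which only has document keys rather than full documents. A simplified standalone C++ sketch of the split, with std::map standing in for BSON and a single-field shard key assumed:

    #include <cassert>
    #include <map>
    #include <string>

    // Stand-in for a BSON object (both documents and shard key values).
    using Obj = std::map<std::string, int>;

    // Half-open range check [min, max) over an already-extracted shard key value.
    bool isShardKeyValueInRange(const Obj& shardKeyValue, const Obj& min, const Obj& max) {
        int k = shardKeyValue.begin()->second;
        return k >= min.begin()->second && k < max.begin()->second;
    }

    // Extracts the shard key from a full document and delegates to the value-based
    // check, mirroring how isDocInRange() now wraps isShardKeyValueInRange().
    bool isDocInRange(const Obj& doc, const Obj& min, const Obj& max, const std::string& keyField) {
        return isShardKeyValueInRange({{keyField, doc.at(keyField)}}, min, max);
    }

    int main() {
        Obj min{{"x", 0}}, max{{"x", 1000}};
        assert(isDocInRange({{"_id", 4}, {"x", 100}}, min, max, "x"));
        assert(!isDocInRange({{"_id", 2}, {"x", -10}}, min, max, "x"));
        assert(!isShardKeyValueInRange({{"x", 1000}}, min, max));  // max is exclusive
        return 0;
    }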
@@ -216,14 +226,13 @@ void LogTransactionOperationsForShardingHandler::commit(boost::optional<Timestam
continue;
}
- auto documentKey = getDocumentKeyFromReplOperation(stmt, opType);
+ auto preImageDocKey = getDocumentKeyFromReplOperation(stmt, opType);
- auto idElement = documentKey["_id"];
+ auto idElement = preImageDocKey["_id"];
if (idElement.eoo()) {
LOGV2_WARNING(21994,
- "Received a document without an _id field, ignoring: {documentKey}",
"Received a document without an _id and will ignore that document",
- "documentKey"_attr = redact(documentKey));
+ "documentKey"_attr = redact(preImageDocKey));
continue;
}
@@ -231,18 +240,42 @@ void LogTransactionOperationsForShardingHandler::commit(boost::optional<Timestam
auto const& maxKey = cloner->_args.getMax().value();
auto const& shardKeyPattern = cloner->_shardKeyPattern;
- if (!isInRange(documentKey, minKey, maxKey, shardKeyPattern)) {
- // If the preImageDoc is not in range but the postImageDoc was, we know that the
- // document has changed shard keys and no longer belongs in the chunk being cloned.
- // We will model the deletion of the preImage document so that the destination chunk
- // does not receive an outdated version of this document.
- if (opType == repl::OpTypeEnum::kUpdate &&
- isInRange(stmt.getPreImageDocumentKey(), minKey, maxKey, shardKeyPattern) &&
- !stmt.getPreImageDocumentKey()["_id"].eoo()) {
- opType = repl::OpTypeEnum::kDelete;
- idElement = stmt.getPreImageDocumentKey()["id"];
- } else {
- continue;
+            // Note: This assumes that prepared transactions will always have the post image
+            // document key set. There is a small window between when the create collection
+            // coordinator releases the critical section and when it writes down the chunks for
+            // non-empty collections, so in theory it is possible to have a prepared transaction
+            // while the collection is unsharded and have it become sharded midway. This doesn't
+            // happen in practice because the only way to have a prepared transaction without the
+            // collection being sharded is to connect directly to the shards and manually prepare
+            // the transaction. Another exception is when the transaction is prepared on an older
+            // version that doesn't set the post image document key.
+ auto postImageDocKey = stmt.getPostImageDocumentKey();
+ if (postImageDocKey.isEmpty()) {
+ LOGV2_WARNING(
+ 6836102,
+ "Migration encountered a transaction operation without a post image document key",
+ "preImageDocKey"_attr = preImageDocKey);
+ } else {
+ auto postShardKeyValues =
+ shardKeyPattern.extractShardKeyFromDocumentKey(postImageDocKey);
+ fassert(6836100, !postShardKeyValues.isEmpty());
+
+ if (!isShardKeyValueInRange(postShardKeyValues, minKey, maxKey)) {
+                // If the postImageDoc is not in range but the preImageDoc was, we know that the
+                // document has changed shard keys and no longer belongs in the chunk being cloned.
+                // We will model the deletion of the preImage document so that the destination
+                // chunk does not receive an outdated version of this document.
+
+ auto preImageShardKeyValues =
+ shardKeyPattern.extractShardKeyFromDocumentKey(preImageDocKey);
+ fassert(6836101, !preImageShardKeyValues.isEmpty());
+
+ if (opType == repl::OpTypeEnum::kUpdate &&
+ isShardKeyValueInRange(preImageShardKeyValues, minKey, maxKey)) {
+ opType = repl::OpTypeEnum::kDelete;
+ idElement = postImageDocKey["_id"];
+ } else {
+ continue;
+ }
}
}
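The branch above reduces to a small decision table for each update observed while a chunk is being migrated: forward it as an update if the post-image shard key is still in the migrating range, convert it to a delete of the pre-image if the document moved out of the range, and skip it otherwise. A standalone C++ sketch of that decision (illustrative only: shard key values are plain ints, an absent post-image key is modeled with std::nullopt, and classifyUpdate is an invented name):

    #include <cassert>
    #include <optional>

    enum class XferModAction { kForwardAsUpdate, kForwardAsDelete, kSkip };

    // Simplified decision for an update observed while the chunk [min, max) is migrating.
    XferModAction classifyUpdate(std::optional<int> postKey, int preKey, int min, int max) {
        if (!postKey)
            return XferModAction::kForwardAsUpdate;  // unknown post image; real code warns here
        bool postInRange = *postKey >= min && *postKey < max;
        bool preInRange = preKey >= min && preKey < max;
        if (postInRange)
            return XferModAction::kForwardAsUpdate;  // still owned by the migrating chunk
        if (preInRange)
            return XferModAction::kForwardAsDelete;  // moved out: recipient must drop stale copy
        return XferModAction::kSkip;                 // never belonged to the migrating chunk
    }

    int main() {
        // Chunk being migrated: [0, 1000).
        assert(classifyUpdate(500, 100, 0, 1000) == XferModAction::kForwardAsUpdate);
        assert(classifyUpdate(-20, 50, 0, 1000) == XferModAction::kForwardAsDelete);
        assert(classifyUpdate(-10, -2, 0, 1000) == XferModAction::kSkip);
        return 0;
    }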
@@ -446,7 +479,7 @@ void MigrationChunkClonerSourceLegacy::cancelClone(OperationContext* opCtx) noex
}
bool MigrationChunkClonerSourceLegacy::isDocumentInMigratingChunk(const BSONObj& doc) {
- return isInRange(doc, getMin(), getMax(), _shardKeyPattern);
+ return isDocInRange(doc, getMin(), getMax(), _shardKeyPattern);
}
void MigrationChunkClonerSourceLegacy::onInsertOp(OperationContext* opCtx,
@@ -465,7 +498,7 @@ void MigrationChunkClonerSourceLegacy::onInsertOp(OperationContext* opCtx,
return;
}
- if (!isInRange(insertedDoc, getMin(), getMax(), _shardKeyPattern)) {
+ if (!isDocInRange(insertedDoc, getMin(), getMax(), _shardKeyPattern)) {
return;
}
@@ -500,12 +533,12 @@ void MigrationChunkClonerSourceLegacy::onUpdateOp(OperationContext* opCtx,
return;
}
- if (!isInRange(postImageDoc, getMin(), getMax(), _shardKeyPattern)) {
+ if (!isDocInRange(postImageDoc, getMin(), getMax(), _shardKeyPattern)) {
// If the preImageDoc is not in range but the postImageDoc was, we know that the document
// has changed shard keys and no longer belongs in the chunk being cloned. We will model
// the deletion of the preImage document so that the destination chunk does not receive an
// outdated version of this document.
- if (preImageDoc && isInRange(*preImageDoc, getMin(), getMax(), _shardKeyPattern)) {
+ if (preImageDoc && isDocInRange(*preImageDoc, getMin(), getMax(), _shardKeyPattern)) {
onDeleteOp(opCtx, *preImageDoc, opTime, prePostImageOpTime);
}
return;
@@ -700,9 +733,22 @@ void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromCloneRecordIds(
auto nextRecordId = *iter;
lk.unlock();
+ ON_BLOCK_EXIT([&lk] { lk.lock(); });
Snapshotted<BSONObj> doc;
if (collection->findDoc(opCtx, nextRecordId, &doc)) {
+            // Do not send documents that are no longer in the chunk range being moved. This can
+            // happen when the shard key value of a document changed after the initial index scan
+            // during cloning. The check is needed because the destination is conservative in
+            // processing xferMod deletes and won't delete docs that are not in the range of the
+            // chunk being migrated.
+ if (!isDocInRange(doc.value(),
+ _args.getMin().value(),
+ _args.getMax().value(),
+ _shardKeyPattern)) {
+ continue;
+ }
+
// Use the builder size instead of accumulating the document sizes directly so
// that we take into consideration the overhead of BSONArray indices.
if (arrBuilder->arrSize() &&
@@ -714,8 +760,6 @@ void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromCloneRecordIds(
arrBuilder->append(doc.value());
ShardingStatistics::get(opCtx).countDocsClonedOnDonor.addAndFetch(1);
}
-
- lk.lock();
}
_cloneRecordIds.erase(_cloneRecordIds.begin(), iter);
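The added filter in _nextCloneBatchFromCloneRecordIds re-checks chunk membership at fetch time, because a document's shard key may have changed after its RecordId was buffered during the initial index scan. A simplified standalone C++ sketch of that filtering step (illustrative only: documents are std::map stand-ins for BSON, the shard key is assumed to be the single field x, and buildCloneBatch is an invented name):

    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    using Doc = std::map<std::string, int>;

    // Documents whose shard key moved out of [min, max) after the initial index
    // scan are skipped instead of being sent to the recipient, because the
    // recipient will not delete docs outside the migrating range from xferMods.
    std::vector<Doc> buildCloneBatch(const std::vector<Doc>& bufferedDocs, int min, int max) {
        std::vector<Doc> batch;
        for (const Doc& doc : bufferedDocs) {
            int shardKey = doc.at("x");
            if (shardKey < min || shardKey >= max)
                continue;  // no longer in the chunk range being moved
            batch.push_back(doc);
        }
        return batch;
    }

    int main() {
        // {_id: 3} was scanned while x == 50, but its shard key later changed to -20.
        std::vector<Doc> buffered{{{"_id", 3}, {"x", -20}}, {{"_id", 4}, {"x", 500}}};
        std::vector<Doc> batch = buildCloneBatch(buffered, 0, 1000);
        assert(batch.size() == 1 && batch[0].at("_id") == 4);
        return 0;
    }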