author     nandinibhartiyaMDB <nandini.bhartiya@mongodb.com>   2022-06-08 13:55:15 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2022-06-08 15:32:02 +0000
commit     224c25e0d3073978ae531c47ace605affae02664
tree       a2039d0fed9d68a8b917f47698db5da3a0b4533c
parent     e97fa5d06b9f43a47ac29c85e3971e366f193d11
SERVER-62272: Migration OK for chunks existing before schema validator
-rw-r--r--  etc/backports_required_for_multiversion_tests.yml            2
-rw-r--r--  jstests/sharding/chunk_migration_with_schema_validation.js  67
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp            19
3 files changed, 82 insertions(+), 6 deletions(-)
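
For context, the scenario this commit addresses can be sketched from the mongo shell jstest environment as follows. This is an illustrative sketch rather than part of the commit: the two-shard ShardingTest, the test.example namespace, and the document shapes are assumptions chosen to mirror the regression test added below.

// Sketch only: populate a sharded collection first, then add a validator that the
// pre-existing documents violate, then migrate a chunk of that data.
const st = new ShardingTest({shards: 2});
const coll = st.s.getCollection("test.example");

assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
st.ensurePrimaryShard("test", st.shard0.shardName);
assert.commandWorked(st.s.adminCommand({shardCollection: "test.example", key: {x: 1}}));

for (let i = 0; i < 10; i++) {
    assert.commandWorked(coll.insert({x: i, name: "A"}));  // 'name' is a string
}

// The validator only accepts 32-bit integer 'name' values, so none of the
// documents inserted above satisfy it.
assert.commandWorked(coll.runCommand({collMod: "example", validator: {name: {$type: "int"}}}));

// With this change the recipient shard no longer applies the validator to the
// documents it receives, so migrating the pre-validator data succeeds.
assert.commandWorked(
    st.s.adminCommand({moveChunk: "test.example", find: {x: 5}, to: st.shard1.shardName}));

st.stop();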
diff --git a/etc/backports_required_for_multiversion_tests.yml b/etc/backports_required_for_multiversion_tests.yml
index 4b1ef62b188..e8af2495af2 100644
--- a/etc/backports_required_for_multiversion_tests.yml
+++ b/etc/backports_required_for_multiversion_tests.yml
@@ -207,6 +207,8 @@ all:
    test_file: jstests/replsets/initial_sync_with_partial_transaction.js
  - ticket: SERVER-66719
    test_file: jstests/replsets/dbhash_lock_acquisition.js
+  - ticket: SERVER-62272
+    test_file: jstests/sharding/chunk_migration_with_schema_validation.js
suites:
diff --git a/jstests/sharding/chunk_migration_with_schema_validation.js b/jstests/sharding/chunk_migration_with_schema_validation.js
new file mode 100644
index 00000000000..28b8427d45a
--- /dev/null
+++ b/jstests/sharding/chunk_migration_with_schema_validation.js
@@ -0,0 +1,67 @@
+/**
+ * Ensure that schema validation rules applied after a collection has been populated do not
+ * inhibit chunk migration for the data that existed before the rules were applied.
+ */
+
+(function() {
+'use strict';
+
+load("jstests/libs/fail_point_util.js");
+load('jstests/libs/parallel_shell_helpers.js');
+load("jstests/sharding/libs/create_sharded_collection_util.js");
+
+const st = new ShardingTest({mongos: 1, shards: 2, rs: {nodes: 3}});
+const dbName = "test";
+const collName = "foo";
+const ns = "test.foo";
+const testColl = st.s.getCollection(ns);
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+CreateShardedCollectionUtil.shardCollectionWithChunks(testColl, {x: 1}, [
+ {min: {x: MinKey}, max: {x: 50}, shard: st.shard0.shardName},
+ {min: {x: 50}, max: {x: MaxKey}, shard: st.shard0.shardName},
+]);
+
+for (let i = 0; i < 100; i++) {
+ assert.commandWorked(testColl.insert({x: i, name: "A"}));
+}
+assert.eq(100, testColl.find().itcount());
+
+assert.commandWorked(testColl.runCommand({collMod: "foo", validator: {name: {$type: "int"}}}));
+
+let failpoint = configureFailPoint(st.shard1, "migrateThreadHangAtStep4");
+
+const awaitResult = startParallelShell(
+ funWithArgs(function(ns, toShardName) {
+ assert.commandWorked(
+ db.adminCommand({moveChunk: ns, find: {x: 50}, to: toShardName, _waitForDelete: true}));
+ }, ns, st.shard1.shardName), st.s.port);
+
+failpoint.wait();
+
+for (let i = 100; i < 200; i++) {
+ assert.commandWorked(testColl.runCommand(
+ {insert: collName, documents: [{x: i, name: "B"}], bypassDocumentValidation: true}));
+}
+
+for (let i = 50; i < 75; ++i) {
+ assert.commandWorked(testColl.remove({x: i}));
+}
+
+failpoint.off();
+
+awaitResult();
+
+const donor = st.shard0.rs.getPrimary().getDB(dbName);
+const recipient = st.shard1.rs.getPrimary().getDB(dbName);
+assert.eq(50,
+ donor.foo.find().itcount(),
+ "Number of documents on the donor shard after moveChunk is incorrect.");
+assert.eq(125,
+ recipient.foo.find().itcount(),
+ "Number of documents on the recipient shard after moveChunk is incorrect.");
+assert.eq(175, testColl.find().itcount(), "Number of total documents is incorrect");
+
+st.stop();
+})();
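
A note on the validator used by the test: {name: {$type: "int"}} matches only documents whose name field is a 32-bit integer, so every document the test writes has a name that violates it. That is why the inserts issued while the migration is paused use bypassDocumentValidation, and why the recipient shard must skip validation for the documents it clones and applies. The following hedged illustration (not part of the commit) could be run against testColl before the final st.stop():

// Illustration only: how the validator treats ordinary writes.
assert.writeErrorWithCode(testColl.insert({x: 300, name: "C"}),       // string 'name' is rejected
                          ErrorCodes.DocumentValidationFailure);
assert.commandWorked(testColl.insert({x: 301, name: NumberInt(7)}));  // 32-bit int 'name' passes
assert.commandWorked(testColl.runCommand(                             // bypass skips the validator
    {insert: collName, documents: [{x: 302, name: "C"}], bypassDocumentValidation: true}));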
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index f4ce80806b9..17b59d6eee4 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -1171,15 +1171,20 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx) {
                }
                return toInsert;
            }());
-            const WriteResult reply = performInserts(opCtx, insertOp, true);
+            {
+                // Disable the schema validation for opCtx for performInserts()
+                DisableDocumentValidation documentValidationDisabler(opCtx);
+                const WriteResult reply = performInserts(opCtx, insertOp, true);

-            for (unsigned long i = 0; i < reply.results.size(); ++i) {
-                uassertStatusOKWithContext(
-                    reply.results[i],
-                    str::stream() << "Insert of " << insertOp.getDocuments()[i] << " failed.");
+                for (unsigned long i = 0; i < reply.results.size(); ++i) {
+                    uassertStatusOKWithContext(reply.results[i],
+                                               str::stream()
+                                                   << "Insert of " << insertOp.getDocuments()[i]
+                                                   << " failed.");
+                }
+                // Revert to the original validation settings for opCtx
            }
-
            {
                stdx::lock_guard<Latch> statsLock(_mutex);
                _numCloned += batchNumCloned;
@@ -1427,6 +1432,8 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx,
    bool didAnything = false;
+    DisableDocumentValidation documentValidationDisabler(opCtx);
+
    // Deleted documents
    if (xfer["deleted"].isABSONObj()) {
        boost::optional<RemoveSaver> rs;
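
Both hunks wrap the recipient-side writes in a DisableDocumentValidation guard on the migration's OperationContext: the first covers the initial batch cloning performed through performInserts(), the second covers _applyMigrateOp(), which replays the deletes and inserts transferred while the test holds the migration at the migrateThreadHangAtStep4 failpoint. The guard is scoped to the migration's own writes, so ordinary client writes still go through the validator. A hedged check of that user-visible behaviour, assuming the cluster from the test above before st.stop():

// Illustration only: migrated documents bypassed the validator, but client
// writes through mongos still enforce it.
const recipientColl = st.shard1.rs.getPrimary().getDB("test").foo;
assert.gt(recipientColl.find({name: {$not: {$type: "int"}}}).itcount(),
          0,
          "expected documents violating the validator to have been migrated");
assert.writeErrorWithCode(st.s.getCollection("test.foo").insert({x: 500, name: "D"}),
                          ErrorCodes.DocumentValidationFailure);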