author     Matthew Saltz <matthew.saltz@mongodb.com>  2020-03-04 11:12:43 -0500
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-03-13 17:05:43 +0000
commit     46b4b3d8f03e3dfc3ca2ae344bf7848732e986c3 (patch)
tree       5aef742d77f7159ab93d880225529c774ac1c57c /jstests
parent     9a905f68d4ec49ee62a20bdba779d6d726b297ba (diff)
download   mongo-46b4b3d8f03e3dfc3ca2ae344bf7848732e986c3.tar.gz
SERVER-46386 Change MetadataManager::_findNewestOverlappingMetadata to work with refined shard keys
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js | 82
1 file changed, 70 insertions(+), 12 deletions(-)
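
For reference, the failpoint pattern used throughout this test comes from jstests/libs/fail_point_util.js (loaded at the top of the file): configureFailPoint() returns a handle whose wait() blocks until the server hits the failpoint and whose off() disables it. A minimal sketch of that pattern, assuming a running ShardingTest named st and a sharded collection test.foo as in this test (not code from the commit itself):

load("jstests/libs/fail_point_util.js");

// Suspend range deletion on the shard0 primary.
let fp = configureFailPoint(st.rs0.getPrimary(), "suspendRangeDeletion");

// Move a chunk off shard0 without _waitForDelete, leaving orphaned documents
// whose range deletion task is held up by the failpoint.
assert.commandWorked(
    st.s.adminCommand({moveChunk: "test.foo", find: {x: 1}, to: st.shard1.shardName}));

fp.wait();  // Block until the range deleter reaches the failpoint.
fp.off();   // Release it so the suspended range deletion can run.
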
diff --git a/jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js b/jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js
index d8f17bee153..d5ed8b095ef 100644
--- a/jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js
+++ b/jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js
@@ -10,13 +10,10 @@
load("jstests/libs/fail_point_util.js");
load('jstests/libs/parallel_shell_helpers.js');
-const st = new ShardingTest({shards: 2});
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
const dbName = "test";
-const collName = "foo";
-const ns = dbName + "." + collName;
-let testDB = st.s.getDB(dbName);
-let testColl = testDB.foo;
+const ns = dbName + ".foo";
const originalShardKey = {
x: 1
@@ -36,14 +33,16 @@ const refinedShardKeyValueInChunk = {
y: 1
};
+const st = new ShardingTest({shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
+
function setUp() {
// Create a sharded collection with two chunks on shard0, split at key {x: -1}.
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: originalShardKey}));
assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: -1}}));
- // Insert documents into the collection, which contains two chunks. Insert documents only into
- // the second chunk
+ // Insert documents into the collection, which contains two chunks. Insert documents only
+ // into the second chunk
for (let i = 0; i < 100; i++) {
st.s.getCollection(ns).insert({x: i});
}
@@ -69,7 +68,8 @@ function test(description, testBody) {
test("Refining the shard key does not prevent removal of orphaned documents", () => {
// Enable failpoint which will cause range deletion to hang indefinitely.
- let suspendRangeDeletionFailpoint = configureFailPoint(st.shard0, "suspendRangeDeletion");
+ let suspendRangeDeletionFailpoint =
+ configureFailPoint(st.rs0.getPrimary(), "suspendRangeDeletion");
// Note that _waitForDelete has to be absent/false since we're suspending range deletion.
assert.commandWorked(
@@ -97,7 +97,7 @@ test("Refining the shard key does not prevent removal of orphaned documents", ()
// The range deletion should eventually succeed in the background.
assert.soon(() => {
- return st.shard0.getCollection(ns).find().itcount() == 0;
+ return st.rs0.getPrimary().getCollection(ns).find().itcount() == 0;
});
});
@@ -105,9 +105,11 @@ test("Chunks with a refined shard key cannot migrate back onto a shard with " +
"orphaned documents created with the prior shard key",
() => {
// Enable failpoint which will cause range deletion to hang indefinitely.
- let suspendRangeDeletionFailpoint = configureFailPoint(st.shard0, "suspendRangeDeletion");
+ let suspendRangeDeletionFailpoint =
+ configureFailPoint(st.rs0.getPrimary(), "suspendRangeDeletion");
- // Note that _waitForDelete has to be absent/false since we're suspending range deletion.
+ // Note that _waitForDelete has to be absent/false since we're suspending range
+ // deletion.
assert.commandWorked(st.s.adminCommand(
{moveChunk: ns, find: shardKeyValueInChunk, to: st.shard1.shardName}));
@@ -150,7 +152,7 @@ test("Chunks with a refined shard key cannot migrate back onto a shard with " +
// The range deletion should eventually succeed in the background.
assert.soon(() => {
- return st.shard0.getCollection(ns).find().itcount() == 0;
+ return st.rs0.getPrimary().getCollection(ns).find().itcount() == 0;
});
// Moving the chunk back to shard 0 should now succeed.
@@ -158,5 +160,61 @@ test("Chunks with a refined shard key cannot migrate back onto a shard with " +
{moveChunk: ns, find: refinedShardKeyValueInChunk, to: st.shard0.shardName}));
});
+// This test was created to reproduce a specific bug, which is why it may sound like an odd thing to
+// test. See SERVER-46386 for more details.
+test("Range deletion tasks created prior to refining the shard key do not " +
+ "conflict with non-overlapping ranges once the shard key is refined",
+ () => {
+ // Enable failpoint which will cause range deletion to hang indefinitely.
+ let suspendRangeDeletionFailpoint =
+ configureFailPoint(st.rs0.getPrimary(), "suspendRangeDeletion");
+
+ // Note that _waitForDelete has to be absent/false since we're suspending range deletion.
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: ns, find: shardKeyValueInChunk, to: st.shard1.shardName}));
+
+ jsTestLog("Waiting for the suspendRangeDeletion failpoint to be hit");
+
+ suspendRangeDeletionFailpoint.wait();
+
+ jsTestLog("Refining the shard key");
+
+ // Create an index on the refined shard key.
+ assert.commandWorked(st.s.getCollection(ns).createIndex(refinedShardKey));
+
+ // Refine the shard key from just the field 'x' to 'x' and 'y'.
+ assert.commandWorked(
+ st.s.adminCommand({refineCollectionShardKey: ns, key: refinedShardKey}));
+
+ // The index on the original shard key shouldn't be required anymore.
+ assert.commandWorked(st.s.getCollection(ns).dropIndex(originalShardKey));
+
+ // Step down current primary.
+ assert.commandWorked(st.rs0.getPrimary().adminCommand(
+ {replSetStepDown: ReplSetTest.kForeverSecs, force: 1}));
+
+ // Allow range deletion to continue on old node. This isn't required for this test to
+ // proceed since we only care about the new primary, but it's worth cleaning up.
+ suspendRangeDeletionFailpoint.off();
+
+ jsTestLog("Waiting for orphans to be removed from shard 0");
+
+ // The range deletion should eventually succeed in the background on the new primary.
+ assert.soon(() => {
+ return st.rs0.getPrimary().getCollection(ns).find().itcount() == 0;
+ });
+
+ // Wait for the donor to learn about the new primary on the recipient.
+ awaitRSClientHosts(st.rs1.getPrimary(), st.rs0.getPrimary(), {ok: true, ismaster: true});
+
+ // We should be able to move the chunk back to shard 0 now that orphans are gone.
+ assert.commandWorked(st.s.adminCommand({
+ moveChunk: ns,
+ find: refinedShardKeyValueInChunk,
+ to: st.shard0.shardName,
+ _waitForDelete: true
+ }));
+ });
+
st.stop();
})();
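
For context on the new test case: after refineCollectionShardKey, existing chunk bounds gain MinKey entries for the newly added fields, while a range deletion task persisted before the refine still stores bounds in the old shard key format. A rough sketch of the two shapes, assuming the {x: 1} to {x: 1, y: 1} refine used in this test (illustrative values, not code from the commit):

// Range recorded by the suspended migration before the refine.
const preRefineRange = {min: {x: -1}, max: {x: MaxKey}};
// The same logical range once bounds carry the refined shard key.
const postRefineRange = {min: {x: -1, y: MinKey}, max: {x: MaxKey, y: MinKey}};
// SERVER-46386 changes the overlap check in MetadataManager::_findNewestOverlappingMetadata
// to handle both shapes, so a pre-refine deletion task is not treated as
// conflicting with ranges it does not actually cover.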