author     Esha Maharishi <esha.maharishi@mongodb.com>    2020-02-04 16:22:19 -0500
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2020-03-16 19:29:38 +0000
commit     2af596d72edc259df7b66d94169fc628391631ab (patch)
tree       32ce8032e24e77a053ca335940ebf2417a493b52 /jstests/sharding/cleanup_orphaned_cmd_hashed.js
parent     800da7962b1fed69986b099b694d12e16658be99 (diff)
download   mongo-2af596d72edc259df7b66d94169fc628391631ab.tar.gz
SERVER-46889 Add back original tests of cleanupOrphaned, but with shards running with disableResumableRangeDeleter=true
Diffstat (limited to 'jstests/sharding/cleanup_orphaned_cmd_hashed.js')
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_hashed.js  80
1 file changed, 80 insertions, 0 deletions
diff --git a/jstests/sharding/cleanup_orphaned_cmd_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
new file mode 100644
index 00000000000..5efa4ad6c40
--- /dev/null
+++ b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
@@ -0,0 +1,80 @@
+//
+// Tests cleanup of orphaned data in a hashed sharded collection via the cleanupOrphaned command.
+//
+// requires_fcv_44 because the 'disableResumableRangeDeleter' parameter was introduced in v4.4.
+// @tags: [requires_fcv_44]
+
+(function() {
+"use strict";
+
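+// Run the shards with 'disableResumableRangeDeleter=true' (see SERVER-46889) so that
+// cleanupOrphaned uses its original behavior of deleting orphaned ranges itself and reporting
+// progress through 'stoppedAtKey', which is what this test exercises.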
+var st = new ShardingTest(
+ {shards: 2, mongos: 1, shardOptions: {setParameter: {"disableResumableRangeDeleter": true}}});
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
+
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
+assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: "hashed"}}));
+
+// Create two orphaned data holes, one bounded by min or max on each shard
+
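+// The collection is sharded on the hash of _id, so these split points and the moveChunk bounds
+// below are values in the hashed key space, not raw _id values.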
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-100)}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-50)}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(50)}}));
+assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(100)}}));
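+// '_waitForDelete: true' makes each migration wait for the donor shard to delete the moved
+// range, so neither shard has any orphaned documents yet at this point.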
+assert.commandWorked(admin.runCommand({
+ moveChunk: coll + "",
+ bounds: [{_id: NumberLong(-100)}, {_id: NumberLong(-50)}],
+ to: st.shard1.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: coll + "",
+ bounds: [{_id: NumberLong(50)}, {_id: NumberLong(100)}],
+ to: st.shard0.shardName,
+ _waitForDelete: true
+}));
+st.printShardingStatus();
+
+jsTest.log("Inserting some docs on each shard, so 1/2 will be orphaned...");
+
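+// Insert the same 100 documents directly into each shard, bypassing mongos. Each _id hashes into
+// a chunk owned by exactly one shard, so the copy on the other shard is an orphan.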
+for (var s = 0; s < 2; s++) {
+ var shardColl = (s == 0 ? st.shard0 : st.shard1).getCollection(coll + "");
+ var bulk = shardColl.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++)
+ bulk.insert({_id: i});
+ assert.commandWorked(bulk.execute());
+}
+
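+// Counting directly on the shards sees all 200 copies, while mongos filters out orphans and
+// returns only the 100 owned documents.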
+assert.eq(200,
+ st.shard0.getCollection(coll + "").find().itcount() +
+ st.shard1.getCollection(coll + "").find().itcount());
+assert.eq(100, coll.find().itcount());
+
+jsTest.log("Cleaning up orphaned data in hashed coll...");
+
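+// With the resumable range deleter disabled, cleanupOrphaned deletes at most one orphaned range
+// per call and reports its progress in 'stoppedAtKey'; keep re-invoking it from that key until
+// no 'stoppedAtKey' is returned.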
+for (var s = 0; s < 2; s++) {
+ var shardAdmin = (s == 0 ? st.shard0 : st.shard1).getDB("admin");
+
+ var result = shardAdmin.runCommand({cleanupOrphaned: coll + ""});
+ while (result.ok && result.stoppedAtKey) {
+ printjson(result);
+ result = shardAdmin.runCommand(
+ {cleanupOrphaned: coll + "", startingFromKey: result.stoppedAtKey});
+ }
+
+ printjson(result);
+ assert(result.ok);
+}
+
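+// After cleanup, each shard holds only the documents it owns, so the direct per-shard counts sum
+// to 100 again while the view through mongos is unchanged.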
+assert.eq(100,
+ st.shard0.getCollection(coll + "").find().itcount() +
+ st.shard1.getCollection(coll + "").find().itcount());
+assert.eq(100, coll.find().itcount());
+
+jsTest.log("DONE!");
+
+st.stop();
+})();