author     Tommaso Tocci <tommaso.tocci@mongodb.com>        2021-02-05 11:49:41 +0100
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-02-08 11:59:55 +0000
commit     ade18a70ec691e327b7efa800ad5ea56d51c9ac5 (patch)
tree       babddb3de32f64c8f4971c4ea7e4cd4b54293e2e
parent     c5a0ded0868acca1c6ee93a0bba394be16e4aa5c (diff)
download   mongo-ade18a70ec691e327b7efa800ad5ea56d51c9ac5.tar.gz
SERVER-54331 Extend dropCollection test coverage
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml    1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_misc.yml                           1
-rw-r--r--  jstests/sharding/basic_drop_coll.js                                          82
-rw-r--r--  jstests/sharding/drop_collection.js                                         262
-rw-r--r--  jstests/sharding/drop_configdb.js                                             8
-rw-r--r--  jstests/sharding/move_primary_with_drop_collection.js                        82
6 files changed, 270 insertions(+), 166 deletions(-)
diff --git a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
index 5c2d9494f2c..112102461d9 100644
--- a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
@@ -24,7 +24,6 @@ selector:
- jstests/sharding/addshard2.js
- jstests/sharding/autosplit.js
- jstests/sharding/auto_rebalance_parallel.js
- - jstests/sharding/basic_drop_coll.js
- jstests/sharding/basic_merge.js
- jstests/sharding/count1.js
- jstests/sharding/count2.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_misc.yml b/buildscripts/resmokeconfig/suites/sharding_misc.yml
index cc11b576973..5718c08dfac 100644
--- a/buildscripts/resmokeconfig/suites/sharding_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_misc.yml
@@ -205,7 +205,6 @@ selector:
- jstests/sharding/inserts_consistent.js
- jstests/sharding/shard4.js
- jstests/sharding/migration_with_source_ops.js
- - jstests/sharding/basic_drop_coll.js
- jstests/sharding/migration_failure.js
- jstests/sharding/query/views.js
- jstests/sharding/shard2.js
diff --git a/jstests/sharding/basic_drop_coll.js b/jstests/sharding/basic_drop_coll.js
deleted file mode 100644
index 96095cee60b..00000000000
--- a/jstests/sharding/basic_drop_coll.js
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Basic test for the drop collection command on a sharded cluster that verifies collections are
- * cleaned up properly.
- *
- * @tags: [requires_fcv_47]
- */
-(function() {
-"use strict";
-
-load("jstests/sharding/libs/find_chunks_util.js");
-
-var st = new ShardingTest({shards: 2});
-
-var testDB = st.s.getDB('test');
-
-// Test dropping an unsharded collection.
-
-assert.commandWorked(testDB.bar.insert({x: 1}));
-assert.neq(null, testDB.bar.findOne({x: 1}));
-
-assert.commandWorked(testDB.runCommand({drop: 'bar'}));
-assert.eq(null, testDB.bar.findOne({x: 1}));
-
-assert.commandFailedWithCode(st.s.getDB('admin').runCommand({drop: 'secrets'}),
- ErrorCodes.IllegalOperation);
-assert.commandFailedWithCode(st.s.getDB('config').runCommand({drop: 'settings'}),
- ErrorCodes.IllegalOperation);
-
-// Test dropping a sharded collection.
-
-assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
-st.ensurePrimaryShard('test', st.shard0.shardName);
-st.s.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
-st.s.adminCommand({split: 'test.user', middle: {_id: 0}});
-assert.commandWorked(
- st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: st.shard1.shardName}));
-assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: 'foo'}));
-assert.commandWorked(st.s.adminCommand(
- {updateZoneKeyRange: 'test.user', min: {_id: 0}, max: {_id: 10}, zone: 'foo'}));
-
-assert.commandWorked(testDB.user.insert({_id: 10}));
-assert.commandWorked(testDB.user.insert({_id: -10}));
-
-assert.neq(null, st.shard0.getDB('test').user.findOne({_id: -10}));
-assert.neq(null, st.shard1.getDB('test').user.findOne({_id: 10}));
-
-var configDB = st.s.getDB('config');
-var collDoc = configDB.collections.findOne({_id: 'test.user'});
-
-assert.eq(2, findChunksUtil.countChunksForNs(configDB, 'test.user'));
-assert.eq(1, configDB.tags.count({ns: 'test.user'}));
-
-assert.commandWorked(testDB.runCommand({drop: 'user'}));
-
-assert.eq(null, st.shard0.getDB('test').user.findOne());
-assert.eq(null, st.shard1.getDB('test').user.findOne());
-
-// Call drop again to verify that the command is idempotent.
-assert.commandWorked(testDB.runCommand({drop: 'user'}));
-
-// Check for the collection with majority RC to verify that the write to remove the collection
-// document from the catalog has propagated to the majority snapshot. Note that here we explicitly
-// use a command instead of going through the driver's 'find' helper, in order to be able to specify
-// a 'majority' read concern.
-//
-// TODO (SERVER-51881): Remove this check after 5.0 is released
-var collEntry =
- assert
- .commandWorked(configDB.runCommand(
- {find: 'collections', filter: {_id: 'test.user'}, readConcern: {'level': 'majority'}}))
- .cursor.firstBatch;
-if (collEntry.length > 0) {
- assert.eq(1, collEntry.length);
- assert.eq(true, collEntry[0].dropped);
-}
-
-assert.eq(0, configDB.chunks.count({ns: 'test.user'}));
-assert.eq(0, configDB.chunks.count({uuid: collDoc.uuid}));
-assert.eq(0, configDB.tags.count({ns: 'test.user'}));
-
-st.stop();
-})();
diff --git a/jstests/sharding/drop_collection.js b/jstests/sharding/drop_collection.js
new file mode 100644
index 00000000000..9e68b2c1ff7
--- /dev/null
+++ b/jstests/sharding/drop_collection.js
@@ -0,0 +1,262 @@
+/**
+ * Basic test for the drop collection command on a sharded cluster that verifies collections are
+ * cleaned up properly.
+ */
+(function() {
+"use strict";
+
+load("jstests/sharding/libs/find_chunks_util.js");
+
+var st = new ShardingTest({shards: 2});
+
+const configDB = st.s.getDB('config');
+const dbName = 'testDropCollDB';
+var dbCounter = 0;
+
+function getCollectionUUID(ns) {
+ return configDB.collections.findOne({_id: ns}).uuid;
+}
+
+function getNewDb() {
+ return st.s.getDB(dbName + dbCounter++);
+}
+
+function assertCollectionDropped(ns, uuid = null) {
+ // No more documents
+ assert.eq(
+ 0, st.s.getCollection(ns).countDocuments({}), "Found documents for dropped collection.");
+
+ // No more tags
+ assert.eq(0,
+ configDB.tags.countDocuments({ns: ns}),
+ "Found unexpected tag for a collection after drop.");
+
+ // No more chunks
+    const errMsg = "Found chunk entries in 'config.chunks' for the collection after drop.";
+ // Before 5.0 chunks were indexed by ns, now by uuid
+ assert.eq(0, configDB.chunks.countDocuments({ns: ns}), errMsg);
+ if (uuid != null) {
+ assert.eq(0, configDB.chunks.countDocuments({uuid: uuid}), errMsg);
+ }
+
+ // No more coll entry
+ assert.eq(null, st.s.getCollection(ns).exists());
+
+ // Check for the collection with majority RC to verify that the write to remove the collection
+ // document from the catalog has propagated to the majority snapshot. Note that here we
+ // explicitly use a command instead of going through the driver's 'find' helper, in order to be
+ // able to specify a 'majority' read concern.
+ //
+ // TODO (SERVER-51881): Remove this check after 5.0 is released
+ var collEntry =
+ assert
+ .commandWorked(configDB.runCommand(
+ {find: 'collections', filter: {_id: ns}, readConcern: {'level': 'majority'}}))
+ .cursor.firstBatch;
+ if (collEntry.length > 0) {
+ assert.eq(1, collEntry.length);
+ assert.eq(true, collEntry[0].dropped);
+ }
+}
+
+// Drop unsharded collection
+{
+ const db = getNewDb();
+ const coll = db['unshardedColl0'];
+ // Create the collection
+ assert.commandWorked(coll.insert({x: 1}));
+ assert.eq(1, coll.countDocuments({x: 1}));
+ // Drop the collection
+ assert.commandWorked(db.runCommand({drop: coll.getName()}));
+ assertCollectionDropped(coll.getFullName());
+}
+
+// Dropping an unsharded collection also removes its tags
+{
+ const db = getNewDb();
+ const coll = db['unshardedColl1'];
+ // Create the database
+ assert.commandWorked(st.s.adminCommand({enableSharding: db.getName()}));
+ // Add a zone
+ assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: 'zone1'}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: coll.getFullName(), min: {x: 0}, max: {x: 10}, zone: 'zone1'}));
+ assert.eq(1, configDB.tags.countDocuments({ns: coll.getFullName()}));
+ // Create the collection
+ assert.commandWorked(coll.insert({x: 1}));
+ // Drop the collection
+ assert.commandWorked(db.runCommand({drop: coll.getName()}));
+ assertCollectionDropped(coll.getFullName());
+}
+
+// Dropping a nonexistent collection also removes its tags
+{
+ const db = getNewDb();
+ const coll = db['unexistent'];
+ // Create the database
+ assert.commandWorked(st.s.adminCommand({enableSharding: db.getName()}));
+ // Add a zone
+ assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: 'zone1'}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: coll.getFullName(), min: {x: -1}, max: {x: 1}, zone: 'zone1'}));
+ assert.eq(1, configDB.tags.countDocuments({ns: coll.getFullName()}));
+ // Drop the collection
+ assert.commandWorked(db.runCommand({drop: coll.getName()}));
+ assertCollectionDropped(coll.getFullName());
+}
+
+// Drop a sharded collection
+{
+ const db = getNewDb();
+ const coll = db['shardedColl1'];
+
+ assert.commandWorked(
+ st.s.adminCommand({enableSharding: db.getName(), primaryShard: st.shard0.shardName}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+ // Spread chunks on all the shards
+ assert.commandWorked(st.s.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
+ // Insert two documents
+ assert.commandWorked(coll.insert({_id: 10}));
+ assert.commandWorked(coll.insert({_id: -10}));
+
+ // Check that data is in place
+ assert.eq(2, coll.countDocuments({}));
+ assert.eq(1, configDB.collections.countDocuments({_id: coll.getFullName()}));
+
+ // Drop the collection
+ const uuid = getCollectionUUID(coll.getFullName());
+ assert.commandWorked(db.runCommand({drop: coll.getName()}));
+ assertCollectionDropped(coll.getFullName(), uuid);
+
+ // Call drop again to verify that the command is idempotent.
+    assert.commandWorked(db.runCommand({drop: coll.getName()}));
+}
+
+// Drop a sharded collection with zones
+{
+ const db = getNewDb();
+ const coll = db['shardedColl2'];
+
+ assert.commandWorked(
+ st.s.adminCommand({enableSharding: db.getName(), primaryShard: st.shard0.shardName}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+ // Spread chunks on all the shards
+ assert.commandWorked(st.s.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName}));
+ // Add tags
+ assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: 'foo'}));
+ assert.commandWorked(st.s.adminCommand(
+ {updateZoneKeyRange: coll.getFullName(), min: {_id: 0}, max: {_id: 10}, zone: 'foo'}));
+
+ assert.commandWorked(coll.insert({_id: -10}));
+ assert.commandWorked(coll.insert({_id: 10}));
+
+    // Check that data and metadata are in place
+ assert.eq(1, configDB.tags.countDocuments({ns: coll.getFullName()}));
+ assert.eq(2, coll.countDocuments({}));
+ assert.eq(2, findChunksUtil.countChunksForNs(configDB, coll.getFullName()));
+ assert.neq(null, st.shard0.getCollection(coll.getFullName()).findOne({_id: -10}));
+ assert.neq(null, st.shard1.getCollection(coll.getFullName()).findOne({_id: 10}));
+
+ // Drop the collection
+ const uuid = getCollectionUUID(coll.getFullName());
+ assert.commandWorked(db.runCommand({drop: coll.getName()}));
+ assertCollectionDropped(coll.getFullName(), uuid);
+
+ // Call drop again to verify that the command is idempotent.
+ assert.commandWorked(db.runCommand({drop: coll.getName()}));
+}
+
+/*
+ * Test that moving database primary works after dropping and recreating the same sharded
+ * collection.
+ * The new primary never owned a chunk of the sharded collection.
+ */
+{
+ const db = getNewDb();
+ const coll = db['movePrimaryNoChunks'];
+
+ jsTest.log("Create sharded collection with on chunk on shad 0");
+ assert.commandWorked(
+ st.s.adminCommand({enableSharding: db.getName(), primaryShard: st.shard0.shardName}));
+ st.shardColl(coll, {skey: 1}, false, false);
+
+ jsTest.log("Move database primary back and forth shard 1");
+ st.ensurePrimaryShard(db.getName(), st.shard1.shardName);
+ st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+
+ jsTest.log("Drop sharded collection");
+ var uuid = getCollectionUUID(coll.getFullName());
+ coll.drop();
+ assertCollectionDropped(coll.getFullName(), uuid);
+
+ jsTest.log("Re-Create sharded collection on shard 0");
+ st.shardColl(coll, {skey: 1}, false, false);
+
+ jsTest.log("Move database primary to shard 1");
+ st.ensurePrimaryShard(db.getName(), st.shard1.shardName);
+
+ jsTest.log("Drop sharded collection");
+ uuid = getCollectionUUID(coll.getFullName());
+ coll.drop();
+ assertCollectionDropped(coll.getFullName(), uuid);
+}
+
+/*
+ * Test that moving database primary works after dropping and recreating the same sharded
+ * collection.
+ * The new primary previously owned a chunk of the original collection.
+ */
+{
+ const db = getNewDb();
+ const coll = db['movePrimaryWithChunks'];
+
+ assert.commandWorked(st.s.adminCommand({enableSharding: db.getName()}));
+
+ jsTest.log("Create sharded collection with two chunks on each shard");
+ st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+ st.shardColl(coll, {skey: 1}, {skey: 0}, {skey: 0});
+
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ configDB, coll.getFullName(), {shard: st.shard0.shardName}));
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ configDB, coll.getFullName(), {shard: st.shard1.shardName}));
+ jsTest.log("Move all chunks to shard 0");
+ assert.commandWorked(st.s.adminCommand({
+ moveChunk: coll.getFullName(),
+ find: {skey: 10},
+ to: st.shard0.shardName,
+ _waitForDelete: true
+ }));
+ assert.eq(2,
+ findChunksUtil.countChunksForNs(
+ configDB, coll.getFullName(), {shard: st.shard0.shardName}));
+ assert.eq(0,
+ findChunksUtil.countChunksForNs(
+ configDB, coll.getFullName(), {shard: st.shard1.shardName}));
+
+ jsTest.log("Drop sharded collection");
+ coll.drop();
+
+ jsTest.log("Re-Create sharded collection with one chunk on shard 0");
+ st.shardColl(coll, {skey: 1}, false, false);
+ assert.eq(1,
+ findChunksUtil.countChunksForNs(
+ configDB, coll.getFullName(), {shard: st.shard0.shardName}));
+
+ jsTest.log("Move primary of DB to shard 1");
+ st.ensurePrimaryShard(db.getName(), st.shard1.shardName);
+
+ jsTest.log("Drop sharded collection");
+ coll.drop();
+}
+
+st.stop();
+})();
diff --git a/jstests/sharding/drop_configdb.js b/jstests/sharding/drop_configdb.js
index 151fc2b91fc..f2be32977a5 100644
--- a/jstests/sharding/drop_configdb.js
+++ b/jstests/sharding/drop_configdb.js
@@ -9,6 +9,14 @@ var st = new ShardingTest({shards: 1});
var mongos = st.s;
var config = st.configRS.getPrimary().getDB('config');
+jsTest.log("Dropping a collection in admin/config DB is illegal");
+{
+ assert.commandFailedWithCode(st.s.getDB('admin').runCommand({drop: 'secrets'}),
+ ErrorCodes.IllegalOperation);
+ assert.commandFailedWithCode(st.s.getDB('config').runCommand({drop: 'settings'}),
+ ErrorCodes.IllegalOperation);
+}
+
// Try to drop config db via configsvr
print("1: Try to drop config database via configsvr");
diff --git a/jstests/sharding/move_primary_with_drop_collection.js b/jstests/sharding/move_primary_with_drop_collection.js
deleted file mode 100644
index 3b16f3edc82..00000000000
--- a/jstests/sharding/move_primary_with_drop_collection.js
+++ /dev/null
@@ -1,82 +0,0 @@
-(function() {
-"use strict";
-
-load("jstests/sharding/libs/find_chunks_util.js");
-
-var st = new ShardingTest({shards: 2});
-var configDB = st.s.getDB('config');
-
-/*
- * Test that moving database primary works after dropping and recreating the same sharded collection,
- * the new primary never owned a chunk of the sharded collection.
- */
-var testDB = st.s.getDB(jsTest.name() + "_db1");
-var coll = testDB['coll'];
-
-assert.commandWorked(st.s.adminCommand({enableSharding: testDB.getName()}));
-
-jsTest.log("Create sharded collection with on chunk on shad 0");
-st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
-st.shardColl(coll, {skey: 1}, false, false);
-
-jsTest.log("Move database primary back and forth shard 1");
-st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
-st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
-
-jsTest.log("Drop sharded collection");
-coll.drop();
-
-jsTest.log("Re-Create sharded collection on shard 0");
-st.shardColl(coll, {skey: 1}, false, false);
-
-jsTest.log("Move database primary to shard 1");
-st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
-
-jsTest.log("Drop sharded collection");
-coll.drop();
-
-/*
- * Test that moving database primary works after dropping and recreating the same sharded collection,
- * the new primary previously owned a chunk of the original collection.
- */
-var testDB = st.s.getDB(jsTest.name() + "_db2");
-var coll = testDB['coll'];
-
-assert.commandWorked(st.s.adminCommand({enableSharding: testDB.getName()}));
-
-jsTest.log("Create sharded collection with two chunks on each shard");
-st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
-st.shardColl(coll, {skey: 1}, {skey: 0}, {skey: 0});
-
-assert.eq(
- 1, findChunksUtil.countChunksForNs(configDB, coll.getFullName(), {shard: st.shard0.shardName}));
-assert.eq(
- 1, findChunksUtil.countChunksForNs(configDB, coll.getFullName(), {shard: st.shard1.shardName}));
-jsTest.log("Move all chunks to shard 0");
-assert.commandWorked(st.s.adminCommand({
- moveChunk: coll.getFullName(),
- find: {skey: 10},
- to: st.shard0.shardName,
- _waitForDelete: true
-}));
-assert.eq(
- 2, findChunksUtil.countChunksForNs(configDB, coll.getFullName(), {shard: st.shard0.shardName}));
-assert.eq(
- 0, findChunksUtil.countChunksForNs(configDB, coll.getFullName(), {shard: st.shard1.shardName}));
-
-jsTest.log("Drop sharded collection");
-coll.drop();
-
-jsTest.log("Re-Create sharded collection with one chunk on shard 0");
-st.shardColl(coll, {skey: 1}, false, false);
-assert.eq(
- 1, findChunksUtil.countChunksForNs(configDB, coll.getFullName(), {shard: st.shard0.shardName}));
-
-jsTest.log("Move primary of DB to shard 1");
-st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName);
-
-jsTest.log("Drop sharded collection");
-coll.drop();
-
-st.stop();
-})();