author     Nikita Lapkov <nikita.lapkov@mongodb.com>          2021-09-08 13:53:21 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2021-09-20 15:12:19 +0000
commit     8e08ad9655b7bac2a6705c1aaf2f7feaa997fd69 (patch)
tree       7020e5f8808c639a5da62253da1095f2e867cfeb
parent     dc7f0f2f730fdb114657242190fac7a9cfc33fb2 (diff)
download   mongo-8e08ad9655b7bac2a6705c1aaf2f7feaa997fd69.tar.gz
SERVER-59126 Delete buckets collection metadata from config servers on time-series collection drop
-rw-r--r--  jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js             93
-rw-r--r--  jstests/core/timeseries/timeseries_bucket_drop.js                                   37
-rw-r--r--  jstests/core/timeseries/timeseries_list_collections_missing_buckets_collection.js   37
-rw-r--r--  jstests/sharding/timeseries_drop.js                                                155
-rw-r--r--  jstests/sharding/timeseries_indexes.js                                               2
-rw-r--r--  jstests/sharding/timeseries_insert.js                                               10
-rw-r--r--  jstests/sharding/timeseries_query.js                                                10
-rw-r--r--  jstests/sharding/timeseries_shard_collection.js                                     11
-rw-r--r--  jstests/sharding/timeseries_sharding_admin_commands.js                               8
-rw-r--r--  jstests/sharding/timeseries_time_value_rounding.js                                   7
-rw-r--r--  src/mongo/db/catalog/drop_collection.cpp                                             9
-rw-r--r--  src/mongo/db/s/shardsvr_drop_collection_command.cpp                                 11
12 files changed, 283 insertions, 107 deletions
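
In shell terms, the guarantee this commit adds can be sketched as follows (illustrative only, not part of the commit; the database, collection, and field names are hypothetical): after dropping a sharded time-series collection, no entry for its buckets collection may remain in config.collections.

const st = new ShardingTest({shards: 2});
const demoDB = st.s.getDB('demoDB');
assert.commandWorked(st.s.adminCommand({enableSharding: 'demoDB'}));
// Sharding with the 'timeseries' option implicitly creates both the view and
// the backing 'system.buckets.weather' collection.
assert.commandWorked(st.s.adminCommand({
    shardCollection: 'demoDB.weather',
    key: {sensor: 1, ts: 1},
    timeseries: {timeField: 'ts', metaField: 'sensor'}
}));
assert(demoDB.weather.drop());
// Before this change a stale buckets entry could linger on the config server;
// after it, the drop must leave nothing behind.
assert.eq([],
          st.s.getDB('config')
              .collections.find({_id: 'demoDB.system.buckets.weather'})
              .toArray());
st.stop();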
diff --git a/jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js b/jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js
new file mode 100644
index 00000000000..7a411309b74
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js
@@ -0,0 +1,93 @@
+/**
+ * Repeatedly creates and drops a sharded time-series collection.
+ *
+ * @tags: [
+ * requires_sharding,
+ * ]
+ */
+'use strict';
+
+const dbPrefix = 'fsm_db_for_sharded_timeseries_collection_';
+const dbCount = 2;
+const collPrefix = 'sharded_timeseries_collection_';
+const collCount = 2;
+const timeField = 'time';
+const metaField = 'hostId';
+
+load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers.
+
+function getRandomDb(db) {
+ return db.getSiblingDB(dbPrefix + Random.randInt(dbCount));
+}
+
+function getRandomTimeseriesView(db) {
+ return getRandomDb(db)[collPrefix + Random.randInt(collCount)];
+}
+
+var $config = (function() {
+ const setup = function(db, collName, cluster) {
+ // Check that necessary feature flags are enabled on each of the mongods.
+ let isEnabled = true;
+ cluster.executeOnMongodNodes(function(db) {
+ if (!TimeseriesTest.timeseriesCollectionsEnabled(db) ||
+ !TimeseriesTest.shardedtimeseriesCollectionsEnabled(db)) {
+ isEnabled = false;
+ }
+ });
+ this.isShardedTimeseriesEnabled = isEnabled;
+
+ if (!this.isShardedTimeseriesEnabled) {
+ jsTestLog(
+ "Feature flags for sharded time-series collections are not enabled. This test will do nothing.");
+ return;
+ }
+
+ // Enable sharding for the test databases.
+ for (var i = 0; i < dbCount; i++) {
+ const dbName = dbPrefix + i;
+ db.adminCommand({enablesharding: dbName});
+ }
+ };
+
+ const states = {
+ init: function(db, collName) {},
+ create: function(db, collName) {
+ if (!this.isShardedTimeseriesEnabled) {
+ return;
+ }
+
+ const coll = getRandomTimeseriesView(db);
+ jsTestLog("Executing create state on: " + coll.getFullName());
+ assertAlways.commandWorked(db.adminCommand({
+ shardCollection: coll.getFullName(),
+ key: {[metaField]: 1, [timeField]: 1},
+ timeseries: {timeField: timeField, metaField: metaField}
+ }));
+ },
+ dropView: function(db, collName) {
+ if (!this.isShardedTimeseriesEnabled) {
+ return;
+ }
+
+ const coll = getRandomTimeseriesView(db);
+ jsTestLog("Executing dropView state on: " + coll.getFullName());
+ assertAlways.commandWorked(coll.getDB().runCommand({drop: coll.getName()}));
+ },
+ };
+
+ const transitions = {
+ init: {create: 0.33, dropView: 0.33},
+ create: {create: 0.33, dropView: 0.33},
+ dropView: {create: 0.33, dropView: 0.33},
+ };
+
+ return {
+ threadCount: 12,
+ iterations: 64,
+ startState: 'init',
+ data: {},
+ states: states,
+ setup: setup,
+ transitions: transitions
+ };
+})();
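
This workload is picked up by the FSM runner like any other file under jstests/concurrency/fsm_workloads; a typical local invocation would look roughly like the following (the suite name is an assumption and may differ by branch):

python buildscripts/resmoke.py run --suites=concurrency_sharded_replication jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js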
diff --git a/jstests/core/timeseries/timeseries_bucket_drop.js b/jstests/core/timeseries/timeseries_bucket_drop.js
deleted file mode 100644
index db0324f19b3..00000000000
--- a/jstests/core/timeseries/timeseries_bucket_drop.js
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Tests dropping the bucket collection still results in a collection existing and being droppable
- *
- * @tags: [
- * assumes_no_implicit_collection_creation_after_drop,
- * does_not_support_stepdowns,
- * does_not_support_transactions,
- * requires_getmore,
- * ]
- */
-
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-TimeseriesTest.run((insert) => {
- const testDB = db.getSiblingDB(jsTestName());
- assert.commandWorked(testDB.dropDatabase());
-
- const coll = testDB.timeseries_bucket_drop;
- const buckets = testDB.getCollection('system.buckets.' + coll.getName());
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {timeseries: {timeField: 'time'}}));
- // Drop the buckets first
- assert.commandWorked(testDB.runCommand({drop: buckets.getName()}));
- let collections =
- assert.commandWorked(testDB.runCommand({listCollections: 1})).cursor.firstBatch;
- // Check that we delete bucket but not collection
- assert.isnull(collections.find(entry => entry.name == buckets.getName()));
- assert(collections.find(entry => entry.name == coll.getName()));
- // Still should be able to drop the collection
- assert.commandWorked(testDB.runCommand({drop: coll.getName()}));
- collections = assert.commandWorked(testDB.runCommand({listCollections: 1})).cursor.firstBatch;
- assert.isnull(collections.find(entry => entry.name == buckets.getName()));
- assert.isnull(collections.find(entry => entry.name == coll.getName()));
-});
-})();
diff --git a/jstests/core/timeseries/timeseries_list_collections_missing_buckets_collection.js b/jstests/core/timeseries/timeseries_list_collections_missing_buckets_collection.js
deleted file mode 100644
index 584a36a33f2..00000000000
--- a/jstests/core/timeseries/timeseries_list_collections_missing_buckets_collection.js
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Tests that listCollections shows the time-series view, but not the buckets collection, if the
- * backing time-series buckets collection is missing.
- *
- * @tags: [
- * assumes_no_implicit_collection_creation_after_drop,
- * does_not_support_transactions,
- * requires_getmore,
- * ]
- */
-(function() {
-'use strict';
-
-const testDB = db.getSiblingDB(jsTestName());
-assert.commandWorked(testDB.dropDatabase());
-
-const timeFieldName = 'time';
-const coll = testDB.getCollection('t');
-const bucketsColl = testDB.getCollection('system.buckets.' + coll.getName());
-
-assert.commandWorked(
- testDB.createCollection(coll.getName(), {timeseries: {timeField: timeFieldName}}));
-assert(bucketsColl.drop());
-
-let collections = assert.commandWorked(testDB.runCommand({listCollections: 1})).cursor.firstBatch;
-jsTestLog('Checking listCollections result: ' + tojson(collections));
-assert.eq(collections.length, 2);
-assert(collections.find(entry => entry.name === 'system.views'));
-assert.docEq(collections.find(entry => entry.name === coll.getName()),
- {name: coll.getName(), type: 'timeseries', options: {}, info: {readOnly: false}});
-
-collections =
- assert.commandWorked(testDB.runCommand({listCollections: 1, filter: {name: coll.getName()}}))
- .cursor.firstBatch;
-assert.eq(collections,
- [{name: coll.getName(), type: 'timeseries', options: {}, info: {readOnly: false}}]);
-})();
diff --git a/jstests/sharding/timeseries_drop.js b/jstests/sharding/timeseries_drop.js
new file mode 100644
index 00000000000..e17a539fc2f
--- /dev/null
+++ b/jstests/sharding/timeseries_drop.js
@@ -0,0 +1,155 @@
+/**
+ * Tests dropping a sharded time-series collection.
+ *
+ * @tags: [
+ * requires_fcv_51,
+ * requires_find_command
+ * ]
+ */
+
+(function() {
+"use strict";
+
+load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers.
+
+Random.setRandomSeed();
+
+const dbName = 'testDB';
+const collName = 'testColl';
+const timeField = 'time';
+const metaField = 'hostid';
+
+// Connections.
+const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+const mongos = st.s0;
+
+// Sanity checks.
+if (!TimeseriesTest.timeseriesCollectionsEnabled(st.shard0)) {
+ jsTestLog("Skipping test because the time-series collection feature flag is disabled");
+ st.stop();
+ return;
+}
+
+if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) {
+ jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled");
+ st.stop();
+ return;
+}
+
+// Databases.
+const mainDB = mongos.getDB(dbName);
+const configDB = mongos.getDB('config');
+
+// Helpers.
+let currentId = 0;
+function generateId() {
+ return currentId++;
+}
+
+function generateBatch(size) {
+ return TimeseriesTest.generateHosts(size).map((host, index) => Object.assign(host, {
+ _id: generateId(),
+ [metaField]: index,
+ [timeField]: ISODate(`20${index}0-01-01`),
+ }));
+}
+
+function ensureCollectionDoesNotExist(collName) {
+ const databases = [mainDB, st.shard0.getDB(dbName), st.shard1.getDB(dbName)];
+ for (const db of databases) {
+ const collections = db.getCollectionNames();
+ assert(!collections.includes(collName), collections);
+ }
+}
+
+function runTest(getShardKey, performChunkSplit) {
+ mainDB.dropDatabase();
+
+ assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
+
+ // Create timeseries collection.
+ assert.commandWorked(mainDB.createCollection(
+ collName, {timeseries: {timeField: timeField, metaField: metaField}}));
+ const coll = mainDB.getCollection(collName);
+
+ // Shard timeseries collection.
+ const shardKey = getShardKey(1, 1);
+ assert.commandWorked(coll.createIndex(shardKey));
+ assert.commandWorked(mongos.adminCommand({
+ shardCollection: `${dbName}.${collName}`,
+ key: shardKey,
+ }));
+
+ // Insert initial set of documents.
+ const numDocs = 8;
+ const firstBatch = generateBatch(numDocs);
+ assert.commandWorked(coll.insert(firstBatch));
+
+ if (performChunkSplit) {
+ // Manually split the data into two chunks.
+ const splitIndex = numDocs / 2;
+ const splitPoint = {};
+ if (shardKey.hasOwnProperty(metaField)) {
+ splitPoint.meta = firstBatch[splitIndex][metaField];
+ }
+ if (shardKey.hasOwnProperty(timeField)) {
+ splitPoint[`control.min.${timeField}`] = firstBatch[splitIndex][timeField];
+ }
+
+ assert.commandWorked(mongos.adminCommand(
+ {split: `${dbName}.system.buckets.${collName}`, middle: splitPoint}));
+
+ // Ensure that currently both chunks reside on the primary shard.
+ let counts = st.chunkCounts(`system.buckets.${collName}`, dbName);
+ const primaryShard = st.getPrimaryShard(dbName);
+ assert.eq(2, counts[primaryShard.shardName], counts);
+
+ // Move one of the chunks into the second shard.
+ const otherShard = st.getOther(primaryShard);
+ assert.commandWorked(mongos.adminCommand({
+ movechunk: `${dbName}.system.buckets.${collName}`,
+ find: splitPoint,
+ to: otherShard.name,
+ _waitForDelete: true
+ }));
+
+ // Ensure that each shard owns one chunk.
+ counts = st.chunkCounts(`system.buckets.${collName}`, dbName);
+ assert.eq(1, counts[primaryShard.shardName], counts);
+ assert.eq(1, counts[otherShard.shardName], counts);
+ }
+
+ // Drop the time-series collection.
+ assert(coll.drop());
+
+ // Ensure that neither the time-series view nor the time-series buckets collection
+ // exists according to mongos or to either shard.
+ ensureCollectionDoesNotExist(collName);
+ ensureCollectionDoesNotExist(`system.buckets.${collName}`);
+
+ // Ensure that the time-series buckets collection gets deleted from the config database as well.
+ assert.eq([],
+ configDB.collections.find({_id: `${dbName}.system.buckets.${collName}`}).toArray());
+}
+
+try {
+ for (let performChunkSplit of [false, true]) {
+ function metaShardKey(meta, _) {
+ return {[metaField]: meta};
+ }
+ runTest(metaShardKey, performChunkSplit);
+
+ function timeShardKey(_, time) {
+ return {[timeField]: time};
+ }
+ runTest(timeShardKey, performChunkSplit);
+
+ function timeAndMetaShardKey(meta, time) {
+ return {[metaField]: meta, [timeField]: time};
+ }
+ runTest(timeAndMetaShardKey, performChunkSplit);
+ }
+} finally {
+ st.stop();
+}
+})();
diff --git a/jstests/sharding/timeseries_indexes.js b/jstests/sharding/timeseries_indexes.js
index 6b4fdb992a5..bb0db01231c 100644
--- a/jstests/sharding/timeseries_indexes.js
+++ b/jstests/sharding/timeseries_indexes.js
@@ -130,7 +130,7 @@ function generateDoc(time, metaValue) {
indexKeys = bucketsColl.getIndexes().map(x => x.key);
assert.sameMembers([{'control.min.time': 1}, {'meta.subField2': 1}], indexKeys);
- mongosDB.dropDatabase();
+ assert(coll.drop());
})();
st.stop();
diff --git a/jstests/sharding/timeseries_insert.js b/jstests/sharding/timeseries_insert.js
index a961276f71d..7657a4d4c57 100644
--- a/jstests/sharding/timeseries_insert.js
+++ b/jstests/sharding/timeseries_insert.js
@@ -36,7 +36,8 @@ if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) {
return;
}
-// Databases.
+// Databases and collections.
+assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
const mainDB = mongos.getDB(dbName);
// Helpers.
@@ -78,11 +79,6 @@ function verifyBucketsOnShard(shard, expectedBuckets) {
}
function runTest(getShardKey, insert) {
- mainDB.dropDatabase();
-
- assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
-
- // Create timeseries collection.
assert.commandWorked(mainDB.createCollection(
collName, {timeseries: {timeField: timeField, metaField: metaField}}));
const coll = mainDB.getCollection(collName);
@@ -198,6 +194,8 @@ function runTest(getShardKey, insert) {
.toArray();
assert.docEq(expectedDocuments, actualDocuments);
}
+
+ assert(coll.drop());
}
try {
diff --git a/jstests/sharding/timeseries_query.js b/jstests/sharding/timeseries_query.js
index dd2a6ba8c55..0c6b3ce067e 100644
--- a/jstests/sharding/timeseries_query.js
+++ b/jstests/sharding/timeseries_query.js
@@ -22,6 +22,7 @@ const metaField = 'hostid';
const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
const sDB = st.s.getDB(dbName);
+assert.commandWorked(sDB.adminCommand({enableSharding: dbName}));
const shard0DB = st.shard0.getDB(dbName);
const shard1DB = st.shard1.getDB(dbName);
@@ -136,7 +137,6 @@ function runQuery(
// Shard key on just the time field.
(function timeShardKey() {
- assert.commandWorked(sDB.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
// Shard time-series collection.
@@ -307,12 +307,11 @@ function runQuery(
expectCollScan: true,
});
- sDB.dropDatabase();
+ assert(coll.drop());
})();
// Shard key on the metadata field and time fields.
(function metaAndTimeShardKey() {
- assert.commandWorked(sDB.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
assert.commandWorked(sDB.adminCommand({
@@ -467,12 +466,11 @@ function runQuery(
expectedShards: [otherShard.shardName]
});
- sDB.dropDatabase();
+ assert(coll.drop());
})();
// Shard key on the metadata fields.
(function metaFieldShardKey() {
- assert.commandWorked(sDB.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
// Shard timeseries collection.
@@ -532,7 +530,7 @@ function runQuery(
expectedShards: [primaryShard.shardName, otherShard.shardName]
});
- sDB.dropDatabase();
+ assert(coll.drop());
})();
st.stop();
diff --git a/jstests/sharding/timeseries_shard_collection.js b/jstests/sharding/timeseries_shard_collection.js
index f0ce1d9ec24..bdb12d463c5 100644
--- a/jstests/sharding/timeseries_shard_collection.js
+++ b/jstests/sharding/timeseries_shard_collection.js
@@ -14,6 +14,7 @@ Random.setRandomSeed();
const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
const dbName = 'test';
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
const sDB = st.s.getDB(dbName);
const timeseries = {
timeField: 'time',
@@ -54,8 +55,6 @@ if (TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) {
// Simple shard key on the metadata field.
function metaShardKey(implicit) {
- assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
-
// Command should fail since the 'timeseries' specification does not match that existing
// collection.
if (!implicit) {
@@ -92,7 +91,7 @@ if (TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) {
assert.eq(1, counts[st.shard0.shardName]);
assert.eq(1, counts[st.shard1.shardName]);
- sDB.dropDatabase();
+ assert(sDB.ts.drop());
}
// Sharding an existing timeseries collection.
@@ -139,7 +138,7 @@ if (TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) {
assert.eq(1, counts[st.shard0.shardName]);
assert.eq(1, counts[st.shard1.shardName]);
- sDB.dropDatabase();
+ assert(sDB.ts.drop());
}
// Sharding an existing timeseries collection.
@@ -150,8 +149,6 @@ if (TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) {
} else {
(function timeseriesCollectionsCannotBeSharded() {
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-
assert.commandFailedWithCode(
st.s.adminCommand({shardCollection: 'test.ts', key: {meta: 1}, timeseries}), 5731502);
@@ -185,7 +182,7 @@ if (TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) {
st.s.adminCommand({shardCollection: 'test.system.buckets.ts', key: {meta: 1}}),
5731501);
- assert.commandWorked(sDB.dropDatabase());
+ assert(tsColl.drop());
})();
}
diff --git a/jstests/sharding/timeseries_sharding_admin_commands.js b/jstests/sharding/timeseries_sharding_admin_commands.js
index 493ae0ddf9e..0a0f0da2d0f 100644
--- a/jstests/sharding/timeseries_sharding_admin_commands.js
+++ b/jstests/sharding/timeseries_sharding_admin_commands.js
@@ -36,13 +36,13 @@ const bucketNss = `${dbName}.system.buckets.${collName}`;
const controlTimeField = `control.min.${timeField}`;
const numDocsInserted = 20;
const zone = 'Z';
+assert.commandWorked(mongo.s0.adminCommand({enableSharding: dbName}));
assert.commandWorked(mongo.s0.adminCommand({addShardToZone: mongo.shard0.shardName, zone: zone}));
function createTimeSeriesColl({index, shardKey}) {
const db = mongo.s0.getDB(dbName);
assert.commandWorked(
db.createCollection(collName, {timeseries: {timeField: timeField, metaField: metaField}}));
- assert.commandWorked(mongo.s0.adminCommand({enableSharding: dbName}));
assert.commandWorked(db[collName].createIndex(index));
for (let i = 0; i < numDocsInserted; i++) {
assert.commandWorked(db[collName].insert({[metaField]: i, [timeField]: ISODate()}));
@@ -58,10 +58,8 @@ function createTimeSeriesColl({index, shardKey}) {
function dropTimeSeriesColl() {
const db = mongo.s0.getDB(dbName);
- // Time-series collection dropping is not implemented yet so we drop the database instead.
- // Call dropDatabase twice according to the documentation.
- db.dropDatabase();
- db.dropDatabase();
+ const coll = db.getCollection(collName);
+ assert(coll.drop());
}
// Check the zone range against the extended range saved in config.tags collection.
diff --git a/jstests/sharding/timeseries_time_value_rounding.js b/jstests/sharding/timeseries_time_value_rounding.js
index 27eae95bdea..19a6c2e92a2 100644
--- a/jstests/sharding/timeseries_time_value_rounding.js
+++ b/jstests/sharding/timeseries_time_value_rounding.js
@@ -38,6 +38,7 @@ if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) {
}
// Databases.
+assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
const mainDB = mongos.getDB(dbName);
// Helpers.
@@ -64,10 +65,6 @@ function getDocumentsFromShard(shard, id) {
}
function runTest() {
- mainDB.dropDatabase();
-
- assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
-
// Create and shard timeseries collection.
const shardKey = {[timeField]: 1};
assert.commandWorked(mongos.adminCommand({
@@ -136,6 +133,8 @@ function runTest() {
const result = coll.find({[timeField]: document[timeField]}).toArray();
assert.docEq([document], result);
}
+
+ assert(coll.drop());
}
try {
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index b3fffc0e0d4..61846605fbe 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -282,11 +282,11 @@ Status _dropCollection(OperationContext* opCtx,
}
Status dropCollection(OperationContext* opCtx,
- const NamespaceString& collectionName,
+ const NamespaceString& nss,
DropReply* reply,
DropCollectionSystemCollectionMode systemCollectionMode) {
if (!serverGlobalParams.quiet.load()) {
- LOGV2(518070, "CMD: drop", logAttrs(collectionName));
+ LOGV2(518070, "CMD: drop", logAttrs(nss));
}
if (MONGO_unlikely(hangDropCollectionBeforeLockAcquisition.shouldFail())) {
@@ -294,6 +294,11 @@ Status dropCollection(OperationContext* opCtx,
hangDropCollectionBeforeLockAcquisition.pauseWhileSet();
}
+ // We rewrite a drop of the time-series buckets collection into a drop of the
+ // time-series view collection. This ensures that such a drop deletes both collections.
+ const auto collectionName =
+ nss.isTimeseriesBucketsCollection() ? nss.getTimeseriesViewNamespace() : nss;
+
try {
return writeConflictRetry(opCtx, "drop", collectionName.ns(), [&] {
AutoGetDb autoDb(opCtx, collectionName.db(), MODE_IX);
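
The rewrite above means a drop aimed directly at the buckets namespace now takes the view down with it, which is also why timeseries_bucket_drop.js and timeseries_list_collections_missing_buckets_collection.js, both of which exercised orphaned-view states that are no longer reachable, are deleted earlier in this patch. A minimal shell sketch of the new behavior, with hypothetical names:

const tsDB = db.getSiblingDB('tsDemo');
assert.commandWorked(tsDB.createCollection('temps', {timeseries: {timeField: 'ts'}}));
// The drop of the buckets collection is rewritten into a drop of the view,
// so both namespaces disappear together.
assert.commandWorked(tsDB.runCommand({drop: 'system.buckets.temps'}));
assert.eq([], tsDB.getCollectionNames().filter(
    name => name === 'temps' || name === 'system.buckets.temps'));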
diff --git a/src/mongo/db/s/shardsvr_drop_collection_command.cpp b/src/mongo/db/s/shardsvr_drop_collection_command.cpp
index 0e5f5146adf..a2ca5259363 100644
--- a/src/mongo/db/s/shardsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/shardsvr_drop_collection_command.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
+#include "mongo/s/chunk_manager_targeter.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
@@ -85,14 +86,20 @@ public:
// The collection is not sharded or doesn't exist.
}
+ // If 'ns()' is a sharded time-series view collection, 'targetNs' is the namespace
+ // of the time-series buckets collection. For all other collections, 'targetNs' is
+ // equal to 'ns()'.
+ const auto targeter = ChunkManagerTargeter(opCtx, ns());
+ const auto targetNs = targeter.getNS();
+
// Since this operation is not directly writing locally we need to force its db
// profile level increase in order to be logged in "<db>.system.profile"
CurOp::get(opCtx)->raiseDbProfileLevel(
- CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(ns().db()));
+ CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(targetNs.db()));
auto coordinatorDoc = DropCollectionCoordinatorDocument();
coordinatorDoc.setShardingDDLCoordinatorMetadata(
- {{ns(), DDLCoordinatorTypeEnum::kDropCollection}});
+ {{targetNs, DDLCoordinatorTypeEnum::kDropCollection}});
auto service = ShardingDDLCoordinatorService::getService(opCtx);
auto dropCollCoordinator = checked_pointer_cast<DropCollectionCoordinator>(
service->getOrCreateInstance(opCtx, coordinatorDoc.toBSON()));
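
With the ChunkManagerTargeter translation in place, the drop coordinator is keyed on the buckets namespace, so a drop issued against the time-series view and one issued against the buckets collection converge on the same DDL coordinator instance. A rough illustration of the resulting invariant, assuming 'st' is a ShardingTest with a sharded time-series collection 'demo.metrics' (names hypothetical):

// Dropping via the view namespace...
assert.commandWorked(st.s.getDB('demo').runCommand({drop: 'metrics'}));
// ...leaves no trace of the buckets collection in the sharding catalog.
assert.eq(0,
          st.s.getDB('config').collections.countDocuments(
              {_id: 'demo.system.buckets.metrics'}));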