author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2018-08-21 10:50:04 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2019-02-27 10:51:15 -0500
commit     b049257fbd1d215388cffaf7544f6741dbce5b45
tree       90e385d3c5af33c64f2dc8b590447c606c58f611 /src/mongo
parent     6df5463e57c068a653f27fa44daaa619453d602f
SERVER-39495 Move ShardingState::needCollectionMetadata under OperationShardingState
ShardingState logically contains answers to questions about whether the current instance is a node in a sharded cluster, whereas OperationShardingState is responsible for the 'shardedness' of individual commands.
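As a quick illustration of the API move, here is a minimal sketch of the call-site change (the pattern comes from the get_executor.cpp hunks below; the wrapper function itself is hypothetical):

    // Before this commit, callers asked the node-wide ShardingState and had to
    // pass a namespace:
    //   if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, nss.ns())) ...
    // After, they ask whether this particular operation arrived with a shard
    // version attached, via the OperationContext decoration:
    #include "mongo/db/s/operation_sharding_state.h"

    void setShardFilterIfNeeded(OperationContext* opCtx, size_t& plannerOptions) {
        if (OperationShardingState::isOperationVersioned(opCtx)) {
            plannerOptions |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
        }
    }

Note that the new check drops the old ShardingState::enabled() guard, presumably because an operation running on a node that is not a shard will not carry a shard version in the first place.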
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/catalog/capped_utils.cpp          | 28
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp     |  3
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp     |  6
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp | 27
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp             |  6
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp           |  9
-rw-r--r--  src/mongo/db/query/find.cpp                    |  4
-rw-r--r--  src/mongo/db/query/get_executor.cpp            | 15
-rw-r--r--  src/mongo/db/query/get_executor.h              |  2
-rw-r--r--  src/mongo/db/query/query_planner_params.h      |  9
-rw-r--r--  src/mongo/db/s/cleanup_orphaned_cmd.cpp        |  8
-rw-r--r--  src/mongo/db/s/operation_sharding_state.cpp    | 11
-rw-r--r--  src/mongo/db/s/operation_sharding_state.h      |  7
-rw-r--r--  src/mongo/db/s/sharding_state.cpp              | 15
-rw-r--r--  src/mongo/db/s/sharding_state.h                |  6
-rw-r--r--  src/mongo/s/commands/commands_public.cpp       |  3
16 files changed, 78 insertions, 81 deletions
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 9eb7017130a..5c7b0fe4a60 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -53,7 +53,9 @@
#include "mongo/db/service_context.h"
#include "mongo/util/scopeguard.h"
-mongo::Status mongo::emptyCapped(OperationContext* opCtx, const NamespaceString& collectionName) {
+namespace mongo {
+
+Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionName) {
AutoGetDb autoDb(opCtx, collectionName.db(), MODE_X);
bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
@@ -101,20 +103,20 @@ mongo::Status mongo::emptyCapped(OperationContext* opCtx, const NamespaceString&
return status;
}
- getGlobalServiceContext()->getOpObserver()->onEmptyCapped(
- opCtx, collection->ns(), collection->uuid());
+ const auto service = opCtx->getServiceContext();
+ service->getOpObserver()->onEmptyCapped(opCtx, collection->ns(), collection->uuid());
wuow.commit();
return Status::OK();
}
-void mongo::cloneCollectionAsCapped(OperationContext* opCtx,
- Database* db,
- const std::string& shortFrom,
- const std::string& shortTo,
- long long size,
- bool temp) {
+void cloneCollectionAsCapped(OperationContext* opCtx,
+ Database* db,
+ const std::string& shortFrom,
+ const std::string& shortTo,
+ long long size,
+ bool temp) {
NamespaceString fromNss(db->name(), shortFrom);
NamespaceString toNss(db->name(), shortTo);
@@ -235,9 +237,9 @@ void mongo::cloneCollectionAsCapped(OperationContext* opCtx,
MONGO_UNREACHABLE;
}
-void mongo::convertToCapped(OperationContext* opCtx,
- const NamespaceString& collectionName,
- long long size) {
+void convertToCapped(OperationContext* opCtx,
+ const NamespaceString& collectionName,
+ long long size) {
StringData dbname = collectionName.db();
StringData shortSource = collectionName.coll();
@@ -276,3 +278,5 @@ void mongo::convertToCapped(OperationContext* opCtx,
options.stayTemp = false;
uassertStatusOK(renameCollection(opCtx, longTmpName, collectionName, options));
}
+
+} // namespace mongo
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index 1ca0f641f05..44605b2bea7 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -50,6 +50,7 @@
namespace mongo {
namespace {
+
/**
* Shared part of the implementation of the createCollection versions for replicated and regular
* collection creation.
@@ -98,8 +99,10 @@ Status createCollection(OperationContext* opCtx,
return writeConflictRetry(opCtx, "create", nss.ns(), [&] {
Lock::DBLock dbXLock(opCtx, nss.db(), MODE_X);
+
const bool shardVersionCheck = true;
OldClientContext ctx(opCtx, nss.ns(), shardVersionCheck);
+
if (opCtx->writesAreReplicated() &&
!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)) {
return Status(ErrorCodes::NotMaster,
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 68550a06b4e..b96df4144c7 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -59,9 +59,10 @@
#include "mongo/util/scopeguard.h"
namespace mongo {
-MONGO_FAIL_POINT_DEFINE(writeConfilctInRenameCollCopyToTmp);
namespace {
+MONGO_FAIL_POINT_DEFINE(writeConfilctInRenameCollCopyToTmp);
+
NamespaceString getNamespaceFromUUID(OperationContext* opCtx, const UUID& uuid) {
Collection* source = UUIDCatalog::get(opCtx).lookupCollectionByUUID(uuid);
return source ? source->ns() : NamespaceString();
@@ -141,7 +142,8 @@ Status renameCollectionCommon(OperationContext* opCtx,
// We stay in source context the whole time. This is mostly to set the CurOp namespace.
boost::optional<OldClientContext> ctx;
- ctx.emplace(opCtx, source.ns());
+ const bool shardVersionCheck = true;
+ ctx.emplace(opCtx, source.ns(), shardVersionCheck);
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
bool userInitiatedWritesAndNotPrimary =
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 52e7bbfe376..a57b5d66c88 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -43,10 +43,7 @@
#include "mongo/db/service_context.h"
namespace mongo {
-
-using std::unique_ptr;
-using std::string;
-using std::stringstream;
+namespace {
class CmdCloneCollectionAsCapped : public ErrmsgCommandDeprecated {
public:
@@ -84,9 +81,9 @@ public:
out->push_back(Privilege(ResourcePattern::forExactNamespace(nss), targetActions));
}
bool errmsgRun(OperationContext* opCtx,
- const string& dbname,
+ const std::string& dbname,
const BSONObj& jsobj,
- string& errmsg,
+ std::string& errmsg,
BSONObjBuilder& result) {
const auto fromElt = jsobj["cloneCollectionAsCapped"];
const auto toElt = jsobj["toCollection"];
@@ -135,13 +132,13 @@ public:
cloneCollectionAsCapped(opCtx, db, from.toString(), to.toString(), size, temp);
return true;
}
+
} cmdCloneCollectionAsCapped;
-/* jan2010:
- Converts the given collection to a capped collection w/ the specified size.
- This command is not highly used, and is not currently supported with sharded
- environments.
- */
+/**
+ * Converts the given collection to a capped collection w/ the specified size. This command is not
+ * highly used, and is not currently supported with sharded environments.
+ */
class CmdConvertToCapped : public ErrmsgCommandDeprecated {
public:
CmdConvertToCapped() : ErrmsgCommandDeprecated("convertToCapped") {}
@@ -163,9 +160,9 @@ public:
}
bool errmsgRun(OperationContext* opCtx,
- const string& dbname,
+ const std::string& dbname,
const BSONObj& jsobj,
- string& errmsg,
+ std::string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, jsobj));
long long size = jsobj.getField("size").safeNumberLong();
@@ -180,4 +177,6 @@ public:
}
} cmdConvertToCapped;
-}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 4ea78429b54..b0bdcba8d65 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -212,8 +212,8 @@ public:
// execution tree with an EOFStage.
Collection* const collection = ctx->getCollection();
- // We have a parsed query. Time to get the execution plan for it.
- auto exec = uassertStatusOK(getExecutorFind(opCtx, collection, nss, std::move(cq)));
+ // Get the execution plan for the query.
+ auto exec = uassertStatusOK(getExecutorFind(opCtx, collection, std::move(cq)));
auto bodyBuilder = result->getBodyBuilder();
// Got the execution tree. Explain it.
@@ -395,7 +395,7 @@ public:
}
// Get the execution plan for the query.
- auto exec = uassertStatusOK(getExecutorFind(opCtx, collection, nss, std::move(cq)));
+ auto exec = uassertStatusOK(getExecutorFind(opCtx, collection, std::move(cq)));
{
stdx::lock_guard<Client> lk(*opCtx->getClient());
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 49f6475bd19..72d556508ea 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -70,7 +70,7 @@
#include "mongo/db/query/plan_summary_stats.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/s/collection_sharding_state.h"
-#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/service_context.h"
#include "mongo/db/stats/top.h"
#include "mongo/db/storage/record_store.h"
@@ -124,9 +124,10 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
std::unique_ptr<PlanStage> root = std::make_unique<MultiIteratorStage>(opCtx, ws.get(), coll);
static_cast<MultiIteratorStage*>(root.get())->addIterator(std::move(rsRandCursor));
- // Determine whether this collection is sharded. If so, retrieve its sharding metadata.
+ // If the incoming operation is sharded, use the CSS to infer the filtering metadata for the
+ // collection; otherwise, treat it as unsharded.
boost::optional<ScopedCollectionMetadata> shardMetadata =
- (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, coll->ns().ns())
+ (OperationShardingState::isOperationVersioned(opCtx)
? CollectionShardingState::get(opCtx, coll->ns())->getMetadataForOperation(opCtx)
: boost::optional<ScopedCollectionMetadata>{});
@@ -235,7 +236,7 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> attemptToGetExe
}
}
- return getExecutorFind(opCtx, collection, nss, std::move(cq.getValue()), plannerOpts);
+ return getExecutorFind(opCtx, collection, std::move(cq.getValue()), plannerOpts);
}
BSONObj removeSortKeyMetaProjection(BSONObj projectionObj) {
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 1a92ace2aa2..0f3add4eb20 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -607,8 +607,8 @@ std::string runQuery(OperationContext* opCtx,
repl::ReplicationCoordinator::get(opCtx)->checkCanServeReadsFor(opCtx, nss, slaveOK));
}
- // We have a parsed query. Time to get the execution plan for it.
- auto exec = uassertStatusOK(getExecutorLegacyFind(opCtx, collection, nss, std::move(cq)));
+ // Get the execution plan for the query.
+ auto exec = uassertStatusOK(getExecutorLegacyFind(opCtx, collection, std::move(cq)));
const QueryRequest& qr = exec->getCanonicalQuery()->getQueryRequest();
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 69500ef939e..12f62aea456 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -77,9 +77,8 @@
#include "mongo/db/query/stage_builder.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/collection_sharding_state.h"
-#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/server_options.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/service_context.h"
@@ -705,7 +704,6 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getOplogStartHack(
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> _getExecutorFind(
OperationContext* opCtx,
Collection* collection,
- const NamespaceString& nss,
unique_ptr<CanonicalQuery> canonicalQuery,
PlanExecutor::YieldPolicy yieldPolicy,
size_t plannerOptions) {
@@ -713,7 +711,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> _getExecutorFind(
return getOplogStartHack(opCtx, collection, std::move(canonicalQuery), plannerOptions);
}
- if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, nss.ns())) {
+ if (OperationShardingState::isOperationVersioned(opCtx)) {
plannerOptions |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
return getExecutor(opCtx, collection, std::move(canonicalQuery), yieldPolicy, plannerOptions);
@@ -724,25 +722,22 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> _getExecutorFind(
StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorFind(
OperationContext* opCtx,
Collection* collection,
- const NamespaceString& nss,
unique_ptr<CanonicalQuery> canonicalQuery,
size_t plannerOptions) {
- auto readConcernArgs = repl::ReadConcernArgs::get(opCtx);
+ const auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx);
auto yieldPolicy = readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern
? PlanExecutor::INTERRUPT_ONLY
: PlanExecutor::YIELD_AUTO;
return _getExecutorFind(
- opCtx, collection, nss, std::move(canonicalQuery), yieldPolicy, plannerOptions);
+ opCtx, collection, std::move(canonicalQuery), yieldPolicy, plannerOptions);
}
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorLegacyFind(
OperationContext* opCtx,
Collection* collection,
- const NamespaceString& nss,
std::unique_ptr<CanonicalQuery> canonicalQuery) {
return _getExecutorFind(opCtx,
collection,
- nss,
std::move(canonicalQuery),
PlanExecutor::YIELD_AUTO,
QueryPlannerParams::DEFAULT);
@@ -1321,7 +1316,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
}
size_t plannerOptions = QueryPlannerParams::IS_COUNT;
- if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, request.getNs().ns())) {
+ if (OperationShardingState::isOperationVersioned(opCtx)) {
plannerOptions |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index 97fd2df0c1b..4eecc309d99 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -119,7 +119,6 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutor(
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorFind(
OperationContext* opCtx,
Collection* collection,
- const NamespaceString& nss,
std::unique_ptr<CanonicalQuery> canonicalQuery,
size_t plannerOptions = QueryPlannerParams::DEFAULT);
@@ -129,7 +128,6 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorFind
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorLegacyFind(
OperationContext* opCtx,
Collection* collection,
- const NamespaceString& nss,
std::unique_ptr<CanonicalQuery> canonicalQuery);
/**
diff --git a/src/mongo/db/query/query_planner_params.h b/src/mongo/db/query/query_planner_params.h
index 2ef20c6488a..9229dbb06ce 100644
--- a/src/mongo/db/query/query_planner_params.h
+++ b/src/mongo/db/query/query_planner_params.h
@@ -58,10 +58,11 @@ struct QueryPlannerParams {
// Set this if you're running on a sharded cluster. We'll add a "drop all docs that
// shouldn't be on this shard" stage before projection.
//
- // In order to set this, you must check
- // ShardingState::needCollectionMetadata(current_namespace) in the same lock that you use to
- // build the query executor. You must also wrap the PlanExecutor in a ClientCursor within
- // the same lock. See the comment on ShardFilterStage for details.
+ // In order to set this, you must check OperationShardingState::isOperationVersioned() in
+ // the same lock that you use to build the query executor. You must also wrap the
+ // PlanExecutor in a ClientCursor within the same lock.
+ //
+ // See the comment on ShardFilterStage for details.
INCLUDE_SHARD_FILTER = 1 << 2,
// Set this if you don't want any plans with a blocking sort stage. All sorts must be
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index b959633557a..28eab0d23bb 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -79,13 +79,7 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
{
AutoGetCollection autoColl(opCtx, ns, MODE_IX);
auto* const css = CollectionShardingRuntime::get(opCtx, ns);
- const auto optMetadata = css->getCurrentMetadataIfKnown();
- uassert(ErrorCodes::ConflictingOperationInProgress,
- str::stream() << "Unable to establish sharding status for collection " << ns.ns(),
- optMetadata);
-
- const auto& metadata = *optMetadata;
-
+ const auto metadata = css->getCurrentMetadata();
if (!metadata->isSharded()) {
LOG(0) << "skipping orphaned data cleanup for " << ns.ns()
<< ", collection is not sharded";
diff --git a/src/mongo/db/s/operation_sharding_state.cpp b/src/mongo/db/s/operation_sharding_state.cpp
index 49ed69f6107..3284b5e825c 100644
--- a/src/mongo/db/s/operation_sharding_state.cpp
+++ b/src/mongo/db/s/operation_sharding_state.cpp
@@ -32,9 +32,9 @@
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/s/sharded_connection_info.h"
namespace mongo {
-
namespace {
const OperationContext::Decoration<OperationShardingState> shardingMetadataDecoration =
@@ -48,6 +48,7 @@ const Milliseconds kMaxWaitForMovePrimaryCriticalSection = Minutes(5);
// The name of the field in which the client attaches its database version.
constexpr auto kDbVersionField = "databaseVersion"_sd;
+
} // namespace
OperationShardingState::OperationShardingState() = default;
@@ -60,6 +61,14 @@ OperationShardingState& OperationShardingState::get(OperationContext* opCtx) {
return shardingMetadataDecoration(opCtx);
}
+bool OperationShardingState::isOperationVersioned(OperationContext* opCtx) {
+ const auto client = opCtx->getClient();
+
+ // Shard version information received from mongos may either be attached to the Client or
+ // directly to the OperationContext.
+ return ShardedConnectionInfo::get(client, false) || get(opCtx).hasShardVersion();
+}
+
void OperationShardingState::setAllowImplicitCollectionCreation(
const BSONElement& allowImplicitCollectionCreationElem) {
if (!allowImplicitCollectionCreationElem.eoo()) {
diff --git a/src/mongo/db/s/operation_sharding_state.h b/src/mongo/db/s/operation_sharding_state.h
index 03c71c86fba..4358d6c9a21 100644
--- a/src/mongo/db/s/operation_sharding_state.h
+++ b/src/mongo/db/s/operation_sharding_state.h
@@ -63,6 +63,13 @@ public:
static OperationShardingState& get(OperationContext* opCtx);
/**
+ * Returns true if the current operation was sent by the caller with shard version
+ * information attached, meaning that it must perform shard version checking and orphan
+ * filtering.
+ */
+ static bool isOperationVersioned(OperationContext* opCtx);
+
+ /**
* Requests on a sharded collection that are broadcast without a shardVersion should not cause
* the collection to be created on a shard that does not know about the collection already,
* since the collection options will not be propagated. Such requests specify to disallow
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 0148c843dac..b9c7e634a53 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -33,8 +33,7 @@
#include "mongo/db/s/sharding_state.h"
-#include "mongo/db/s/operation_sharding_state.h"
-#include "mongo/db/s/sharded_connection_info.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/server_options.h"
#include "mongo/util/log.h"
@@ -116,18 +115,6 @@ OID ShardingState::clusterId() {
return _clusterId;
}
-bool ShardingState::needCollectionMetadata(OperationContext* opCtx, const std::string& ns) {
- if (!enabled())
- return false;
-
- Client* client = opCtx->getClient();
-
- // Shard version information received from mongos may either by attached to the Client or
- // directly to the OperationContext.
- return ShardedConnectionInfo::get(client, false) ||
- OperationShardingState::get(opCtx).hasShardVersion();
-}
-
void ShardingState::clearForTests() {
_initializationState.store(static_cast<uint32_t>(InitializationState::kNew));
}
diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h
index ca71f376e2f..1755d33293e 100644
--- a/src/mongo/db/s/sharding_state.h
+++ b/src/mongo/db/s/sharding_state.h
@@ -105,12 +105,6 @@ public:
OID clusterId();
/**
- * Returns true if this node is a shard and if the currently runnint operation must engage the
- * sharding subsystem (i.e., perform version checking, orphan filtering, etc).
- */
- bool needCollectionMetadata(OperationContext* opCtx, const std::string& ns);
-
- /**
* For testing only. This is a workaround for the fact that it is not possible to get a clean
* ServiceContext in between test executions. Because of this, tests which require that they get
* started with a clean (uninitialized) ShardingState must invoke this in their tearDown method.
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 2e4dfba1970..1f89e4e3f59 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -265,6 +265,9 @@ public:
const NamespaceString nss(parseNs(dbName, cmdObj));
const auto routingInfo =
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
+ uassert(ErrorCodes::IllegalOperation,
+ "You can't convertToCapped a sharded collection",
+ !routingInfo.cm());
// convertToCapped creates a temp collection and renames it at the end. It will require
// special handling for create collection.