author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2022-09-13 09:23:07 +0200
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2022-09-15 08:12:08 +0000
commit     fa920335ed6a41efa417e5c41940cd28a4a36829 (patch)
tree       eb7454e4661ce8e3d38c08a12b2609eea926edbb /src/mongo/db/s
parent     d8901a2835d3f464d394631d85dc7aa9493fc095 (diff)
SERVER-69523 Only allow METADATA and MUTEX lock acquisitions with OpContext
This makes mutex acquisitions interruptible and allows the lock manager to inspect the lock acquisitions made so far.
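
The shape of the change, repeated at nearly every call site in the diff below, is a one-argument swap: the raw Locker obtained via opCtx->lockState() is no longer passed (either alone or alongside the OperationContext), and the OperationContext goes in directly. A minimal before/after sketch (the comments paraphrase the commit message; _kChunkOpLock stands in for any of the resource mutexes touched here):

    // Before: constructed from the raw Locker. The blocked acquisition could
    // not observe interruption and was invisible at the OperationContext level.
    Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
    // Some sites passed both the OperationContext and the Locker:
    Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);

    // After: constructed from the OperationContext, making the acquisition
    // interruptible and letting the lock manager inspect it.
    Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);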
Diffstat (limited to 'src/mongo/db/s')
 src/mongo/db/s/balancer_stats_registry.cpp                               |  4
 src/mongo/db/s/collection_sharding_runtime.h                             |  1
 src/mongo/db/s/config/sharding_catalog_manager.cpp                       |  2
 src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp      | 24
 src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp | 12
 src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp   |  2
 src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp      |  6
 src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp       |  8
 src/mongo/db/s/database_sharding_state.cpp                               |  2
 src/mongo/db/s/persistent_task_queue.h                                   |  7
 src/mongo/db/s/persistent_task_queue_test.cpp                            | 12
 src/mongo/db/s/range_deletion_util.h                                     |  1
 src/mongo/db/s/resharding/resharding_util.cpp                            |  4
 src/mongo/db/s/resharding/resharding_util.h                              |  3
 14 files changed, 39 insertions(+), 49 deletions(-)
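
Why interruptibility matters is easiest to see at the blocking sites: PersistentTaskQueue<T>::peek below holds one of these resource mutexes while parked on a condition variable. A sketch of that pattern, lifted from persistent_task_queue.h in the hunks below (_mutex, _cv, _count, and _closed are that class's members):

    Lock::ExclusiveLock lock(opCtx->lockState(), _mutex);
    // The condition-variable wait is already interrupt-aware; with this change
    // the mutex acquisitions themselves can be interrupted the same way,
    // instead of blocking uninterruptibly inside the Locker.
    opCtx->waitForConditionOrInterrupt(_cv, lock, [this] { return _count > 0 || _closed; });
    uassert(ErrorCodes::Interrupted, "Task queue was closed", !_closed);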
diff --git a/src/mongo/db/s/balancer_stats_registry.cpp b/src/mongo/db/s/balancer_stats_registry.cpp
index c2308b26d2d..bfd789acb52 100644
--- a/src/mongo/db/s/balancer_stats_registry.cpp
+++ b/src/mongo/db/s/balancer_stats_registry.cpp
@@ -27,7 +27,6 @@
* it in the license file.
*/
-
#include "mongo/db/s/balancer_stats_registry.h"
#include "mongo/db/dbdirectclient.h"
@@ -41,7 +40,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace {
@@ -73,7 +71,7 @@ ScopedRangeDeleterLock::ScopedRangeDeleterLock(OperationContext* opCtx, const UU
: _configLock(opCtx, DatabaseName(boost::none, NamespaceString::kConfigDb), MODE_IX),
_rangeDeletionLock(opCtx, NamespaceString::kRangeDeletionNamespace, MODE_IX),
_collectionUuidLock(Lock::ResourceLock(
- opCtx->lockState(),
+ opCtx,
ResourceId(RESOURCE_MUTEX, "RangeDeleterCollLock::" + collectionUuid.toString()),
MODE_X)) {}
diff --git a/src/mongo/db/s/collection_sharding_runtime.h b/src/mongo/db/s/collection_sharding_runtime.h
index 47ed17a914e..94db822cf92 100644
--- a/src/mongo/db/s/collection_sharding_runtime.h
+++ b/src/mongo/db/s/collection_sharding_runtime.h
@@ -30,7 +30,6 @@
#pragma once
#include "mongo/bson/bsonobj.h"
-#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/metadata_manager.h"
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index d85ab9fc272..a717b687596 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -498,7 +498,7 @@ Status ShardingCatalogManager::setFeatureCompatibilityVersionOnShards(OperationC
const BSONObj& cmdObj) {
// No shards should be added until we have forwarded featureCompatibilityVersion to all shards.
- Lock::SharedLock lk(opCtx->lockState(), _kShardMembershipLock);
+ Lock::SharedLock lk(opCtx, _kShardMembershipLock);
// We do a direct read of the shards collection with local readConcern so no shards are missed,
// but don't go through the ShardRegistry to prevent it from caching data that may be rolled
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index b7ceee4717f..23083a77590 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -625,7 +625,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkSplit(
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto findCollResponse = uassertStatusOK(
@@ -848,7 +848,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunksMerge(
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
// 1. Retrieve the initial collection version info to build up the logging info.
auto collVersion = uassertStatusOK(getCollectionVersion(opCtx, nss));
@@ -997,7 +997,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE();
// Must hold the shard lock until the entire commit finishes to serialize with removeShard.
- Lock::SharedLock shardLock(opCtx->lockState(), _kShardMembershipLock);
+ Lock::SharedLock shardLock(opCtx, _kShardMembershipLock);
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto shardResult = uassertStatusOK(
@@ -1019,7 +1019,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
auto findCollResponse = uassertStatusOK(
configShard->exhaustiveFindOnConfig(opCtx,
@@ -1290,7 +1290,7 @@ void ShardingCatalogManager::upgradeChunksHistory(OperationContext* opCtx,
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations.
- Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
const auto coll = [&] {
@@ -1426,7 +1426,7 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx,
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto findCollResponse = uassertStatusOK(
@@ -1547,7 +1547,7 @@ void ShardingCatalogManager::ensureChunkVersionIsGreaterThan(OperationContext* o
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
ScopeGuard earlyReturnBeforeDoingWriteGuard([&] {
// Ensure waiting for writeConcern of the data read.
@@ -1748,7 +1748,7 @@ void ShardingCatalogManager::bumpMultipleCollectionVersionsAndChangeMetadataInTx
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
withTransaction(
opCtx,
@@ -1792,7 +1792,7 @@ void ShardingCatalogManager::splitOrMarkJumbo(OperationContext* opCtx,
// means that a subsequent incremental refresh will not see it. However, it is being
// marked in memory through the call to 'markAsJumbo' above so subsequent balancer
// iterations will not consider it for migration.
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
const auto findCollResponse = uassertStatusOK(configShard->exhaustiveFindOnConfig(
opCtx,
@@ -1857,7 +1857,7 @@ void ShardingCatalogManager::setAllowMigrationsAndBumpOneChunk(
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
const auto cm = uassertStatusOK(
Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
@@ -1934,7 +1934,7 @@ void ShardingCatalogManager::setChunkEstimatedSize(OperationContext* opCtx,
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
const auto chunkQuery = BSON(ChunkType::collectionUUID()
<< chunk.getCollectionUUID() << ChunkType::min(chunk.getMin())
@@ -1959,7 +1959,7 @@ void ShardingCatalogManager::setChunkEstimatedSize(OperationContext* opCtx,
bool ShardingCatalogManager::clearChunkEstimatedSize(OperationContext* opCtx, const UUID& uuid) {
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
const auto query = BSON(ChunkType::collectionUUID() << uuid);
const auto update = BSON("$unset" << BSON(ChunkType::estimatedSizeBytes() << ""));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 5624b5dec37..19908f5d068 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -297,8 +297,8 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx,
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock chunkLk(opCtx, opCtx->lockState(), _kChunkOpLock);
- Lock::ExclusiveLock zoneLk(opCtx, opCtx->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock chunkLk(opCtx, _kChunkOpLock);
+ Lock::ExclusiveLock zoneLk(opCtx, _kZoneOpLock);
struct RefineTimers {
Timer executionTimer;
@@ -546,7 +546,7 @@ void ShardingCatalogManager::configureCollectionBalancing(
{
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
withTransaction(opCtx,
CollectionType::ConfigNS,
@@ -592,7 +592,7 @@ void ShardingCatalogManager::applyLegacyConfigurationToSessionsCollection(Operat
auto updateStmt = BSON("$unset" << BSON(CollectionType::kMaxChunkSizeBytesFieldName << 0));
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and
// migrations
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kChunkOpLock);
withTransaction(opCtx,
CollectionType::ConfigNS,
@@ -641,8 +641,8 @@ void ShardingCatalogManager::renameShardedMetadata(
boost::optional<CollectionType> optFromCollType) {
// Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate
// strictly monotonously increasing collection versions
- Lock::ExclusiveLock chunkLk(opCtx, opCtx->lockState(), _kChunkOpLock);
- Lock::ExclusiveLock zoneLk(opCtx, opCtx->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock chunkLk(opCtx, _kChunkOpLock);
+ Lock::ExclusiveLock zoneLk(opCtx, _kZoneOpLock);
std::string logMsg = str::stream() << from << " to " << to;
if (optFromCollType) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index c3bb155e702..95328a9b248 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -251,7 +251,7 @@ void ShardingCatalogManager::commitMovePrimary(OperationContext* opCtx,
const DatabaseVersion& expectedDbVersion,
const ShardId& toShard) {
// Hold the shard lock until the entire commit finishes to serialize with removeShard.
- Lock::SharedLock shardLock(opCtx->lockState(), _kShardMembershipLock);
+ Lock::SharedLock shardLock(opCtx, _kShardMembershipLock);
const auto updateOp = [&] {
const auto query = [&] {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 355c0b5520d..bd170413ab5 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -585,7 +585,7 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
// Only one addShard operation can be in progress at a time.
- Lock::ExclusiveLock lk(opCtx->lockState(), _kShardMembershipLock);
+ Lock::ExclusiveLock lk(opCtx, _kShardMembershipLock);
// Check if this shard has already been added (can happen in the case of a retry after a network
// error, for example) and thus this addShard request should be considered a no-op.
@@ -804,7 +804,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- Lock::ExclusiveLock shardLock(opCtx->lockState(), _kShardMembershipLock);
+ Lock::ExclusiveLock shardLock(opCtx, _kShardMembershipLock);
auto findShardResponse = uassertStatusOK(
configShard->exhaustiveFindOnConfig(opCtx,
@@ -955,7 +955,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
}
Lock::SharedLock ShardingCatalogManager::enterStableTopologyRegion(OperationContext* opCtx) {
- return Lock::SharedLock(opCtx->lockState(), _kShardMembershipLock);
+ return Lock::SharedLock(opCtx, _kShardMembershipLock);
}
void ShardingCatalogManager::appendConnectionStats(executor::ConnectionPoolStats* stats) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index e8739c4ec62..049a73d2cf4 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -213,7 +213,7 @@ Status checkForTimeseriesTimeFieldKeyRange(const ChunkRange& range, StringData t
Status ShardingCatalogManager::addShardToZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) {
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kZoneOpLock);
auto updateStatus = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
opCtx,
@@ -238,7 +238,7 @@ Status ShardingCatalogManager::addShardToZone(OperationContext* opCtx,
Status ShardingCatalogManager::removeShardFromZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) {
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kZoneOpLock);
auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
const NamespaceString shardNS(NamespaceString::kConfigsvrShardsNamespace);
@@ -316,7 +316,7 @@ void ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kZoneOpLock);
auto zoneDoc = uassertStatusOK(configServer->exhaustiveFindOnConfig(
opCtx,
@@ -375,7 +375,7 @@ void ShardingCatalogManager::removeKeyRangeFromZone(OperationContext* opCtx,
const ChunkRange& givenRange) {
auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx, _kZoneOpLock);
ChunkRange actualRange = givenRange;
KeyPattern keyPattern;
diff --git a/src/mongo/db/s/database_sharding_state.cpp b/src/mongo/db/s/database_sharding_state.cpp
index da908b6ea38..541df998f01 100644
--- a/src/mongo/db/s/database_sharding_state.cpp
+++ b/src/mongo/db/s/database_sharding_state.cpp
@@ -77,7 +77,7 @@ const ServiceContext::Decoration<DatabaseShardingStateMap> DatabaseShardingState
DatabaseShardingState::ScopedDatabaseShardingState::ScopedDatabaseShardingState(
OperationContext* opCtx, const DatabaseName& dbName, LockMode mode)
- : _lock(nullptr, opCtx->lockState(), ResourceId(RESOURCE_MUTEX, dbName), mode),
+ : _lock(opCtx->lockState(), ResourceId(RESOURCE_MUTEX, dbName), mode),
_dss(DatabaseShardingStateMap::get(opCtx->getServiceContext()).getOrCreate(dbName)) {}
DatabaseShardingState::ScopedDatabaseShardingState::ScopedDatabaseShardingState(
diff --git a/src/mongo/db/s/persistent_task_queue.h b/src/mongo/db/s/persistent_task_queue.h
index 32a1b4210fc..c8d94b4bdf6 100644
--- a/src/mongo/db/s/persistent_task_queue.h
+++ b/src/mongo/db/s/persistent_task_queue.h
@@ -120,8 +120,6 @@ PersistentTaskQueue<T>::PersistentTaskQueue(OperationContext* opCtx, NamespaceSt
template <typename T>
TaskId PersistentTaskQueue<T>::push(OperationContext* opCtx, const T& t) {
- DBDirectClient dbClient(opCtx);
-
TaskId recordId = 0;
BSONObjBuilder builder;
@@ -134,6 +132,7 @@ TaskId PersistentTaskQueue<T>::push(OperationContext* opCtx, const T& t) {
builder.append("_id", recordId);
builder.append("task", t.toBSON());
+ DBDirectClient dbClient(opCtx);
auto response = write_ops::checkWriteErrors(
dbClient.insert(write_ops::InsertCommandRequest(_storageNss, {builder.obj()})));
_count++;
@@ -171,13 +170,12 @@ TaskId PersistentTaskQueue<T>::pop(OperationContext* opCtx) {
template <typename T>
const typename BlockingTaskQueue<T>::Record& PersistentTaskQueue<T>::peek(OperationContext* opCtx) {
- DBDirectClient client(opCtx);
-
Lock::ExclusiveLock lock(opCtx->lockState(), _mutex);
opCtx->waitForConditionOrInterrupt(_cv, lock, [this] { return _count > 0 || _closed; });
uassert(ErrorCodes::Interrupted, "Task queue was closed", !_closed);
+ DBDirectClient client(opCtx);
_currentFront = _loadNextRecord(client);
uassert(ErrorCodes::InternalError, "Task queue is in an invalid state.", _currentFront);
@@ -195,6 +193,7 @@ void PersistentTaskQueue<T>::close(OperationContext* opCtx) {
template <typename T>
size_t PersistentTaskQueue<T>::size(OperationContext* opCtx) const {
Lock::ExclusiveLock lock(opCtx->lockState(), _mutex);
+
return _count;
}
diff --git a/src/mongo/db/s/persistent_task_queue_test.cpp b/src/mongo/db/s/persistent_task_queue_test.cpp
index 17b7a568613..06e9ad5514d 100644
--- a/src/mongo/db/s/persistent_task_queue_test.cpp
+++ b/src/mongo/db/s/persistent_task_queue_test.cpp
@@ -241,8 +241,8 @@ TEST_F(PersistentTaskQueueTest, TestWakeupOnEmptyQueue) {
auto opCtx = operationContext();
PersistentTaskQueue<TestTask> q(opCtx, kNss);
- auto result = stdx::async(stdx::launch::async, [&q] {
- ThreadClient tc("RangeDeletionService", getGlobalServiceContext());
+ auto result = stdx::async(stdx::launch::async, [this, &q] {
+ ThreadClient tc("TestWakeupOnEmptyQueue", getServiceContext());
auto opCtx = tc->makeOperationContext();
stdx::this_thread::sleep_for(stdx::chrono::milliseconds(500));
@@ -261,8 +261,8 @@ TEST_F(PersistentTaskQueueTest, TestInterruptedWhileWaitingOnCV) {
unittest::Barrier barrier(2);
- auto result = stdx::async(stdx::launch::async, [opCtx, &q, &barrier] {
- ThreadClient tc("RangeDeletionService", getGlobalServiceContext());
+ auto result = stdx::async(stdx::launch::async, [this, &q, &barrier] {
+ ThreadClient tc("TestInterruptedWhileWaitingOnCV", getServiceContext());
auto opCtx = tc->makeOperationContext();
barrier.countDownAndWait();
@@ -285,8 +285,8 @@ TEST_F(PersistentTaskQueueTest, TestKilledOperationContextWhileWaitingOnCV) {
unittest::Barrier barrier(2);
- auto result = stdx::async(stdx::launch::async, [opCtx, &q, &barrier] {
- ThreadClient tc("RangeDeletionService", getGlobalServiceContext());
+ auto result = stdx::async(stdx::launch::async, [this, &q, &barrier] {
+ ThreadClient tc("TestKilledOperationContextWhileWaitingOnCV", getServiceContext());
{
stdx::lock_guard<Client> lk(*tc.get());
tc->setSystemOperationKillableByStepdown(lk);
diff --git a/src/mongo/db/s/range_deletion_util.h b/src/mongo/db/s/range_deletion_util.h
index d720a0ab83a..dd78e97db6f 100644
--- a/src/mongo/db/s/range_deletion_util.h
+++ b/src/mongo/db/s/range_deletion_util.h
@@ -31,7 +31,6 @@
#include <boost/optional.hpp>
#include <list>
-#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/range_deletion_task_gen.h"
#include "mongo/executor/task_executor.h"
diff --git a/src/mongo/db/s/resharding/resharding_util.cpp b/src/mongo/db/s/resharding/resharding_util.cpp
index f7d20c6d813..5e3721c89f2 100644
--- a/src/mongo/db/s/resharding/resharding_util.cpp
+++ b/src/mongo/db/s/resharding/resharding_util.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/resharding/resharding_util.h"
#include <fmt/format.h>
@@ -61,7 +58,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding
-
namespace mongo {
namespace resharding {
diff --git a/src/mongo/db/s/resharding/resharding_util.h b/src/mongo/db/s/resharding/resharding_util.h
index 7c021858971..1b55c70a755 100644
--- a/src/mongo/db/s/resharding/resharding_util.h
+++ b/src/mongo/db/s/resharding/resharding_util.h
@@ -43,7 +43,6 @@
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/resharding/common_types_gen.h"
-#include "mongo/util/str.h"
namespace mongo {
namespace resharding {
@@ -298,6 +297,7 @@ boost::optional<Milliseconds> estimateRemainingRecipientTime(bool applyingBegan,
int64_t oplogEntriesApplied,
int64_t oplogEntriesFetched,
Milliseconds timeSpentApplying);
+
/**
* Looks up the StateMachine by namespace of the collection being resharded. If it does not exist,
* returns boost::none.
@@ -322,5 +322,4 @@ std::vector<std::shared_ptr<Instance>> getReshardingStateMachines(OperationConte
}
} // namespace resharding
-
} // namespace mongo