Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/curop.cpp                                               60
-rw-r--r--  src/mongo/db/curop.h                                                 38
-rw-r--r--  src/mongo/db/repl/rollback_impl_test.cpp                              6
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp    53
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp  168
5 files changed, 201 insertions, 124 deletions
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 8db8695c99c..47fbb8fce82 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -220,6 +220,10 @@ public:
return retval;
}
+ const OperationContext* opCtx() {
+ return _opCtx;
+ }
+
private:
OperationContext* _opCtx = nullptr;
@@ -409,10 +413,49 @@ void CurOp::setNS_inlock(StringData ns) {
_ns = ns.toString();
}
-void CurOp::ensureStarted() {
- if (_start == 0) {
- _start = _tickSource->getTicks();
+TickSource::Tick CurOp::startTime() {
+ // It is legal for this function to get called multiple times, but all of those calls should be
+ // from the same thread, which should be the thread that "owns" this CurOp object. We define
+ // ownership here in terms of the Client object: each thread is associated with a Client
+ // (accessed by 'Client::getCurrent()'), which should be the same as the Client associated with
+ // this CurOp (by way of the OperationContext). Note that, if this is the "base" CurOp on the
+ // CurOpStack, then we don't yet have an initialized pointer to the OperationContext, and we
+ // cannot perform this check. That is a rare case, however.
+ invariant(!_stack->opCtx() || Client::getCurrent() == _stack->opCtx()->getClient());
+
+ auto start = _start.load();
+ if (start != 0) {
+ return start;
+ }
+
+ // The '_start' value is initialized to 0 and gets assigned on demand the first time it gets
+ // accessed. The above thread ownership requirement ensures that there will never be concurrent
+ // calls to this '_start' assignment, but we use compare-exchange anyway as an additional check
+ // that writes to '_start' never race.
+ TickSource::Tick unassignedStart = 0;
+ invariant(_start.compare_exchange_strong(unassignedStart, _tickSource->getTicks()));
+ return _start.load();
+}
+
+void CurOp::done() {
+ // As documented in the 'CurOp::startTime()' member function, it is legal for this function to
+ // be called multiple times, but all calls must be in the thread that "owns" this CurOp
+ // object.
+ invariant(!_stack->opCtx() || Client::getCurrent() == _stack->opCtx()->getClient());
+
+ _end = _tickSource->getTicks();
+}
+
+Microseconds CurOp::computeElapsedTimeTotal(TickSource::Tick startTime,
+ TickSource::Tick endTime) const {
+ invariant(startTime != 0);
+
+ if (!endTime) {
+ // This operation is ongoing.
+ return _tickSource->ticksTo<Microseconds>(_tickSource->getTicks() - startTime);
}
+
+ return _tickSource->ticksTo<Microseconds>(endTime - startTime);
}
void CurOp::enter_inlock(const char* ns, int dbProfileLevel) {
@@ -438,7 +481,7 @@ bool CurOp::completeAndLogOperation(OperationContext* opCtx,
}
// Obtain the total execution time of this operation.
- _end = _tickSource->getTicks();
+ done();
_debug.executionTime = duration_cast<Microseconds>(elapsedTimeExcludingPauses());
const auto executionTimeMillis = durationCount<Milliseconds>(_debug.executionTime);
@@ -609,9 +652,12 @@ BSONObj CurOp::truncateAndSerializeGenericCursor(GenericCursor* cursor,
}
void CurOp::reportState(OperationContext* opCtx, BSONObjBuilder* builder, bool truncateOps) {
- if (_start) {
- builder->append("secs_running", durationCount<Seconds>(elapsedTimeTotal()));
- builder->append("microsecs_running", durationCount<Microseconds>(elapsedTimeTotal()));
+ auto start = _start.load();
+ if (start) {
+ auto end = _end.load();
+ auto elapsedTimeTotal = computeElapsedTimeTotal(start, end);
+ builder->append("secs_running", durationCount<Seconds>(elapsedTimeTotal));
+ builder->append("microsecs_running", durationCount<Microseconds>(elapsedTimeTotal));
}
builder->append("op", logicalOpToString(_logicalOp));
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index 0ee19dde2bf..1309d9835a2 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -469,13 +469,13 @@ public:
// negative, if the system time has been reset during the course of this operation.
//
- void ensureStarted();
- bool isStarted() const {
- return _start > 0;
+ void ensureStarted() {
+ static_cast<void>(startTime());
}
- void done() {
- _end = _tickSource->getTicks();
+ bool isStarted() const {
+ return _start.load() != 0;
}
+ void done();
bool isDone() const {
return _end > 0;
}
@@ -571,15 +571,12 @@ public:
* If this op has not yet been started, returns 0.
*/
Microseconds elapsedTimeTotal() {
- if (!isStarted()) {
+ auto start = _start.load();
+ if (start == 0) {
return Microseconds{0};
}
- if (!_end) {
- return _tickSource->ticksTo<Microseconds>(_tickSource->getTicks() - startTime());
- } else {
- return _tickSource->ticksTo<Microseconds>(_end - startTime());
- }
+ return computeElapsedTimeTotal(start, _end.load());
}
/**
@@ -593,11 +590,13 @@ public:
*/
Microseconds elapsedTimeExcludingPauses() {
invariant(!_lastPauseTime);
- if (!isStarted()) {
+
+ auto start = _start.load();
+ if (start == 0) {
return Microseconds{0};
}
- return elapsedTimeTotal() - _totalPausedDuration;
+ return computeElapsedTimeTotal(start, _end.load()) - _totalPausedDuration;
}
/**
@@ -727,10 +726,9 @@ public:
private:
class CurOpStack;
- TickSource::Tick startTime() {
- ensureStarted();
- return _start;
- }
+ TickSource::Tick startTime();
+ Microseconds computeElapsedTimeTotal(TickSource::Tick startTime,
+ TickSource::Tick endTime) const;
static const OperationContext::Decoration<CurOpStack> _curopStack;
@@ -741,10 +739,10 @@ private:
const Command* _command{nullptr};
// The time at which this CurOp instance was marked as started.
- TickSource::Tick _start{0};
+ std::atomic<TickSource::Tick> _start{0}; // NOLINT
- // The time at which this CurOp instance was marked as done.
- TickSource::Tick _end{0};
+ // The time at which this CurOp instance was marked as done or 0 if the CurOp is not yet done.
+ std::atomic<TickSource::Tick> _end{0}; // NOLINT
// The time at which this CurOp instance had its timer paused, or 0 if the timer is not
// currently paused.
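Taken together, the header changes keep the public contract of the timer intact while making the reads lock-free. A hedged usage sketch of that contract follows (the helper name timeOperation is illustrative, how the CurOp instance is obtained is elided, and only members shown in this diff are used):

void timeOperation(CurOp& curOp) {
    curOp.ensureStarted();                  // assigns '_start' lazily, exactly once
    // ... run the operation; reportState() may read the timer concurrently ...
    curOp.done();                           // records '_end'
    invariant(curOp.isStarted() && curOp.isDone());
    auto total = curOp.elapsedTimeTotal();  // fixed at '_end' - '_start' from here on
}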
diff --git a/src/mongo/db/repl/rollback_impl_test.cpp b/src/mongo/db/repl/rollback_impl_test.cpp
index eb1662fb626..2ef9fc6c4b6 100644
--- a/src/mongo/db/repl/rollback_impl_test.cpp
+++ b/src/mongo/db/repl/rollback_impl_test.cpp
@@ -528,7 +528,11 @@ TEST_F(RollbackImplTest, RollbackKillsNecessaryOperations) {
// Run rollback in a separate thread so the locking threads can check for interrupt.
Status status(ErrorCodes::InternalError, "Not set");
- stdx::thread rollbackThread([&] { status = _rollback->runRollback(_opCtx.get()); });
+ stdx::thread rollbackThread([&] {
+ ThreadClient tc(getGlobalServiceContext());
+ auto opCtx = tc.get()->makeOperationContext();
+ status = _rollback->runRollback(opCtx.get());
+ });
while (!(writeOpCtx->isKillPending() && readOpCtx->isKillPending())) {
// Do nothing.
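The test changes in this and the remaining files all follow one pattern: because CurOp::startTime() now checks that the calling thread's Client matches the Client of the owning OperationContext, background test threads can no longer reuse the fixture's operation context and instead install a ThreadClient and create their own. A minimal sketch of that pattern, with doWork standing in as a placeholder for each test thread's body:

stdx::thread worker([&] {
    // Bind a Client to this thread before creating an OperationContext on it.
    ThreadClient tc(getGlobalServiceContext());
    auto opCtx = tc.get()->makeOperationContext();
    doWork(opCtx.get());  // placeholder for the per-test work
});
worker.join();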
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
index 4150b28dcbf..5607901f2c2 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
@@ -134,26 +134,30 @@ TEST_F(BalancerChunkSelectionTest, TagRangesOverlap) {
ChunkType chunk = setUpChunk(
kNamespace, kKeyPattern.globalMin(), kKeyPattern.globalMax(), kShardId0, version);
- auto assertRangeOverlapConflictWhenMoveChunk = [this, &chunk](const StringMap<ChunkRange>&
- tagChunkRanges) {
- // Set up two zones whose ranges overlap.
- setUpTags(kNamespace, tagChunkRanges);
-
- auto future = launchAsync([this, &chunk] {
- // Requesting chunks to be relocated requires running commands on each shard to get
- // shard statistics. Set up dummy hosts for the source shards.
- shardTargeterMock(operationContext(), kShardId0)->setFindHostReturnValue(kShardHost0);
- shardTargeterMock(operationContext(), kShardId1)->setFindHostReturnValue(kShardHost1);
-
- auto migrateInfoStatus =
- _chunkSelectionPolicy.get()->selectSpecificChunkToMove(operationContext(), chunk);
- ASSERT_EQUALS(ErrorCodes::RangeOverlapConflict, migrateInfoStatus.getStatus().code());
- });
-
- expectGetStatsCommands(2);
- future.default_timed_get();
- removeAllTags(kNamespace);
- };
+ auto assertRangeOverlapConflictWhenMoveChunk =
+ [this, &chunk](const StringMap<ChunkRange>& tagChunkRanges) {
+ // Set up two zones whose ranges overlap.
+ setUpTags(kNamespace, tagChunkRanges);
+
+ auto future = launchAsync([this, &chunk] {
+ ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+
+ // Requesting chunks to be relocated requires running commands on each shard to get
+ // shard statistics. Set up dummy hosts for the source shards.
+ shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
+ shardTargeterMock(opCtx.get(), kShardId1)->setFindHostReturnValue(kShardHost1);
+
+ auto migrateInfoStatus =
+ _chunkSelectionPolicy.get()->selectSpecificChunkToMove(opCtx.get(), chunk);
+ ASSERT_EQUALS(ErrorCodes::RangeOverlapConflict,
+ migrateInfoStatus.getStatus().code());
+ });
+
+ expectGetStatsCommands(2);
+ future.default_timed_get();
+ removeAllTags(kNamespace);
+ };
assertRangeOverlapConflictWhenMoveChunk(
{{"A", {kKeyPattern.globalMin(), BSON(kPattern << -10)}},
@@ -193,13 +197,16 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
}
auto future = launchAsync([this] {
+ ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+
// Requests chunks to be relocated requires running commands on each shard to
// get shard statistics. Set up dummy hosts for the source shards.
- shardTargeterMock(operationContext(), kShardId0)->setFindHostReturnValue(kShardHost0);
- shardTargeterMock(operationContext(), kShardId1)->setFindHostReturnValue(kShardHost1);
+ shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
+ shardTargeterMock(opCtx.get(), kShardId1)->setFindHostReturnValue(kShardHost1);
auto candidateChunksStatus =
- _chunkSelectionPolicy.get()->selectChunksToMove(operationContext());
+ _chunkSelectionPolicy.get()->selectChunksToMove(opCtx.get());
ASSERT_OK(candidateChunksStatus.getStatus());
// The balancer does not bubble up the IllegalOperation error, but it is expected
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index de223d182b1..08c2dced011 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -384,13 +384,15 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) {
DatabaseType discoveredDB2(
"TestDB2", ShardId("StandaloneShard"), false, databaseVersion::makeNew());
- operationContext()->setWriteConcern(ShardingCatalogClient::kMajorityWriteConcern);
+ auto expectWriteConcern = ShardingCatalogClient::kMajorityWriteConcern;
- auto future = launchAsync([this, expectedShardName] {
+ auto future = launchAsync([this, expectedShardName, expectWriteConcern] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ opCtx->setWriteConcern(expectWriteConcern);
auto shardName =
- assertGet(ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(),
+ assertGet(ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(),
&expectedShardName,
assertGet(ConnectionString::parse("StandaloneHost:12345")),
100));
@@ -416,8 +418,7 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) {
expectAddShardCmdReturnSuccess(shardTarget, expectedShardName);
// The shard receives the setFeatureCompatibilityVersion command.
- expectSetFeatureCompatibilityVersion(
- shardTarget, BSON("ok" << 1), operationContext()->getWriteConcern().toBSON());
+ expectSetFeatureCompatibilityVersion(shardTarget, BSON("ok" << 1), expectWriteConcern.toBSON());
// Wait for the addShard to complete before checking the config database
future.timed_get(kLongFutureTimeout);
@@ -471,9 +472,10 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
auto future = launchAsync([this, &expectedShardName, &shardTarget] {
ThreadClient tc(getServiceContext());
- auto shardName = assertGet(
- ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(), nullptr, ConnectionString(shardTarget), 100));
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto shardName =
+ assertGet(ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), nullptr, ConnectionString(shardTarget), 100));
ASSERT_EQUALS(expectedShardName, shardName);
});
@@ -520,9 +522,11 @@ TEST_F(AddShardTest, AddSCCCConnectionStringAsShard) {
targeter->setConnectionStringReturnValue(invalidConn);
auto future = launchAsync([this, invalidConn] {
+ ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
const std::string shardName("StandaloneShard");
- auto status = ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(), &shardName, invalidConn, 100);
+ auto status = ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &shardName, invalidConn, 100);
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "Invalid connection string");
});
@@ -536,8 +540,10 @@ TEST_F(AddShardTest, EmptyShardName) {
std::string expectedShardName = "";
auto future = launchAsync([this, expectedShardName] {
- auto status = ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(),
+ ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto status = ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(),
&expectedShardName,
assertGet(ConnectionString::parse("StandaloneHost:12345")),
100);
@@ -561,10 +567,10 @@ TEST_F(AddShardTest, UnreachableHost) {
auto future = launchAsync([this, &expectedShardName, &shardTarget] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
auto status =
- ShardingCatalogManager::get(operationContext())
- ->addShard(
- operationContext(), &expectedShardName, ConnectionString(shardTarget), 100);
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &expectedShardName, ConnectionString(shardTarget), 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "host unreachable");
});
@@ -588,10 +594,10 @@ TEST_F(AddShardTest, AddMongosAsShard) {
auto future = launchAsync([this, &expectedShardName, &shardTarget] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
auto status =
- ShardingCatalogManager::get(operationContext())
- ->addShard(
- operationContext(), &expectedShardName, ConnectionString(shardTarget), 100);
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &expectedShardName, ConnectionString(shardTarget), 100);
ASSERT_EQUALS(ErrorCodes::IllegalOperation, status);
});
@@ -615,10 +621,10 @@ TEST_F(AddShardTest, AddReplicaSetShardAsStandalone) {
auto future = launchAsync([this, expectedShardName, shardTarget] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
auto status =
- ShardingCatalogManager::get(operationContext())
- ->addShard(
- operationContext(), &expectedShardName, ConnectionString(shardTarget), 100);
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &expectedShardName, ConnectionString(shardTarget), 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "use replica set url format");
});
@@ -646,8 +652,9 @@ TEST_F(AddShardTest, AddStandaloneHostShardAsReplicaSet) {
auto future = launchAsync([this, expectedShardName, connString] {
ThreadClient tc(getServiceContext());
- auto status = ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(), &expectedShardName, connString, 100);
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto status = ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "host did not return a set name");
});
@@ -674,8 +681,9 @@ TEST_F(AddShardTest, ReplicaSetMistmatchedReplicaSetName) {
auto future = launchAsync([this, expectedShardName, connString] {
ThreadClient tc(getServiceContext());
- auto status = ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(), &expectedShardName, connString, 100);
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto status = ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(), "does not match the actual set name");
});
@@ -703,8 +711,9 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) {
auto future = launchAsync([this, expectedShardName, connString] {
ThreadClient tc(getServiceContext());
- auto status = ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(), &expectedShardName, connString, 100);
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto status = ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(),
"as a shard since it is a config server");
@@ -734,8 +743,9 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) {
auto future = launchAsync([this, expectedShardName, connString] {
ThreadClient tc(getServiceContext());
- auto status = ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(), &expectedShardName, connString, 100);
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto status = ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(status.getStatus().reason(),
"host2:12345 does not belong to replica set");
@@ -767,8 +777,9 @@ TEST_F(AddShardTest, AddShardWithNameConfigFails) {
auto future = launchAsync([this, expectedShardName, connString] {
ThreadClient tc(getServiceContext());
- auto status = ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(), &expectedShardName, connString, 100);
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto status = ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_EQUALS(status.getStatus().reason(),
"use of shard replica set with name 'config' is not allowed");
@@ -811,8 +822,9 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
auto future = launchAsync([this, expectedShardName, connString] {
ThreadClient tc(getServiceContext());
- auto status = ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(), &expectedShardName, connString, 100);
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto status = ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
ASSERT_STRING_CONTAINS(
status.getStatus().reason(),
@@ -857,8 +869,9 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
auto future = launchAsync([this, &expectedShardName, &connString] {
ThreadClient tc(getServiceContext());
- auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(), nullptr, connString, 100));
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto shardName = assertGet(ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), nullptr, connString, 100));
ASSERT_EQUALS(expectedShardName, shardName);
});
@@ -921,8 +934,9 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
auto future = launchAsync([this, &expectedShardName, &seedString] {
ThreadClient tc(getServiceContext());
- auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(), nullptr, seedString, 100));
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto shardName = assertGet(ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), nullptr, seedString, 100));
ASSERT_EQUALS(expectedShardName, shardName);
});
@@ -996,10 +1010,10 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
auto future = launchAsync([this, &expectedShardName, &shardTarget] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
auto shardName = assertGet(
- ShardingCatalogManager::get(operationContext())
- ->addShard(
- operationContext(), &expectedShardName, ConnectionString(shardTarget), 100));
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &expectedShardName, ConnectionString(shardTarget), 100));
ASSERT_EQUALS(expectedShardName, shardName);
});
@@ -1086,9 +1100,10 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
std::string differentName = "anotherShardName";
auto future1 = launchAsync([&] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
- ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(),
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(),
&differentName,
ConnectionString(shardTarget),
existingShard.getMaxSizeMB()));
@@ -1101,9 +1116,10 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// Adding the same standalone host with a different maxSize should fail.
auto future2 = launchAsync([&] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
- ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(),
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(),
nullptr,
ConnectionString(shardTarget),
existingShard.getMaxSizeMB() + 100));
@@ -1116,9 +1132,10 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// by calling addShard.
auto future3 = launchAsync([&] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
- ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(),
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(),
nullptr,
ConnectionString::forReplicaSet("mySet", {shardTarget}),
existingShard.getMaxSizeMB()));
@@ -1131,8 +1148,9 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// Adding the same standalone host with the same options should succeed.
auto future4 = launchAsync([&] {
ThreadClient tc(getServiceContext());
- auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(),
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto shardName = assertGet(ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(),
&existingShardName,
ConnectionString(shardTarget),
existingShard.getMaxSizeMB()));
@@ -1147,8 +1165,9 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// shard name) should succeed.
auto future5 = launchAsync([&] {
ThreadClient tc(getServiceContext());
- auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(),
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto shardName = assertGet(ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(),
nullptr,
ConnectionString(shardTarget),
existingShard.getMaxSizeMB()));
@@ -1190,11 +1209,11 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
std::string differentName = "anotherShardName";
auto future1 = launchAsync([&] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
ASSERT_EQUALS(
ErrorCodes::IllegalOperation,
- ShardingCatalogManager::get(operationContext())
- ->addShard(
- operationContext(), &differentName, connString, existingShard.getMaxSizeMB()));
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), &differentName, connString, existingShard.getMaxSizeMB()));
});
future1.timed_get(kLongFutureTimeout);
@@ -1204,11 +1223,11 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// Adding the same connection string with a different maxSize should fail.
auto future2 = launchAsync([&] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
ASSERT_EQUALS(
ErrorCodes::IllegalOperation,
- ShardingCatalogManager::get(operationContext())
- ->addShard(
- operationContext(), nullptr, connString, existingShard.getMaxSizeMB() + 100));
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), nullptr, connString, existingShard.getMaxSizeMB() + 100));
});
future2.timed_get(kLongFutureTimeout);
@@ -1222,9 +1241,10 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// addShard.
auto future3 = launchAsync([&] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
- ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(),
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(),
nullptr,
ConnectionString(shardTarget),
existingShard.getMaxSizeMB()));
@@ -1240,9 +1260,10 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
std::string differentSetName = "differentSet";
auto future4 = launchAsync([&] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
- ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(),
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(),
nullptr,
ConnectionString::forReplicaSet(differentSetName,
connString.getServers()),
@@ -1256,11 +1277,11 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// Adding the same host with the same options should succeed.
auto future5 = launchAsync([&] {
ThreadClient tc(getServiceContext());
- auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(),
- &existingShardName,
- connString,
- existingShard.getMaxSizeMB()));
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto shardName = assertGet(
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(
+ opCtx.get(), &existingShardName, connString, existingShard.getMaxSizeMB()));
ASSERT_EQUALS(existingShardName, shardName);
});
future5.timed_get(kLongFutureTimeout);
@@ -1269,9 +1290,10 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// should succeed.
auto future6 = launchAsync([&] {
ThreadClient tc(getServiceContext());
+ auto opCtx = Client::getCurrent()->makeOperationContext();
auto shardName = assertGet(
- ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(), nullptr, connString, existingShard.getMaxSizeMB()));
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(opCtx.get(), nullptr, connString, existingShard.getMaxSizeMB()));
ASSERT_EQUALS(existingShardName, shardName);
});
future6.timed_get(kLongFutureTimeout);
@@ -1293,11 +1315,11 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
}
auto future7 = launchAsync([&] {
ThreadClient tc(getServiceContext());
- auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
- ->addShard(operationContext(),
- nullptr,
- otherHostConnString,
- existingShard.getMaxSizeMB()));
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ auto shardName = assertGet(
+ ShardingCatalogManager::get(opCtx.get())
+ ->addShard(
+ opCtx.get(), nullptr, otherHostConnString, existingShard.getMaxSizeMB()));
ASSERT_EQUALS(existingShardName, shardName);
});
future7.timed_get(kLongFutureTimeout);