author     Pierlauro Sciarelli <pierlauro.sciarelli@mongodb.com>  2022-09-29 14:59:16 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>       2022-09-29 15:47:53 +0000
commit     540cff5317a7520ee4f8f288d37707094df3757c (patch)
tree       4223c874ca34ce6197917365de706f953a1fd7df
parent     af8869a6b24a42d84aa67357ada3840e9508303c (diff)
download   mongo-540cff5317a7520ee4f8f288d37707094df3757c.tar.gz
SERVER-70104 Avoid unnecessary atomicity of range deleter service state
-rw-r--r--  src/mongo/db/s/range_deleter_service.cpp | 16
-rw-r--r--  src/mongo/db/s/range_deleter_service.h   |  5
2 files changed, 10 insertions(+), 11 deletions(-)
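This commit drops the AtomicWord<State> wrapper around the range deleter service state: every read and write of _state already happens while holding the service mutex (taken via _acquireMutexUnconditionally() or _acquireMutexFailIfServiceNotUp()), so per-access atomicity is redundant and a plain enum member suffices. A minimal standalone sketch of the resulting pattern, using the standard library instead of MongoDB's internal types (class and method names here are hypothetical):

#include <mutex>

class RangeDeleterServiceSketch {
public:
    void onStepUp() {
        std::lock_guard<std::mutex> lg(_mutex);  // every access happens under the lock
        _state = State::kInitializing;           // plain assignment replaces _state.store()
    }

    void onStepDown() {
        std::lock_guard<std::mutex> lg(_mutex);
        _state = State::kDown;
    }

    bool isUp() {
        std::lock_guard<std::mutex> lg(_mutex);
        return _state == State::kUp;             // plain comparison replaces _state.load()
    }

private:
    enum class State { kInitializing, kUp, kDown };
    std::mutex _mutex;
    State _state{State::kDown};  // guarded by _mutex; no atomicity needed
};

The mutex provides both the mutual exclusion and the memory ordering the atomic previously supplied, so removing load()/store() introduces no data race as long as no access bypasses the lock.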
diff --git a/src/mongo/db/s/range_deleter_service.cpp b/src/mongo/db/s/range_deleter_service.cpp
index 170e32ec944..1137f464b6a 100644
--- a/src/mongo/db/s/range_deleter_service.cpp
+++ b/src/mongo/db/s/range_deleter_service.cpp
@@ -248,9 +248,9 @@ void RangeDeleterService::onStepUpComplete(OperationContext* opCtx, long long te
}
auto lock = _acquireMutexUnconditionally();
- dassert(_state.load() == kDown, "Service expected to be down before stepping up");
+ dassert(_state == kDown, "Service expected to be down before stepping up");
- _state.store(kInitializing);
+ _state = kInitializing;
if (_executor) {
// Join the previously shut down executor before reinstantiating it
@@ -279,7 +279,7 @@ void RangeDeleterService::onStepUpComplete(OperationContext* opCtx, long long te
void RangeDeleterService::_recoverRangeDeletionsOnStepUp(OperationContext* opCtx) {
if (disableResumableRangeDeleter.load()) {
- _state.store(kDown);
+ _state = kDown;
return;
}
@@ -358,8 +358,8 @@ void RangeDeleterService::_recoverRangeDeletionsOnStepUp(OperationContext* opCtx
auto lock = _acquireMutexUnconditionally();
// Since recovery is only spawned on step-up but may complete later, it cannot be
// assumed that the node is still primary when all the resubmissions finish
- if (_state.load() != kDown) {
- this->_state.store(kUp);
+ if (_state != kDown) {
+ this->_state = kUp;
}
})
.semi();
@@ -390,7 +390,7 @@ void RangeDeleterService::_stopService(bool joinExecutor) {
// Clear the range deletion tasks map to notify potential waiters on completion futures
_rangeDeletionTasks.clear();
- _state.store(kDown);
+ _state = kDown;
}
void RangeDeleterService::onStepDown() {
@@ -477,9 +477,9 @@ SharedSemiFuture<void> RangeDeleterService::registerTask(
.then([this, rdt = rdt]() {
// Step 3: schedule the actual range deletion task
auto lock = _acquireMutexUnconditionally();
- invariant(_readyRangeDeletionsProcessorPtr || _state.load() == kDown,
+ invariant(_readyRangeDeletionsProcessorPtr || _state == kDown,
"The range deletions processor must be instantiated if the state != kDown");
- if (_state.load() != kDown) {
+ if (_state != kDown) {
_readyRangeDeletionsProcessorPtr->emplaceRangeDeletion(rdt);
}
});
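The step-up recovery hunk above is the subtle part of this change: recovery runs asynchronously, so by the time the resubmissions finish the node may have stepped down and _stopService() may already have reset the state to kDown. The continuation therefore reacquires the mutex and only advances to kUp if no concurrent step-down intervened. A standard-library sketch of that re-check (names hypothetical; this is not the MongoDB executor machinery):

#include <mutex>
#include <thread>

enum class State { kInitializing, kUp, kDown };

std::mutex gMutex;
State gState{State::kInitializing};

// Completion handler run on another thread: re-check the state under the
// mutex before flipping it to kUp, since a step-down may have raced with
// the asynchronous recovery.
void onRecoveryComplete() {
    std::lock_guard<std::mutex> lg(gMutex);
    if (gState != State::kDown) {
        gState = State::kUp;
    }
}

int main() {
    std::thread recovery(onRecoveryComplete);
    recovery.join();
    return 0;
}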
diff --git a/src/mongo/db/s/range_deleter_service.h b/src/mongo/db/s/range_deleter_service.h
index 2ac88ffc07e..2b8293805e8 100644
--- a/src/mongo/db/s/range_deleter_service.h
+++ b/src/mongo/db/s/range_deleter_service.h
@@ -175,7 +175,7 @@ private:
enum State { kInitializing, kUp, kDown };
- AtomicWord<State> _state{kDown};
+ State _state{kDown};
// Future marked as ready when the state changes to "up"
SemiFuture<void> _stepUpCompletedFuture;
@@ -183,8 +183,7 @@ private:
/* Acquire the mutex only if the service is up (for "user" operations) */
[[nodiscard]] stdx::unique_lock<Latch> _acquireMutexFailIfServiceNotUp() {
stdx::unique_lock<Latch> lg(_mutex_DO_NOT_USE_DIRECTLY);
- uassert(
- ErrorCodes::NotYetInitialized, "Range deleter service not up", _state.load() == kUp);
+ uassert(ErrorCodes::NotYetInitialized, "Range deleter service not up", _state == kUp);
return lg;
}
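For reference, a standard-library approximation of the _acquireMutexFailIfServiceNotUp() helper above; the uassert becomes a plain exception, and stdx::unique_lock<Latch> is approximated with std::unique_lock<std::mutex> (an assumption for illustration, not the MongoDB API). The point of the pattern is that the state check and the caller's subsequent "user" operation run under a single lock acquisition:

#include <mutex>
#include <stdexcept>

class ServiceSketch {
public:
    [[nodiscard]] std::unique_lock<std::mutex> acquireMutexFailIfServiceNotUp() {
        std::unique_lock<std::mutex> lg(_mutex);
        // The lock is held here, so a plain read of _state needs no load().
        if (_state != State::kUp) {
            throw std::runtime_error("Range deleter service not up");
        }
        return lg;  // the caller keeps holding the lock for its operation
    }

private:
    enum class State { kInitializing, kUp, kDown };
    std::mutex _mutex;
    State _state{State::kDown};
};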