author     Dianna Hohensee <dianna.hohensee@10gen.com>  2016-09-15 19:17:01 -0400
committer  Dianna Hohensee <dianna.hohensee@10gen.com>  2016-09-15 19:17:01 -0400
commit     c17b1c84bc6c18a47e4215d6b6238ae12d6854fc
tree       685bef51dd4376b7a0ec422f20518f6d28cf16ad
parent     f8212b6b37bea1bead354df86e8485761a519339
Revert "SERVER-25905 Release all config held distlocks and reacquire balancer distlocks in drain mode on config step up to primary"
This reverts commit f8212b6b37bea1bead354df86e8485761a519339.
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp |  4
-rw-r--r--  src/mongo/s/balancer/balancer.cpp                                  | 10
-rw-r--r--  src/mongo/s/balancer/migration_manager.cpp                         | 83
-rw-r--r--  src/mongo/s/balancer/migration_manager.h                           | 38
-rw-r--r--  src/mongo/s/balancer/migration_manager_test.cpp                    | 30
-rw-r--r--  src/mongo/s/catalog/dist_lock_manager.h                            |  3
-rw-r--r--  src/mongo/s/catalog/dist_lock_manager_mock.cpp                     |  2
-rw-r--r--  src/mongo/s/catalog/dist_lock_manager_mock.h                       |  3
-rw-r--r--  src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp          |  3
-rw-r--r--  src/mongo/s/catalog/replset/replset_dist_lock_manager.h            |  3
-rw-r--r--  src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp     |  8
11 files changed, 79 insertions(+), 108 deletions(-)
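
What this revert restores, in outline: on config-server step-up the balancer only flips the MigrationManager into a recovering state (startRecovery()), and the CSRS balancer main thread later completes recovery (finishRecovery()) using the cluster identity OID as the constant session ID for every distributed lock the manager holds. The reverted commit had instead released all config-held distlocks on step-up and reacquired them in startRecoveryAndAcquireDistLocks(). Below is a minimal stand-alone model of the restored handoff, using simplified stand-in types (std::string for OID, no OperationContext) rather than the MongoDB classes:

// Minimal standalone model of the recovery handoff this revert restores.
// OID, MigrationManager and the state names are simplified stand-ins for the
// MongoDB classes touched by the diff, not the real API.
#include <cassert>
#include <iostream>
#include <mutex>
#include <string>

using OID = std::string;  // stand-in for mongo::OID

class MigrationManager {
public:
    enum class State { kStopped, kRecovering, kEnabled };

    // Non-blocking: called from the step-up hook via Balancer::startThread().
    // Only flips the state; no distributed locks are touched here.
    void startRecovery() {
        std::lock_guard<std::mutex> lk(_mutex);
        assert(_state == State::kStopped);
        _state = State::kRecovering;
    }

    // Blocking: called later from the balancer main thread once the balancer
    // distributed lock (session ID = cluster identity) has been acquired.
    void finishRecovery(const OID& clusterIdentity) {
        {
            std::lock_guard<std::mutex> lk(_mutex);
            assert(_state == State::kRecovering);
            if (_clusterIdentity.empty())
                _clusterIdentity = clusterIdentity;
            assert(_clusterIdentity == clusterIdentity);
        }
        // ... read config.migrations and schedule recovered migrations ...
        std::lock_guard<std::mutex> lk(_mutex);
        _state = State::kEnabled;
    }

    State state() const {
        std::lock_guard<std::mutex> lk(_mutex);
        return _state;
    }

private:
    mutable std::mutex _mutex;
    State _state{State::kStopped};
    OID _clusterIdentity;  // constant lock session ID, set on first recovery
};

int main() {
    MigrationManager mm;
    mm.startRecovery();                // config node steps up to primary
    mm.finishRecovery("cluster-oid");  // CSRS balancer thread recovers
    std::cout << (mm.state() == MigrationManager::State::kEnabled) << '\n';
}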
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index f1e0e16e399..15484b6729f 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -694,10 +694,6 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
<< causedBy(shardAwareInitializationStatus);
}
- // Free any leftover locks from previous instantiations.
- auto distLockManager = Grid::get(txn)->catalogClient(txn)->getDistLockManager();
- distLockManager->unlockAll(txn, distLockManager->getProcessID());
-
// If this is a config server node becoming a primary, start the balancer
Balancer::get(txn)->startThread(txn);
} else if (ShardingState::get(txn)->enabled()) {
diff --git a/src/mongo/s/balancer/balancer.cpp b/src/mongo/s/balancer/balancer.cpp
index c3a77dee494..f2a4c52146c 100644
--- a/src/mongo/s/balancer/balancer.cpp
+++ b/src/mongo/s/balancer/balancer.cpp
@@ -182,7 +182,7 @@ Status Balancer::startThread(OperationContext* txn) {
invariant(!_thread.joinable());
_state = kRunning;
- _migrationManager.startRecoveryAndAcquireDistLocks(txn);
+ _migrationManager.startRecovery();
_thread = stdx::thread([this] { _mainThread(); });
// Intentional fall through
case kRunning:
@@ -291,6 +291,7 @@ void Balancer::_mainThread() {
const Seconds kInitBackoffInterval(10);
+ OID clusterIdentity = ClusterIdentityLoader::get(txn.get())->getClusterId();
// Take the balancer distributed lock and hold it permanently. Do the attempts with single
// attempts in order to not block the thread and be able to check for interrupt more frequently.
@@ -300,7 +301,7 @@ void Balancer::_mainThread() {
txn.get(),
"balancer",
"CSRS Balancer",
- OID::gen(),
+ clusterIdentity,
DistLockManager::kSingleLockAttemptTimeout);
if (!distLockHandleStatus.isOK()) {
warning() << "Balancer distributed lock could not be acquired and will be retried in "
@@ -315,15 +316,16 @@ void Balancer::_mainThread() {
}
if (!_stopRequested()) {
- log() << "CSRS balancer thread is recovering";
+ log() << "CSRS balancer thread for cluster " << clusterIdentity << " is recovering";
auto balancerConfig = Grid::get(txn.get())->getBalancerConfiguration();
_migrationManager.finishRecovery(txn.get(),
+ clusterIdentity,
balancerConfig->getMaxChunkSizeBytes(),
balancerConfig->getSecondaryThrottle(),
balancerConfig->waitForDelete());
- log() << "CSRS balancer thread is recovered";
+ log() << "CSRS balancer thread for cluster " << clusterIdentity << " is recovered";
}
// Main balancer loop
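
The restored _mainThread() above acquires the balancer distributed lock with single attempts (kSingleLockAttemptTimeout) and a fixed backoff between attempts so the thread can check for interruption, passing the cluster identity as the lock session ID. A minimal stand-alone sketch of that retry pattern, with std::function stand-ins for the dist-lock call and the stop flag (not the MongoDB API):

// One lock attempt per iteration, a fixed backoff in between, and a stop check
// before every attempt so the thread can shut down promptly.
#include <chrono>
#include <functional>
#include <iostream>
#include <optional>
#include <string>
#include <thread>

using OID = std::string;             // stand-in for mongo::OID
using DistLockHandle = std::string;  // stand-in for mongo::DistLockHandle

std::optional<DistLockHandle> acquireBalancerLockWithRetry(
    const OID& clusterIdentity,
    const std::function<std::optional<DistLockHandle>(const OID&)>& tryLock,
    const std::function<bool()>& stopRequested,
    std::chrono::milliseconds backoff) {
    while (!stopRequested()) {
        // The lock session ID is the cluster identity, so a restarted config
        // primary presents the same identity it held before stepping down.
        if (auto handle = tryLock(clusterIdentity))
            return handle;
        std::this_thread::sleep_for(backoff);
    }
    return std::nullopt;
}

int main() {
    int attempts = 0;
    auto handle = acquireBalancerLockWithRetry(
        "cluster-oid",
        [&](const OID& id) -> std::optional<DistLockHandle> {
            // Fail once, then succeed, to exercise the backoff path.
            if (++attempts < 2)
                return std::nullopt;
            return id + "/balancer";
        },
        [] { return false; },
        std::chrono::milliseconds(10));
    std::cout << handle.value_or("not acquired") << '\n';
}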
diff --git a/src/mongo/s/balancer/migration_manager.cpp b/src/mongo/s/balancer/migration_manager.cpp
index 4bd9ae0950a..aa5beb38729 100644
--- a/src/mongo/s/balancer/migration_manager.cpp
+++ b/src/mongo/s/balancer/migration_manager.cpp
@@ -104,13 +104,13 @@ Status extractMigrationStatusFromRemoteCommandResponse(const RemoteCommandRespon
* Blocking call to acquire the distributed collection lock for the specified namespace.
*/
StatusWith<DistLockHandle> acquireDistLock(OperationContext* txn,
- const OID& lockSessionID,
+ const OID& clusterIdentity,
const NamespaceString& nss) {
const std::string whyMessage(stream() << "Migrating chunk(s) in collection " << nss.ns());
auto statusWithDistLockHandle =
Grid::get(txn)->catalogClient(txn)->getDistLockManager()->lockWithSessionID(
- txn, nss.ns(), whyMessage, lockSessionID, DistLockManager::kSingleLockAttemptTimeout);
+ txn, nss.ns(), whyMessage, clusterIdentity, DistLockManager::kSingleLockAttemptTimeout);
if (!statusWithDistLockHandle.isOK()) {
// If we get LockBusy while trying to acquire the collection distributed lock, this implies
@@ -135,7 +135,7 @@ StatusWith<DistLockHandle> acquireDistLock(OperationContext* txn,
} // namespace
MigrationManager::MigrationManager(ServiceContext* serviceContext)
- : _serviceContext(serviceContext), _lockSessionID(OID::gen()) {}
+ : _serviceContext(serviceContext) {}
MigrationManager::~MigrationManager() {
// The migration manager must be completely quiesced at destruction time
@@ -283,20 +283,29 @@ Status MigrationManager::executeManualMigration(
return status;
}
-void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* txn) {
+void MigrationManager::startRecovery() {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ invariant(_state == State::kStopped);
+ _state = State::kRecovering;
+}
+
+void MigrationManager::finishRecovery(OperationContext* txn,
+ const OID& clusterIdentity,
+ uint64_t maxChunkSizeBytes,
+ const MigrationSecondaryThrottleOptions& secondaryThrottle,
+ bool waitForDelete) {
{
stdx::lock_guard<stdx::mutex> lock(_mutex);
- invariant(_state == State::kStopped);
- invariant(_migrationRecoveryMap.empty());
- _state = State::kRecovering;
+ invariant(_state == State::kRecovering);
+ if (!_clusterIdentity.isSet()) {
+ _clusterIdentity = clusterIdentity;
+ }
+ invariant(_clusterIdentity == clusterIdentity);
}
- auto scopedGuard = MakeGuard([&] {
- _migrationRecoveryMap.clear();
- _abandonActiveMigrationsAndEnableManager(txn);
- });
-
// Load the active migrations from the config.migrations collection.
+ vector<MigrateInfo> migrateInfos;
+
auto statusWithMigrationsQueryResponse =
Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
txn,
@@ -309,8 +318,10 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* txn) {
if (!statusWithMigrationsQueryResponse.isOK()) {
warning() << "Unable to read config.migrations collection documents for balancer migration"
- << " recovery. Abandoning balancer recovery."
+ << " recovery. Abandoning recovery."
<< causedBy(redact(statusWithMigrationsQueryResponse.getStatus()));
+
+ _abandonActiveMigrationsAndEnableManager(txn);
return;
}
@@ -320,9 +331,11 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* txn) {
// The format of this migration document is incorrect. The balancer holds a distlock for
// this migration, but without parsing the migration document we cannot identify which
// distlock must be released. So we must release all distlocks.
- warning() << "Unable to parse config.migrations document '" << migration
- << "' for balancer migration recovery. Abandoning balancer recovery."
+ warning() << "Unable to parse config.migrations collection documents for balancer"
+ << " migration recovery. Abandoning recovery."
<< causedBy(redact(statusWithMigrationType.getStatus()));
+
+ _abandonActiveMigrationsAndEnableManager(txn);
return;
}
MigrateInfo migrateInfo = statusWithMigrationType.getValue().toMigrateInfo();
@@ -332,47 +345,11 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* txn) {
std::list<MigrateInfo> list;
it = _migrationRecoveryMap.insert(std::make_pair(NamespaceString(migrateInfo.ns), list))
.first;
-
- // Reacquire the matching distributed lock for this namespace.
- const std::string whyMessage(stream() << "Migrating chunk(s) in collection "
- << migrateInfo.ns);
- auto statusWithDistLockHandle =
- Grid::get(txn)
- ->catalogClient(txn)
- ->getDistLockManager()
- ->tryLockWithLocalWriteConcern(txn, migrateInfo.ns, whyMessage, _lockSessionID);
- if (!statusWithDistLockHandle.isOK() &&
- statusWithDistLockHandle.getStatus() != ErrorCodes::LockStateChangeFailed) {
- // LockStateChangeFailed is alright because that should mean a 3.2 shard has it for
- // the active migration.
- warning() << "Failed to acquire distributed lock for collection '" << migrateInfo.ns
- << "' during balancer recovery of an active migration. Abandoning"
- << " balancer recovery."
- << causedBy(redact(statusWithDistLockHandle.getStatus()));
- return;
- }
}
it->second.push_back(std::move(migrateInfo));
}
- scopedGuard.Dismiss();
-}
-
-void MigrationManager::finishRecovery(OperationContext* txn,
- uint64_t maxChunkSizeBytes,
- const MigrationSecondaryThrottleOptions& secondaryThrottle,
- bool waitForDelete) {
- {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- // Check if recovery was abandoned in startRecovery, in which case there is no more to do.
- if (_state == State::kEnabled) {
- invariant(_migrationRecoveryMap.empty());
- return;
- }
- invariant(_state == State::kRecovering);
- }
-
// Schedule recovered migrations.
vector<ScopedMigrationRequest> scopedMigrationRequests;
vector<shared_ptr<Notification<Status>>> responses;
@@ -409,7 +386,7 @@ void MigrationManager::finishRecovery(OperationContext* txn,
txn, NamespaceString(itMigrateInfo->ns), itMigrateInfo->minKey);
if (nssAndMigrateInfos.second.size() == 1) {
Grid::get(txn)->catalogClient(txn)->getDistLockManager()->unlock(
- txn, _lockSessionID, itMigrateInfo->ns);
+ txn, _clusterIdentity, itMigrateInfo->ns);
}
itMigrateInfo = nssAndMigrateInfos.second.erase(itMigrateInfo);
} else {
@@ -591,7 +568,7 @@ void MigrationManager::_scheduleWithDistLock_inlock(OperationContext* txn,
auto it = _activeMigrationsWithDistLock.find(nss);
if (it == _activeMigrationsWithDistLock.end()) {
// Acquire the collection distributed lock (blocking call)
- auto distLockHandleStatus = acquireDistLock(txn, _lockSessionID, nss);
+ auto distLockHandleStatus = acquireDistLock(txn, _clusterIdentity, nss);
if (!distLockHandleStatus.isOK()) {
migration.completionNotification->set(distLockHandleStatus.getStatus());
return;
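
For per-collection locks, the restored MigrationManager keys active migrations by namespace: the first migration scheduled for a namespace acquires the collection distributed lock under the constant session ID (_clusterIdentity), and the lock is released once the last migration for that namespace goes away. A simplified stand-alone model of that bookkeeping, using illustrative names and std::string keys instead of the MongoDB types:

// Sketch of the per-namespace lock bookkeeping visible in
// _scheduleWithDistLock_inlock() and finishRecovery(). DistLockRegistry and
// the string-keyed map are simplified stand-ins, not the real classes.
#include <iostream>
#include <list>
#include <map>
#include <string>

using OID = std::string;  // stand-in for mongo::OID

class DistLockRegistry {
public:
    explicit DistLockRegistry(OID clusterIdentity) : _sessionId(std::move(clusterIdentity)) {}

    // The first migration for a namespace acquires the lock under the constant
    // session ID (the cluster identity after this revert).
    void scheduleMigration(const std::string& nss, const std::string& migration) {
        auto it = _activeByNamespace.find(nss);
        if (it == _activeByNamespace.end()) {
            std::cout << "lock " << nss << " with session " << _sessionId << '\n';
            it = _activeByNamespace.emplace(nss, std::list<std::string>{}).first;
        }
        it->second.push_back(migration);
    }

    // When the last migration for the namespace is gone, release the lock.
    void completeMigration(const std::string& nss) {
        auto it = _activeByNamespace.find(nss);
        if (it == _activeByNamespace.end())
            return;
        it->second.pop_front();
        if (it->second.empty()) {
            std::cout << "unlock " << nss << " with session " << _sessionId << '\n';
            _activeByNamespace.erase(it);
        }
    }

private:
    OID _sessionId;
    std::map<std::string, std::list<std::string>> _activeByNamespace;
};

int main() {
    DistLockRegistry registry("cluster-oid");
    registry.scheduleMigration("test.coll", "chunk-1");
    registry.scheduleMigration("test.coll", "chunk-2");
    registry.completeMigration("test.coll");
    registry.completeMigration("test.coll");  // last one: lock is released
}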
diff --git a/src/mongo/s/balancer/migration_manager.h b/src/mongo/s/balancer/migration_manager.h
index 2c6d716b862..456452b1efd 100644
--- a/src/mongo/s/balancer/migration_manager.h
+++ b/src/mongo/s/balancer/migration_manager.h
@@ -100,24 +100,18 @@ public:
/**
* Non-blocking method that puts the migration manager in the kRecovering state, in which
- * new migration requests will block until finishRecovery is called. Then does local writes to
- * reacquire the distributed locks for active migrations.
- *
- * The active migration recovery may fail and be abandoned, setting the state to kEnabled.
+ * new migration requests will block until finishRecovery is called.
*/
- void startRecoveryAndAcquireDistLocks(OperationContext* txn);
+ void startRecovery();
/**
* Blocking method that must only be called after startRecovery has been called. Recovers the
* state of the migration manager (if necessary and able) and puts it in the kEnabled state,
* where it will accept new migrations. Any migrations waiting on the recovery state will be
- * unblocked once the state is kEnabled, and then this function waits for the recovered active
- * migrations to finish before returning.
- *
- * The active migration recovery may fail and be abandoned, setting the state to kEnabled and
- * unblocking any process waiting on the recovery state.
+ * unblocked.
*/
void finishRecovery(OperationContext* txn,
+ const OID& clusterIdentity,
uint64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
bool waitForDelete);
@@ -253,25 +247,16 @@ private:
void _waitForRecovery();
/**
- * Should only be called from startRecovery or finishRecovery functions when the migration
- * manager is in either the kStopped or kRecovering state. Releases all the distributed locks
- * that the balancer holds, clears the config.migrations collection, changes the state of the
- * migration manager to kEnabled. Then unblocks all processes waiting for kEnabled state.
+ * Should only be called from within the finishRecovery function because the migration manager
+ * must be in the kRecovering state. Releases all the distributed locks that the balancer holds,
+ * clears the config.migrations collection, changes the state of the migration manager from
+ * kRecovering to kEnabled, and unblocks all processes waiting on the recovery state.
*/
void _abandonActiveMigrationsAndEnableManager(OperationContext* txn);
// The service context under which this migration manager runs.
ServiceContext* const _serviceContext;
- // Used as a constant session ID for all distributed locks that this MigrationManager holds.
- // Currently required so that locks can be reacquired for the balancer in startRecovery and then
- // overtaken in later operations.
- OID _lockSessionID;
-
- // Carries migration information over from startRecovery to finishRecovery. Should only be set
- // in startRecovery and then accessed in finishRecovery.
- stdx::unordered_map<NamespaceString, std::list<MigrateInfo>> _migrationRecoveryMap;
-
// Protects the class state below.
stdx::mutex _mutex;
@@ -282,6 +267,10 @@ private:
// signaled when the state change is complete.
stdx::condition_variable _condVar;
+ // Identity of the cluster under which this migration manager runs. Used as a constant session
+ // ID for all distributed locks that the MigrationManager holds.
+ OID _clusterIdentity;
+
// Holds information about each collection's distributed lock and active migrations via a
// CollectionMigrationState object.
CollectionMigrationsStateMap _activeMigrationsWithDistLock;
@@ -289,6 +278,9 @@ private:
// Holds information about migrations, which have been scheduled without the collection
// distributed lock acquired (i.e., the shard is asked to acquire it).
MigrationsList _activeMigrationsWithoutDistLock;
+
+ // Carries migration information over from startRecovery to finishRecovery.
+ stdx::unordered_map<NamespaceString, std::list<MigrateInfo>> _migrationRecoveryMap;
};
} // namespace mongo
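
The header comments above say that new migration requests block while the manager is recovering and are unblocked once finishRecovery moves it to kEnabled. A minimal sketch of that gate with a condition variable, analogous to _waitForRecovery()/_condVar but with illustrative names:

// Threads scheduling migrations block while the manager is kRecovering and
// are released when recovery finishes. Names are illustrative, not the
// MongoDB class.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

class RecoveryGate {
public:
    enum class State { kRecovering, kEnabled };

    void waitForRecovery() {
        std::unique_lock<std::mutex> lk(_mutex);
        _condVar.wait(lk, [this] { return _state == State::kEnabled; });
    }

    void enable() {
        {
            std::lock_guard<std::mutex> lk(_mutex);
            _state = State::kEnabled;
        }
        _condVar.notify_all();  // unblock every waiting migration request
    }

private:
    std::mutex _mutex;
    std::condition_variable _condVar;
    State _state{State::kRecovering};
};

int main() {
    RecoveryGate gate;
    std::thread migrationRequest([&] {
        gate.waitForRecovery();  // blocks until recovery finishes
        std::cout << "migration scheduled\n";
    });
    gate.enable();               // finishRecovery equivalent
    migrationRequest.join();
}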
diff --git a/src/mongo/s/balancer/migration_manager_test.cpp b/src/mongo/s/balancer/migration_manager_test.cpp
index bd11a48521f..645de866c96 100644
--- a/src/mongo/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/s/balancer/migration_manager_test.cpp
@@ -155,6 +155,9 @@ protected:
const KeyPattern kKeyPattern = KeyPattern(BSON(kPattern << 1));
+ // Cluster identity to pass to the migration manager
+ const OID _clusterIdentity{OID::gen()};
+
std::unique_ptr<MigrationManager> _migrationManager;
private:
@@ -165,8 +168,9 @@ private:
void MigrationManagerTest::setUp() {
ConfigServerTestFixture::setUp();
_migrationManager = stdx::make_unique<MigrationManager>(getServiceContext());
- _migrationManager->startRecoveryAndAcquireDistLocks(operationContext());
- _migrationManager->finishRecovery(operationContext(), 0, kDefaultSecondaryThrottle, false);
+ _migrationManager->startRecovery();
+ _migrationManager->finishRecovery(
+ operationContext(), _clusterIdentity, 0, kDefaultSecondaryThrottle, false);
}
void MigrationManagerTest::tearDown() {
@@ -830,8 +834,9 @@ TEST_F(MigrationManagerTest, RestartMigrationManager) {
// Go through the lifecycle of the migration manager
_migrationManager->interruptAndDisableMigrations();
_migrationManager->drainActiveMigrations();
- _migrationManager->startRecoveryAndAcquireDistLocks(operationContext());
- _migrationManager->finishRecovery(operationContext(), 0, kDefaultSecondaryThrottle, false);
+ _migrationManager->startRecovery();
+ _migrationManager->finishRecovery(
+ operationContext(), _clusterIdentity, 0, kDefaultSecondaryThrottle, false);
auto future = launchAsync([&] {
Client::initThreadIfNotAlready("Test");
@@ -876,6 +881,7 @@ TEST_F(MigrationManagerTest, MigrationRecovery) {
_migrationManager->interruptAndDisableMigrations();
_migrationManager->drainActiveMigrations();
+ _migrationManager->startRecovery();
// Set up two fake active migrations by writing documents to the config.migrations collection.
setUpMigration(collName,
@@ -889,8 +895,6 @@ TEST_F(MigrationManagerTest, MigrationRecovery) {
kShardId3.toString(),
chunk2.getShard().toString());
- _migrationManager->startRecoveryAndAcquireDistLocks(operationContext());
-
auto future = launchAsync([this] {
Client::initThreadIfNotAlready("Test");
auto txn = cc().makeOperationContext();
@@ -900,7 +904,8 @@ TEST_F(MigrationManagerTest, MigrationRecovery) {
shardTargeterMock(txn.get(), kShardId0)->setFindHostReturnValue(kShardHost0);
shardTargeterMock(txn.get(), kShardId2)->setFindHostReturnValue(kShardHost2);
- _migrationManager->finishRecovery(txn.get(), 0, kDefaultSecondaryThrottle, false);
+ _migrationManager->finishRecovery(
+ txn.get(), _clusterIdentity, 0, kDefaultSecondaryThrottle, false);
});
// Expect two moveChunk commands.
@@ -933,9 +938,6 @@ TEST_F(MigrationManagerTest, FailMigrationRecovery) {
ChunkType chunk2 =
setUpChunk(collName, BSON(kPattern << 49), kKeyPattern.globalMax(), kShardId2, version);
- _migrationManager->interruptAndDisableMigrations();
- _migrationManager->drainActiveMigrations();
-
// Set up a parsable fake active migration document in the config.migrations collection.
setUpMigration(collName,
chunk1.getMin(),
@@ -943,6 +945,10 @@ TEST_F(MigrationManagerTest, FailMigrationRecovery) {
kShardId1.toString(),
chunk1.getShard().toString());
+ _migrationManager->interruptAndDisableMigrations();
+ _migrationManager->drainActiveMigrations();
+ _migrationManager->startRecovery();
+
// Set up a fake active migration document that will fail MigrationType parsing -- missing
// field.
BSONObjBuilder builder;
@@ -965,8 +971,8 @@ TEST_F(MigrationManagerTest, FailMigrationRecovery) {
OID::gen(),
DistLockManager::kSingleLockAttemptTimeout));
- _migrationManager->startRecoveryAndAcquireDistLocks(operationContext());
- _migrationManager->finishRecovery(operationContext(), 0, kDefaultSecondaryThrottle, false);
+ _migrationManager->finishRecovery(
+ operationContext(), _clusterIdentity, 0, kDefaultSecondaryThrottle, false);
// MigrationManagerTest::tearDown checks that the config.migrations collection is empty and all
// distributed locks are unlocked.
diff --git a/src/mongo/s/catalog/dist_lock_manager.h b/src/mongo/s/catalog/dist_lock_manager.h
index 0512a5dc481..400490b8cd1 100644
--- a/src/mongo/s/catalog/dist_lock_manager.h
+++ b/src/mongo/s/catalog/dist_lock_manager.h
@@ -153,8 +153,7 @@ public:
*/
virtual StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* txn,
StringData name,
- StringData whyMessage,
- const OID& lockSessionID) = 0;
+ StringData whyMessage) = 0;
/**
* Unlocks the given lockHandle. Will attempt to retry again later if the config
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.cpp b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
index 18bd8a8ba6c..194ec4b7918 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
@@ -95,7 +95,7 @@ StatusWith<DistLockHandle> DistLockManagerMock::lockWithSessionID(OperationConte
}
StatusWith<DistLockHandle> DistLockManagerMock::tryLockWithLocalWriteConcern(
- OperationContext* txn, StringData name, StringData whyMessage, const OID& lockSessionID) {
+ OperationContext* txn, StringData name, StringData whyMessage) {
// Not yet implemented
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.h b/src/mongo/s/catalog/dist_lock_manager_mock.h
index d137b0239e4..19a0eb1c146 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.h
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.h
@@ -56,8 +56,7 @@ public:
StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* txn,
StringData name,
- StringData whyMessage,
- const OID& lockSessionID) override;
+ StringData whyMessage) override;
void unlockAll(OperationContext* txn, const std::string& processID) override;
diff --git a/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
index 6eab41a6bef..27b80627647 100644
--- a/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
@@ -426,7 +426,8 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
}
StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
- OperationContext* txn, StringData name, StringData whyMessage, const OID& lockSessionID) {
+ OperationContext* txn, StringData name, StringData whyMessage) {
+ const DistLockHandle lockSessionID = OID::gen();
const string who = str::stream() << _processID << ":" << getThreadName();
auto lockStatus = _catalog->grabLock(txn,
diff --git a/src/mongo/s/catalog/replset/replset_dist_lock_manager.h b/src/mongo/s/catalog/replset/replset_dist_lock_manager.h
index 365f768f52b..4731cd0bf3a 100644
--- a/src/mongo/s/catalog/replset/replset_dist_lock_manager.h
+++ b/src/mongo/s/catalog/replset/replset_dist_lock_manager.h
@@ -76,8 +76,7 @@ public:
StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* txn,
StringData name,
- StringData whyMessage,
- const OID& lockSessionID) override;
+ StringData whyMessage) override;
void unlock(OperationContext* txn, const DistLockHandle& lockSessionID) override;
diff --git a/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp b/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp
index d46a20c2913..cadd4f6cfe9 100644
--- a/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp
+++ b/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp
@@ -1987,7 +1987,7 @@ TEST_F(ReplSetDistLockManagerFixture, TryLockWithLocalWriteConcernBusy) {
// Will be different from the actual lock session id. For testing only.
retLockDoc.setLockID(OID::gen());
- OID lockSessionIDPassed = OID::gen();
+ OID lockSessionIDPassed;
getMockCatalog()->expectGrabLock(
[this, &lockName, &now, &whyMsg, &lockSessionIDPassed](StringData lockID,
@@ -2001,14 +2001,14 @@ TEST_F(ReplSetDistLockManagerFixture, TryLockWithLocalWriteConcernBusy) {
ASSERT_EQUALS(getProcessID(), processId);
ASSERT_GREATER_THAN_OR_EQUALS(time, now);
ASSERT_EQUALS(whyMsg, why);
- ASSERT_EQUALS(lockSessionIDPassed, lockSessionID);
+ lockSessionIDPassed = lockSessionID;
getMockCatalog()->expectNoGrabLock(); // Call only once.
},
{ErrorCodes::LockStateChangeFailed, "Unable to take lock"});
- auto lockStatus = distLock()->tryLockWithLocalWriteConcern(
- operationContext(), lockName, whyMsg, lockSessionIDPassed);
+ auto lockStatus =
+ distLock()->tryLockWithLocalWriteConcern(operationContext(), lockName, whyMsg);
ASSERT_EQ(ErrorCodes::LockBusy, lockStatus.getStatus());
}
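
With the lockSessionID parameter removed, tryLockWithLocalWriteConcern() generates its own session ID internally, which is why the test above now captures the ID seen by the mock catalog instead of asserting against a pre-chosen value. A simplified stand-alone model of that capture pattern (stand-in types, not the MongoDB mock framework):

// The session ID is created inside the lock call and handed to the catalog
// callback, mirroring the restored signature that no longer accepts one.
#include <functional>
#include <iostream>
#include <random>
#include <string>

using OID = std::string;  // stand-in for mongo::OID

// Stand-in for OID::gen(): produce a fresh pseudo-random ID per call.
OID generateSessionId() {
    static std::mt19937_64 rng{std::random_device{}()};
    return "oid-" + std::to_string(rng());
}

// Stand-in for the lock manager method; grabLock models the mock catalog's
// expectGrabLock callback.
void tryLockWithLocalWriteConcern(const std::function<void(const OID&)>& grabLock) {
    grabLock(generateSessionId());
}

int main() {
    OID capturedSessionId;  // the test captures the ID instead of asserting equality
    tryLockWithLocalWriteConcern([&](const OID& id) { capturedSessionId = id; });
    std::cout << "catalog saw session " << capturedSessionId << '\n';
}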