summaryrefslogtreecommitdiff
path: root/src/mongo/s/catalog/replset_dist_lock_manager.cpp
diff options
context:
space:
mode:
authorMaria van Keulen <maria@mongodb.com>2017-03-07 12:00:08 -0500
committerMaria van Keulen <maria@mongodb.com>2017-03-07 12:00:08 -0500
commit589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79 (patch)
treec7a090ffdd56a91ae677e2492c61b820af44f964 /src/mongo/s/catalog/replset_dist_lock_manager.cpp
parent3cba97198638df3750e3b455e2ad57af7ee536ae (diff)
downloadmongo-589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79.tar.gz
SERVER-27938 Rename all OperationContext variables to opCtx
This commit is an automated rename of all whole word instances of txn, _txn, and txnPtr to opCtx, _opCtx, and opCtxPtr, respectively in all .cpp and .h files in src/mongo.
Diffstat (limited to 'src/mongo/s/catalog/replset_dist_lock_manager.cpp')
-rw-r--r--src/mongo/s/catalog/replset_dist_lock_manager.cpp52
1 files changed, 26 insertions, 26 deletions
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
index eff613f66b6..98fbcf94ceb 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
@@ -89,7 +89,7 @@ void ReplSetDistLockManager::startUp() {
}
}
-void ReplSetDistLockManager::shutDown(OperationContext* txn) {
+void ReplSetDistLockManager::shutDown(OperationContext* opCtx) {
{
stdx::lock_guard<stdx::mutex> lk(_mutex);
_isShutDown = true;
@@ -103,7 +103,7 @@ void ReplSetDistLockManager::shutDown(OperationContext* txn) {
_execThread.reset();
}
- auto status = _catalog->stopPing(txn, _processID);
+ auto status = _catalog->stopPing(opCtx, _processID);
if (!status.isOK()) {
warning() << "error encountered while cleaning up distributed ping entry for " << _processID
<< causedBy(redact(status));
@@ -128,8 +128,8 @@ void ReplSetDistLockManager::doTask() {
while (!isShutDown()) {
{
- auto txn = cc().makeOperationContext();
- auto pingStatus = _catalog->ping(txn.get(), _processID, Date_t::now());
+ auto opCtx = cc().makeOperationContext();
+ auto pingStatus = _catalog->ping(opCtx.get(), _processID, Date_t::now());
if (!pingStatus.isOK() && pingStatus != ErrorCodes::NotMaster) {
warning() << "pinging failed for distributed lock pinger" << causedBy(pingStatus);
@@ -154,10 +154,10 @@ void ReplSetDistLockManager::doTask() {
"status unlock not initialized!");
if (toUnlock.second) {
// A non-empty _id (name) field was provided, unlock by ts (sessionId) and _id.
- unlockStatus = _catalog->unlock(txn.get(), toUnlock.first, *toUnlock.second);
+ unlockStatus = _catalog->unlock(opCtx.get(), toUnlock.first, *toUnlock.second);
nameMessage = " and " + LocksType::name() + ": " + *toUnlock.second;
} else {
- unlockStatus = _catalog->unlock(txn.get(), toUnlock.first);
+ unlockStatus = _catalog->unlock(opCtx.get(), toUnlock.first);
}
if (!unlockStatus.isOK()) {
@@ -180,11 +180,11 @@ void ReplSetDistLockManager::doTask() {
}
}
-StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* txn,
+StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* opCtx,
LocksType lockDoc,
const Milliseconds& lockExpiration) {
const auto& processID = lockDoc.getProcess();
- auto pingStatus = _catalog->getPing(txn, processID);
+ auto pingStatus = _catalog->getPing(opCtx, processID);
Date_t pingValue;
if (pingStatus.isOK()) {
@@ -202,7 +202,7 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* txn,
} // else use default pingValue if ping document does not exist.
Timer timer(_serviceContext->getTickSource());
- auto serverInfoStatus = _catalog->getServerInfo(txn);
+ auto serverInfoStatus = _catalog->getServerInfo(opCtx);
if (!serverInfoStatus.isOK()) {
if (serverInfoStatus.getStatus() == ErrorCodes::NotMaster) {
return false;
@@ -277,7 +277,7 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* txn,
return false;
}
-StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationContext* txn,
+StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID,
@@ -290,7 +290,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
// independent write operations.
int networkErrorRetries = 0;
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
// Distributed lock acquisition works by trying to update the state of the lock to 'taken'. If
// the lock is currently taken, we will back off and try the acquisition again, repeating this
@@ -312,7 +312,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
<< " with lockSessionID: " << lockSessionID << ", why: " << whyMessage.toString();
auto lockResult = _catalog->grabLock(
- txn, name, lockSessionID, who, _processID, Date_t::now(), whyMessage.toString());
+ opCtx, name, lockSessionID, who, _processID, Date_t::now(), whyMessage.toString());
auto status = lockResult.getStatus();
@@ -333,7 +333,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
networkErrorRetries++;
- status = _catalog->unlock(txn, lockSessionID, name);
+ status = _catalog->unlock(opCtx, lockSessionID, name);
if (status.isOK()) {
// We certainly do not own the lock, so we can retry
continue;
@@ -355,7 +355,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
}
// Get info from current lock and check if we can overtake it.
- auto getLockStatusResult = _catalog->getLockByName(txn, name);
+ auto getLockStatusResult = _catalog->getLockByName(opCtx, name);
const auto& getLockStatus = getLockStatusResult.getStatus();
if (!getLockStatusResult.isOK() && getLockStatus != ErrorCodes::LockNotFound) {
@@ -366,14 +366,14 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
// found, use the normal grab lock path to acquire it.
if (getLockStatusResult.isOK()) {
auto currentLock = getLockStatusResult.getValue();
- auto isLockExpiredResult = isLockExpired(txn, currentLock, lockExpiration);
+ auto isLockExpiredResult = isLockExpired(opCtx, currentLock, lockExpiration);
if (!isLockExpiredResult.isOK()) {
return isLockExpiredResult.getStatus();
}
if (isLockExpiredResult.getValue() || (lockSessionID == currentLock.getLockID())) {
- auto overtakeResult = _catalog->overtakeLock(txn,
+ auto overtakeResult = _catalog->overtakeLock(opCtx,
name,
lockSessionID,
currentLock.getLockID(),
@@ -429,7 +429,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
}
StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
- OperationContext* txn, StringData name, StringData whyMessage, const OID& lockSessionID) {
+ OperationContext* opCtx, StringData name, StringData whyMessage, const OID& lockSessionID) {
const string who = str::stream() << _processID << ":" << getThreadName();
LOG(1) << "trying to acquire new distributed lock for " << name
@@ -438,7 +438,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
<< " ms, process : " << _processID << " )"
<< " with lockSessionID: " << lockSessionID << ", why: " << whyMessage.toString();
- auto lockStatus = _catalog->grabLock(txn,
+ auto lockStatus = _catalog->grabLock(opCtx,
name,
lockSessionID,
who,
@@ -462,8 +462,8 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
return lockStatus.getStatus();
}
-void ReplSetDistLockManager::unlock(OperationContext* txn, const DistLockHandle& lockSessionID) {
- auto unlockStatus = _catalog->unlock(txn, lockSessionID);
+void ReplSetDistLockManager::unlock(OperationContext* opCtx, const DistLockHandle& lockSessionID) {
+ auto unlockStatus = _catalog->unlock(opCtx, lockSessionID);
if (!unlockStatus.isOK()) {
queueUnlock(lockSessionID, boost::none);
@@ -473,10 +473,10 @@ void ReplSetDistLockManager::unlock(OperationContext* txn, const DistLockHandle&
}
}
-void ReplSetDistLockManager::unlock(OperationContext* txn,
+void ReplSetDistLockManager::unlock(OperationContext* opCtx,
const DistLockHandle& lockSessionID,
StringData name) {
- auto unlockStatus = _catalog->unlock(txn, lockSessionID, name);
+ auto unlockStatus = _catalog->unlock(opCtx, lockSessionID, name);
if (!unlockStatus.isOK()) {
queueUnlock(lockSessionID, name.toString());
@@ -486,17 +486,17 @@ void ReplSetDistLockManager::unlock(OperationContext* txn,
}
}
-void ReplSetDistLockManager::unlockAll(OperationContext* txn, const std::string& processID) {
- Status status = _catalog->unlockAll(txn, processID);
+void ReplSetDistLockManager::unlockAll(OperationContext* opCtx, const std::string& processID) {
+ Status status = _catalog->unlockAll(opCtx, processID);
if (!status.isOK()) {
warning() << "Error while trying to unlock existing distributed locks"
<< causedBy(redact(status));
}
}
-Status ReplSetDistLockManager::checkStatus(OperationContext* txn,
+Status ReplSetDistLockManager::checkStatus(OperationContext* opCtx,
const DistLockHandle& lockHandle) {
- return _catalog->getLockByTS(txn, lockHandle).getStatus();
+ return _catalog->getLockByTS(opCtx, lockHandle).getStatus();
}
void ReplSetDistLockManager::queueUnlock(const DistLockHandle& lockSessionID,