author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2014-05-30 09:56:36 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2014-05-30 11:44:18 -0400
commit     4edbe14669b7804180d8b58549e257ceb679bb1d (patch)
tree       a72389bb84137c11755fd4f55d7218f190b0456c /src
parent     bee249ac8907cc9de6b19ba87c3fcb074d84b1a3 (diff)
download   mongo-4edbe14669b7804180d8b58549e257ceb679bb1d.tar.gz
SERVER-13961 Pass LockState to DBWrite and DBRead directly
This is part of the changes to make LockState part of OperationContext, rather than retrieving it from TLS.
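The shape of the change can be read directly off the call sites below: lock guards and contexts such as Lock::DBRead and Client::ReadContext/WriteContext now receive the LockState through the caller's OperationContext (a leading OperationContext* txn parameter, then txn->lockState()) instead of fetching it from thread-local storage. The following is a minimal, self-contained sketch of that ownership shape; the class names mirror the patch but are simplified stand-ins, not the real MongoDB types.

    // Toy model of the pattern this patch moves toward: the caller owns an
    // OperationContext whose LockState is handed to lock guards explicitly,
    // instead of the guard looking it up from thread-local storage.
    #include <iostream>
    #include <string>

    class LockState {
    public:
        void lockShared(const std::string& ns)   { std::cout << "R-lock "   << ns << "\n"; }
        void unlockShared(const std::string& ns) { std::cout << "R-unlock " << ns << "\n"; }
    };

    class OperationContext {
    public:
        LockState* lockState() { return &_lockState; }
    private:
        LockState _lockState;   // owned per operation, no TLS lookup needed
    };

    // Analogue of Lock::DBRead after the change: the LockState arrives through
    // the constructor rather than from a thread-local Client object.
    class DBRead {
    public:
        DBRead(LockState* lockState, const std::string& ns)
            : _lockState(lockState), _ns(ns) { _lockState->lockShared(_ns); }
        ~DBRead() { _lockState->unlockShared(_ns); }
    private:
        LockState* _lockState;
        std::string _ns;
    };

    int main() {
        OperationContext txn;                                 // each operation carries its own lock state
        DBRead lock(txn.lockState(), "admin.system.users");   // explicit, like Lock::DBRead(txn->lockState(), ns)
        return 0;
    }

In the patch itself the same shift appears as signatures gaining a leading OperationContext* txn parameter, for example Client::WriteContext wctx(&txn, systemUsers) and Lock::DBRead lock(txn->lockState(), ns).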
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/auth/auth_index_d.cpp | 7
-rw-r--r--  src/mongo/db/auth/auth_index_d.h | 2
-rw-r--r--  src/mongo/db/auth/authorization_manager.cpp | 36
-rw-r--r--  src/mongo/db/auth/authorization_manager.h | 18
-rw-r--r--  src/mongo/db/auth/authorization_manager_global.cpp | 8
-rw-r--r--  src/mongo/db/auth/authorization_manager_test.cpp | 7
-rw-r--r--  src/mongo/db/auth/authorization_session.cpp | 38
-rw-r--r--  src/mongo/db/auth/authorization_session.h | 9
-rw-r--r--  src/mongo/db/auth/authorization_session_test.cpp | 35
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state.cpp | 3
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state.h | 12
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_d.cpp | 14
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_d.h | 6
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_local.cpp | 11
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_local.h | 9
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_mock.cpp | 18
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_mock.h | 9
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_s.cpp | 8
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_s.h | 8
-rw-r--r--  src/mongo/db/auth/authz_session_external_state.h | 3
-rw-r--r--  src/mongo/db/auth/authz_session_external_state_d.cpp | 4
-rw-r--r--  src/mongo/db/auth/authz_session_external_state_d.h | 2
-rw-r--r--  src/mongo/db/auth/authz_session_external_state_mock.h | 2
-rw-r--r--  src/mongo/db/auth/authz_session_external_state_s.cpp | 4
-rw-r--r--  src/mongo/db/auth/authz_session_external_state_s.h | 2
-rw-r--r--  src/mongo/db/auth/authz_session_external_state_server_common.cpp | 4
-rw-r--r--  src/mongo/db/auth/authz_session_external_state_server_common.h | 2
-rw-r--r--  src/mongo/db/catalog/collection_cursor_cache.cpp | 34
-rw-r--r--  src/mongo/db/catalog/collection_cursor_cache.h | 8
-rw-r--r--  src/mongo/db/catalog/database_holder.cpp | 5
-rw-r--r--  src/mongo/db/catalog/database_holder.h | 5
-rw-r--r--  src/mongo/db/client.cpp | 33
-rw-r--r--  src/mongo/db/client.h | 15
-rw-r--r--  src/mongo/db/clientcursor.cpp | 7
-rw-r--r--  src/mongo/db/cloner.cpp | 2
-rw-r--r--  src/mongo/db/commands/apply_ops.cpp | 2
-rw-r--r--  src/mongo/db/commands/auth_schema_upgrade_d.cpp | 2
-rw-r--r--  src/mongo/db/commands/authentication_commands.cpp | 21
-rw-r--r--  src/mongo/db/commands/authentication_commands.h | 9
-rw-r--r--  src/mongo/db/commands/clone.cpp | 2
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp | 2
-rw-r--r--  src/mongo/db/commands/compact.cpp | 2
-rw-r--r--  src/mongo/db/commands/copydb.cpp | 4
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp | 10
-rw-r--r--  src/mongo/db/commands/dbhash.cpp | 2
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 2
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp | 4
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp | 6
-rw-r--r--  src/mongo/db/commands/geonear.cpp | 2
-rw-r--r--  src/mongo/db/commands/group.cpp | 2
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp | 28
-rw-r--r--  src/mongo/db/commands/index_filter_commands.h | 22
-rw-r--r--  src/mongo/db/commands/mr.cpp | 28
-rw-r--r--  src/mongo/db/commands/parallel_collection_scan.cpp | 2
-rw-r--r--  src/mongo/db/commands/parameters.cpp | 12
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 2
-rw-r--r--  src/mongo/db/commands/plan_cache_commands.cpp | 23
-rw-r--r--  src/mongo/db/commands/plan_cache_commands.h | 21
-rw-r--r--  src/mongo/db/commands/test_commands.cpp | 6
-rw-r--r--  src/mongo/db/commands/touch.cpp | 2
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp | 59
-rw-r--r--  src/mongo/db/commands/validate.cpp | 2
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp | 6
-rw-r--r--  src/mongo/db/d_concurrency.cpp | 4
-rw-r--r--  src/mongo/db/d_concurrency.h | 6
-rw-r--r--  src/mongo/db/db.cpp | 15
-rw-r--r--  src/mongo/db/dbcommands.cpp | 30
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 21
-rw-r--r--  src/mongo/db/dbhelpers.h | 3
-rw-r--r--  src/mongo/db/dbwebserver.cpp | 20
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp | 2
-rw-r--r--  src/mongo/db/fts/fts_command.cpp | 2
-rw-r--r--  src/mongo/db/fts/fts_command.h | 5
-rw-r--r--  src/mongo/db/fts/fts_command_mongod.cpp | 5
-rw-r--r--  src/mongo/db/fts/fts_command_mongos.cpp | 3
-rw-r--r--  src/mongo/db/geo/haystack.cpp | 2
-rw-r--r--  src/mongo/db/index_builder.cpp | 5
-rw-r--r--  src/mongo/db/index_rebuilder.cpp | 9
-rw-r--r--  src/mongo/db/instance.cpp | 26
-rw-r--r--  src/mongo/db/instance.h | 2
-rw-r--r--  src/mongo/db/introspect.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_cursor.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp | 2
-rw-r--r--  src/mongo/db/prefetch.cpp | 15
-rw-r--r--  src/mongo/db/prefetch.h | 9
-rw-r--r--  src/mongo/db/query/new_find.cpp | 4
-rw-r--r--  src/mongo/db/range_deleter.cpp | 15
-rw-r--r--  src/mongo/db/range_deleter.h | 4
-rw-r--r--  src/mongo/db/range_deleter_db_env.cpp | 5
-rw-r--r--  src/mongo/db/range_deleter_db_env.h | 4
-rw-r--r--  src/mongo/db/range_deleter_mock_env.cpp | 3
-rw-r--r--  src/mongo/db/range_deleter_mock_env.h | 2
-rw-r--r--  src/mongo/db/repair_database.cpp | 2
-rw-r--r--  src/mongo/db/repl/heartbeat.cpp | 2
-rw-r--r--  src/mongo/db/repl/master_slave.cpp | 17
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 6
-rw-r--r--  src/mongo/db/repl/repl_set_impl.cpp | 19
-rw-r--r--  src/mongo/db/repl/repl_settings.cpp | 11
-rw-r--r--  src/mongo/db/repl/rs.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs_config.cpp | 4
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp | 12
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 3
-rw-r--r--  src/mongo/db/repl/sync_source_feedback.cpp | 3
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp | 13
-rw-r--r--  src/mongo/db/restapi.cpp | 4
-rw-r--r--  src/mongo/db/restapi.h | 2
-rw-r--r--  src/mongo/db/server_parameters.h | 5
-rw-r--r--  src/mongo/db/server_parameters_test.cpp | 6
-rw-r--r--  src/mongo/db/ttl.cpp | 8
-rw-r--r--  src/mongo/dbtests/clienttests.cpp | 13
-rw-r--r--  src/mongo/dbtests/counttests.cpp | 7
-rw-r--r--  src/mongo/dbtests/dbhelper_tests.cpp | 24
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp | 2
-rw-r--r--  src/mongo/dbtests/indexcatalogtests.cpp | 9
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp | 5
-rw-r--r--  src/mongo/dbtests/matchertests.cpp | 5
-rw-r--r--  src/mongo/dbtests/pdfiletests.cpp | 3
-rw-r--r--  src/mongo/dbtests/plan_ranking.cpp | 17
-rw-r--r--  src/mongo/dbtests/query_multi_plan_runner.cpp | 4
-rw-r--r--  src/mongo/dbtests/query_single_solution_runner.cpp | 32
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp | 71
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp | 20
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp | 37
-rw-r--r--  src/mongo/dbtests/query_stage_distinct.cpp | 19
-rw-r--r--  src/mongo/dbtests/query_stage_fetch.cpp | 6
-rw-r--r--  src/mongo/dbtests/query_stage_keep.cpp | 3
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp | 38
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp | 18
-rw-r--r--  src/mongo/dbtests/query_stage_tests.cpp | 20
-rw-r--r--  src/mongo/dbtests/querytests.cpp | 70
-rw-r--r--  src/mongo/dbtests/replsettests.cpp | 15
-rw-r--r--  src/mongo/dbtests/runner_registry.cpp | 6
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp | 55
-rw-r--r--  src/mongo/dbtests/updatetests.cpp | 26
-rw-r--r--  src/mongo/s/commands/auth_schema_upgrade_s.cpp | 2
-rw-r--r--  src/mongo/s/d_merge.cpp | 4
-rw-r--r--  src/mongo/s/d_migrate.cpp | 51
-rw-r--r--  src/mongo/s/d_split.cpp | 8
-rw-r--r--  src/mongo/s/d_state.cpp | 7
-rw-r--r--  src/mongo/s/request.cpp | 5
-rw-r--r--  src/mongo/s/request.h | 4
-rw-r--r--  src/mongo/tools/dump.cpp | 11
-rw-r--r--  src/mongo/util/admin_access.h | 6
-rw-r--r--  src/mongo/util/net/ssl_options.cpp | 2
144 files changed, 948 insertions, 700 deletions
diff --git a/src/mongo/db/auth/auth_index_d.cpp b/src/mongo/db/auth/auth_index_d.cpp
index 2b510698b60..b8ed201226e 100644
--- a/src/mongo/db/auth/auth_index_d.cpp
+++ b/src/mongo/db/auth/auth_index_d.cpp
@@ -72,9 +72,10 @@ namespace {
} // namespace
- void configureSystemIndexes(const StringData& dbname) {
+ void configureSystemIndexes(OperationContext* txn, const StringData& dbname) {
int authzVersion;
- Status status = getGlobalAuthorizationManager()->getAuthorizationVersion(&authzVersion);
+ Status status = getGlobalAuthorizationManager()->getAuthorizationVersion(
+ txn, &authzVersion);
if (!status.isOK()) {
return;
}
@@ -83,8 +84,8 @@ namespace {
NamespaceString systemUsers(dbname, "system.users");
// Make sure the old unique index from v2.4 on system.users doesn't exist.
- Client::WriteContext wctx(systemUsers);
OperationContextImpl txn;
+ Client::WriteContext wctx(&txn, systemUsers);
Collection* collection = wctx.ctx().db()->getCollection(NamespaceString(systemUsers));
if (!collection) {
return;
diff --git a/src/mongo/db/auth/auth_index_d.h b/src/mongo/db/auth/auth_index_d.h
index 3d3cf711e40..964eec9fbb0 100644
--- a/src/mongo/db/auth/auth_index_d.h
+++ b/src/mongo/db/auth/auth_index_d.h
@@ -50,7 +50,7 @@ namespace authindex {
* It is appropriate to call this function on new or existing databases, though it is
* primarily intended for use on existing databases.
*/
- void configureSystemIndexes(const StringData& dbname);
+ void configureSystemIndexes(OperationContext* txn, const StringData& dbname);
} // namespace authindex
} // namespace mongo
diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp
index c3466c922eb..cc60b795c0a 100644
--- a/src/mongo/db/auth/authorization_manager.cpp
+++ b/src/mongo/db/auth/authorization_manager.cpp
@@ -258,14 +258,14 @@ namespace mongo {
}
}
- Status AuthorizationManager::getAuthorizationVersion(int* version) {
+ Status AuthorizationManager::getAuthorizationVersion(OperationContext* txn, int* version) {
CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
int newVersion = _version;
if (schemaVersionInvalid == newVersion) {
while (guard.otherUpdateInFetchPhase())
guard.wait();
guard.beginFetchPhase();
- Status status = _externalState->getStoredAuthorizationVersion(&newVersion);
+ Status status = _externalState->getStoredAuthorizationVersion(txn, &newVersion);
guard.endFetchPhase();
if (!status.isOK()) {
warning() << "Problem fetching the stored schema version of authorization data: "
@@ -295,8 +295,8 @@ namespace mongo {
return _authEnabled;
}
- bool AuthorizationManager::hasAnyPrivilegeDocuments() const {
- return _externalState->hasAnyPrivilegeDocuments();
+ bool AuthorizationManager::hasAnyPrivilegeDocuments(OperationContext* txn) const {
+ return _externalState->hasAnyPrivilegeDocuments(txn);
}
Status AuthorizationManager::writeAuthSchemaVersionIfNeeded() {
@@ -493,8 +493,10 @@ namespace mongo {
return Status::OK();
}
- Status AuthorizationManager::getUserDescription(const UserName& userName, BSONObj* result) {
- return _externalState->getUserDescription(userName, result);
+ Status AuthorizationManager::getUserDescription(OperationContext* txn,
+ const UserName& userName,
+ BSONObj* result) {
+ return _externalState->getUserDescription(txn, userName, result);
}
Status AuthorizationManager::getRoleDescription(const RoleName& roleName,
@@ -513,7 +515,8 @@ namespace mongo {
result);
}
- Status AuthorizationManager::acquireUser(const UserName& userName, User** acquiredUser) {
+ Status AuthorizationManager::acquireUser(
+ OperationContext* txn, const UserName& userName, User** acquiredUser) {
if (userName == internalSecurity.user->getName()) {
*acquiredUser = internalSecurity.user;
return Status::OK();
@@ -549,7 +552,7 @@ namespace mongo {
Status status = Status::OK();
for (int i = 0; i < maxAcquireRetries; ++i) {
if (authzVersion == schemaVersionInvalid) {
- Status status = _externalState->getStoredAuthorizationVersion(&authzVersion);
+ Status status = _externalState->getStoredAuthorizationVersion(txn, &authzVersion);
if (!status.isOK())
return status;
}
@@ -562,7 +565,7 @@ namespace mongo {
break;
case schemaVersion26Final:
case schemaVersion26Upgrade:
- status = _fetchUserV2(userName, &user);
+ status = _fetchUserV2(txn, userName, &user);
break;
case schemaVersion24:
status = Status(ErrorCodes::AuthSchemaIncompatible, mongoutils::str::stream() <<
@@ -600,10 +603,11 @@ namespace mongo {
return Status::OK();
}
- Status AuthorizationManager::_fetchUserV2(const UserName& userName,
+ Status AuthorizationManager::_fetchUserV2(OperationContext* txn,
+ const UserName& userName,
std::auto_ptr<User>* acquiredUser) {
BSONObj userObj;
- Status status = getUserDescription(userName, &userObj);
+ Status status = getUserDescription(txn, userName, &userObj);
if (!status.isOK()) {
return status;
}
@@ -700,9 +704,10 @@ namespace mongo {
return _externalState->releaseAuthzUpdateLock();
}
- Status AuthorizationManager::upgradeSchemaStep(const BSONObj& writeConcern, bool* isDone) {
+ Status AuthorizationManager::upgradeSchemaStep(
+ OperationContext* txn, const BSONObj& writeConcern, bool* isDone) {
int authzVersion;
- Status status = getAuthorizationVersion(&authzVersion);
+ Status status = getAuthorizationVersion(txn, &authzVersion);
if (!status.isOK()) {
return status;
}
@@ -717,7 +722,8 @@ namespace mongo {
}
}
- Status AuthorizationManager::upgradeSchema(int maxSteps, const BSONObj& writeConcern) {
+ Status AuthorizationManager::upgradeSchema(
+ OperationContext* txn, int maxSteps, const BSONObj& writeConcern) {
if (maxSteps < 1) {
return Status(ErrorCodes::BadValue,
@@ -726,7 +732,7 @@ namespace mongo {
invalidateUserCache();
for (int i = 0; i < maxSteps; ++i) {
bool isDone;
- Status status = upgradeSchemaStep(writeConcern, &isDone);
+ Status status = upgradeSchemaStep(txn, writeConcern, &isDone);
invalidateUserCache();
if (!status.isOK() || isDone) {
return status;
diff --git a/src/mongo/db/auth/authorization_manager.h b/src/mongo/db/auth/authorization_manager.h
index be11d634e83..c03e5b3eb66 100644
--- a/src/mongo/db/auth/authorization_manager.h
+++ b/src/mongo/db/auth/authorization_manager.h
@@ -53,6 +53,7 @@ namespace mongo {
class AuthzManagerExternalState;
class UserDocumentParser;
+ class OperationContext;
/**
* Internal secret key info.
@@ -164,7 +165,7 @@ namespace mongo {
* returns a non-OK status. When returning a non-OK status, *version will be set to
* schemaVersionInvalid (0).
*/
- Status getAuthorizationVersion(int* version);
+ Status getAuthorizationVersion(OperationContext* txn, int* version);
/**
* Returns the user cache generation identifier.
@@ -172,7 +173,7 @@ namespace mongo {
OID getCacheGeneration();
// Returns true if there exists at least one privilege document in the system.
- bool hasAnyPrivilegeDocuments() const;
+ bool hasAnyPrivilegeDocuments(OperationContext* txn) const;
/**
* Updates the auth schema version document to reflect that the system is upgraded to
@@ -281,7 +282,7 @@ namespace mongo {
*
* If the user does not exist, returns ErrorCodes::UserNotFound.
*/
- Status getUserDescription(const UserName& userName, BSONObj* result);
+ Status getUserDescription(OperationContext* txn, const UserName& userName, BSONObj* result);
/**
* Writes into "result" a document describing the named role and returns Status::OK(). The
@@ -324,7 +325,7 @@ namespace mongo {
* The AuthorizationManager retains ownership of the returned User object.
* On non-OK Status return values, acquiredUser will not be modified.
*/
- Status acquireUser(const UserName& userName, User** acquiredUser);
+ Status acquireUser(OperationContext* txn, const UserName& userName, User** acquiredUser);
/**
* Decrements the refcount of the given User object. If the refcount has gone to zero,
@@ -389,7 +390,8 @@ namespace mongo {
* On failure, returns a status other than Status::OK(). In this case, is is typically safe
* to try again.
*/
- Status upgradeSchemaStep(const BSONObj& writeConcern, bool* isDone);
+ Status upgradeSchemaStep(
+ OperationContext* txn, const BSONObj& writeConcern, bool* isDone);
/**
* Performs up to maxSteps steps in the process of upgrading the stored authorization data
@@ -404,7 +406,7 @@ namespace mongo {
* progress performing the upgrade, and the specific code and message in the returned status
* may provide additional information.
*/
- Status upgradeSchema(int maxSteps, const BSONObj& writeConcern);
+ Status upgradeSchema(OperationContext* txn, int maxSteps, const BSONObj& writeConcern);
/**
* Hook called by replication code to let the AuthorizationManager observe changes
@@ -448,7 +450,9 @@ namespace mongo {
* Fetches user information from a v2-schema user document for the named user,
* and stores a pointer to a new user object into *acquiredUser on success.
*/
- Status _fetchUserV2(const UserName& userName, std::auto_ptr<User>* acquiredUser);
+ Status _fetchUserV2(OperationContext* txn,
+ const UserName& userName,
+ std::auto_ptr<User>* acquiredUser);
/**
* True if access control enforcement is enabled in this AuthorizationManager.
diff --git a/src/mongo/db/auth/authorization_manager_global.cpp b/src/mongo/db/auth/authorization_manager_global.cpp
index 68e5cd1e7cb..5e6c680cd16 100644
--- a/src/mongo/db/auth/authorization_manager_global.cpp
+++ b/src/mongo/db/auth/authorization_manager_global.cpp
@@ -44,7 +44,7 @@ namespace {
MONGO_DISALLOW_COPYING(AuthzVersionParameter);
public:
AuthzVersionParameter(ServerParameterSet* sps, const std::string& name);
- virtual void append(BSONObjBuilder& b, const std::string& name);
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name);
virtual Status set(const BSONElement& newValueElement);
virtual Status setFromString(const std::string& str);
};
@@ -60,9 +60,11 @@ namespace {
AuthzVersionParameter::AuthzVersionParameter(ServerParameterSet* sps, const std::string& name) :
ServerParameter(sps, name, false, false) {}
- void AuthzVersionParameter::append(BSONObjBuilder& b, const std::string& name) {
+ void AuthzVersionParameter::append(
+ OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
int authzVersion;
- uassertStatusOK(getGlobalAuthorizationManager()->getAuthorizationVersion(&authzVersion));
+ uassertStatusOK(
+ getGlobalAuthorizationManager()->getAuthorizationVersion(txn, &authzVersion));
b.append(name, authzVersion);
}
diff --git a/src/mongo/db/auth/authorization_manager_test.cpp b/src/mongo/db/auth/authorization_manager_test.cpp
index 0e4c71447bb..e230dc1e405 100644
--- a/src/mongo/db/auth/authorization_manager_test.cpp
+++ b/src/mongo/db/auth/authorization_manager_test.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context_noop.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/map_util.h"
@@ -186,8 +187,10 @@ namespace {
"roles" << BSON_ARRAY(BSON("role" << "clusterAdmin" << "db" << "admin"))),
BSONObj()));
+ OperationContextNoop txn;
+
User* v2read;
- ASSERT_OK(authzManager->acquireUser(UserName("v2read", "test"), &v2read));
+ ASSERT_OK(authzManager->acquireUser(&txn, UserName("v2read", "test"), &v2read));
ASSERT_EQUALS(UserName("v2read", "test"), v2read->getName());
ASSERT(v2read->isValid());
ASSERT_EQUALS(1U, v2read->getRefCount());
@@ -198,7 +201,7 @@ namespace {
authzManager->releaseUser(v2read);
User* v2cluster;
- ASSERT_OK(authzManager->acquireUser(UserName("v2cluster", "admin"), &v2cluster));
+ ASSERT_OK(authzManager->acquireUser(&txn, UserName("v2cluster", "admin"), &v2cluster));
ASSERT_EQUALS(UserName("v2cluster", "admin"), v2cluster->getName());
ASSERT(v2cluster->isValid());
ASSERT_EQUALS(1U, v2cluster->getRefCount());
diff --git a/src/mongo/db/auth/authorization_session.cpp b/src/mongo/db/auth/authorization_session.cpp
index 2946cbfd352..f731266721c 100644
--- a/src/mongo/db/auth/authorization_session.cpp
+++ b/src/mongo/db/auth/authorization_session.cpp
@@ -67,14 +67,15 @@ namespace {
return _externalState->getAuthorizationManager();
}
- void AuthorizationSession::startRequest() {
- _externalState->startRequest();
- _refreshUserInfoAsNeeded();
+ void AuthorizationSession::startRequest(OperationContext* txn) {
+ _externalState->startRequest(txn);
+ _refreshUserInfoAsNeeded(txn);
}
- Status AuthorizationSession::addAndAuthorizeUser(const UserName& userName) {
+ Status AuthorizationSession::addAndAuthorizeUser(
+ OperationContext* txn, const UserName& userName) {
User* user;
- Status status = getAuthorizationManager().acquireUser(userName, &user);
+ Status status = getAuthorizationManager().acquireUser(txn, userName, &user);
if (!status.isOK()) {
return status;
}
@@ -251,7 +252,8 @@ namespace {
<< resource.databaseToMatch() << "database");
}
} else if (!isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName("admin"), ActionType::grantRole)) {
+ ResourcePattern::forDatabaseName("admin"),
+ ActionType::grantRole)) {
return Status(ErrorCodes::Unauthorized,
"To grant privileges affecting multiple databases or the cluster,"
" must be authorized to grant roles from the admin database");
@@ -271,7 +273,8 @@ namespace {
<< resource.databaseToMatch() << "database");
}
} else if (!isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName("admin"), ActionType::revokeRole)) {
+ ResourcePattern::forDatabaseName("admin"),
+ ActionType::revokeRole)) {
return Status(ErrorCodes::Unauthorized,
"To revoke privileges affecting multiple databases or the cluster,"
" must be authorized to revoke roles from the admin database");
@@ -281,14 +284,14 @@ namespace {
bool AuthorizationSession::isAuthorizedToGrantRole(const RoleName& role) {
return isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(role.getDB()),
- ActionType::grantRole);
+ ResourcePattern::forDatabaseName(role.getDB()),
+ ActionType::grantRole);
}
bool AuthorizationSession::isAuthorizedToRevokeRole(const RoleName& role) {
return isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(role.getDB()),
- ActionType::revokeRole);
+ ResourcePattern::forDatabaseName(role.getDB()),
+ ActionType::revokeRole);
}
bool AuthorizationSession::isAuthorizedForPrivilege(const Privilege& privilege) {
@@ -322,12 +325,14 @@ namespace {
bool AuthorizationSession::isAuthorizedForActionsOnNamespace(const NamespaceString& ns,
ActionType action) {
- return isAuthorizedForPrivilege(Privilege(ResourcePattern::forExactNamespace(ns), action));
+ return isAuthorizedForPrivilege(
+ Privilege(ResourcePattern::forExactNamespace(ns), action));
}
bool AuthorizationSession::isAuthorizedForActionsOnNamespace(const NamespaceString& ns,
- const ActionSet& actions) {
- return isAuthorizedForPrivilege(Privilege(ResourcePattern::forExactNamespace(ns), actions));
+ const ActionSet& actions) {
+ return isAuthorizedForPrivilege(
+ Privilege(ResourcePattern::forExactNamespace(ns), actions));
}
static const int resourceSearchListCapacity = 5;
@@ -422,7 +427,7 @@ namespace {
return false;
}
- void AuthorizationSession::_refreshUserInfoAsNeeded() {
+ void AuthorizationSession::_refreshUserInfoAsNeeded(OperationContext* txn) {
AuthorizationManager& authMan = getAuthorizationManager();
UserSet::iterator it = _authenticatedUsers.begin();
while (it != _authenticatedUsers.end()) {
@@ -434,7 +439,7 @@ namespace {
UserName name = user->getName();
User* updatedUser;
- Status status = authMan.acquireUser(name, &updatedUser);
+ Status status = authMan.acquireUser(txn, name, &updatedUser);
switch (status.code()) {
case ErrorCodes::OK: {
// Success! Replace the old User object with the updated one.
@@ -490,7 +495,6 @@ namespace {
for (UserSet::iterator it = _authenticatedUsers.begin();
it != _authenticatedUsers.end(); ++it) {
User* user = *it;
-
for (int i = 0; i < resourceSearchListLength; ++i) {
ActionSet userActions = user->getActionsForResource(resourceSearchList[i]);
unmetRequirements.removeAllActionsFromSet(userActions);
diff --git a/src/mongo/db/auth/authorization_session.h b/src/mongo/db/auth/authorization_session.h
index 1a12a0719e0..f0484e26f1b 100644
--- a/src/mongo/db/auth/authorization_session.h
+++ b/src/mongo/db/auth/authorization_session.h
@@ -71,13 +71,13 @@ namespace mongo {
// Should be called at the beginning of every new request. This performs the checks
// necessary to determine if localhost connections should be given full access.
// TODO: try to eliminate the need for this call.
- void startRequest();
+ void startRequest(OperationContext* txn);
/**
* Adds the User identified by "UserName" to the authorization session, acquiring privileges
* for it in the process.
*/
- Status addAndAuthorizeUser(const UserName& userName);
+ Status addAndAuthorizeUser(OperationContext* txn, const UserName& userName);
// Returns the authenticated user with the given name. Returns NULL
// if no such user is found.
@@ -182,7 +182,8 @@ namespace mongo {
// Utility function for
// isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns), actions).
- bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, const ActionSet& actions);
+ bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns,
+ const ActionSet& actions);
// Replaces the vector of UserNames that a system user is impersonating with a new vector.
// The auditing system adds these to each audit record in the log.
@@ -203,7 +204,7 @@ namespace mongo {
// If any users authenticated on this session are marked as invalid this updates them with
// up-to-date information. May require a read lock on the "admin" db to read the user data.
- void _refreshUserInfoAsNeeded();
+ void _refreshUserInfoAsNeeded(OperationContext* txn);
// Checks if this connection is authorized for the given Privilege, ignoring whether or not
// we should even be doing authorization checks in general. Note: this may acquire a read
diff --git a/src/mongo/db/auth/authorization_session_test.cpp b/src/mongo/db/auth/authorization_session_test.cpp
index f9410440af8..8c25e9b6e3d 100644
--- a/src/mongo/db/auth/authorization_session_test.cpp
+++ b/src/mongo/db/auth/authorization_session_test.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context_noop.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/map_util.h"
@@ -53,7 +54,8 @@ namespace {
void setFindsShouldFail(bool enable) { _findsShouldFail = enable; }
- virtual Status findOne(const NamespaceString& collectionName,
+ virtual Status findOne(OperationContext* txn,
+ const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result) {
if (_findsShouldFail &&
@@ -62,7 +64,7 @@ namespace {
return Status(ErrorCodes::UnknownError,
"findOne on admin.system.users set to fail in mock.");
}
- return AuthzManagerExternalStateMock::findOne(collectionName, query, result);
+ return AuthzManagerExternalStateMock::findOne(txn, collectionName, query, result);
}
private:
@@ -72,6 +74,7 @@ namespace {
class AuthorizationSessionTest : public ::mongo::unittest::Test {
public:
FailureCapableAuthzManagerExternalStateMock* managerState;
+ OperationContextNoop _txn;
AuthzSessionExternalStateMock* sessionState;
scoped_ptr<AuthorizationManager> authzManager;
scoped_ptr<AuthorizationSession> authzSession;
@@ -129,7 +132,7 @@ namespace {
// Check that you can't authorize a user that doesn't exist.
ASSERT_EQUALS(ErrorCodes::UserNotFound,
- authzSession->addAndAuthorizeUser(UserName("spencer", "test")));
+ authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
// Add a user with readWrite and dbAdmin on the test DB
ASSERT_OK(managerState->insertPrivilegeDocument("admin",
@@ -141,7 +144,7 @@ namespace {
BSON("role" << "dbAdmin" <<
"db" << "test"))),
BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(UserName("spencer", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
testFooCollResource, ActionType::insert));
@@ -158,7 +161,7 @@ namespace {
"roles" << BSON_ARRAY(BSON("role" << "readWriteAnyDatabase" <<
"db" << "admin"))),
BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(UserName("admin", "admin")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("admin", "admin")));
ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forExactNamespace(
@@ -203,7 +206,7 @@ namespace {
BSON("role" << "readWrite" <<
"db" << "test"))),
BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(UserName("spencer", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
testFooCollResource, ActionType::insert));
@@ -247,7 +250,7 @@ namespace {
"db" << "admin"))),
BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(UserName("rwany", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("rwany", "test")));
ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
testUsersCollResource, ActionType::insert));
@@ -267,7 +270,7 @@ namespace {
otherProfileCollResource, ActionType::find));
// Logging in as useradminany@test implicitly logs out rwany@test.
- ASSERT_OK(authzSession->addAndAuthorizeUser(UserName("useradminany", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("useradminany", "test")));
ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
testUsersCollResource, ActionType::insert));
ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
@@ -286,7 +289,7 @@ namespace {
otherProfileCollResource, ActionType::find));
// Logging in as rw@test implicitly logs out useradminany@test.
- ASSERT_OK(authzSession->addAndAuthorizeUser(UserName("rw", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("rw", "test")));
ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
testUsersCollResource, ActionType::insert));
@@ -307,7 +310,7 @@ namespace {
// Logging in as useradmin@test implicitly logs out rw@test.
- ASSERT_OK(authzSession->addAndAuthorizeUser(UserName("useradmin", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("useradmin", "test")));
ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
testUsersCollResource, ActionType::insert));
ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
@@ -335,7 +338,7 @@ namespace {
"roles" << BSON_ARRAY(BSON("role" << "readWrite" <<
"db" << "test"))),
BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(UserName("spencer", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
testFooCollResource, ActionType::find));
@@ -362,7 +365,7 @@ namespace {
// Make sure that invalidating the user causes the session to reload its privileges.
authzManager->invalidateUserByName(user->getName());
- authzSession->startRequest(); // Refreshes cached data for invalid users
+ authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
testFooCollResource, ActionType::find));
ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
@@ -379,7 +382,7 @@ namespace {
&ignored);
// Make sure that invalidating the user causes the session to reload its privileges.
authzManager->invalidateUserByName(user->getName());
- authzSession->startRequest(); // Refreshes cached data for invalid users
+ authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
testFooCollResource, ActionType::find));
ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
@@ -396,7 +399,7 @@ namespace {
"roles" << BSON_ARRAY(BSON("role" << "readWrite" <<
"db" << "test"))),
BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(UserName("spencer", "test")));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
testFooCollResource, ActionType::find));
@@ -426,7 +429,7 @@ namespace {
// document lookup to fail, the authz session should continue to use its known out-of-date
// privilege data.
authzManager->invalidateUserByName(user->getName());
- authzSession->startRequest(); // Refreshes cached data for invalid users
+ authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
testFooCollResource, ActionType::find));
ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
@@ -435,7 +438,7 @@ namespace {
// Once we configure document lookup to succeed again, authorization checks should
// observe the new values.
managerState->setFindsShouldFail(false);
- authzSession->startRequest(); // Refreshes cached data for invalid users
+ authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
testFooCollResource, ActionType::find));
ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
diff --git a/src/mongo/db/auth/authz_manager_external_state.cpp b/src/mongo/db/auth/authz_manager_external_state.cpp
index 5a369cade31..4b9b3501a11 100644
--- a/src/mongo/db/auth/authz_manager_external_state.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state.cpp
@@ -40,9 +40,10 @@ namespace mongo {
AuthzManagerExternalState::AuthzManagerExternalState() {}
AuthzManagerExternalState::~AuthzManagerExternalState() {}
- bool AuthzManagerExternalState::hasAnyPrivilegeDocuments() {
+ bool AuthzManagerExternalState::hasAnyPrivilegeDocuments(OperationContext* txn) {
BSONObj userBSONObj;
Status status = findOne(
+ txn,
AuthorizationManager::usersCollectionNamespace,
BSONObj(),
&userBSONObj);
diff --git a/src/mongo/db/auth/authz_manager_external_state.h b/src/mongo/db/auth/authz_manager_external_state.h
index 47226e9735b..397d7cb718f 100644
--- a/src/mongo/db/auth/authz_manager_external_state.h
+++ b/src/mongo/db/auth/authz_manager_external_state.h
@@ -41,6 +41,8 @@
namespace mongo {
+ class OperationContext;
+
/**
* Public interface for a class that encapsulates all the information related to system
* state not stored in AuthorizationManager. This is primarily to make AuthorizationManager
@@ -64,7 +66,7 @@ namespace mongo {
* Retrieves the schema version of the persistent data describing users and roles.
* Will leave *outVersion unmodified on non-OK status return values.
*/
- virtual Status getStoredAuthorizationVersion(int* outVersion) = 0;
+ virtual Status getStoredAuthorizationVersion(OperationContext* txn, int* outVersion) = 0;
/**
* Writes into "result" a document describing the named user and returns Status::OK(). The
@@ -76,7 +78,8 @@ namespace mongo {
*
* If the user does not exist, returns ErrorCodes::UserNotFound.
*/
- virtual Status getUserDescription(const UserName& userName, BSONObj* result) = 0;
+ virtual Status getUserDescription(
+ OperationContext* txn, const UserName& userName, BSONObj* result) = 0;
/**
* Writes into "result" a document describing the named role and returns Status::OK(). The
@@ -114,7 +117,7 @@ namespace mongo {
/**
* Returns true if there exists at least one privilege document in the system.
*/
- bool hasAnyPrivilegeDocuments();
+ bool hasAnyPrivilegeDocuments(OperationContext* txn);
/**
* Creates the given user object in the given database.
@@ -153,7 +156,8 @@ namespace mongo {
* Returns Status::OK() on success. If no match is found, returns
* ErrorCodes::NoMatchingDocument. Other errors returned as appropriate.
*/
- virtual Status findOne(const NamespaceString& collectionName,
+ virtual Status findOne(OperationContext* txn,
+ const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result) = 0;
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp
index 2c009314f48..852f6d96b71 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp
@@ -47,12 +47,13 @@ namespace mongo {
AuthzManagerExternalStateMongod::AuthzManagerExternalStateMongod() {}
AuthzManagerExternalStateMongod::~AuthzManagerExternalStateMongod() {}
- Status AuthzManagerExternalStateMongod::_getUserDocument(const UserName& userName,
- BSONObj* userDoc) {
+ Status AuthzManagerExternalStateMongod::_getUserDocument(
+ OperationContext* txn, const UserName& userName, BSONObj* userDoc) {
+
+ Client::ReadContext ctx(txn, "admin");
- Client::ReadContext ctx("admin");
int authzVersion;
- Status status = getStoredAuthorizationVersion(&authzVersion);
+ Status status = getStoredAuthorizationVersion(txn, &authzVersion);
if (!status.isOK())
return status;
@@ -67,6 +68,7 @@ namespace mongo {
}
status = findOne(
+ txn,
(authzVersion == AuthorizationManager::schemaVersion26Final ?
AuthorizationManager::usersCollectionNamespace :
AuthorizationManager::usersAltCollectionNamespace),
@@ -102,11 +104,13 @@ namespace mongo {
}
Status AuthzManagerExternalStateMongod::findOne(
+ OperationContext* txn,
const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result) {
- Client::ReadContext ctx(collectionName.ns());
+ Client::ReadContext ctx(txn, collectionName.ns());
+
BSONObj found;
if (Helpers::findOne(ctx.ctx().db()->getCollection(collectionName),
query,
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.h b/src/mongo/db/auth/authz_manager_external_state_d.h
index 380d4eb6bef..213fcc56152 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.h
+++ b/src/mongo/db/auth/authz_manager_external_state_d.h
@@ -52,7 +52,8 @@ namespace mongo {
virtual Status getAllDatabaseNames(std::vector<std::string>* dbnames);
- virtual Status findOne(const NamespaceString& collectionName,
+ virtual Status findOne(OperationContext* txn,
+ const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result);
virtual Status query(const NamespaceString& collectionName,
@@ -83,7 +84,8 @@ namespace mongo {
virtual void releaseAuthzUpdateLock();
private:
- virtual Status _getUserDocument(const UserName& userName, BSONObj* userDoc);
+ virtual Status _getUserDocument(
+ OperationContext* txn, const UserName& userName, BSONObj* userDoc);
boost::timed_mutex _authzDataUpdateLock;
};
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index 099e5638020..926158bc740 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -59,9 +59,11 @@ namespace mongo {
return Status::OK();
}
- Status AuthzManagerExternalStateLocal::getStoredAuthorizationVersion(int* outVersion) {
+ Status AuthzManagerExternalStateLocal::getStoredAuthorizationVersion(
+ OperationContext* txn, int* outVersion) {
BSONObj versionDoc;
- Status status = findOne(AuthorizationManager::versionCollectionNamespace,
+ Status status = findOne(txn,
+ AuthorizationManager::versionCollectionNamespace,
AuthorizationManager::versionDocumentQuery,
&versionDoc);
if (status.isOK()) {
@@ -85,7 +87,7 @@ namespace mongo {
}
}
else if (status == ErrorCodes::NoMatchingDocument) {
- if (hasAnyPrivilegeDocuments()) {
+ if (hasAnyPrivilegeDocuments(txn)) {
*outVersion = AuthorizationManager::schemaVersion24;
}
else {
@@ -136,11 +138,12 @@ namespace {
} // namespace
Status AuthzManagerExternalStateLocal::getUserDescription(
+ OperationContext* txn,
const UserName& userName,
BSONObj* result) {
BSONObj userDoc;
- Status status = _getUserDocument(userName, &userDoc);
+ Status status = _getUserDocument(txn, userName, &userDoc);
if (!status.isOK())
return status;
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.h b/src/mongo/db/auth/authz_manager_external_state_local.h
index 2c49c4b7cc7..ba48862e277 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.h
+++ b/src/mongo/db/auth/authz_manager_external_state_local.h
@@ -53,8 +53,9 @@ namespace mongo {
virtual Status initialize();
- virtual Status getStoredAuthorizationVersion(int* outVersion);
- virtual Status getUserDescription(const UserName& userName, BSONObj* result);
+ virtual Status getStoredAuthorizationVersion(OperationContext* txn, int* outVersion);
+ virtual Status getUserDescription(
+ OperationContext* txn, const UserName& userName, BSONObj* result);
virtual Status getRoleDescription(const RoleName& roleName,
bool showPrivileges,
BSONObj* result);
@@ -88,7 +89,9 @@ namespace mongo {
/**
* Fetches the user document for "userName" from local storage, and stores it into "result".
*/
- virtual Status _getUserDocument(const UserName& userName, BSONObj* result) = 0;
+ virtual Status _getUserDocument(OperationContext* txn,
+ const UserName& userName,
+ BSONObj* result) = 0;
Status _getRoleDescription_inlock(const RoleName& roleName,
bool showPrivileges,
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index 5ee19c863a5..809d4ecb747 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -97,10 +97,11 @@ namespace {
BSONObj()));
}
- Status AuthzManagerExternalStateMock::_getUserDocument(const UserName& userName,
+ Status AuthzManagerExternalStateMock::_getUserDocument(OperationContext* txn,
+ const UserName& userName,
BSONObj* userDoc) {
int authzVersion;
- Status status = getStoredAuthorizationVersion(&authzVersion);
+ Status status = getStoredAuthorizationVersion(txn, &authzVersion);
if (!status.isOK())
return status;
@@ -115,6 +116,7 @@ namespace {
}
status = findOne(
+ txn,
(authzVersion == AuthorizationManager::schemaVersion26Final ?
AuthorizationManager::usersCollectionNamespace :
AuthorizationManager::usersAltCollectionNamespace),
@@ -139,18 +141,8 @@ namespace {
return Status::OK();
}
- Status AuthzManagerExternalStateMock::_findUser(
- const std::string& usersNamespace,
- const BSONObj& query,
- BSONObj* result) {
- if (!findOne(NamespaceString(usersNamespace), query, result).isOK()) {
- return Status(ErrorCodes::UserNotFound,
- "No matching user for query " + query.toString());
- }
- return Status::OK();
- }
-
Status AuthzManagerExternalStateMock::findOne(
+ OperationContext* txn,
const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result) {
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.h b/src/mongo/db/auth/authz_manager_external_state_mock.h
index 6ec06f97692..06db6b77890 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.h
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.h
@@ -60,7 +60,8 @@ namespace mongo {
virtual Status getAllDatabaseNames(std::vector<std::string>* dbnames);
- virtual Status findOne(const NamespaceString& collectionName,
+ virtual Status findOne(OperationContext* txn,
+ const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result);
@@ -101,16 +102,14 @@ namespace mongo {
virtual bool tryAcquireAuthzUpdateLock(const StringData& why);
virtual void releaseAuthzUpdateLock();
- Status _findUser(const std::string& usersNamespace,
- const BSONObj& query,
- BSONObj* result);
std::vector<BSONObj> getCollectionContents(const NamespaceString& collectionName);
private:
typedef std::vector<BSONObj> BSONObjCollection;
typedef std::map<NamespaceString, BSONObjCollection> NamespaceDocumentMap;
- virtual Status _getUserDocument(const UserName& userName, BSONObj* userDoc);
+ virtual Status _getUserDocument(
+ OperationContext* txn, const UserName& userName, BSONObj* userDoc);
Status _findOneIter(const NamespaceString& collectionName,
const BSONObj& query,
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.cpp b/src/mongo/db/auth/authz_manager_external_state_s.cpp
index 8f1dd6d1256..0cd3760258c 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_s.cpp
@@ -70,7 +70,8 @@ namespace mongo {
}
}
- Status AuthzManagerExternalStateMongos::getStoredAuthorizationVersion(int* outVersion) {
+ Status AuthzManagerExternalStateMongos::getStoredAuthorizationVersion(
+ OperationContext* txn, int* outVersion) {
scoped_ptr<ScopedDbConnection> conn(getConnectionForAuthzCollection(
AuthorizationManager::usersCollectionNamespace));
Status status = auth::getRemoteStoredAuthorizationVersion(conn->get(), outVersion);
@@ -78,8 +79,8 @@ namespace mongo {
return status;
}
- Status AuthzManagerExternalStateMongos::getUserDescription(const UserName& userName,
- BSONObj* result) {
+ Status AuthzManagerExternalStateMongos::getUserDescription(
+ OperationContext* txn, const UserName& userName, BSONObj* result) {
try {
scoped_ptr<ScopedDbConnection> conn(getConnectionForAuthzCollection(
AuthorizationManager::usersCollectionNamespace));
@@ -190,6 +191,7 @@ namespace mongo {
}
Status AuthzManagerExternalStateMongos::findOne(
+ OperationContext* txn,
const NamespaceString& collectionName,
const BSONObj& queryDoc,
BSONObj* result) {
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.h b/src/mongo/db/auth/authz_manager_external_state_s.h
index 203ce25f5ac..c19e3ed056e 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.h
+++ b/src/mongo/db/auth/authz_manager_external_state_s.h
@@ -52,8 +52,9 @@ namespace mongo {
virtual ~AuthzManagerExternalStateMongos();
virtual Status initialize();
- virtual Status getStoredAuthorizationVersion(int* outVersion);
- virtual Status getUserDescription(const UserName& userName, BSONObj* result);
+ virtual Status getStoredAuthorizationVersion(OperationContext* txn, int* outVersion);
+ virtual Status getUserDescription(
+ OperationContext* txn, const UserName& userName, BSONObj* result);
virtual Status getRoleDescription(const RoleName& roleName,
bool showPrivileges,
BSONObj* result);
@@ -70,7 +71,8 @@ namespace mongo {
* NOTE: The data returned from this helper may be from any config server or replica set
* node. The first config server or primary node is preferred, when available.
*/
- virtual Status findOne(const NamespaceString& collectionName,
+ virtual Status findOne(OperationContext* txn,
+ const NamespaceString& collectionName,
const BSONObj& query,
BSONObj* result);
diff --git a/src/mongo/db/auth/authz_session_external_state.h b/src/mongo/db/auth/authz_session_external_state.h
index dbd838cb68f..2e1b41a0565 100644
--- a/src/mongo/db/auth/authz_session_external_state.h
+++ b/src/mongo/db/auth/authz_session_external_state.h
@@ -39,6 +39,7 @@
namespace mongo {
class Principal;
+ class OperationContext;
/**
* Public interface for a class that encapsulates all the session information related to system
@@ -68,7 +69,7 @@ namespace mongo {
// Should be called at the beginning of every new request. This performs the checks
// necessary to determine if localhost connections should be given full access.
- virtual void startRequest() = 0;
+ virtual void startRequest(OperationContext* txn) = 0;
protected:
// This class should never be instantiated directly.
diff --git a/src/mongo/db/auth/authz_session_external_state_d.cpp b/src/mongo/db/auth/authz_session_external_state_d.cpp
index 0156ccf6c9b..614b5e67505 100644
--- a/src/mongo/db/auth/authz_session_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_session_external_state_d.cpp
@@ -45,9 +45,9 @@ namespace mongo {
AuthzSessionExternalStateServerCommon(authzManager) {}
AuthzSessionExternalStateMongod::~AuthzSessionExternalStateMongod() {}
- void AuthzSessionExternalStateMongod::startRequest() {
+ void AuthzSessionExternalStateMongod::startRequest(OperationContext* txn) {
if (!Lock::isLocked()) {
- _checkShouldAllowLocalhost();
+ _checkShouldAllowLocalhost(txn);
}
}
diff --git a/src/mongo/db/auth/authz_session_external_state_d.h b/src/mongo/db/auth/authz_session_external_state_d.h
index f5b2c82cd03..0df26d507c7 100644
--- a/src/mongo/db/auth/authz_session_external_state_d.h
+++ b/src/mongo/db/auth/authz_session_external_state_d.h
@@ -47,7 +47,7 @@ namespace mongo {
virtual bool shouldIgnoreAuthChecks() const;
- virtual void startRequest();
+ virtual void startRequest(OperationContext* txn);
};
} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state_mock.h b/src/mongo/db/auth/authz_session_external_state_mock.h
index c884654ac91..0acc98366ea 100644
--- a/src/mongo/db/auth/authz_session_external_state_mock.h
+++ b/src/mongo/db/auth/authz_session_external_state_mock.h
@@ -62,7 +62,7 @@ namespace mongo {
_allowLocalhostReturnValue = returnValue;
}
- virtual void startRequest() {}
+ virtual void startRequest(OperationContext* txn) {}
private:
bool _ignoreAuthChecksReturnValue;
diff --git a/src/mongo/db/auth/authz_session_external_state_s.cpp b/src/mongo/db/auth/authz_session_external_state_s.cpp
index 14801eae945..4009670c6c4 100644
--- a/src/mongo/db/auth/authz_session_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_session_external_state_s.cpp
@@ -43,8 +43,8 @@ namespace mongo {
AuthzSessionExternalStateServerCommon(authzManager) {}
AuthzSessionExternalStateMongos::~AuthzSessionExternalStateMongos() {}
- void AuthzSessionExternalStateMongos::startRequest() {
- _checkShouldAllowLocalhost();
+ void AuthzSessionExternalStateMongos::startRequest(OperationContext* txn) {
+ _checkShouldAllowLocalhost(txn);
}
} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state_s.h b/src/mongo/db/auth/authz_session_external_state_s.h
index 6672957ced4..777082faadc 100644
--- a/src/mongo/db/auth/authz_session_external_state_s.h
+++ b/src/mongo/db/auth/authz_session_external_state_s.h
@@ -45,7 +45,7 @@ namespace mongo {
AuthzSessionExternalStateMongos(AuthorizationManager* authzManager);
virtual ~AuthzSessionExternalStateMongos();
- virtual void startRequest();
+ virtual void startRequest(OperationContext* txn);
};
} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state_server_common.cpp b/src/mongo/db/auth/authz_session_external_state_server_common.cpp
index b5c6f6a4bc3..a62b472e5ab 100644
--- a/src/mongo/db/auth/authz_session_external_state_server_common.cpp
+++ b/src/mongo/db/auth/authz_session_external_state_server_common.cpp
@@ -49,7 +49,7 @@ namespace {
_allowLocalhost(enableLocalhostAuthBypass) {}
AuthzSessionExternalStateServerCommon::~AuthzSessionExternalStateServerCommon() {}
- void AuthzSessionExternalStateServerCommon::_checkShouldAllowLocalhost() {
+ void AuthzSessionExternalStateServerCommon::_checkShouldAllowLocalhost(OperationContext* txn) {
if (!_authzManager->isAuthEnabled())
return;
// If we know that an admin user exists, don't re-check.
@@ -61,7 +61,7 @@ namespace {
return;
}
- _allowLocalhost = !_authzManager->hasAnyPrivilegeDocuments();
+ _allowLocalhost = !_authzManager->hasAnyPrivilegeDocuments(txn);
if (_allowLocalhost) {
ONCE {
log() << "note: no users configured in admin.system.users, allowing localhost "
diff --git a/src/mongo/db/auth/authz_session_external_state_server_common.h b/src/mongo/db/auth/authz_session_external_state_server_common.h
index f6e1a97f4a9..59599a6befd 100644
--- a/src/mongo/db/auth/authz_session_external_state_server_common.h
+++ b/src/mongo/db/auth/authz_session_external_state_server_common.h
@@ -53,7 +53,7 @@ namespace mongo {
// Checks whether or not localhost connections should be given full access and stores the
// result in _allowLocalhost. Currently localhost connections are only given full access
// if there are no users in the admin database.
- virtual void _checkShouldAllowLocalhost();
+ void _checkShouldAllowLocalhost(OperationContext* txn);
private:
diff --git a/src/mongo/db/catalog/collection_cursor_cache.cpp b/src/mongo/db/catalog/collection_cursor_cache.cpp
index 926baf7a137..766bb3cd4a0 100644
--- a/src/mongo/db/catalog/collection_cursor_cache.cpp
+++ b/src/mongo/db/catalog/collection_cursor_cache.cpp
@@ -98,11 +98,11 @@ namespace mongo {
/**
* works globally
*/
- bool eraseCursor( CursorId id, bool checkAuth );
+ bool eraseCursor(OperationContext* txn, CursorId id, bool checkAuth);
void appendStats( BSONObjBuilder& builder );
- std::size_t timeoutCursors( int millisSinceLastCall );
+ std::size_t timeoutCursors(OperationContext* txn, int millisSinceLastCall);
int64_t nextSeed();
@@ -159,7 +159,7 @@ namespace mongo {
_idToNS.erase( id );
}
- bool GlobalCursorIdCache::eraseCursor(CursorId id, bool checkAuth) {
+ bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool checkAuth) {
string ns;
{
SimpleMutex::scoped_lock lk( _mutex );
@@ -175,8 +175,8 @@ namespace mongo {
if ( checkAuth ) {
AuthorizationSession* as = cc().getAuthorizationSession();
- bool isAuthorized = as->isAuthorizedForActionsOnNamespace(nss,
- ActionType::killCursors);
+ bool isAuthorized = as->isAuthorizedForActionsOnNamespace(
+ nss, ActionType::killCursors);
if ( !isAuthorized ) {
audit::logKillCursorsAuthzCheck( currentClient.get(),
nss,
@@ -186,8 +186,8 @@ namespace mongo {
}
}
- Lock::DBRead lock( ns );
- Database* db = dbHolder().get( ns, storageGlobalParams.dbpath );
+ Lock::DBRead lock(txn->lockState(), ns);
+ Database* db = dbHolder().get(ns, storageGlobalParams.dbpath);
if ( !db )
return false;
Client::Context context( ns, db );
@@ -204,7 +204,7 @@ namespace mongo {
return collection->cursorCache()->eraseCursor( id, checkAuth );
}
- std::size_t GlobalCursorIdCache::timeoutCursors( int millisSinceLastCall ) {
+ std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* txn, int millisSinceLastCall) {
vector<string> todo;
{
SimpleMutex::scoped_lock lk( _mutex );
@@ -216,7 +216,7 @@ namespace mongo {
for ( unsigned i = 0; i < todo.size(); i++ ) {
const string& ns = todo[i];
- Lock::DBRead lock( ns );
+ Lock::DBRead lock(txn->lockState(), ns);
Database* db = dbHolder().get( ns, storageGlobalParams.dbpath );
if ( !db )
continue;
@@ -235,25 +235,25 @@ namespace mongo {
// ---
- std::size_t CollectionCursorCache::timeoutCursorsGlobal( int millisSinceLastCall ) {
- return _globalCursorIdCache.timeoutCursors( millisSinceLastCall );
+ std::size_t CollectionCursorCache::timeoutCursorsGlobal(OperationContext* txn, int millisSinceLastCall) {;
+ return _globalCursorIdCache.timeoutCursors(txn, millisSinceLastCall);
}
- int CollectionCursorCache::eraseCursorGlobalIfAuthorized(int n, long long* ids) {
+ int CollectionCursorCache::eraseCursorGlobalIfAuthorized(OperationContext* txn, int n, long long* ids) {
int numDeleted = 0;
for ( int i = 0; i < n; i++ ) {
- if ( eraseCursorGlobalIfAuthorized( ids[i] ) )
+ if ( eraseCursorGlobalIfAuthorized(txn, ids[i] ) )
numDeleted++;
if ( inShutdown() )
break;
}
return numDeleted;
}
- bool CollectionCursorCache::eraseCursorGlobalIfAuthorized(CursorId id) {
- return _globalCursorIdCache.eraseCursor( id, true );
+ bool CollectionCursorCache::eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id) {
+ return _globalCursorIdCache.eraseCursor(txn, id, true);
}
- bool CollectionCursorCache::eraseCursorGlobal( CursorId id ) {
- return _globalCursorIdCache.eraseCursor( id, false );
+ bool CollectionCursorCache::eraseCursorGlobal(OperationContext* txn, CursorId id) {
+ return _globalCursorIdCache.eraseCursor(txn, id, false );
}
diff --git a/src/mongo/db/catalog/collection_cursor_cache.h b/src/mongo/db/catalog/collection_cursor_cache.h
index bc057def73f..d08800d4d7b 100644
--- a/src/mongo/db/catalog/collection_cursor_cache.h
+++ b/src/mongo/db/catalog/collection_cursor_cache.h
@@ -109,15 +109,15 @@ namespace mongo {
// ----------------------
- static int eraseCursorGlobalIfAuthorized( int n, long long* ids );
- static bool eraseCursorGlobalIfAuthorized( CursorId id );
+ static int eraseCursorGlobalIfAuthorized(OperationContext* txn, int n, long long* ids);
+ static bool eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id);
- static bool eraseCursorGlobal( CursorId id );
+ static bool eraseCursorGlobal(OperationContext* txn, CursorId id);
/**
* @return number timed out
*/
- static std::size_t timeoutCursorsGlobal( int millisSinceLastCall );
+ static std::size_t timeoutCursorsGlobal(OperationContext* txn, int millisSinceLastCall);
private:
CursorId _allocateCursorId_inlock();
diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp
index 3516cc06a09..aeded6b196b 100644
--- a/src/mongo/db/catalog/database_holder.cpp
+++ b/src/mongo/db/catalog/database_holder.cpp
@@ -41,8 +41,7 @@
namespace mongo {
- Database* DatabaseHolder::getOrCreate( const string& ns, const string& path, bool& justCreated ) {
- OperationContextImpl txn; // TODO get rid of this once reads require transactions
+ Database* DatabaseHolder::getOrCreate(OperationContext* txn, const string& ns, const string& path, bool& justCreated) {
string dbname = _todb( ns );
{
SimpleMutex::scoped_lock lk(_m);
@@ -74,7 +73,7 @@ namespace mongo {
cc().writeHappened();
// this locks _m for defensive checks, so we don't want to be locked right here :
- Database *db = new Database( &txn, dbname.c_str() , justCreated , path );
+ Database *db = new Database(txn, dbname.c_str(), justCreated, path);
{
SimpleMutex::scoped_lock lk(_m);
diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h
index a2901926db7..ad40b8601c4 100644
--- a/src/mongo/db/catalog/database_holder.h
+++ b/src/mongo/db/catalog/database_holder.h
@@ -82,7 +82,10 @@ namespace mongo {
return 0;
}
- Database* getOrCreate( const std::string& ns , const std::string& path , bool& justCreated );
+ Database* getOrCreate(OperationContext* txn,
+ const std::string& ns,
+ const std::string& path,
+ bool& justCreated);
void erase( const std::string& ns , const std::string& path ) {
SimpleMutex::scoped_lock lk(_m);
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 21dd08f9b92..97e5ba1c71e 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -55,6 +55,7 @@
#include "mongo/db/instance.h"
#include "mongo/db/json.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/repl/rs.h"
#include "mongo/db/storage_options.h"
#include "mongo/s/chunk_version.h"
@@ -191,14 +192,13 @@ namespace mongo {
/** "read lock, and set my context, all in one operation"
* This handles (if not recursively locked) opening an unopened database.
*/
- Client::ReadContext::ReadContext(const string& ns,
- const std::string& path,
- bool doVersion) {
+ Client::ReadContext::ReadContext(
+ OperationContext* txn, const string& ns, bool doVersion) {
{
- lk.reset( new Lock::DBRead(ns) );
- Database *db = dbHolder().get(ns, path);
+ _lk.reset(new Lock::DBRead(txn->lockState(), ns));
+ Database *db = dbHolder().get(ns, storageGlobalParams.dbpath);
if( db ) {
- c.reset( new Context(path, ns, db, doVersion) );
+ _c.reset(new Context(storageGlobalParams.dbpath, ns, db, doVersion));
return;
}
}
@@ -209,17 +209,18 @@ namespace mongo {
if( Lock::isW() ) {
// write locked already
DEV RARELY log() << "write locked on ReadContext construction " << ns << endl;
- c.reset(new Context(ns, path, doVersion));
+ _c.reset(new Context(ns, storageGlobalParams.dbpath, doVersion));
}
else if( !Lock::nested() ) {
- lk.reset(0);
+ _lk.reset(0);
{
Lock::GlobalWrite w;
- Context c(ns, path, doVersion);
+ Context c(ns, storageGlobalParams.dbpath, doVersion);
}
+
// db could be closed at this interim point -- that is ok, we will throw, and don't mind throwing.
- lk.reset( new Lock::DBRead(ns) );
- c.reset(new Context(ns, path, doVersion));
+ _lk.reset(new Lock::DBRead(txn->lockState(), ns));
+ _c.reset(new Context(ns, storageGlobalParams.dbpath, doVersion));
}
else {
uasserted(15928, str::stream() << "can't open a database from a nested read lock " << ns);
@@ -231,9 +232,10 @@ namespace mongo {
// it would be easy to first check that there is at least a .ns file, or something similar.
}
- Client::WriteContext::WriteContext(const string& ns, const std::string& path, bool doVersion)
- : _lk( ns ) ,
- _c(ns, path, doVersion) {
+ Client::WriteContext::WriteContext(
+ OperationContext* opCtx, const std::string& ns, bool doVersion)
+ : _lk(opCtx->lockState(), ns),
+ _c(ns, storageGlobalParams.dbpath, doVersion) {
}
@@ -279,7 +281,8 @@ namespace mongo {
uassert(14031, "Can't take a write lock while out of disk space", false);
}
- _db = dbHolderUnchecked().getOrCreate( _ns , _path , _justCreated );
+ OperationContextImpl txn; // TODO get rid of this once reads require transactions
+ _db = dbHolderUnchecked().getOrCreate(&txn, _ns, _path, _justCreated);
verify(_db);
if( _doVersion ) checkNotStale();
massert( 16107 , str::stream() << "Don't have a lock on: " << _ns , Lock::atLeastReadLocked( _ns ) );
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index 6d4179c506a..17beba12098 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -146,13 +146,13 @@ namespace mongo {
*/
class ReadContext : boost::noncopyable {
public:
- ReadContext(const std::string& ns,
- const std::string& path=storageGlobalParams.dbpath,
+ ReadContext(OperationContext* txn,
+ const std::string& ns,
bool doVersion = true);
- Context& ctx() { return *c.get(); }
+ Context& ctx() { return *_c.get(); }
private:
- scoped_ptr<Lock::DBRead> lk;
- scoped_ptr<Context> c;
+ scoped_ptr<Lock::DBRead> _lk;
+ scoped_ptr<Context> _c;
};
/* Set database we want to use, then, restores when we finish (are out of scope)
@@ -219,10 +219,9 @@ namespace mongo {
class WriteContext : boost::noncopyable {
public:
- WriteContext(const std::string& ns,
- const std::string& path=storageGlobalParams.dbpath,
- bool doVersion = true);
+ WriteContext(OperationContext* opCtx, const std::string& ns, bool doVersion = true);
Context& ctx() { return _c; }
+
private:
Lock::DBWrite _lk;
Context _c;
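A minimal usage sketch (not part of this patch): command code constructs the read/write helpers by handing over the OperationContext, and the helpers derive both the dbpath and the LockState internally. The function bodies below are hypothetical; only the ReadContext and WriteContext constructors come from the declarations above.

    // Hypothetical callers: both helpers are fed the operation's
    // OperationContext; the dbpath defaults to storageGlobalParams.dbpath.
    bool readExample(OperationContext* txn, const std::string& ns) {
        Client::ReadContext readCtx(txn, ns);     // DBRead via txn->lockState()
        Database* db = readCtx.ctx().db();
        return db != NULL;
    }

    void writeExample(OperationContext* txn, const std::string& ns) {
        Client::WriteContext writeCtx(txn, ns);   // DBWrite via txn->lockState()
        // ... perform writes against writeCtx.ctx().db() ...
    }
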
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 628c4e110b7..3bac1b29535 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/db.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/kill_current_op.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/repl/rs.h"
#include "mongo/db/repl/write_concern.h"
@@ -219,8 +220,10 @@ namespace mongo {
Client& client = cc();
Timer t;
const int Secs = 4;
- while ( ! inShutdown() ) {
- cursorStatsTimedOut.increment( CollectionCursorCache::timeoutCursorsGlobal( t.millisReset() ) );
+ while (!inShutdown()) {
+ OperationContextImpl txn;
+ cursorStatsTimedOut.increment(
+ CollectionCursorCache::timeoutCursorsGlobal(&txn, t.millisReset()));
sleepsecs(Secs);
}
client.shutdown();
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index d1b97e9be3a..519a1f6b9e8 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -301,7 +301,7 @@ namespace mongo {
bool copyIndexes,
bool logForRepl) {
- Client::WriteContext ctx(ns);
+ Client::WriteContext ctx(txn, ns);
// config
string temp = ctx.ctx().db()->name() + ".system.namespaces";
diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp
index ef3c2ea2ab9..37c72610627 100644
--- a/src/mongo/db/commands/apply_ops.cpp
+++ b/src/mongo/db/commands/apply_ops.cpp
@@ -127,7 +127,7 @@ namespace mongo {
// operations are applied. We are already locked globally at this point, so taking
// a DBWrite on the namespace creates a nested lock, and yields are disallowed for
// operations that hold a nested lock.
- Lock::DBWrite lk(ns);
+ Lock::DBWrite lk(txn->lockState(), ns);
invariant(Lock::nested());
Client::Context ctx(ns);
diff --git a/src/mongo/db/commands/auth_schema_upgrade_d.cpp b/src/mongo/db/commands/auth_schema_upgrade_d.cpp
index 3a52792615e..4f63bacd173 100644
--- a/src/mongo/db/commands/auth_schema_upgrade_d.cpp
+++ b/src/mongo/db/commands/auth_schema_upgrade_d.cpp
@@ -147,7 +147,7 @@ namespace {
if (!status.isOK())
return appendCommandStatus(result, status);
- status = authzManager->upgradeSchema(maxSteps, writeConcern);
+ status = authzManager->upgradeSchema(txn, maxSteps, writeConcern);
if (status.isOK())
result.append("done", true);
return appendCommandStatus(result, status);
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index 651ecb6cca9..144ea90b01b 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -160,7 +160,7 @@ namespace mongo {
if (mechanism.empty()) {
mechanism = "MONGODB-CR";
}
- Status status = _authenticate(mechanism, user, cmdObj);
+ Status status = _authenticate(txn, mechanism, user, cmdObj);
audit::logAuthentication(ClientBasic::getCurrent(),
mechanism,
user,
@@ -184,22 +184,24 @@ namespace mongo {
return true;
}
- Status CmdAuthenticate::_authenticate(const std::string& mechanism,
+ Status CmdAuthenticate::_authenticate(OperationContext* txn,
+ const std::string& mechanism,
const UserName& user,
const BSONObj& cmdObj) {
if (mechanism == "MONGODB-CR") {
- return _authenticateCR(user, cmdObj);
+ return _authenticateCR(txn, user, cmdObj);
}
#ifdef MONGO_SSL
if (mechanism == "MONGODB-X509") {
- return _authenticateX509(user, cmdObj);
+ return _authenticateX509(txn, user, cmdObj);
}
#endif
return Status(ErrorCodes::BadValue, "Unsupported mechanism: " + mechanism);
}
- Status CmdAuthenticate::_authenticateCR(const UserName& user, const BSONObj& cmdObj) {
+ Status CmdAuthenticate::_authenticateCR(
+ OperationContext* txn, const UserName& user, const BSONObj& cmdObj) {
if (user == internalSecurity.user->getName() &&
serverGlobalParams.clusterAuthMode.load() ==
@@ -246,7 +248,7 @@ namespace mongo {
}
User* userObj;
- Status status = getGlobalAuthorizationManager()->acquireUser(user, &userObj);
+ Status status = getGlobalAuthorizationManager()->acquireUser(txn, user, &userObj);
if (!status.isOK()) {
// Failure to find the privilege document indicates no-such-user, a fact that we do not
// wish to reveal to the client. So, we return AuthenticationFailed rather than passing
@@ -275,7 +277,7 @@ namespace mongo {
AuthorizationSession* authorizationSession =
ClientBasic::getCurrent()->getAuthorizationSession();
- status = authorizationSession->addAndAuthorizeUser(user);
+ status = authorizationSession->addAndAuthorizeUser(txn, user);
if (!status.isOK()) {
return status;
}
@@ -317,7 +319,8 @@ namespace mongo {
return true;
}
- Status CmdAuthenticate::_authenticateX509(const UserName& user, const BSONObj& cmdObj) {
+ Status CmdAuthenticate::_authenticateX509(
+ OperationContext* txn, const UserName& user, const BSONObj& cmdObj) {
if (!getSSLManager()) {
return Status(ErrorCodes::ProtocolError,
"SSL support is required for the MONGODB-X509 mechanism.");
@@ -356,7 +359,7 @@ namespace mongo {
return Status(ErrorCodes::BadValue,
_x509AuthenticationDisabledMessage);
}
- Status status = authorizationSession->addAndAuthorizeUser(user);
+ Status status = authorizationSession->addAndAuthorizeUser(txn, user);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/commands/authentication_commands.h b/src/mongo/db/commands/authentication_commands.h
index 4ccfb464aa7..e6b0dd87ab5 100644
--- a/src/mongo/db/commands/authentication_commands.h
+++ b/src/mongo/db/commands/authentication_commands.h
@@ -71,11 +71,14 @@ namespace mongo {
* mechanism, and ProtocolError, indicating an error in the use of the authentication
* protocol.
*/
- Status _authenticate(const std::string& mechanism,
+ Status _authenticate(OperationContext* txn,
+ const std::string& mechanism,
const UserName& user,
const BSONObj& cmdObj);
- Status _authenticateCR(const UserName& user, const BSONObj& cmdObj);
- Status _authenticateX509(const UserName& user, const BSONObj& cmdObj);
+ Status _authenticateCR(
+ OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
+ Status _authenticateX509(
+ OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
bool _clusterIdMatch(const std::string& subjectName, const std::string& srvSubjectName);
};
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index b75ee5ceb69..2e367e3dfaf 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -118,7 +118,7 @@ namespace mongo {
set<string> clonedColls;
- Lock::DBWrite dbXLock(dbname);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context context( dbname );
Cloner cloner;
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index d2a909f4af4..0186a17b643 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -153,7 +153,7 @@ namespace mongo {
return false;
}
- Lock::DBWrite dbXLock(dbname);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(dbname);
Status status = cloneCollectionAsCapped( txn, ctx.db(), from, to, size, temp, true );
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index 3355e222570..1002ec82ffd 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -140,7 +140,7 @@ namespace mongo {
compactOptions.validateDocuments = cmdObj["validate"].trueValue();
- Lock::DBWrite lk(ns.ns());
+ Lock::DBWrite lk(txn->lockState(), ns.ns());
BackgroundOperation::assertNoBgOpInProgForNs(ns.ns());
Client::Context ctx(ns);
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index 0db7a7950bb..db0025f43b2 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -155,8 +155,8 @@ namespace mongo {
// SERVER-4328 todo lock just the two db's not everything for the fromself case
scoped_ptr<Lock::ScopedLock> lk( fromSelf ?
- static_cast<Lock::ScopedLock*>( new Lock::GlobalWrite() ) :
- static_cast<Lock::ScopedLock*>( new Lock::DBWrite( todb ) ) );
+ static_cast<Lock::ScopedLock*>(new Lock::GlobalWrite()) :
+ static_cast<Lock::ScopedLock*>(new Lock::DBWrite(txn->lockState(), todb)));
Cloner cloner;
string username = cmdObj.getStringField( "username" );
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 2519bc75cc4..841f9522ace 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -57,7 +57,7 @@ namespace mongo {
ActionSet actions;
actions.addAction(ActionType::createIndex);
Privilege p(parseResourcePattern(dbname, cmdObj), actions);
- if ( client->getAuthorizationSession()->isAuthorizedForPrivilege(p) )
+ if (client->getAuthorizationSession()->isAuthorizedForPrivilege(p))
return Status::OK();
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
@@ -132,9 +132,7 @@ namespace mongo {
// as many calls are ensureIndex (and hence no-ops), this is good so its a shared
// lock for common calls. We only take write lock if needed.
// Note: createIndexes command does not currently respect shard versioning.
- Client::ReadContext readContext( ns,
- storageGlobalParams.dbpath,
- false /* doVersion */ );
+ Client::ReadContext readContext(txn, ns, false /* doVersion */);
const Collection* collection = readContext.ctx().db()->getCollection( ns.ns() );
if ( collection ) {
for ( size_t i = 0; i < specs.size(); i++ ) {
@@ -164,9 +162,7 @@ namespace mongo {
// now we know we have to create index(es)
// Note: createIndexes command does not currently respect shard versioning.
- Client::WriteContext writeContext( ns.ns(),
- storageGlobalParams.dbpath,
- false /* doVersion */ );
+ Client::WriteContext writeContext(txn, ns.ns(), false /* doVersion */);
Database* db = writeContext.ctx().db();
Collection* collection = db->getCollection( txn, ns.ns() );
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index cddac188c94..3899e7f522d 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -145,7 +145,7 @@ namespace mongo {
list<string> colls;
const string ns = parseNs(dbname, cmdObj);
- Client::ReadContext ctx(ns);
+ Client::ReadContext ctx(txn, ns);
Database* db = ctx.ctx().db();
if ( db )
db->getDatabaseCatalogEntry()->getCollectionNamespaces( &colls );
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index bc05c80195a..4c3a6bb5955 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -90,7 +90,7 @@ namespace mongo {
long long nscannedObjects = 0; // full objects looked at
long long n = 0; // matches
- Client::ReadContext ctx(ns);
+ Client::ReadContext ctx(txn, ns);
Collection* collection = ctx.ctx().db()->getCollection( ns );
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index ab9144b7c96..c68aede5bc3 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -93,7 +93,7 @@ namespace mongo {
CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") { }
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& anObjBuilder, bool fromRepl) {
- Lock::DBWrite dbXLock(dbname);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
bool ok = wrappedRun(txn, dbname, jsobj, errmsg, anObjBuilder);
if (ok && !fromRepl)
repl::logOp(txn, "c",(dbname + ".$cmd").c_str(), jsobj);
@@ -220,7 +220,7 @@ namespace mongo {
MONGO_TLOG(0) << "CMD: reIndex " << toDeleteNs << endl;
- Lock::DBWrite dbXLock(dbname);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(toDeleteNs);
Collection* collection = ctx.db()->getCollection( toDeleteNs );
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 79b93f14e28..37d777b2da3 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -93,7 +93,7 @@ namespace mongo {
return false;
}
- Lock::DBWrite dbXLock(dbname);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(ns);
return runNoDirectClient( txn, ns ,
@@ -133,7 +133,7 @@ namespace mongo {
BSONObjBuilder& result,
string& errmsg) {
- Lock::DBWrite lk( ns );
+ Lock::DBWrite lk(txn->lockState(), ns);
Client::Context cx( ns );
Collection* collection = cx.db()->getCollection( txn, ns );
@@ -325,7 +325,7 @@ namespace mongo {
}
}
- Lock::DBWrite dbXLock(dbname);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(ns);
BSONObj out = db.findOne(ns, q, fields);
diff --git a/src/mongo/db/commands/geonear.cpp b/src/mongo/db/commands/geonear.cpp
index 5976d8a4e3d..ebbdd6efd69 100644
--- a/src/mongo/db/commands/geonear.cpp
+++ b/src/mongo/db/commands/geonear.cpp
@@ -76,7 +76,7 @@ namespace mongo {
return false;
}
- Client::ReadContext ctx(ns);
+ Client::ReadContext ctx(txn, ns);
Database* db = ctx.ctx().db();
if ( !db ) {
diff --git a/src/mongo/db/commands/group.cpp b/src/mongo/db/commands/group.cpp
index 6e80bbef1f6..94920e4d61e 100644
--- a/src/mongo/db/commands/group.cpp
+++ b/src/mongo/db/commands/group.cpp
@@ -254,7 +254,7 @@ namespace mongo {
finalize = p["finalize"]._asCode();
const string ns = parseNs(dbname, jsobj);
- Client::ReadContext ctx(ns);
+ Client::ReadContext ctx(txn, ns);
return group( ctx.ctx().db() , ns , q ,
key , keyf , reduce._asCode() , reduce.type() != CodeWScope ? 0 : reduce.codeWScopeScopeDataUnsafe() ,
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 60346f782c4..efc92edd3a3 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -124,7 +124,7 @@ namespace mongo {
string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string ns = parseNs(dbname, cmdObj);
- Status status = runIndexFilterCommand(ns, cmdObj, &result);
+ Status status = runIndexFilterCommand(txn, ns, cmdObj, &result);
if (!status.isOK()) {
addStatus(status, result);
@@ -144,8 +144,9 @@ namespace mongo {
ss << helpText;
}
- Status IndexFilterCommand::checkAuthForCommand(ClientBasic* client, const std::string& dbname,
- const BSONObj& cmdObj) {
+ Status IndexFilterCommand::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
AuthorizationSession* authzSession = client->getAuthorizationSession();
ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
@@ -159,9 +160,12 @@ namespace mongo {
ListFilters::ListFilters() : IndexFilterCommand("planCacheListFilters",
"Displays index filters for all query shapes in a collection.") { }
- Status ListFilters::runIndexFilterCommand(const string& ns, BSONObj& cmdObj, BSONObjBuilder* bob) {
+ Status ListFilters::runIndexFilterCommand(OperationContext* txn,
+ const string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
- Client::ReadContext readCtx(ns);
+ Client::ReadContext readCtx(txn, ns);
Client::Context& ctx = readCtx.ctx();
QuerySettings* querySettings;
PlanCache* unused;
@@ -218,9 +222,12 @@ namespace mongo {
"Clears index filter for a single query shape or, "
"if the query shape is omitted, all filters for the collection.") { }
- Status ClearFilters::runIndexFilterCommand(const string& ns, BSONObj& cmdObj, BSONObjBuilder* bob) {
+ Status ClearFilters::runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
- Client::ReadContext readCtx(ns);
+ Client::ReadContext readCtx(txn, ns);
Client::Context& ctx = readCtx.ctx();
QuerySettings* querySettings;
PlanCache* planCache;
@@ -306,9 +313,12 @@ namespace mongo {
SetFilter::SetFilter() : IndexFilterCommand("planCacheSetFilter",
"Sets index filter for a query shape. Overrides existing filter.") { }
- Status SetFilter::runIndexFilterCommand(const string& ns, BSONObj& cmdObj, BSONObjBuilder* bob) {
+ Status SetFilter::runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
// This is a read lock. The query settings is owned by the collection.
- Client::ReadContext readCtx(ns);
+ Client::ReadContext readCtx(txn, ns);
Client::Context& ctx = readCtx.ctx();
QuerySettings* querySettings;
PlanCache* planCache;
diff --git a/src/mongo/db/commands/index_filter_commands.h b/src/mongo/db/commands/index_filter_commands.h
index a08ddd816db..f6ba8fa9efb 100644
--- a/src/mongo/db/commands/index_filter_commands.h
+++ b/src/mongo/db/commands/index_filter_commands.h
@@ -76,7 +76,8 @@ namespace mongo {
* One action type defined for index filter commands:
* - planCacheIndexFilter
*/
- virtual Status checkAuthForCommand(ClientBasic* client, const std::string& dbname,
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
const BSONObj& cmdObj);
/**
@@ -85,7 +86,9 @@ namespace mongo {
* Should contain just enough logic to invoke run*Command() function
* in query_settings.h
*/
- virtual Status runIndexFilterCommand(const std::string& ns, BSONObj& cmdObj,
+ virtual Status runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
BSONObjBuilder* bob) = 0;
private:
@@ -102,7 +105,10 @@ namespace mongo {
public:
ListFilters();
- virtual Status runIndexFilterCommand(const std::string& ns, BSONObj& cmdObj, BSONObjBuilder* bob);
+ virtual Status runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
* Looks up index filters from collection's query settings.
@@ -121,7 +127,10 @@ namespace mongo {
public:
ClearFilters();
- virtual Status runIndexFilterCommand(const std::string& ns, BSONObj& cmdObj, BSONObjBuilder* bob);
+ virtual Status runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
* If query shape is provided, clears index filter for a query.
@@ -149,7 +158,10 @@ namespace mongo {
public:
SetFilter();
- virtual Status runIndexFilterCommand(const std::string& ns, BSONObj& cmdObj, BSONObjBuilder* bob);
+ virtual Status runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
* Sets index filter for a query shape.
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index af99dfa6818..97c5d1fb9b5 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -342,7 +342,7 @@ namespace mongo {
if (_useIncremental) {
// Create the inc collection and make sure we have index on "0" key.
// Intentionally not replicating the inc collection to secondaries.
- Client::WriteContext incCtx( _config.incLong );
+ Client::WriteContext incCtx(_txn, _config.incLong);
Collection* incColl = incCtx.ctx().db()->getCollection( _txn, _config.incLong );
if ( !incColl ) {
CollectionOptions options;
@@ -364,7 +364,7 @@ namespace mongo {
{
// copy indexes into temporary storage
- Client::WriteContext finalCtx( _config.outputOptions.finalNamespace );
+ Client::WriteContext finalCtx(_txn, _config.outputOptions.finalNamespace);
Collection* finalColl =
finalCtx.ctx().db()->getCollection( _config.outputOptions.finalNamespace );
if ( finalColl ) {
@@ -392,7 +392,7 @@ namespace mongo {
{
// create temp collection and insert the indexes from temporary storage
- Client::WriteContext tempCtx( _config.tempNamespace );
+ Client::WriteContext tempCtx(_txn, _config.tempNamespace);
Collection* tempColl = tempCtx.ctx().db()->getCollection( _txn, _config.tempNamespace );
if ( !tempColl ) {
CollectionOptions options;
@@ -559,7 +559,7 @@ namespace mongo {
_safeCount(_db, _config.tempNamespace, BSONObj()));
auto_ptr<DBClientCursor> cursor = _db.query( _config.tempNamespace , BSONObj() );
while ( cursor->more() ) {
- Lock::DBWrite lock( _config.outputOptions.finalNamespace );
+ Lock::DBWrite lock(_txn->lockState(), _config.outputOptions.finalNamespace);
BSONObj o = cursor->nextSafe();
Helpers::upsert( _txn, _config.outputOptions.finalNamespace , o );
_txn->recoveryUnit()->commitIfNeeded();
@@ -619,7 +619,7 @@ namespace mongo {
void State::insert( const string& ns , const BSONObj& o ) {
verify( _onDisk );
- Client::WriteContext ctx( ns );
+ Client::WriteContext ctx(_txn, ns);
Collection* coll = ctx.ctx().db()->getCollection( ns );
if ( !coll )
uasserted(13630, str::stream() << "attempted to insert into nonexistent" <<
@@ -645,7 +645,7 @@ namespace mongo {
void State::_insertToInc( BSONObj& o ) {
verify( _onDisk );
- Client::WriteContext ctx( _config.incLong );
+ Client::WriteContext ctx(_txn, _config.incLong);
Collection* coll = ctx.ctx().db()->getCollection( _config.incLong );
if ( !coll )
uasserted(13631, str::stream() << "attempted to insert into nonexistent"
@@ -921,7 +921,7 @@ namespace mongo {
BSONObj sortKey = BSON( "0" << 1 );
{
- Client::WriteContext incCtx( _config.incLong );
+ Client::WriteContext incCtx(_txn, _config.incLong);
Collection* incColl = incCtx.ctx().db()->getCollection( _config.incLong );
bool foundIndex = false;
@@ -940,7 +940,7 @@ namespace mongo {
verify( foundIndex );
}
- scoped_ptr<Client::ReadContext> ctx(new Client::ReadContext(_config.incLong));
+ scoped_ptr<Client::ReadContext> ctx(new Client::ReadContext(_txn, _config.incLong));
BSONObj prev;
BSONList all;
@@ -989,7 +989,7 @@ namespace mongo {
// reduce a finalize array
finalReduce( all );
- ctx.reset(new Client::ReadContext(_config.incLong));
+ ctx.reset(new Client::ReadContext(_txn, _config.incLong));
all.clear();
prev = o;
@@ -1005,7 +1005,7 @@ namespace mongo {
ctx.reset();
// reduce and finalize last array
finalReduce( all );
- ctx.reset(new Client::ReadContext(_config.incLong));
+ ctx.reset(new Client::ReadContext(_txn, _config.incLong));
pm.finished();
}
@@ -1060,7 +1060,7 @@ namespace mongo {
if ( ! _onDisk )
return;
- Lock::DBWrite kl(_config.incLong);
+ Lock::DBWrite kl(_txn->lockState(), _config.incLong);
for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); i++ ) {
BSONList& all = i->second;
@@ -1216,7 +1216,7 @@ namespace mongo {
// Prevent sharding state from changing during the MR.
auto_ptr<RangePreserver> rangePreserver;
{
- Client::ReadContext ctx(config.ns);
+ Client::ReadContext ctx(txn, config.ns);
Collection* collection = ctx.ctx().db()->getCollection( config.ns );
if ( collection )
rangePreserver.reset(new RangePreserver(collection));
@@ -1278,7 +1278,7 @@ namespace mongo {
// We've got a cursor preventing migrations off, now re-establish our useful cursor
// Need lock and context to use it
- scoped_ptr<Lock::DBRead> lock(new Lock::DBRead(config.ns));
+ scoped_ptr<Lock::DBRead> lock(new Lock::DBRead(txn->lockState(), config.ns));
// This context does no version check, safe b/c we checked earlier and have an
// open cursor
@@ -1340,7 +1340,7 @@ namespace mongo {
ctx.reset();
lock.reset();
state.reduceAndSpillInMemoryStateIfNeeded();
- lock.reset(new Lock::DBRead(config.ns));
+ lock.reset(new Lock::DBRead(txn->lockState(), config.ns));
ctx.reset(new Client::Context(config.ns, storageGlobalParams.dbpath, false));
reduceTime += t.micros();
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 061175638cd..593912cf0d4 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -165,7 +165,7 @@ namespace mongo {
NamespaceString ns( dbname, cmdObj[name].String() );
- Client::ReadContext ctx(ns.ns());
+ Client::ReadContext ctx(txn, ns.ns());
Database* db = ctx.ctx().db();
Collection* collection = db->getCollection( ns );
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index 981e04c26a5..a600be68787 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -96,7 +96,7 @@ namespace mongo {
const ServerParameter::Map& m = ServerParameterSet::getGlobal()->getMap();
for ( ServerParameter::Map::const_iterator i = m.begin(); i != m.end(); ++i ) {
if ( all || cmdObj.hasElement( i->first.c_str() ) ) {
- i->second->append( result, i->second->name() );
+ i->second->append(txn, result, i->second->name());
}
}
@@ -177,7 +177,7 @@ namespace mongo {
}
if ( s == 0 )
- j->second->append( result, "was" );
+ j->second->append(txn, result, "was");
Status status = j->second->set( e );
if ( status.isOK() ) {
@@ -203,7 +203,7 @@ namespace mongo {
public:
LogLevelSetting() : ServerParameter(ServerParameterSet::getGlobal(), "logLevel") {}
- virtual void append(BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
b << name << logger::globalLogDomain()->getMinimumLogSeverity().toInt();
}
@@ -257,7 +257,8 @@ namespace mongo {
}
}
- virtual void append(BSONObjBuilder& b, const std::string& name) {
+ virtual void append(
+ OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
b << name << sslModeStr();
}
@@ -324,7 +325,8 @@ namespace mongo {
}
}
- virtual void append(BSONObjBuilder& b, const std::string& name) {
+ virtual void append(
+ OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
b << name << clusterAuthModeStr();
}
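A minimal sketch (not part of this patch) of a server parameter written against the new append() signature, which now receives the OperationContext. Only the append() parameter list comes from the hunks above; the parameter class itself is illustrative.

    // Hypothetical parameter illustrating the new append() signature only.
    class ExampleSetting : public ServerParameter {
    public:
        ExampleSetting()
            : ServerParameter(ServerParameterSet::getGlobal(), "exampleSetting"),
              _value(0) {}

        virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
            // txn is available for parameters whose value lives in storage.
            b << name << _value;
        }

        virtual Status set(const BSONElement& newValueElement) {
            _value = newValueElement.numberInt();
            return Status::OK();
        }

        virtual Status setFromString(const std::string& str) {
            _value = atoi(str.c_str());
            return Status::OK();
        }

    private:
        int _value;
    };
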
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index ea373a92d1c..a43d77eeda0 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -317,7 +317,7 @@ namespace {
// sharding version that we synchronize on here. This is also why we always need to
// create a ClientCursor even when we aren't outputting to a cursor. See the comment
// on ShardFilterStage for more details.
- Client::ReadContext ctx(ns);
+ Client::ReadContext ctx(txn, ns);
Collection* collection = ctx.ctx().db()->getCollection(ns);
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index f7e97c68072..a8d32641646 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -118,7 +118,7 @@ namespace mongo {
string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string ns = parseNs(dbname, cmdObj);
- Status status = runPlanCacheCommand(ns, cmdObj, &result);
+ Status status = runPlanCacheCommand(txn, ns, cmdObj, &result);
if (!status.isOK()) {
addStatus(status, result);
@@ -138,7 +138,8 @@ namespace mongo {
ss << helpText;
}
- Status PlanCacheCommand::checkAuthForCommand(ClientBasic* client, const std::string& dbname,
+ Status PlanCacheCommand::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
const BSONObj& cmdObj) {
AuthorizationSession* authzSession = client->getAuthorizationSession();
ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
@@ -206,10 +207,12 @@ namespace mongo {
"Displays all query shapes in a collection.",
ActionType::planCacheRead) { }
- Status PlanCacheListQueryShapes::runPlanCacheCommand(const string& ns, BSONObj& cmdObj,
+ Status PlanCacheListQueryShapes::runPlanCacheCommand(OperationContext* txn,
+ const string& ns,
+ BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query cache is owned by the collection.
- Client::ReadContext readCtx(ns);
+ Client::ReadContext readCtx(txn, ns);
Client::Context& ctx = readCtx.ctx();
PlanCache* planCache;
Status status = getPlanCache(ctx.db(), ns, &planCache);
@@ -252,10 +255,12 @@ namespace mongo {
"Drops one or all cached queries in a collection.",
ActionType::planCacheWrite) { }
- Status PlanCacheClear::runPlanCacheCommand(const string& ns, BSONObj& cmdObj,
+ Status PlanCacheClear::runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
BSONObjBuilder* bob) {
// This is a read lock. The query cache is owned by the collection.
- Client::ReadContext readCtx(ns);
+ Client::ReadContext readCtx(txn, ns);
Client::Context& ctx = readCtx.ctx();
PlanCache* planCache;
Status status = getPlanCache(ctx.db(), ns, &planCache);
@@ -322,9 +327,11 @@ namespace mongo {
"Displays the cached plans for a query shape.",
ActionType::planCacheRead) { }
- Status PlanCacheListPlans::runPlanCacheCommand(const string& ns, BSONObj& cmdObj,
+ Status PlanCacheListPlans::runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
BSONObjBuilder* bob) {
- Client::ReadContext readCtx(ns);
+ Client::ReadContext readCtx(txn, ns);
Client::Context& ctx = readCtx.ctx();
PlanCache* planCache;
Status status = getPlanCache(ctx.db(), ns, &planCache);
diff --git a/src/mongo/db/commands/plan_cache_commands.h b/src/mongo/db/commands/plan_cache_commands.h
index 8e7eb9667a4..507d4fe4927 100644
--- a/src/mongo/db/commands/plan_cache_commands.h
+++ b/src/mongo/db/commands/plan_cache_commands.h
@@ -72,7 +72,8 @@ namespace mongo {
* - planCacheRead
* - planCacheWrite
*/
- virtual Status checkAuthForCommand(ClientBasic* client, const std::string& dbname,
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
const BSONObj& cmdObj);
/**
* Subset of command arguments used by plan cache commands
@@ -80,7 +81,9 @@ namespace mongo {
* Should contain just enough logic to invoke run*Command() function
* in plan_cache.h
*/
- virtual Status runPlanCacheCommand(const std::string& ns, BSONObj& cmdObj,
+ virtual Status runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
BSONObjBuilder* bob) = 0;
/**
@@ -103,7 +106,10 @@ namespace mongo {
class PlanCacheListQueryShapes : public PlanCacheCommand {
public:
PlanCacheListQueryShapes();
- virtual Status runPlanCacheCommand(const std::string& ns, BSONObj& cmdObj, BSONObjBuilder* bob);
+ virtual Status runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
* Looks up cache keys for collection's plan cache.
@@ -126,7 +132,10 @@ namespace mongo {
class PlanCacheClear : public PlanCacheCommand {
public:
PlanCacheClear();
- virtual Status runPlanCacheCommand(const std::string& ns, BSONObj& cmdObj, BSONObjBuilder* bob);
+ virtual Status runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
* Clears collection's plan cache.
@@ -149,7 +158,9 @@ namespace mongo {
class PlanCacheListPlans : public PlanCacheCommand {
public:
PlanCacheListPlans();
- virtual Status runPlanCacheCommand(const std::string& ns, BSONObj& cmdObj,
+ virtual Status runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
BSONObjBuilder* bob);
/**
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 006a86d3677..7265270beef 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -62,7 +62,7 @@ namespace mongo {
string ns = dbname + "." + coll;
BSONObj obj = cmdObj[ "obj" ].embeddedObjectUserCheck();
- Lock::DBWrite lk(ns);
+ Lock::DBWrite lk(txn->lockState(), ns);
Client::Context ctx( ns );
Database* db = ctx.db();
Collection* collection = db->getCollection( ns );
@@ -140,7 +140,7 @@ namespace mongo {
int n = cmdObj.getIntField( "n" );
bool inc = cmdObj.getBoolField( "inc" ); // inclusive range?
- Client::WriteContext ctx( nss.ns() );
+ Client::WriteContext ctx(txn, nss.ns());
Collection* collection = ctx.ctx().db()->getCollection( nss.ns() );
massert( 13417, "captrunc collection not found or empty", collection);
@@ -185,7 +185,7 @@ namespace mongo {
uassert( 13428, "emptycapped must specify a collection", !coll.empty() );
NamespaceString nss( dbname, coll );
- Client::WriteContext ctx( nss.ns() );
+ Client::WriteContext ctx(txn, nss.ns());
Database* db = ctx.ctx().db();
Collection* collection = db->getCollection( nss.ns() );
massert( 13429, "emptycapped no such collection", collection );
diff --git a/src/mongo/db/commands/touch.cpp b/src/mongo/db/commands/touch.cpp
index 841a738abf2..ec2fc972659 100644
--- a/src/mongo/db/commands/touch.cpp
+++ b/src/mongo/db/commands/touch.cpp
@@ -104,7 +104,7 @@ namespace mongo {
return false;
}
- Client::ReadContext context( nss.ns() );
+ Client::ReadContext context(txn, nss.ns());
Database* db = context.ctx().db();
Collection* collection = db->getCollection( nss.ns() );
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 40999d0cbe5..4682f9a055b 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -118,12 +118,13 @@ namespace mongo {
return Status::OK();
}
- static Status getCurrentUserRoles(AuthorizationManager* authzManager,
+ static Status getCurrentUserRoles(OperationContext* txn,
+ AuthorizationManager* authzManager,
const UserName& userName,
unordered_set<RoleName>* roles) {
User* user;
authzManager->invalidateUserByName(userName); // Need to make sure cache entry is up to date
- Status status = authzManager->acquireUser(userName, &user);
+ Status status = authzManager->acquireUser(txn, userName, &user);
if (!status.isOK()) {
return status;
}
@@ -265,9 +266,10 @@ namespace mongo {
return Status::OK();
}
- static Status requireAuthSchemaVersion26Final(AuthorizationManager* authzManager) {
+ static Status requireAuthSchemaVersion26Final(OperationContext* txn,
+ AuthorizationManager* authzManager) {
int foundSchemaVersion;
- Status status = authzManager->getAuthorizationVersion(&foundSchemaVersion);
+ Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
if (!status.isOK()) {
return status;
}
@@ -282,9 +284,10 @@ namespace mongo {
return authzManager->writeAuthSchemaVersionIfNeeded();
}
- static Status requireAuthSchemaVersion26UpgradeOrFinal(AuthorizationManager* authzManager) {
+ static Status requireAuthSchemaVersion26UpgradeOrFinal(OperationContext* txn,
+ AuthorizationManager* authzManager) {
int foundSchemaVersion;
- Status status = authzManager->getAuthorizationVersion(&foundSchemaVersion);
+ Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
if (!status.isOK()) {
return status;
}
@@ -434,7 +437,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- status = requireAuthSchemaVersion26Final(authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -579,7 +582,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- status = requireAuthSchemaVersion26Final(authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -668,7 +671,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- Status status = requireAuthSchemaVersion26Final(authzManager);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -754,7 +757,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- Status status = requireAuthSchemaVersion26Final(authzManager);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -836,7 +839,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- Status status = requireAuthSchemaVersion26Final(authzManager);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -856,7 +859,7 @@ namespace mongo {
UserName userName(userNameString, dbname);
unordered_set<RoleName> userRoles;
- status = getCurrentUserRoles(authzManager, userName, &userRoles);
+ status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -934,7 +937,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- Status status = requireAuthSchemaVersion26Final(authzManager);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -954,7 +957,7 @@ namespace mongo {
UserName userName(userNameString, dbname);
unordered_set<RoleName> userRoles;
- status = getCurrentUserRoles(authzManager, userName, &userRoles);
+ status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1049,7 +1052,8 @@ namespace mongo {
return appendCommandStatus(result, status);
}
- status = requireAuthSchemaVersion26UpgradeOrFinal(getGlobalAuthorizationManager());
+ status = requireAuthSchemaVersion26UpgradeOrFinal(txn,
+ getGlobalAuthorizationManager());
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1068,7 +1072,7 @@ namespace mongo {
for (size_t i = 0; i < args.userNames.size(); ++i) {
BSONObj userDetails;
status = getGlobalAuthorizationManager()->getUserDescription(
- args.userNames[i], &userDetails);
+ txn, args.userNames[i], &userDetails);
if (status.code() == ErrorCodes::UserNotFound) {
continue;
}
@@ -1107,7 +1111,7 @@ namespace mongo {
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
int authzVersion;
- Status status = authzManager->getAuthorizationVersion(&authzVersion);
+ Status status = authzManager->getAuthorizationVersion(txn, &authzVersion);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1252,7 +1256,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- status = requireAuthSchemaVersion26Final(authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1369,7 +1373,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- status = requireAuthSchemaVersion26Final(authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1459,7 +1463,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- Status status = requireAuthSchemaVersion26Final(authzManager);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1595,7 +1599,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- Status status = requireAuthSchemaVersion26Final(authzManager);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1755,7 +1759,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- status = requireAuthSchemaVersion26Final(authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1851,7 +1855,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- Status status = requireAuthSchemaVersion26Final(authzManager);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -1970,7 +1974,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- Status status = requireAuthSchemaVersion26Final(authzManager);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2146,7 +2150,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- status = requireAuthSchemaVersion26Final(authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2292,7 +2296,8 @@ namespace mongo {
return appendCommandStatus(result, status);
}
- status = requireAuthSchemaVersion26UpgradeOrFinal(getGlobalAuthorizationManager());
+ status = requireAuthSchemaVersion26UpgradeOrFinal(txn,
+ getGlobalAuthorizationManager());
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -2840,7 +2845,7 @@ namespace mongo {
Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- status = requireAuthSchemaVersion26Final(authzManager);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 28d06c9ae05..9b539a8b954 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -75,7 +75,7 @@ namespace mongo {
MONGO_TLOG(0) << "CMD: validate " << ns << endl;
}
- Client::ReadContext ctx(ns_string.ns());
+ Client::ReadContext ctx(txn, ns_string.ns());
Database* db = ctx.ctx().db();
if ( !db ) {
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index a6332a50288..b147446a93d 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -902,7 +902,7 @@ namespace mongo {
}
invariant(!_context.get());
- _writeLock.reset(new Lock::DBWrite(request->getNS()));
+ _writeLock.reset(new Lock::DBWrite(txn->lockState(), request->getNS()));
if (!checkIsMasterForCollection(request->getNS(), result)) {
return false;
}
@@ -1087,7 +1087,7 @@ namespace mongo {
}
///////////////////////////////////////////
- Lock::DBWrite writeLock( nsString.ns() );
+ Lock::DBWrite writeLock(txn->lockState(), nsString.ns());
///////////////////////////////////////////
if ( !checkShardVersion( &shardingState, *updateItem.getRequest(), result ) )
@@ -1144,7 +1144,7 @@ namespace mongo {
}
///////////////////////////////////////////
- Lock::DBWrite writeLock( nss.ns() );
+ Lock::DBWrite writeLock(txn->lockState(), nss.ns());
///////////////////////////////////////////
// Check version once we're locked
diff --git a/src/mongo/db/d_concurrency.cpp b/src/mongo/db/d_concurrency.cpp
index 7f84d776bb1..15ab51528fe 100644
--- a/src/mongo/db/d_concurrency.cpp
+++ b/src/mongo/db/d_concurrency.cpp
@@ -583,12 +583,12 @@ namespace mongo {
lockNestable(nested);
}
- Lock::DBWrite::DBWrite( const StringData& ns )
+ Lock::DBWrite::DBWrite(LockState* lockState, const StringData& ns)
: ScopedLock( 'w' ), _what(ns.toString()), _nested(false) {
lockDB( _what );
}
- Lock::DBRead::DBRead( const StringData& ns )
+ Lock::DBRead::DBRead(LockState* lockState, const StringData& ns)
: ScopedLock( 'r' ), _what(ns.toString()), _nested(false) {
lockDB( _what );
}
diff --git a/src/mongo/db/d_concurrency.h b/src/mongo/db/d_concurrency.h
index f99cb46184f..8359f23614d 100644
--- a/src/mongo/db/d_concurrency.h
+++ b/src/mongo/db/d_concurrency.h
@@ -112,6 +112,8 @@ namespace mongo {
virtual void _tempRelease() = 0;
virtual void _relock() = 0;
+ LockState* _lockState;
+
private:
class ParallelBatchWriterSupport : boost::noncopyable {
@@ -183,7 +185,7 @@ namespace mongo {
void _relock();
public:
- DBWrite(const StringData& dbOrNs);
+ DBWrite(LockState* lockState, const StringData& dbOrNs);
virtual ~DBWrite();
private:
@@ -207,7 +209,7 @@ namespace mongo {
void _relock();
public:
- DBRead(const StringData& dbOrNs);
+ DBRead(LockState* lockState, const StringData& dbOrNs);
virtual ~DBRead();
private:
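A minimal usage sketch (not part of this patch): the lock constructors now receive the LockState explicitly, taken from the operation's OperationContext, instead of looking it up from thread-local storage inside the lock. The helper functions are hypothetical; only the DBRead/DBWrite parameter lists come from the header above.

    // Hypothetical helpers; only the constructor parameter lists are real.
    void readOnlyWork(OperationContext* txn, const StringData& ns) {
        Lock::DBRead readLock(txn->lockState(), ns);
        // ... read from ns while holding the shared DB lock ...
    }

    void writeWork(OperationContext* txn, const StringData& ns) {
        Lock::DBWrite writeLock(txn->lockState(), ns);
        // ... modify ns while holding the exclusive DB lock ...
    }
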
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index cd9a98b3a7c..93408e92a23 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -752,14 +752,17 @@ namespace mongo {
#ifndef _WIN32
mongo::signalForkSuccess();
#endif
+ {
+ OperationContextImpl txn;
- if(getGlobalAuthorizationManager()->isAuthEnabled()) {
- // open admin db in case we need to use it later. TODO this is not the right way to
- // resolve this.
- Client::WriteContext c("admin", storageGlobalParams.dbpath);
- }
+ if (getGlobalAuthorizationManager()->isAuthEnabled()) {
+ // open admin db in case we need to use it later. TODO this is not the right way to
+ // resolve this.
+ Client::WriteContext ctx(&txn, "admin");
+ }
- authindex::configureSystemIndexes("admin");
+ authindex::configureSystemIndexes(&txn, "admin");
+ }
getDeleter()->startWorkers();
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 3fa6918ad50..e751dbaeebb 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -353,7 +353,7 @@ namespace mongo {
// Needs to be locked exclusively, because creates the system.profile collection
// in the local database.
//
- Lock::DBWrite dbXLock(dbname);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(dbname);
BSONElement e = cmdObj.firstElement();
@@ -403,7 +403,7 @@ namespace mongo {
// This doesn't look like it requires exclusive DB lock, because it uses its own diag
// locking, but originally the lock was set to be WRITE, so preserving the behaviour.
//
- Lock::DBWrite dbXLock(dbname);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(dbname);
int was = _diaglog.setLevel( cmdObj.firstElement().numberInt() );
@@ -457,7 +457,7 @@ namespace mongo {
return false;
}
- Lock::DBWrite dbXLock(dbname);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(nsToDrop);
Database* db = ctx.db();
@@ -528,7 +528,7 @@ namespace mongo {
// This acquires the DB read lock
//
- Client::ReadContext ctx(ns);
+ Client::ReadContext ctx(txn, ns);
string err;
int errCode;
@@ -621,7 +621,7 @@ namespace mongo {
!options["capped"].trueValue() || options["size"].isNumber() ||
options.hasField("$nExtents"));
- Lock::DBWrite dbXLock(dbname);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(ns);
// Create collection.
@@ -667,7 +667,7 @@ namespace mongo {
totalSize += size;
{
- Client::ReadContext rc( *i + ".system.namespaces" );
+ Client::ReadContext rc(txn, *i + ".system.namespaces");
b.appendBool( "empty", rc.ctx().db()->isEmpty() );
}
@@ -695,7 +695,7 @@ namespace mongo {
b.append( "sizeOnDisk" , (double)1.0 );
{
- Client::ReadContext ctx( name );
+ Client::ReadContext ctx(txn, name);
b.appendBool( "empty", ctx.ctx().db()->isEmpty() );
}
@@ -812,7 +812,7 @@ namespace mongo {
// Check shard version at startup.
// This will throw before we've done any work if shard version is outdated
- Client::ReadContext ctx(ns);
+ Client::ReadContext ctx(txn, ns);
Collection* coll = ctx.ctx().db()->getCollection(ns);
CanonicalQuery* cq;
@@ -920,7 +920,7 @@ namespace mongo {
BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
bool estimate = jsobj["estimate"].trueValue();
- Client::ReadContext ctx(ns);
+ Client::ReadContext ctx(txn, ns);
Collection* collection = ctx.ctx().db()->getCollection( ns );
@@ -1037,7 +1037,7 @@ namespace mongo {
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
const string ns = dbname + "." + jsobj.firstElement().valuestr();
- Client::ReadContext cx( ns );
+ Client::ReadContext cx(txn, ns);
Database* db = cx.ctx().db();
Collection* collection = db->getCollection( ns );
if ( !collection ) {
@@ -1114,7 +1114,7 @@ namespace mongo {
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
const string ns = dbname + "." + jsobj.firstElement().valuestr();
- Lock::DBWrite dbXLock(dbname);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx( ns );
Collection* coll = ctx.db()->getCollection( ns );
@@ -1244,7 +1244,7 @@ namespace mongo {
const string ns = parseNs(dbname, jsobj);
- Client::ReadContext ctx(ns);
+ Client::ReadContext ctx(txn, ns);
Database* d = ctx.ctx().db();
d->getStats( &result, scale );
@@ -1395,8 +1395,10 @@ namespace mongo {
std::vector<UserName> parsedUserNames;
AuthorizationSession* authSession = client.getAuthorizationSession();
bool fieldIsPresent = false;
- audit::parseAndRemoveImpersonatedUserField(cmdObj, authSession,
- &parsedUserNames, &fieldIsPresent);
+ audit::parseAndRemoveImpersonatedUserField(cmdObj,
+ authSession,
+ &parsedUserNames,
+ &fieldIsPresent);
ImpersonationSessionGuard impersonationSession(authSession,
fieldIsPresent,
parsedUserNames);
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 767bf537d1f..1a2e0c4652f 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -297,10 +297,12 @@ namespace mongo {
return kpBuilder.obj();
}
- bool findShardKeyIndexPattern( const string& ns,
- const BSONObj& shardKeyPattern,
- BSONObj* indexPattern ) {
- Client::ReadContext context( ns );
+ static bool findShardKeyIndexPattern(OperationContext* txn,
+ const string& ns,
+ const BSONObj& shardKeyPattern,
+ BSONObj* indexPattern ) {
+
+ Client::ReadContext context(txn, ns);
Collection* collection = context.ctx().db()->getCollection( ns );
if ( !collection )
return false;
@@ -332,7 +334,8 @@ namespace mongo {
// The IndexChunk has a keyPattern that may apply to more than one index - we need to
// select the index and get the full index keyPattern here.
BSONObj indexKeyPatternDoc;
- if ( !findShardKeyIndexPattern( ns,
+ if ( !findShardKeyIndexPattern( txn,
+ ns,
range.keyPattern,
&indexKeyPatternDoc ) )
{
@@ -366,7 +369,7 @@ namespace mongo {
while ( 1 ) {
// Scoping for write lock.
{
- Client::WriteContext ctx(ns);
+ Client::WriteContext ctx(txn, ns);
Collection* collection = ctx.ctx().db()->getCollection( txn, ns );
if ( !collection )
break;
@@ -476,7 +479,9 @@ namespace mongo {
// Used by migration clone step
// TODO: Cannot hook up quite yet due to _trackerLocks in shared migration code.
- Status Helpers::getLocsInRange( const KeyRange& range,
+ // TODO: This function is not used outside of tests
+ Status Helpers::getLocsInRange( OperationContext* txn,
+ const KeyRange& range,
long long maxChunkSizeBytes,
set<DiskLoc>* locs,
long long* numDocs,
@@ -486,7 +491,7 @@ namespace mongo {
*estChunkSizeBytes = 0;
*numDocs = 0;
- Client::ReadContext ctx( ns );
+ Client::ReadContext ctx(txn, ns);
Collection* collection = ctx.ctx().db()->getCollection( ns );
if ( !collection ) return Status( ErrorCodes::NamespaceNotFound, ns );
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index a5e9947f918..2cba18345d5 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -188,7 +188,8 @@ namespace mongo {
* @return IndexNotFound if the index pattern doesn't match any indexes
* @return InvalidLength if the estimated size exceeds maxChunkSizeBytes
*/
- static Status getLocsInRange( const KeyRange& range,
+ static Status getLocsInRange( OperationContext* txn,
+ const KeyRange& range,
long long maxChunkSizeBytes,
std::set<DiskLoc>* locs,
long long* numDocs,
diff --git a/src/mongo/db/dbwebserver.cpp b/src/mongo/db/dbwebserver.cpp
index 26a0758ed67..66fbfeeebcd 100644
--- a/src/mongo/db/dbwebserver.cpp
+++ b/src/mongo/db/dbwebserver.cpp
@@ -99,13 +99,17 @@ namespace mongo {
ss << "</pre>";
}
- void _authorizePrincipal(const UserName& userName) {
- Status status = cc().getAuthorizationSession()->addAndAuthorizeUser(userName);
+ void _authorizePrincipal(OperationContext* txn, const UserName& userName) {
+ Status status = cc().getAuthorizationSession()->addAndAuthorizeUser(txn, userName);
uassertStatusOK(status);
}
- bool allowed( const char * rq , vector<string>& headers, const SockAddr &from ) {
- if ( from.isLocalHost() || !_webUsers->haveAdminUsers() ) {
+ bool allowed(OperationContext* txn,
+ const char * rq,
+ vector<string>& headers,
+ const SockAddr &from) {
+
+ if ( from.isLocalHost() || !_webUsers->haveAdminUsers(txn) ) {
// TODO(spencer): should the above check use "&&" not "||"? Currently this is much
// more permissive than the server's localhost auth bypass.
cc().getAuthorizationSession()->grantInternalAuthorization();
@@ -131,7 +135,7 @@ namespace mongo {
User* user;
AuthorizationManager& authzManager =
cc().getAuthorizationSession()->getAuthorizationManager();
- Status status = authzManager.acquireUser(userName, &user);
+ Status status = authzManager.acquireUser(txn, userName, &user);
if (!status.isOK()) {
if (status.code() != ErrorCodes::UserNotFound) {
uasserted(17051, status.reason());
@@ -159,7 +163,7 @@ namespace mongo {
string r1 = md5simpledigest( r.str() );
if ( r1 == parms["response"] ) {
- _authorizePrincipal(userName);
+ _authorizePrincipal(txn, userName);
return true;
}
}
@@ -191,7 +195,7 @@ namespace mongo {
if ( url.size() > 1 ) {
- if ( ! allowed( rq , headers, from ) ) {
+ if (!allowed(txn.get(), rq, headers, from)) {
responseCode = 401;
headers.push_back( "Content-Type: text/plain;charset=utf-8" );
responseMsg = "not allowed\n";
@@ -240,7 +244,7 @@ namespace mongo {
// generate home page
- if ( ! allowed( rq , headers, from ) ) {
+ if (!allowed(txn.get(), rq, headers, from)) {
responseCode = 401;
headers.push_back( "Content-Type: text/plain;charset=utf-8" );
responseMsg = "not allowed\n";
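
The dbwebserver.cpp hunks thread the context into the authorization layer for the same reason: checking for admin users or acquiring a user document can mean reading from the admin database, and that read now needs a lock owner. A sketch of a lookup under the new API (the helper name is illustrative; handing the acquired User back to the manager is elided):

    Status lookupUser(OperationContext* txn,
                      AuthorizationManager& authzManager,
                      const UserName& userName) {
        User* user;
        // acquireUser() may read the privilege documents, hence the txn argument.
        Status status = authzManager.acquireUser(txn, userName, &user);
        if (!status.isOK()) {
            return status;
        }
        // ... inspect credentials, then return the User to the manager ...
        return Status::OK();
    }
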
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 5446d2b5c2d..ca10066c960 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -116,7 +116,7 @@ namespace mongo {
string collName = collElt.String();
// Need a context to get the actual Collection*
- Client::ReadContext ctx(dbname);
+ Client::ReadContext ctx(txn, dbname);
// Make sure the collection is valid.
Database* db = ctx.ctx().db();
diff --git a/src/mongo/db/fts/fts_command.cpp b/src/mongo/db/fts/fts_command.cpp
index c95a6c52bb1..128b5a9f255 100644
--- a/src/mongo/db/fts/fts_command.cpp
+++ b/src/mongo/db/fts/fts_command.cpp
@@ -92,7 +92,7 @@ namespace mongo {
projection = cmdObj["project"].Obj();
}
- return _run( dbname, cmdObj, options,
+ return _run( txn, dbname, cmdObj, options,
ns, search, language, limit, filter, projection, errmsg, result );
}
diff --git a/src/mongo/db/fts/fts_command.h b/src/mongo/db/fts/fts_command.h
index 4c0ced7efea..7f6b4d7bc4d 100644
--- a/src/mongo/db/fts/fts_command.h
+++ b/src/mongo/db/fts/fts_command.h
@@ -40,6 +40,8 @@
namespace mongo {
+ class OperationContext;
+
namespace fts {
class FTSCommand : public Command {
@@ -64,7 +66,8 @@ namespace mongo {
bool fromRepl);
protected:
- bool _run( const std::string& dbName,
+ bool _run( OperationContext* txn,
+ const std::string& dbName,
BSONObj& cmdObj,
int cmdOptions,
const std::string& ns,
diff --git a/src/mongo/db/fts/fts_command_mongod.cpp b/src/mongo/db/fts/fts_command_mongod.cpp
index 230c6d00fb4..c422d9d8863 100644
--- a/src/mongo/db/fts/fts_command_mongod.cpp
+++ b/src/mongo/db/fts/fts_command_mongod.cpp
@@ -54,7 +54,8 @@ namespace mongo {
* @param fromRepl
* @return true if successful, false otherwise
*/
- bool FTSCommand::_run(const string& dbname,
+ bool FTSCommand::_run(OperationContext* txn,
+ const string& dbname,
BSONObj& cmdObj,
int cmdOptions,
const string& ns,
@@ -92,7 +93,7 @@ namespace mongo {
projBob.appendElements(sortSpec);
BSONObj projObj = projBob.obj();
- Client::ReadContext ctx(ns);
+ Client::ReadContext ctx(txn, ns);
CanonicalQuery* cq;
Status canonicalizeStatus =
diff --git a/src/mongo/db/fts/fts_command_mongos.cpp b/src/mongo/db/fts/fts_command_mongos.cpp
index 0c0f09e957b..df152856e2d 100644
--- a/src/mongo/db/fts/fts_command_mongos.cpp
+++ b/src/mongo/db/fts/fts_command_mongos.cpp
@@ -54,7 +54,8 @@ namespace mongo {
double score;
};
- bool FTSCommand::_run(const string& dbName,
+ bool FTSCommand::_run(OperationContext* txn,
+ const string& dbName,
BSONObj& cmdObj,
int cmdOptions,
const string& ns,
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
index b5c61765b79..644c2ba60d2 100644
--- a/src/mongo/db/geo/haystack.cpp
+++ b/src/mongo/db/geo/haystack.cpp
@@ -71,7 +71,7 @@ namespace mongo {
bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int,
string& errmsg, BSONObjBuilder& result, bool fromRepl) {
const string ns = dbname + "." + cmdObj.firstElement().valuestr();
- Client::ReadContext ctx(ns);
+ Client::ReadContext ctx(txn, ns);
Database* db = ctx.ctx().db();
if ( !db ) {
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index 6014608300c..1e97c3da193 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -55,6 +55,8 @@ namespace mongo {
void IndexBuilder::run() {
LOG(2) << "IndexBuilder building index " << _index;
+ OperationContextImpl txn;
+
Client::initThread(name().c_str());
Lock::ParallelBatchWriterMode::iAmABatchParticipant();
@@ -62,8 +64,7 @@ namespace mongo {
cc().curop()->reset(HostAndPort(), dbInsert);
NamespaceString ns(_index["ns"].String());
- Client::WriteContext ctx(ns.getSystemIndexesCollection());
- OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, ns.getSystemIndexesCollection());
Database* db = dbHolder().get(ns.db().toString(), storageGlobalParams.dbpath);
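
index_builder.cpp also shows an ordering rule that repeats across the patch: the OperationContextImpl is now constructed before the Client::WriteContext, because the write context borrows the context's LockState and must therefore be created after it and destroyed before it. The required ordering, reduced to a sketch (illustrative body for a background thread that owns its own context):

    void someBackgroundJob(const std::string& ns) {
        OperationContextImpl txn;              // 1. owns the LockState
        Client::WriteContext ctx(&txn, ns);    // 2. takes the DB write lock via that LockState
        // ... do the work under the lock ...
    }   // ctx releases the lock first, then txn goes away
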
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 3e45d143cab..b9c89ab8b04 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -63,7 +63,9 @@ namespace mongo {
for (std::vector<std::string>::const_iterator dbName = dbNames.begin();
dbName < dbNames.end();
dbName++) {
- Client::ReadContext ctx(*dbName);
+ OperationContextImpl txn;
+ Client::ReadContext ctx(&txn, *dbName);
+
Database* db = ctx.ctx().db();
db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collNames);
}
@@ -88,10 +90,11 @@ namespace mongo {
LOG(3) << "IndexRebuilder::checkNS: " << ns;
+ OperationContextImpl txn; // XXX???
+
// This write lock is held throughout the index building process
// for this namespace.
- Client::WriteContext ctx(ns);
- OperationContextImpl txn; // XXX???
+ Client::WriteContext ctx(&txn, ns);
Collection* collection = ctx.ctx().db()->getCollection( ns );
if ( collection == NULL )
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 613165ff4d2..3e2566bde51 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -182,6 +182,7 @@ namespace mongo {
DbMessage d(m);
QueryMessage q(d);
BSONObj obj;
+
const bool isAuthorized = cc().getAuthorizationSession()->isAuthorizedForActionsOnResource(
ResourcePattern::forClusterResource(), ActionType::killop);
audit::logKillOpAuthzCheck(&cc(),
@@ -210,6 +211,7 @@ namespace mongo {
bool _unlockFsync();
void unlockFsync(const char *ns, Message& m, DbResponse &dbresponse) {
BSONObj obj;
+
const bool isAuthorized = cc().getAuthorizationSession()->isAuthorizedForActionsOnResource(
ResourcePattern::forClusterResource(), ActionType::unlock);
audit::logFsyncUnlockAuthzCheck(
@@ -341,7 +343,7 @@ namespace mongo {
Client& c = cc();
if (!c.isGod())
- c.getAuthorizationSession()->startRequest();
+ c.getAuthorizationSession()->startRequest(txn);
if ( op == dbQuery ) {
if( strstr(ns, ".$cmd") ) {
@@ -528,7 +530,7 @@ namespace mongo {
verify( n < 30000 );
}
- int found = CollectionCursorCache::eraseCursorGlobalIfAuthorized(n, (long long *) x);
+ int found = CollectionCursorCache::eraseCursorGlobalIfAuthorized(txn, n, (long long *) x);
if ( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1)) || found != n ) {
LOG( found == n ? 1 : 0 ) << "killcursors: found " << found << " of " << n << endl;
@@ -603,7 +605,7 @@ namespace mongo {
UpdateExecutor executor(&request, &op.debug());
uassertStatusOK(executor.prepare());
- Lock::DBWrite lk(ns.ns());
+ Lock::DBWrite lk(txn->lockState(), ns.ns());
// if this ever moves to outside of lock, need to adjust check
// Client::Context::_finishInit
@@ -643,7 +645,7 @@ namespace mongo {
request.setUpdateOpLog(true);
DeleteExecutor executor(&request);
uassertStatusOK(executor.prepare());
- Lock::DBWrite lk(ns.ns());
+ Lock::DBWrite lk(txn->lockState(), ns.ns());
// if this ever moves to outside of lock, need to adjust check Client::Context::_finishInit
if ( ! broadcast && handlePossibleShardedMessage( m , 0 ) )
@@ -717,7 +719,7 @@ namespace mongo {
// because it may now be out of sync with the client's iteration state.
// SERVER-7952
// TODO Temporary code, see SERVER-4563 for a cleanup overview.
- CollectionCursorCache::eraseCursorGlobal( cursorid );
+ CollectionCursorCache::eraseCursorGlobal(txn, cursorid );
}
ex.reset( new AssertionException( e.getInfo().msg, e.getCode() ) );
ok = false;
@@ -880,7 +882,7 @@ namespace mongo {
uassertStatusOK(status);
}
- Lock::DBWrite lk(ns);
+ Lock::DBWrite lk(txn->lockState(), ns);
// CONCURRENCY TODO: is being read locked in big log sufficient here?
// writelock is used to synchronize stepdowns w/ writes
@@ -924,7 +926,7 @@ namespace mongo {
local database does NOT count except for rsoplog collection.
used to set the hasData field on replset heartbeat command response
*/
- bool replHasDatabases() {
+ bool replHasDatabases(OperationContext* txn) {
vector<string> names;
getDatabaseNames(names);
if( names.size() >= 2 ) return true;
@@ -933,7 +935,7 @@ namespace mongo {
return true;
// we have a local database. return true if oplog isn't empty
{
- Lock::DBRead lk(repl::rsoplog);
+ Lock::DBRead lk(txn->lockState(), repl::rsoplog);
BSONObj o;
if( Helpers::getFirst(repl::rsoplog, o) )
return true;
@@ -1002,7 +1004,9 @@ namespace {
}
void DBDirectClient::killCursor( long long id ) {
- CollectionCursorCache::eraseCursorGlobal( id );
+ // The killCursor command on the DB client is only used by sharding,
+ // so no need to have it for MongoD.
+ verify(!"killCursor should not be used in MongoD");
}
HostAndPort DBDirectClient::_clientHost = HostAndPort( "0.0.0.0" , 0 );
@@ -1013,7 +1017,9 @@ namespace {
<< " to zero in query: " << query << endl;
skip = 0;
}
- Lock::DBRead lk( ns );
+
+ OperationContextImpl txn;
+ Lock::DBRead lk(txn.lockState(), ns);
string errmsg;
int errCode;
long long res = runCount( ns, _countCmd( ns , query , options , limit , skip ) , errmsg, errCode );
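
The instance.cpp hunks show the second calling convention introduced here: the raw lock types (Lock::DBWrite, Lock::DBRead) no longer reach into TLS either; they take the LockState explicitly. An op handler that already receives the OperationContext simply passes txn->lockState(), while code with no ambient context, such as DBDirectClient::count above, builds an OperationContextImpl of its own. Both cases in a condensed sketch (illustrative wrapper functions, not patch code):

    void writeUnderCallersContext(OperationContext* txn, const std::string& ns) {
        Lock::DBWrite lk(txn->lockState(), ns);   // handler path: reuse the caller's context
        // ... apply the write ...
    }

    void readWithoutCaller(const std::string& ns) {
        OperationContextImpl txn;                 // embedded path: make a context locally
        Lock::DBRead lk(txn.lockState(), ns);
        // ... run the read ...
    }
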
diff --git a/src/mongo/db/instance.h b/src/mongo/db/instance.h
index ff8e655b608..b7039b30c4c 100644
--- a/src/mongo/db/instance.h
+++ b/src/mongo/db/instance.h
@@ -79,7 +79,7 @@ namespace mongo {
/* returns true if there is no data on this server. useful when starting replication.
local database does NOT count.
*/
- bool replHasDatabases();
+ bool replHasDatabases(OperationContext* txn);
/**
* Embedded calls to the local server using the DBClientBase API without going over the network.
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 79894bf88c7..ff9004ab6e8 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -137,7 +137,7 @@ namespace {
try {
// NOTE: It's kind of weird that we lock the op's namespace, but have to for now since
// we're sometimes inside the lock already
- Lock::DBWrite lk( currentOp.getNS() );
+ Lock::DBWrite lk(txn->lockState(), currentOp.getNS() );
if (dbHolder()._isLoaded(nsToDatabase(currentOp.getNS()), storageGlobalParams.dbpath)) {
Client::Context cx(currentOp.getNS(), storageGlobalParams.dbpath, false);
_profile(txn, c, cx.db(),
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 87131cbc17b..70ac51d2000 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -78,7 +78,7 @@ namespace mongo {
// We have already validated the sharding version when we constructed the Runner
// so we shouldn't check it again.
- Lock::DBRead lk(_ns);
+ Lock::DBRead lk(pExpCtx->opCtx->lockState(), _ns);
Client::Context ctx(_ns, storageGlobalParams.dbpath, /*doVersion=*/false);
_runner->restoreState(pExpCtx->opCtx);
@@ -199,7 +199,7 @@ namespace {
Status explainStatus(ErrorCodes::InternalError, "");
scoped_ptr<TypeExplain> plan;
{
- Lock::DBRead lk(_ns);
+ Lock::DBRead lk(pExpCtx->opCtx->lockState(), _ns);
Client::Context ctx(_ns, storageGlobalParams.dbpath, /*doVersion=*/false);
massert(17392, "No _runner. Were we disposed before explained?",
_runner);
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index df8a6716987..4409b899c4c 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -64,7 +64,7 @@ namespace {
}
bool isCapped(const NamespaceString& ns) {
- Client::ReadContext ctx(ns.ns());
+ Client::ReadContext ctx(_ctx->opCtx, ns.ns());
Collection* collection = ctx.ctx().db()->getCollection(ns);
return collection && collection->isCapped();
}
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index 7634b2d862a..36ad06ef964 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -55,8 +55,12 @@ namespace mongo {
"repl.preload.docs",
&prefetchDocStats );
+ void prefetchIndexPages(Collection* collection, const BSONObj& obj);
+ void prefetchRecordPages(OperationContext* txn, const char* ns, const BSONObj& obj);
+
+
// prefetch for an oplog operation
- void prefetchPagesForReplicatedOp(Database* db, const BSONObj& op) {
+ void prefetchPagesForReplicatedOp(OperationContext* txn, Database* db, const BSONObj& op) {
const char *opField;
const char *opType = op.getStringField("op");
switch (*opType) {
@@ -110,10 +114,11 @@ namespace mongo {
// do not prefetch the data for capped collections because
// they typically do not have an _id index for findById() to use.
!collection->isCapped()) {
- prefetchRecordPages(ns, obj);
+ prefetchRecordPages(txn, ns, obj);
}
}
+ // page in pages needed for all index lookups on a given object
void prefetchIndexPages(Collection* collection, const BSONObj& obj) {
DiskLoc unusedDl; // unused
BSONObjSet unusedKeys;
@@ -170,8 +175,8 @@ namespace mongo {
}
}
-
- void prefetchRecordPages(const char* ns, const BSONObj& obj) {
+ // page in the data pages for a record associated with an object
+ void prefetchRecordPages(OperationContext* txn, const char* ns, const BSONObj& obj) {
BSONElement _id;
if( obj.getObjectID(_id) ) {
TimerHolder timer(&prefetchDocStats);
@@ -181,7 +186,7 @@ namespace mongo {
try {
// we can probably use Client::Context here instead of ReadContext as we
// have locked higher up the call stack already
- Client::ReadContext ctx( ns );
+ Client::ReadContext ctx(txn, ns);
if( Helpers::findById(ctx.ctx().db(), ns, builder.done(), result) ) {
// do we want to use Record::touch() here? it's pretty similar.
volatile char _dummy_char = '\0';
diff --git a/src/mongo/db/prefetch.h b/src/mongo/db/prefetch.h
index 3e97753da23..36310b40676 100644
--- a/src/mongo/db/prefetch.h
+++ b/src/mongo/db/prefetch.h
@@ -33,13 +33,8 @@
namespace mongo {
class Collection;
class Database;
+ class OperationContext;
// page in both index and data pages for an op from the oplog
- void prefetchPagesForReplicatedOp(Database* db, const BSONObj& op);
-
- // page in pages needed for all index lookups on a given object
- void prefetchIndexPages(Collection *nsd, const BSONObj& obj);
-
- // page in the data pages for a record associated with an object
- void prefetchRecordPages(const char *ns, const BSONObj& obj);
+ void prefetchPagesForReplicatedOp(OperationContext* txn, Database* db, const BSONObj& op);
}
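
prefetch.h, like fts_command.h and server_parameters.h elsewhere in this patch, takes the lightweight route for the new parameter: a forward declaration of OperationContext is enough because the header only names the type by pointer, and the helpers that are no longer part of the public surface drop out of the header entirely. The resulting header shape, sketched from the hunks above:

    namespace mongo {

        class Collection;
        class Database;
        class OperationContext;   // forward declaration only; no extra #include needed

        // page in both index and data pages for an op from the oplog
        void prefetchPagesForReplicatedOp(OperationContext* txn, Database* db, const BSONObj& op);

    } // namespace mongo
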
diff --git a/src/mongo/db/query/new_find.cpp b/src/mongo/db/query/new_find.cpp
index d78032fed07..7ed222b9f06 100644
--- a/src/mongo/db/query/new_find.cpp
+++ b/src/mongo/db/query/new_find.cpp
@@ -152,7 +152,7 @@ namespace mongo {
exhaust = false;
// This is a read lock.
- scoped_ptr<Client::ReadContext> ctx(new Client::ReadContext(ns));
+ scoped_ptr<Client::ReadContext> ctx(new Client::ReadContext(txn, ns));
Collection* collection = ctx->ctx().db()->getCollection(ns);
uassert( 17356, "collection dropped between getMore calls", collection );
@@ -459,7 +459,7 @@ namespace mongo {
// This is a read lock. We require this because if we're parsing a $where, the
// where-specific parsing code assumes we have a lock and creates execution machinery that
// requires it.
- Client::ReadContext ctx(q.ns);
+ Client::ReadContext ctx(txn, q.ns);
Collection* collection = ctx.ctx().db()->getCollection( ns );
// Parse the qm into a CanonicalQuery.
diff --git a/src/mongo/db/range_deleter.cpp b/src/mongo/db/range_deleter.cpp
index fd90b65c289..90cce2fcb4c 100644
--- a/src/mongo/db/range_deleter.cpp
+++ b/src/mongo/db/range_deleter.cpp
@@ -232,7 +232,10 @@ namespace mongo {
_stats->incPendingDeletes_inlock();
}
- _env->getCursorIds(ns, &toDelete->cursorsToWait);
+ {
+ boost::scoped_ptr<OperationContext> txn(transactionFactory());
+ _env->getCursorIds(txn.get(), ns, &toDelete->cursorsToWait);
+ }
{
scoped_lock sl(_queueMutex);
@@ -284,7 +287,7 @@ namespace mongo {
}
set<CursorId> cursorsToWait;
- _env->getCursorIds(ns, &cursorsToWait);
+ _env->getCursorIds(txn, ns, &cursorsToWait);
long long checkIntervalMillis = 5;
@@ -295,7 +298,7 @@ namespace mongo {
while (!cursorsToWait.empty()) {
set<CursorId> cursorsNow;
- _env->getCursorIds(ns, &cursorsNow);
+ _env->getCursorIds(txn, ns, &cursorsNow);
set<CursorId> cursorsLeft;
std::set_intersection(cursorsToWait.begin(),
@@ -438,7 +441,11 @@ namespace mongo {
RangeDeleteEntry* entry = *iter;
set<CursorId> cursorsNow;
- _env->getCursorIds(entry->ns, &cursorsNow);
+ {
+ boost::scoped_ptr<OperationContext> txn(
+ entry->transactionFactory()); // XXX?
+ _env->getCursorIds(txn.get(), entry->ns, &cursorsNow);
+ }
set<CursorId> cursorsLeft;
std::set_intersection(entry->cursorsToWait.begin(),
diff --git a/src/mongo/db/range_deleter.h b/src/mongo/db/range_deleter.h
index 9a3b8c6b1fa..15da3560513 100644
--- a/src/mongo/db/range_deleter.h
+++ b/src/mongo/db/range_deleter.h
@@ -304,7 +304,9 @@ namespace mongo {
* Must be a synchronous call. CursorIds should be populated after call.
* Must not throw exception.
*/
- virtual void getCursorIds(const StringData& ns, std::set<CursorId>* openCursors) = 0;
+ virtual void getCursorIds(OperationContext* txn,
+ const StringData& ns,
+ std::set<CursorId>* openCursors) = 0;
};
} // namespace mongo
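
For RangeDeleterEnv the parameter lands on a pure virtual, so every implementation (the real RangeDeleterDBEnv and the mock below) changes in lockstep, and the deleter thread, which has no ambient operation, builds a context from the queued entry's transactionFactory() just for the cursor lookup. A sketch of what an implementation of the new interface looks like (hypothetical subclass; only the override shape and the lock acquisition are taken from this patch):

    class MyRangeDeleterEnv : public RangeDeleterEnv {
    public:
        virtual void getCursorIds(OperationContext* txn,
                                  const StringData& ns,
                                  std::set<CursorId>* openCursors) {
            // Same pattern as RangeDeleterDBEnv above: the read lock needs the context now.
            Client::ReadContext ctx(txn, ns.toString());
            Collection* collection = ctx.ctx().db()->getCollection(ns);
            if (!collection)
                return;
            // ... gather the collection's open cursor ids into *openCursors ...
        }
    };
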
diff --git a/src/mongo/db/range_deleter_db_env.cpp b/src/mongo/db/range_deleter_db_env.cpp
index 0a99f21cc55..39e0c81a465 100644
--- a/src/mongo/db/range_deleter_db_env.cpp
+++ b/src/mongo/db/range_deleter_db_env.cpp
@@ -156,9 +156,10 @@ namespace mongo {
return true;
}
- void RangeDeleterDBEnv::getCursorIds(const StringData& ns,
+ void RangeDeleterDBEnv::getCursorIds(OperationContext* txn,
+ const StringData& ns,
std::set<CursorId>* openCursors) {
- Client::ReadContext ctx(ns.toString());
+ Client::ReadContext ctx(txn, ns.toString());
Collection* collection = ctx.ctx().db()->getCollection( ns );
if ( !collection )
return;
diff --git a/src/mongo/db/range_deleter_db_env.h b/src/mongo/db/range_deleter_db_env.h
index 9708fa49495..c5956db5b98 100644
--- a/src/mongo/db/range_deleter_db_env.h
+++ b/src/mongo/db/range_deleter_db_env.h
@@ -62,6 +62,8 @@ namespace mongo {
/**
* Gets the list of open cursors on a given namespace.
*/
- virtual void getCursorIds(const StringData& ns, std::set<CursorId>* openCursors);
+ virtual void getCursorIds(OperationContext* txn,
+ const StringData& ns,
+ std::set<CursorId>* openCursors);
};
}
diff --git a/src/mongo/db/range_deleter_mock_env.cpp b/src/mongo/db/range_deleter_mock_env.cpp
index 2b40a3e7bc6..29877a1a252 100644
--- a/src/mongo/db/range_deleter_mock_env.cpp
+++ b/src/mongo/db/range_deleter_mock_env.cpp
@@ -139,7 +139,8 @@ namespace mongo {
return true;
}
- void RangeDeleterMockEnv::getCursorIds(const StringData& ns, set<CursorId>* in) {
+ void RangeDeleterMockEnv::getCursorIds(
+ OperationContext* txn, const StringData& ns, set<CursorId>* in) {
{
scoped_lock sl(_cursorMapMutex);
const set<CursorId>& _cursors = _cursorMap[ns.toString()];
diff --git a/src/mongo/db/range_deleter_mock_env.h b/src/mongo/db/range_deleter_mock_env.h
index ce9457889d6..b1164ef830b 100644
--- a/src/mongo/db/range_deleter_mock_env.h
+++ b/src/mongo/db/range_deleter_mock_env.h
@@ -139,7 +139,7 @@ namespace mongo {
* RangeDeleterEnv::getCursorIds. The cursors returned can be modified with
* the setCursorId and clearCursorMap methods.
*/
- void getCursorIds(const StringData& ns, std::set<CursorId>* in);
+ void getCursorIds(OperationContext* txn, const StringData& ns, std::set<CursorId>* in);
private:
// mutex acquisition ordering:
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 72620d77609..52ecec48a95 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -318,7 +318,7 @@ namespace mongo {
Database* tempDatabase = NULL;
{
bool justCreated = false;
- tempDatabase = dbHolderW().getOrCreate( dbName, reservedPathString, justCreated );
+ tempDatabase = dbHolderW().getOrCreate(txn, dbName, reservedPathString, justCreated);
invariant( justCreated );
}
diff --git a/src/mongo/db/repl/heartbeat.cpp b/src/mongo/db/repl/heartbeat.cpp
index 006998a57ef..5f1f1179aca 100644
--- a/src/mongo/db/repl/heartbeat.cpp
+++ b/src/mongo/db/repl/heartbeat.cpp
@@ -112,7 +112,7 @@ namespace repl {
result.append("rs", true);
if( cmdObj["checkEmpty"].trueValue() ) {
- result.append("hasData", replHasDatabases());
+ result.append("hasData", replHasDatabases(txn));
}
if( (theReplSet == 0) || (theReplSet->startupStatus == ReplSetImpl::LOADINGCONFIG) ) {
string from( cmdObj.getStringField("from") );
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 48226902b95..ff2ff4921f5 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -60,7 +60,7 @@
namespace mongo {
namespace repl {
- void pretouchOperation(const BSONObj& op);
+ void pretouchOperation(OperationContext* txn, const BSONObj& op);
void pretouchN(vector<BSONObj>&, unsigned a, unsigned b);
/* if 1 sync() is running */
@@ -162,8 +162,8 @@ namespace repl {
void ReplSource::ensureMe() {
string myname = getHostName();
{
- Client::WriteContext ctx("local");
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, "local");
// local.me is an identifier for a server for getLastError w:2+
if (!Helpers::getSingleton("local.me", _me) ||
!_me.hasField("host") ||
@@ -560,6 +560,8 @@ namespace repl {
if ( !only.empty() && only != clientName )
return;
+ OperationContextImpl txn; // XXX?
+
if (replSettings.pretouch &&
!alreadyLocked/*doesn't make sense if in write lock already*/) {
if (replSettings.pretouch > 1) {
@@ -588,18 +590,17 @@ namespace repl {
a += m;
}
// we do one too...
- pretouchOperation(op);
+ pretouchOperation(&txn, op);
tp->join();
countdown = v.size();
}
}
else {
- pretouchOperation(op);
+ pretouchOperation(&txn, op);
}
}
scoped_ptr<Lock::GlobalWrite> lk( alreadyLocked ? 0 : new Lock::GlobalWrite() );
- OperationContextImpl txn; // XXX?
if ( replAllDead ) {
// hmmm why is this check here and not at top of this function? does it get set between top and here?
@@ -679,7 +680,7 @@ namespace repl {
int get() const { return _value; }
- virtual void append( BSONObjBuilder& b, const string& name ) {
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const string& name) {
b.append( name, _value );
}
@@ -1276,7 +1277,7 @@ namespace repl {
}
}
- void pretouchOperation(const BSONObj& op) {
+ void pretouchOperation(OperationContext* txn, const BSONObj& op) {
if( Lock::somethingWriteLocked() )
return; // no point pretouching if write locked. not sure if this will ever fire, but just in case.
@@ -1299,7 +1300,7 @@ namespace repl {
BSONObjBuilder b;
b.append(_id);
BSONObj result;
- Client::ReadContext ctx( ns );
+ Client::ReadContext ctx(txn, ns );
if( Helpers::findById(ctx.ctx().db(), ns, b.done(), result) )
_dummy_z += result.objsize(); // touch
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 5996e180401..474416dd250 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -113,8 +113,8 @@ namespace repl {
todo : make _logOpRS() call this so we don't repeat ourself?
*/
void _logOpObjRS(const BSONObj& op) {
- Lock::DBWrite lk("local");
OperationContextImpl txn;
+ Lock::DBWrite lk(txn.lockState(), "local");
const OpTime ts = op["ts"]._opTime();
long long h = op["h"].numberLong();
@@ -231,7 +231,7 @@ namespace repl {
BSONObj *o2,
bool *bb,
bool fromMigrate ) {
- Lock::DBWrite lk1("local");
+ Lock::DBWrite lk1(txn->lockState(), "local");
if ( strncmp(ns, "local.", 6) == 0 ) {
if ( strncmp(ns, "local.slaves", 12) == 0 )
@@ -321,7 +321,7 @@ namespace repl {
BSONObj *o2,
bool *bb,
bool fromMigrate ) {
- Lock::DBWrite lk("local");
+ Lock::DBWrite lk(txn->lockState(), "local");
static BufBuilder bufbuilder(8*1024); // todo there is likely a mutex on this constructor
if ( strncmp(ns, "local.", 6) == 0 ) {
diff --git a/src/mongo/db/repl/repl_set_impl.cpp b/src/mongo/db/repl/repl_set_impl.cpp
index 01f78853e04..5274e461f07 100644
--- a/src/mongo/db/repl/repl_set_impl.cpp
+++ b/src/mongo/db/repl/repl_set_impl.cpp
@@ -435,7 +435,8 @@ namespace {
}
void ReplSetImpl::loadLastOpTimeWritten(bool quiet) {
- Lock::DBRead lk(rsoplog);
+ OperationContextImpl txn; // XXX?
+ Lock::DBRead lk(txn.lockState(), rsoplog);
BSONObj o;
if (Helpers::getLast(rsoplog, o)) {
lastH = o["h"].numberLong();
@@ -445,7 +446,8 @@ namespace {
}
OpTime ReplSetImpl::getEarliestOpTimeWritten() const {
- Lock::DBRead lk(rsoplog);
+ OperationContextImpl txn; // XXX?
+ Lock::DBRead lk(txn.lockState(), rsoplog);
BSONObj o;
uassert(17347, "Problem reading earliest entry from oplog", Helpers::getFirst(rsoplog, o));
return o["ts"]._opTime();
@@ -859,19 +861,20 @@ namespace {
const BSONObj ReplSetImpl::_initialSyncFlag(BSON(_initialSyncFlagString << true));
void ReplSetImpl::clearInitialSyncFlag() {
- Lock::DBWrite lk("local");
OperationContextImpl txn; // XXX?
+ Lock::DBWrite lk(txn.lockState(), "local");
Helpers::putSingleton(&txn, "local.replset.minvalid", BSON("$unset" << _initialSyncFlag));
}
void ReplSetImpl::setInitialSyncFlag() {
- Lock::DBWrite lk("local");
OperationContextImpl txn; // XXX?
+ Lock::DBWrite lk(txn.lockState(), "local");
Helpers::putSingleton(&txn, "local.replset.minvalid", BSON("$set" << _initialSyncFlag));
}
bool ReplSetImpl::getInitialSyncFlag() {
- Lock::DBRead lk ("local");
+ OperationContextImpl txn; // XXX?
+ Lock::DBRead lk (txn.lockState(), "local");
BSONObj mv;
if (Helpers::getSingleton("local.replset.minvalid", mv)) {
return mv[_initialSyncFlagString].trueValue();
@@ -884,13 +887,15 @@ namespace {
BSONObjBuilder subobj(builder.subobjStart("$set"));
subobj.appendTimestamp("ts", obj["ts"].date());
subobj.done();
- Lock::DBWrite lk("local");
+
OperationContextImpl txn; // XXX?
+ Lock::DBWrite lk(txn.lockState(), "local");
Helpers::putSingleton(&txn, "local.replset.minvalid", builder.obj());
}
OpTime ReplSetImpl::getMinValid() {
- Lock::DBRead lk("local.replset.minvalid");
+ OperationContextImpl txn; // XXX?
+ Lock::DBRead lk(txn.lockState(), "local.replset.minvalid");
BSONObj mv;
if (Helpers::getSingleton("local.replset.minvalid", mv)) {
return mv["ts"]._opTime();
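
The repl_set_impl.cpp hunks illustrate the stop-gap used wherever the surrounding code cannot receive a context yet: a throwaway OperationContextImpl is created on the spot purely to supply a LockState, and flagged "XXX" as a reminder that it should eventually arrive from the caller. The pattern, reduced to its essentials (illustrative function name):

    OpTime readMinValidFromLocal() {
        OperationContextImpl txn;                      // XXX: temporary; should be passed in
        Lock::DBRead lk(txn.lockState(), "local.replset.minvalid");
        BSONObj doc;
        if (Helpers::getSingleton("local.replset.minvalid", doc)) {
            return doc["ts"]._opTime();
        }
        return OpTime();
    }
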
diff --git a/src/mongo/db/repl/repl_settings.cpp b/src/mongo/db/repl/repl_settings.cpp
index e8cb54d8728..48dad4218cc 100644
--- a/src/mongo/db/repl/repl_settings.cpp
+++ b/src/mongo/db/repl/repl_settings.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/repl/master_slave.h"
#include "mongo/db/repl/oplogreader.h"
#include "mongo/db/repl/rs.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/storage_options.h"
#include "mongo/db/wire_version.h"
#include "mongo/s/write_ops/batched_command_request.h"
@@ -55,7 +56,7 @@ namespace repl {
return replSettings.slave || replSettings.master || theReplSet;
}
- void appendReplicationInfo(BSONObjBuilder& result, int level) {
+ void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int level) {
if ( replSet ) {
if( theReplSet == 0 || theReplSet->state().shunned() ) {
result.append("ismaster", false);
@@ -88,7 +89,7 @@ namespace repl {
list<BSONObj> src;
{
const char* localSources = "local.sources";
- Client::ReadContext ctx(localSources, storageGlobalParams.dbpath);
+ Client::ReadContext ctx(txn, localSources);
auto_ptr<Runner> runner(InternalPlanner::collectionScan(localSources,
ctx.ctx().db()->getCollection(localSources)));
BSONObj obj;
@@ -151,7 +152,9 @@ namespace repl {
int level = configElement.numberInt();
BSONObjBuilder result;
- appendReplicationInfo(result, level);
+
+ OperationContextImpl txn; // XXX?
+ appendReplicationInfo(&txn, result, level);
return result.obj();
}
} replicationInfoServerStatus;
@@ -196,7 +199,7 @@ namespace repl {
if ( cmdObj["forShell"].trueValue() )
lastError.disableForCommand();
- appendReplicationInfo(result, 0);
+ appendReplicationInfo(txn, result, 0);
result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
result.appendNumber("maxMessageSizeBytes", MaxMessageSizeBytes);
diff --git a/src/mongo/db/repl/rs.cpp b/src/mongo/db/repl/rs.cpp
index 798269c7c29..ee32241039d 100644
--- a/src/mongo/db/repl/rs.cpp
+++ b/src/mongo/db/repl/rs.cpp
@@ -171,7 +171,7 @@ namespace repl {
}
}
- virtual void append( BSONObjBuilder& b, const string& name ) {
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const string& name) {
b.append( name, _value() );
}
diff --git a/src/mongo/db/repl/rs_config.cpp b/src/mongo/db/repl/rs_config.cpp
index 0e29c52e323..da705f14659 100644
--- a/src/mongo/db/repl/rs_config.cpp
+++ b/src/mongo/db/repl/rs_config.cpp
@@ -28,8 +28,6 @@
* it in the license file.
*/
-#include "mongo/pch.h"
-
#include <boost/algorithm/string.hpp>
#include "mongo/db/dbhelpers.h"
@@ -82,8 +80,8 @@ namespace repl {
log() << "replSet info saving a newer config version to local.system.replset: "
<< newConfigBSON << rsLog;
{
- Client::WriteContext cx( rsConfigNs );
OperationContextImpl txn;
+ Client::WriteContext cx(&txn, rsConfigNs);
//theReplSet->lastOpTimeWritten = ??;
//rather than above, do a logOp()? probably
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index f3120fb246a..32d9d7e6e3c 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -99,8 +99,8 @@ namespace repl {
else
sethbmsg( str::stream() << "initial sync cloning indexes for : " << db , 0);
- Client::WriteContext ctx(db);
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, db);
string err;
int errCode;
@@ -130,8 +130,9 @@ namespace repl {
void _logOpObjRS(const BSONObj& op);
static void emptyOplog() {
- Client::WriteContext ctx(rsoplog);
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, rsoplog);
+
Collection* collection = ctx.ctx().db()->getCollection(rsoplog);
// temp
@@ -321,7 +322,8 @@ namespace repl {
log() << "replSet cleaning up [1]" << rsLog;
{
- Client::WriteContext cx( "local." );
+ OperationContextImpl txn; // XXX?
+ Client::WriteContext cx(&txn, "local.");
cx.ctx().db()->flushFiles(true);
}
log() << "replSet cleaning up [2]" << rsLog;
@@ -465,7 +467,9 @@ namespace repl {
verify( !box.getState().primary() ); // wouldn't make sense if we were.
{
- Client::WriteContext cx( "local." );
+ OperationContextImpl txn;
+ Client::WriteContext cx(&txn, "local.");
+
cx.ctx().db()->flushFiles(true);
try {
log() << "replSet set minValid=" << minValid["ts"]._opTime().toString() << rsLog;
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 45b24c899df..b8bc672753f 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -654,7 +654,8 @@ namespace repl {
// check that we are at minvalid, otherwise we cannot rollback as we may be in an
// inconsistent state
{
- Lock::DBRead lk("local.replset.minvalid");
+ OperationContextImpl txn;
+ Lock::DBRead lk(txn.lockState(), "local.replset.minvalid");
BSONObj mv;
if (Helpers::getSingleton("local.replset.minvalid", mv)) {
OpTime minvalid = mv["ts"]._opTime();
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 8a8ac1da5f9..d30eb517898 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -66,8 +66,9 @@ namespace repl {
void SyncSourceFeedback::ensureMe() {
string myname = getHostName();
{
- Client::WriteContext ctx("local");
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, "local");
+
// local.me is an identifier for a server for getLastError w:2+
if (!Helpers::getSingleton("local.me", _me) ||
!_me.hasField("host") ||
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 4526c4af3d3..92eea631595 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -94,6 +94,7 @@ namespace repl {
bool isCommand(op["op"].valuestrsafe()[0] == 'c');
+ OperationContextImpl txn;
boost::scoped_ptr<Lock::ScopedLock> lk;
if(isCommand) {
@@ -102,11 +103,10 @@ namespace repl {
lk.reset(new Lock::GlobalWrite());
} else {
// DB level lock for this operation
- lk.reset(new Lock::DBWrite(ns));
+ lk.reset(new Lock::DBWrite(txn.lockState(), ns));
}
Client::Context ctx(ns, storageGlobalParams.dbpath);
- OperationContextImpl txn;
ctx.getClient()->curop()->reset();
// For non-initial-sync, we convert updates to upserts
// to suppress errors when replaying oplog entries.
@@ -126,8 +126,9 @@ namespace repl {
try {
// one possible tweak here would be to stay in the read lock for this database
// for multiple prefetches if they are for the same database.
- Client::ReadContext ctx(ns);
- prefetchPagesForReplicatedOp(ctx.ctx().db(), op);
+ OperationContextImpl txn;
+ Client::ReadContext ctx(&txn, ns);
+ prefetchPagesForReplicatedOp(&txn, ctx.ctx().db(), op);
}
catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchOp(): " << e.what() << endl;
@@ -475,7 +476,9 @@ namespace repl {
void SyncTail::applyOpsToOplog(std::deque<BSONObj>* ops) {
{
- Lock::DBWrite lk("local");
+ OperationContextImpl txn; // XXX?
+ Lock::DBWrite lk(txn.lockState(), "local");
+
while (!ops->empty()) {
const BSONObj& op = ops->front();
// this updates theReplSet->lastOpTimeWritten
diff --git a/src/mongo/db/restapi.cpp b/src/mongo/db/restapi.cpp
index 1e9e2708efa..657dc627d16 100644
--- a/src/mongo/db/restapi.cpp
+++ b/src/mongo/db/restapi.cpp
@@ -260,9 +260,9 @@ namespace mongo {
} restHandler;
- bool RestAdminAccess::haveAdminUsers() const {
+ bool RestAdminAccess::haveAdminUsers(OperationContext* txn) const {
AuthorizationSession* authzSession = cc().getAuthorizationSession();
- return authzSession->getAuthorizationManager().hasAnyPrivilegeDocuments();
+ return authzSession->getAuthorizationManager().hasAnyPrivilegeDocuments(txn);
}
class LowLevelMongodStatus : public WebStatusPlugin {
diff --git a/src/mongo/db/restapi.h b/src/mongo/db/restapi.h
index e170e740b20..d73103ab785 100644
--- a/src/mongo/db/restapi.h
+++ b/src/mongo/db/restapi.h
@@ -43,7 +43,7 @@ namespace mongo {
public:
virtual ~RestAdminAccess() { }
- virtual bool haveAdminUsers() const;
+ virtual bool haveAdminUsers(OperationContext* txn) const;
};
} // namespace mongo
diff --git a/src/mongo/db/server_parameters.h b/src/mongo/db/server_parameters.h
index c4feb946ad5..9c281e4499c 100644
--- a/src/mongo/db/server_parameters.h
+++ b/src/mongo/db/server_parameters.h
@@ -39,6 +39,7 @@
namespace mongo {
class ServerParameterSet;
+ class OperationContext;
/**
* Lets you make server level settings easily configurable.
@@ -66,7 +67,7 @@ namespace mongo {
bool allowedToChangeAtRuntime() const { return _allowedToChangeAtRuntime; }
- virtual void append( BSONObjBuilder& b, const std::string& name ) = 0;
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name ) = 0;
virtual Status set( const BSONElement& newValueElement ) = 0;
@@ -114,7 +115,7 @@ namespace mongo {
_value( value ) {}
virtual ~ExportedServerParameter() {}
- virtual void append( BSONObjBuilder& b, const std::string& name ) {
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
b.append( name, *_value );
}
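
server_parameters.h extends the same treatment to the ServerParameter hierarchy: append() is pure virtual, so every parameter implementation gains the OperationContext* argument even when, as in the simple exporter above, it never uses it; parameters whose current value has to be read from a collection can now do that read properly. From the caller's side the contract looks like this (the helper function is illustrative; append() and name() are as used in this patch):

    void appendParameterToBuilder(OperationContext* txn,
                                  ServerParameter* parameter,
                                  BSONObjBuilder& b) {
        // The context is always supplied, whether or not this particular
        // parameter needs it to produce its value.
        parameter->append(txn, b, parameter->name());
    }
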
diff --git a/src/mongo/db/server_parameters_test.cpp b/src/mongo/db/server_parameters_test.cpp
index 212b31dbacc..c2a6f56156c 100644
--- a/src/mongo/db/server_parameters_test.cpp
+++ b/src/mongo/db/server_parameters_test.cpp
@@ -30,6 +30,7 @@
#include "mongo/unittest/unittest.h"
+#include "mongo/db/operation_context_noop.h"
#include "mongo/db/server_parameters.h"
namespace mongo {
@@ -68,7 +69,10 @@ namespace mongo {
ASSERT_EQUALS( "c", v[2] );
BSONObjBuilder b;
- vv.append( b, vv.name() );
+
+ OperationContextNoop txn;
+ vv.append(&txn, b, vv.name());
+
BSONObj y = b.obj();
ASSERT( x.firstElement().woCompare( y.firstElement(), false ) == 0 );
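
The updated unit test shows the cheap way to satisfy the new argument outside a running server: OperationContextNoop provides a context without locking or touching storage, which is all append() needs when the value lives in memory. Sketched usage, mirroring the test above (the parameter variable is assumed to exist):

    OperationContextNoop txn;     // no locking, no storage; fine for in-memory parameters
    BSONObjBuilder b;
    someParameter.append(&txn, b, someParameter.name());
    BSONObj reported = b.obj();
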
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index be47f16d3d3..c497c8ed7e4 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -113,9 +113,10 @@ namespace mongo {
long long n = 0;
{
- string ns = idx["ns"].String();
- Client::WriteContext ctx( ns );
+ const string ns = idx["ns"].String();
+
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, ns );
Collection* collection = ctx.ctx().db()->getCollection( ns );
if ( !collection ) {
// collection was dropped
@@ -171,7 +172,8 @@ namespace mongo {
set<string> dbs;
{
- Lock::DBRead lk( "local" );
+ OperationContextImpl txn; // XXX?
+ Lock::DBRead lk(txn.lockState(), "local");
dbHolder().getAllShortNames( dbs );
}
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index e47e67ea4ea..194cc048046 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -1,3 +1,5 @@
+// client.cpp
+
/*
* Copyright (C) 2010 10gen Inc.
*
@@ -26,16 +28,13 @@
* then also delete it in the license file.
*/
-// client.cpp
-
-#include "mongo/pch.h"
-
#include "mongo/client/dbclientcursor.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
-#include "mongo/db/d_concurrency.h"
+#include "mongo/db/operation_context_noop.h"
#include "mongo/dbtests/dbtests.h"
+
namespace ClientTests {
class Base {
@@ -123,8 +122,8 @@ namespace ClientTests {
public:
BuildIndex() : Base("buildIndex") {}
void run() {
- Lock::DBWrite lock(ns());
- Client::WriteContext ctx(ns());
+ OperationContextNoop txn;
+ Client::WriteContext ctx(&txn, ns());
db.insert(ns(), BSON("x" << 1 << "y" << 2));
db.insert(ns(), BSON("x" << 2 << "y" << 2));
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 1ff84d735f8..729443e2835 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -41,13 +41,16 @@
namespace CountTests {
class Base {
+ OperationContextImpl _txn;
Lock::DBWrite lk;
+
Client::Context _context;
+
Database* _database;
Collection* _collection;
- OperationContextImpl _txn;
+
public:
- Base() : lk(ns()), _context( ns() ) {
+ Base() : lk(_txn.lockState(), ns()), _context( ns() ) {
_database = _context.db();
_collection = _database->getCollection( ns() );
if ( _collection ) {
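
The counttests.cpp fixture makes the member-order consequence explicit: the DBWrite member is now initialized from _txn.lockState(), so _txn must be declared before it, because C++ constructs members in declaration order regardless of the order in the initializer list. The rule in miniature (illustrative fixture):

    class FixtureBase {
        OperationContextImpl _txn;   // must be declared first: lk's initializer uses it
        Lock::DBWrite lk;

    public:
        explicit FixtureBase(const std::string& ns) : lk(_txn.lockState(), ns) {}
    };
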
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 535dab6124d..0cee60de170 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -58,8 +58,9 @@ namespace mongo {
{
// Remove _id range [_min, _max).
OperationContextImpl txn;
- Lock::DBWrite lk( ns );
+ Lock::DBWrite lk(txn.lockState(), ns);
Client::Context ctx( ns );
+
KeyRange range( ns,
BSON( "_id" << _min ),
BSON( "_id" << _max ),
@@ -112,6 +113,8 @@ namespace mongo {
TEST(DBHelperTests, FindDiskLocs) {
DBDirectClient client;
+ OperationContextImpl txn;
+
// Some unique tag we can use to make sure we're pulling back the right data
OID tag = OID::gen();
client.remove( ns, BSONObj() );
@@ -128,14 +131,15 @@ namespace mongo {
long long estSizeBytes;
{
// search _id range (0, 10)
- Lock::DBRead lk( ns );
+ Lock::DBRead lk(txn.lockState(), ns);
KeyRange range( ns,
BSON( "_id" << 0 ),
BSON( "_id" << numDocsInserted ),
BSON( "_id" << 1 ) );
- Status result = Helpers::getLocsInRange( range,
+ Status result = Helpers::getLocsInRange( &txn,
+ range,
maxSizeBytes,
&locs,
&numDocsFound,
@@ -164,6 +168,8 @@ namespace mongo {
TEST(DBHelperTests, FindDiskLocsNoIndex) {
DBDirectClient client;
+ OperationContextImpl txn;
+
client.remove( ns, BSONObj() );
client.insert( ns, BSON( "_id" << OID::gen() ) );
@@ -173,7 +179,7 @@ namespace mongo {
long long numDocsFound;
long long estSizeBytes;
{
- Lock::DBRead lk( ns );
+ Lock::DBRead lk(txn.lockState(), ns);
Client::Context ctx( ns );
// search invalid index range
@@ -182,7 +188,8 @@ namespace mongo {
BSON( "badIndex" << 10 ),
BSON( "badIndex" << 1 ) );
- Status result = Helpers::getLocsInRange( range,
+ Status result = Helpers::getLocsInRange( &txn,
+ range,
maxSizeBytes,
&locs,
&numDocsFound,
@@ -203,6 +210,8 @@ namespace mongo {
TEST(DBHelperTests, FindDiskLocsTooBig) {
DBDirectClient client;
+ OperationContextImpl txn;
+
client.remove( ns, BSONObj() );
int numDocsInserted = 10;
@@ -217,14 +226,15 @@ namespace mongo {
long long numDocsFound;
long long estSizeBytes;
{
- Lock::DBRead lk( ns );
+ Lock::DBRead lk(txn.lockState(), ns);
Client::Context ctx( ns );
KeyRange range( ns,
BSON( "_id" << 0 ),
BSON( "_id" << numDocsInserted ),
BSON( "_id" << 1 ) );
- Status result = Helpers::getLocsInRange( range,
+ Status result = Helpers::getLocsInRange( &txn,
+ range,
maxSizeBytes,
&locs,
&numDocsFound,
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index 593c310eb93..652958c2efe 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -166,7 +166,7 @@ namespace DocumentSourceTests {
_registration.reset();
_runner.reset();
- Client::WriteContext ctx (ns);
+ Client::WriteContext ctx(&_opCtx, ns);
CanonicalQuery* cq;
uassertStatusOK(CanonicalQuery::canonicalize(ns, /*query=*/BSONObj(), &cq));
Runner* runnerBare;
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index 8afba25af39..bf9b61cf5d3 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -31,22 +31,25 @@ namespace IndexCatalogTests {
class IndexIteratorTests {
public:
IndexIteratorTests() {
- Client::WriteContext ctx(_ns);
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, _ns);
+
_db = ctx.ctx().db();
_coll = _db->createCollection(&txn, _ns);
_catalog = _coll->getIndexCatalog();
}
~IndexIteratorTests() {
- Client::WriteContext ctx(_ns);
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, _ns);
+
_db->dropCollection(&txn, _ns);
}
void run() {
- Client::WriteContext ctx(_ns);
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, _ns);
+
int numFinishedIndexesStart = _catalog->numIndexesReady();
BSONObjBuilder b1;
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 7c2f03662f6..792c1071d3e 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -55,7 +55,7 @@ namespace IndexUpdateTests {
class IndexBuildBase {
public:
IndexBuildBase() :
- _ctx( _ns ) {
+ _ctx(&_txn, _ns) {
_client.createCollection( _ns );
}
~IndexBuildBase() {
@@ -91,8 +91,9 @@ namespace IndexUpdateTests {
return collection()->getIndexCatalog()->findIndexByName( "a_1" );
}
#endif
- Client::WriteContext _ctx;
+
OperationContextImpl _txn;
+ Client::WriteContext _ctx;
};
/** addKeysToPhaseOne() adds keys from a collection's documents to an external sorter. */
diff --git a/src/mongo/dbtests/matchertests.cpp b/src/mongo/dbtests/matchertests.cpp
index 5bee238cda6..67bff7ef74b 100644
--- a/src/mongo/dbtests/matchertests.cpp
+++ b/src/mongo/dbtests/matchertests.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/json.h"
#include "mongo/db/matcher/matcher.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/util/timer.h"
@@ -216,7 +217,9 @@ namespace MatcherTests {
class WhereSimple1 {
public:
void run() {
- Client::ReadContext ctx( "unittests.matchertests" );
+ OperationContextImpl txn;
+ Client::ReadContext ctx(&txn, "unittests.matchertests");
+
M m(BSON("$where" << "function(){ return this.a == 1; }"),
WhereCallbackReal(StringData("unittests")));
ASSERT( m.matches( BSON( "a" << 1 ) ) );
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index 59d5f1bc483..5ef98e0b08b 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -164,7 +164,8 @@ namespace PdfileTests {
void run() {
SmallFilesControl c;
- Client::ReadContext ctx( "local" );
+ OperationContextImpl txn;
+ Client::ReadContext ctx(&txn, "local");
Database* db = ctx.ctx().db();
ExtentManager* em = db->getExtentManager();
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index 970e59a97ca..02939707de3 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/instance.h"
#include "mongo/db/json.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/query/get_runner.h"
#include "mongo/db/query/qlog.h"
#include "mongo/db/query/query_knobs.h"
@@ -45,6 +46,7 @@
#include "mongo/db/query/stage_builder.h"
#include "mongo/dbtests/dbtests.h"
+
namespace mongo {
// How we access the external setParameter testing bool.
@@ -59,7 +61,7 @@ namespace PlanRankingTests {
class PlanRankingTestBase {
public:
PlanRankingTestBase() : _internalQueryForceIntersectionPlans(internalQueryForceIntersectionPlans) {
- Client::WriteContext ctx(ns);
+ Client::WriteContext ctx(&_txn, ns);
_client.dropCollection(ns);
}
@@ -69,12 +71,12 @@ namespace PlanRankingTests {
}
void insert(const BSONObj& obj) {
- Client::WriteContext ctx(ns);
+ Client::WriteContext ctx(&_txn, ns);
_client.insert(ns, obj);
}
void addIndex(const BSONObj& obj) {
- Client::WriteContext ctx(ns);
+ Client::WriteContext ctx(&_txn, ns);
_client.ensureIndex(ns, obj);
}
@@ -85,7 +87,7 @@ namespace PlanRankingTests {
* Takes ownership of 'cq'. Caller DOES NOT own the returned QuerySolution*.
*/
QuerySolution* pickBestPlan(CanonicalQuery* cq) {
- Client::ReadContext ctx(ns);
+ Client::ReadContext ctx(&_txn, ns);
Collection* collection = ctx.ctx().db()->getCollection(ns);
QueryPlannerParams plannerParams;
@@ -135,16 +137,17 @@ namespace PlanRankingTests {
// determining the number of documents in the tests below.
static const int N;
+ OperationContextImpl _txn;
+
private:
- static DBDirectClient _client;
+
+ DBDirectClient _client;
scoped_ptr<MultiPlanStage> _mps;
// Holds the value of global "internalQueryForceIntersectionPlans" setParameter flag.
// Restored at end of test invocation regardless of test result.
bool _internalQueryForceIntersectionPlans;
};
- DBDirectClient PlanRankingTestBase::_client;
-
// static
const int PlanRankingTestBase::N = internalQueryPlanEvaluationWorks + 1000;
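
plan_ranking.cpp, and the runner and stage tests that follow, converge on one fixture shape: an OperationContextImpl _txn member from which every ReadContext and WriteContext in the test is built, plus a DBDirectClient that becomes a plain per-fixture member instead of a class-wide static. A stripped-down sketch of that shape (names abbreviated; only the parts relevant to this patch are shown):

    class QueryTestBase {
    public:
        void insert(const BSONObj& obj) {
            Client::WriteContext ctx(&_txn, ns());
            _client.insert(ns(), obj);
        }

        static const char* ns() { return "unittests.example"; }   // placeholder namespace

    protected:
        OperationContextImpl _txn;    // one context per test fixture

    private:
        DBDirectClient _client;       // no longer static: lives and dies with the fixture
    };
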
diff --git a/src/mongo/dbtests/query_multi_plan_runner.cpp b/src/mongo/dbtests/query_multi_plan_runner.cpp
index bc853298488..99d1f61d172 100644
--- a/src/mongo/dbtests/query_multi_plan_runner.cpp
+++ b/src/mongo/dbtests/query_multi_plan_runner.cpp
@@ -38,6 +38,7 @@
#include "mongo/db/instance.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/dbtests/dbtests.h"
@@ -92,7 +93,8 @@ namespace QueryMultiPlanRunner {
class MPRCollectionScanVsHighlySelectiveIXScan : public MultiPlanRunnerBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, ns());
const int N = 5000;
for (int i = 0; i < N; ++i) {
diff --git a/src/mongo/dbtests/query_single_solution_runner.cpp b/src/mongo/dbtests/query_single_solution_runner.cpp
index 30d7ca0535f..3e2e1330323 100644
--- a/src/mongo/dbtests/query_single_solution_runner.cpp
+++ b/src/mongo/dbtests/query_single_solution_runner.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/query/query_solution.h"
#include "mongo/db/query/single_solution_runner.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/dbtests/dbtests.h"
namespace QuerySingleSolutionRunner {
@@ -147,7 +148,7 @@ namespace QuerySingleSolutionRunner {
static const char* ns() { return "unittests.QueryStageSingleSolutionRunner"; }
size_t numCursors() {
- Client::ReadContext ctx( ns() );
+ Client::ReadContext ctx(&_txn, ns() );
Collection* collection = ctx.ctx().db()->getCollection( ns() );
if ( !collection )
return 0;
@@ -155,28 +156,29 @@ namespace QuerySingleSolutionRunner {
}
void registerRunner( Runner* runner ) {
- Client::ReadContext ctx( ns() );
+ Client::ReadContext ctx(&_txn, ns());
Collection* collection = ctx.ctx().db()->getOrCreateCollection( ns() );
return collection->cursorCache()->registerRunner( runner );
}
void deregisterRunner( Runner* runner ) {
- Client::ReadContext ctx( ns() );
+ Client::ReadContext ctx(&_txn, ns());
Collection* collection = ctx.ctx().db()->getOrCreateCollection( ns() );
return collection->cursorCache()->deregisterRunner( runner );
}
+ protected:
+ OperationContextImpl _txn;
+
private:
IndexDescriptor* getIndex(Database* db, const BSONObj& obj) {
Collection* collection = db->getCollection( ns() );
return collection->getIndexCatalog()->findIndexByKeyPattern(obj);
}
- static DBDirectClient _client;
+ DBDirectClient _client;
};
- DBDirectClient SingleSolutionRunnerBase::_client;
-
/**
* Test dropping the collection while the
* SingleSolutionRunner is doing a collection scan.
@@ -184,7 +186,7 @@ namespace QuerySingleSolutionRunner {
class DropCollScan : public SingleSolutionRunnerBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
insert(BSON("_id" << 1));
insert(BSON("_id" << 2));
@@ -212,7 +214,7 @@ namespace QuerySingleSolutionRunner {
class DropIndexScan : public SingleSolutionRunnerBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
insert(BSON("_id" << 1 << "a" << 6));
insert(BSON("_id" << 2 << "a" << 7));
insert(BSON("_id" << 3 << "a" << 8));
@@ -283,7 +285,7 @@ namespace QuerySingleSolutionRunner {
class SnapshotControl : public SnapshotBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
setupCollection();
BSONObj filterObj = fromjson("{a: {$gte: 2}}");
@@ -308,7 +310,7 @@ namespace QuerySingleSolutionRunner {
class SnapshotTest : public SnapshotBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
setupCollection();
BSONObj indexSpec = BSON("_id" << 1);
addIndex(indexSpec);
@@ -339,7 +341,7 @@ namespace QuerySingleSolutionRunner {
class Invalidate : public SingleSolutionRunnerBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
insert(BSON("a" << 1 << "b" << 1));
BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
@@ -364,7 +366,7 @@ namespace QuerySingleSolutionRunner {
class InvalidatePinned : public SingleSolutionRunnerBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
insert(BSON("a" << 1 << "b" << 1));
Collection* collection = ctx.ctx().db()->getCollection(ns());
@@ -402,12 +404,12 @@ namespace QuerySingleSolutionRunner {
public:
void run() {
{
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
insert(BSON("a" << 1 << "b" << 1));
}
{
- Client::ReadContext ctx(ns());
+ Client::ReadContext ctx(&_txn, ns());
Collection* collection = ctx.ctx().db()->getCollection(ns());
BSONObj filterObj = fromjson("{_id: {$gt: 0}, b: {$gt: 0}}");
@@ -420,7 +422,7 @@ namespace QuerySingleSolutionRunner {
// There should be one cursor before timeout,
// and zero cursors after timeout.
ASSERT_EQUALS(1U, numCursors());
- CollectionCursorCache::timeoutCursorsGlobal(600001);
+ CollectionCursorCache::timeoutCursorsGlobal(&_txn, 600001);
ASSERT_EQUALS(0U, numCursors());
}
};
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 3f9d3526f2e..f1d33ff7fb8 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -109,12 +109,13 @@ namespace QueryStageAnd {
static const char* ns() { return "unittests.QueryStageAnd"; }
+ protected:
+ OperationContextImpl _txn;
+
private:
- static DBDirectClient _client;
+ DBDirectClient _client;
};
- DBDirectClient QueryStageAndBase::_client;
-
//
// Hash AND tests
//
@@ -126,12 +127,12 @@ namespace QueryStageAnd {
class QueryStageAndHashInvalidation : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -228,12 +229,12 @@ namespace QueryStageAnd {
class QueryStageAndHashInvalidateLookahead : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -314,12 +315,12 @@ namespace QueryStageAnd {
class QueryStageAndHashTwoLeaf : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -363,12 +364,12 @@ namespace QueryStageAnd {
class QueryStageAndHashTwoLeafFirstChildLargeKeys : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
// Generate large keys for {foo: 1, big: 1} index.
@@ -415,12 +416,12 @@ namespace QueryStageAnd {
class QueryStageAndHashTwoLeafLastChildLargeKeys : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
// Generate large keys for {baz: 1, big: 1} index.
@@ -466,12 +467,12 @@ namespace QueryStageAnd {
class QueryStageAndHashThreeLeaf : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -527,12 +528,12 @@ namespace QueryStageAnd {
class QueryStageAndHashThreeLeafMiddleChildLargeKeys : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
// Generate large keys for {bar: 1, big: 1} index.
@@ -586,12 +587,12 @@ namespace QueryStageAnd {
class QueryStageAndHashWithNothing : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -645,12 +646,12 @@ namespace QueryStageAnd {
class QueryStageAndHashProducesNothing : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
for (int i = 0; i < 10; ++i) {
@@ -693,12 +694,12 @@ namespace QueryStageAnd {
class QueryStageAndHashWithMatcher : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -749,12 +750,12 @@ namespace QueryStageAnd {
class QueryStageAndSortedInvalidation : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
// Insert a bunch of data
@@ -866,12 +867,12 @@ namespace QueryStageAnd {
class QueryStageAndSortedThreeLeaf : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
// Insert a bunch of data
@@ -919,12 +920,12 @@ namespace QueryStageAnd {
class QueryStageAndSortedWithNothing : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
@@ -964,12 +965,12 @@ namespace QueryStageAnd {
class QueryStageAndSortedProducesNothing : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -1012,12 +1013,12 @@ namespace QueryStageAnd {
class QueryStageAndSortedWithMatcher : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
for (int i = 0; i < 50; ++i) {
@@ -1057,12 +1058,12 @@ namespace QueryStageAnd {
class QueryStageAndSortedByLastChild : public QueryStageAndBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
for (int i = 0; i < 50; ++i) {
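Every hunk in query_stage_and.cpp above makes the same mechanical change: Client::WriteContext now takes the test's OperationContext as its first argument, and createCollection() is called with that same context rather than a locally constructed one. A minimal sketch of the resulting test shape, assuming the fixture owns an OperationContextImpl named _txn (as the other dbtests fixtures in this patch do) and that mongo/db/operation_context_impl.h is included; the class name below is illustrative, not part of the patch:

    class ExampleAndStageTest {
    public:
        void run() {
            // The OperationContext* is now passed explicitly instead of being
            // looked up from thread-local storage inside WriteContext.
            Client::WriteContext ctx(&_txn, ns());

            Database* db = ctx.ctx().db();
            Collection* coll = db->getCollection(ns());
            if (!coll) {
                // The same context is reused for the DDL call.
                coll = db->createCollection(&_txn, ns());
            }
        }

    protected:
        static const char* ns() { return "unittests.ExampleAndStageTest"; }
        OperationContextImpl _txn;   // owned by the fixture for the test's lifetime
    };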
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index aa0fb3a2e65..fc30e82ad99 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -315,7 +315,7 @@ namespace QueryStageCollectionScan {
class QueryStageCollectionScanBase {
public:
QueryStageCollectionScanBase() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
for (int i = 0; i < numObj(); ++i) {
BSONObjBuilder bob;
@@ -325,7 +325,7 @@ namespace QueryStageCollectionScan {
}
virtual ~QueryStageCollectionScanBase() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
_client.dropCollection(ns());
}
@@ -334,7 +334,7 @@ namespace QueryStageCollectionScan {
}
int countResults(CollectionScanParams::Direction direction, const BSONObj& filterObj) {
- Client::ReadContext ctx(ns());
+ Client::ReadContext ctx(&_txn, ns());
// Configure the scan.
CollectionScanParams params;
@@ -384,11 +384,13 @@ namespace QueryStageCollectionScan {
static const char* ns() { return "unittests.QueryStageCollectionScan"; }
+ protected:
+ OperationContextImpl _txn;
+
private:
- static DBDirectClient _client;
+ DBDirectClient _client;
};
- DBDirectClient QueryStageCollectionScanBase::_client;
//
// Go forwards, get everything.
@@ -442,7 +444,7 @@ namespace QueryStageCollectionScan {
class QueryStageCollscanObjectsInOrderForward : public QueryStageCollectionScanBase {
public:
void run() {
- Client::ReadContext ctx(ns());
+ Client::ReadContext ctx(&_txn, ns());
// Configure the scan.
CollectionScanParams params;
@@ -473,7 +475,7 @@ namespace QueryStageCollectionScan {
class QueryStageCollscanObjectsInOrderBackward : public QueryStageCollectionScanBase {
public:
void run() {
- Client::ReadContext ctx(ns());
+ Client::ReadContext ctx(&_txn, ns());
CollectionScanParams params;
params.collection = ctx.ctx().db()->getCollection( ns() );
@@ -502,7 +504,7 @@ namespace QueryStageCollectionScan {
class QueryStageCollscanInvalidateUpcomingObject : public QueryStageCollectionScanBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
Collection* coll = ctx.ctx().db()->getCollection( ns() );
@@ -564,7 +566,7 @@ namespace QueryStageCollectionScan {
class QueryStageCollscanInvalidateUpcomingObjectBackward : public QueryStageCollectionScanBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
Collection* coll = ctx.ctx().db()->getCollection(ns());
// Get the DiskLocs that would be returned by an in-order scan.
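The base-class hunk above is the other half of the pattern: the shared static DBDirectClient becomes a per-fixture member and the fixture gains a protected OperationContextImpl, so setup, teardown, and query helpers can all hand the same context to WriteContext/ReadContext. Roughly, under the same assumptions as the previous sketch (ExampleScanTest is illustrative):

    class ExampleScanTest {
    public:
        ExampleScanTest() {
            Client::WriteContext ctx(&_txn, ns());   // setup writes under the fixture's context
            _client.insert(ns(), BSON("x" << 1));
        }
        ~ExampleScanTest() {
            Client::WriteContext ctx(&_txn, ns());
            _client.dropCollection(ns());
        }
        int countResults() {
            Client::ReadContext ctx(&_txn, ns());    // reads use the same context
            return static_cast<int>(_client.count(ns()));
        }

    protected:
        static const char* ns() { return "unittests.ExampleScanTest"; }
        OperationContextImpl _txn;                   // replaces the TLS lookup the old constructors did

    private:
        DBDirectClient _client;                      // no longer static: one client per test instance
    };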
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 090bf068f63..0d0dcbc0673 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/instance.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/pdfile.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/dbtests/dbtests.h"
@@ -51,7 +52,7 @@ namespace QueryStageCount {
CountBase() { }
virtual ~CountBase() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
_client.dropCollection(ns());
}
@@ -92,11 +93,13 @@ namespace QueryStageCount {
static const char* ns() { return "unittests.QueryStageCount"; }
+ protected:
+ OperationContextImpl _txn;
+
private:
- static DBDirectClient _client;
+ DBDirectClient _client;
};
-
- DBDirectClient CountBase::_client;
+
//
// Check that dups are properly identified
@@ -104,7 +107,7 @@ namespace QueryStageCount {
class QueryStageCountDups : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert some docs
insert(BSON("a" << BSON_ARRAY(5 << 7)));
@@ -136,7 +139,7 @@ namespace QueryStageCount {
class QueryStageCountInclusiveBounds : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert some docs
for (int i = 0; i < 10; ++i) {
@@ -168,7 +171,7 @@ namespace QueryStageCount {
class QueryStageCountExclusiveBounds : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert some docs
for (int i = 0; i < 10; ++i) {
@@ -200,7 +203,7 @@ namespace QueryStageCount {
class QueryStageCountLowerBound : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert doc, add index
insert(BSON("a" << 2));
@@ -228,7 +231,7 @@ namespace QueryStageCount {
class QueryStageCountNothingInInterval : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert documents, add index
insert(BSON("a" << 2));
@@ -258,7 +261,7 @@ namespace QueryStageCount {
class QueryStageCountNothingInIntervalFirstMatchTooHigh : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert some documents, add index
insert(BSON("a" << 2));
@@ -288,7 +291,7 @@ namespace QueryStageCount {
class QueryStageCountNoChangeDuringYield : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert documents, add index
for (int i = 0; i < 10; ++i) {
@@ -339,7 +342,7 @@ namespace QueryStageCount {
class QueryStageCountDeleteDuringYield : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert documents, add index
for (int i = 0; i < 10; ++i) {
@@ -393,7 +396,7 @@ namespace QueryStageCount {
class QueryStageCountInsertNewDocsDuringYield : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert documents, add index
for (int i = 0; i < 10; ++i) {
@@ -450,7 +453,7 @@ namespace QueryStageCount {
class QueryStageCountBecomesMultiKeyDuringYield : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert documents, add index
for (int i = 0; i < 10; ++i) {
@@ -503,7 +506,7 @@ namespace QueryStageCount {
class QueryStageCountUnusedKeys : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert docs, add index
for (int i = 0; i < 10; ++i) {
@@ -538,7 +541,7 @@ namespace QueryStageCount {
class QueryStageCountUnusedEndKey : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert docs, add index
for (int i = 0; i < 10; ++i) {
@@ -571,7 +574,7 @@ namespace QueryStageCount {
class QueryStageCountKeyBecomesUnusedDuringYield : public CountBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
// Insert documents, add index
for (int i = 0; i < 10; ++i) {
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index 37a0a6473b1..ecbbbaf5561 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/instance.h"
#include "mongo/db/json.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/query/index_bounds_builder.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/catalog/collection.h"
@@ -48,22 +49,22 @@ namespace QueryStageDistinct {
DistinctBase() { }
virtual ~DistinctBase() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
_client.dropCollection(ns());
}
void addIndex(const BSONObj& obj) {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
_client.ensureIndex(ns(), obj);
}
void insert(const BSONObj& obj) {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
_client.insert(ns(), obj);
}
IndexDescriptor* getIndex(const BSONObj& obj) {
- Client::ReadContext ctx(ns());
+ Client::ReadContext ctx(&_txn, ns());
Collection* collection = ctx.ctx().db()->getCollection( ns() );
return collection->getIndexCatalog()->findIndexByKeyPattern( obj );
}
@@ -96,11 +97,13 @@ namespace QueryStageDistinct {
static const char* ns() { return "unittests.QueryStageDistinct"; }
+ protected:
+ OperationContextImpl _txn;
+
private:
- static DBDirectClient _client;
+ DBDirectClient _client;
};
- DBDirectClient DistinctBase::_client;
// Tests distinct with single key indices.
class QueryStageDistinctBasic : public DistinctBase {
@@ -121,7 +124,7 @@ namespace QueryStageDistinct {
// Make an index on a:1
addIndex(BSON("a" << 1));
- Client::ReadContext ctx(ns());
+ Client::ReadContext ctx(&_txn, ns());
// Set up the distinct stage.
DistinctParams params;
@@ -184,7 +187,7 @@ namespace QueryStageDistinct {
// Make an index on a:1
addIndex(BSON("a" << 1));
- Client::ReadContext ctx(ns());
+ Client::ReadContext ctx(&_txn, ns());
// Set up the distinct stage.
DistinctParams params;
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index 4aaaa2657b7..b99a95b1d87 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -87,8 +87,9 @@ namespace QueryStageFetch {
class FetchStageAlreadyFetched : public QueryStageFetchBase {
public:
void run() {
- Client::WriteContext ctx(ns());
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, ns());
+
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
@@ -145,8 +146,9 @@ namespace QueryStageFetch {
class FetchStageFilter : public QueryStageFetchBase {
public:
void run() {
- Client::WriteContext ctx(ns());
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, ns());
+
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
diff --git a/src/mongo/dbtests/query_stage_keep.cpp b/src/mongo/dbtests/query_stage_keep.cpp
index c030387ff09..5ea488a37f2 100644
--- a/src/mongo/dbtests/query_stage_keep.cpp
+++ b/src/mongo/dbtests/query_stage_keep.cpp
@@ -104,8 +104,9 @@ namespace QueryStageKeep {
class KeepStageBasic : public QueryStageKeepBase {
public:
void run() {
- Client::WriteContext ctx(ns());
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, ns());
+
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 6ddf69290e5..43319528bb2 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -50,7 +50,7 @@ namespace QueryStageMergeSortTests {
QueryStageMergeSortTestBase() { }
virtual ~QueryStageMergeSortTestBase() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
_client.dropCollection(ns());
}
@@ -95,23 +95,25 @@ namespace QueryStageMergeSortTests {
}
static const char* ns() { return "unittests.QueryStageMergeSort"; }
+
+ protected:
+ OperationContextImpl _txn;
+
private:
- static DBDirectClient _client;
+ DBDirectClient _client;
};
- DBDirectClient QueryStageMergeSortTestBase::_client;
-
// SERVER-1205:
// find($or[{a:1}, {b:1}]).sort({c:1}) with indices {a:1, c:1} and {b:1, c:1}.
class QueryStageMergeSortPrefixIndex : public QueryStageMergeSortTestBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
const int N = 50;
@@ -170,12 +172,12 @@ namespace QueryStageMergeSortTests {
class QueryStageMergeSortDups : public QueryStageMergeSortTestBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
const int N = 50;
@@ -233,12 +235,12 @@ namespace QueryStageMergeSortTests {
class QueryStageMergeSortDupsNoDedup : public QueryStageMergeSortTestBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
const int N = 50;
@@ -297,12 +299,12 @@ namespace QueryStageMergeSortTests {
class QueryStageMergeSortPrefixIndexReverse : public QueryStageMergeSortTestBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
const int N = 50;
@@ -362,12 +364,12 @@ namespace QueryStageMergeSortTests {
class QueryStageMergeSortOneStageEOF : public QueryStageMergeSortTestBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
const int N = 50;
@@ -425,12 +427,12 @@ namespace QueryStageMergeSortTests {
class QueryStageMergeSortManyShort : public QueryStageMergeSortTestBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
WorkingSet* ws = new WorkingSet();
@@ -478,12 +480,12 @@ namespace QueryStageMergeSortTests {
class QueryStageMergeSortInvalidation : public QueryStageMergeSortTestBase {
public:
void run() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
OperationContextImpl txn;
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
- coll = db->createCollection(&txn, ns());
+ coll = db->createCollection(&_txn, ns());
}
WorkingSet ws;
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index f0605a7abbc..2440f67948f 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -169,10 +169,9 @@ namespace QueryStageSortTests {
static const char* ns() { return "unittests.QueryStageSort"; }
private:
- static DBDirectClient _client;
+ DBDirectClient _client;
};
- DBDirectClient QueryStageSortTestBase::_client;
// Sort some small # of results in increasing order.
class QueryStageSortInc: public QueryStageSortTestBase {
@@ -180,8 +179,9 @@ namespace QueryStageSortTests {
virtual int numObj() { return 100; }
void run() {
- Client::WriteContext ctx(ns());
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, ns());
+
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
@@ -199,8 +199,9 @@ namespace QueryStageSortTests {
virtual int numObj() { return 100; }
void run() {
- Client::WriteContext ctx(ns());
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, ns());
+
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
@@ -227,8 +228,9 @@ namespace QueryStageSortTests {
virtual int numObj() { return 10000; }
void run() {
- Client::WriteContext ctx(ns());
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, ns());
+
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
@@ -246,8 +248,9 @@ namespace QueryStageSortTests {
virtual int numObj() { return 2000; }
void run() {
- Client::WriteContext ctx(ns());
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, ns());
+
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
@@ -336,8 +339,9 @@ namespace QueryStageSortTests {
virtual int numObj() { return 100; }
void run() {
- Client::WriteContext ctx(ns());
OperationContextImpl txn;
+ Client::WriteContext ctx(&txn, ns());
+
Database* db = ctx.ctx().db();
Collection* coll = db->getCollection(ns());
if (!coll) {
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index 3bf9b0ca31f..7e0b0f20c6e 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/instance.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/dbtests/dbtests.h"
@@ -46,7 +47,7 @@ namespace QueryStageTests {
class IndexScanBase {
public:
IndexScanBase() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
for (int i = 0; i < numObj(); ++i) {
BSONObjBuilder bob;
@@ -61,17 +62,17 @@ namespace QueryStageTests {
}
virtual ~IndexScanBase() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
_client.dropCollection(ns());
}
void addIndex(const BSONObj& obj) {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
_client.ensureIndex(ns(), obj);
}
int countResults(const IndexScanParams& params, BSONObj filterObj = BSONObj()) {
- Client::ReadContext ctx(ns());
+ Client::ReadContext ctx(&_txn, ns());
StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj);
verify(swme.isOK());
@@ -91,7 +92,7 @@ namespace QueryStageTests {
}
void makeGeoData() {
- Client::WriteContext ctx(ns());
+ Client::WriteContext ctx(&_txn, ns());
for (int i = 0; i < numObj(); ++i) {
double lat = double(rand()) / RAND_MAX;
@@ -101,7 +102,7 @@ namespace QueryStageTests {
}
IndexDescriptor* getIndex(const BSONObj& obj) {
- Client::ReadContext ctx(ns());
+ Client::ReadContext ctx(&_txn, ns());
Collection* collection = ctx.ctx().db()->getCollection( ns() );
return collection->getIndexCatalog()->findIndexByKeyPattern( obj );
}
@@ -109,12 +110,13 @@ namespace QueryStageTests {
static int numObj() { return 50; }
static const char* ns() { return "unittests.IndexScan"; }
+ protected:
+ OperationContextImpl _txn;
+
private:
- static DBDirectClient _client;
+ DBDirectClient _client;
};
- DBDirectClient IndexScanBase::_client;
-
class QueryStageIXScanBasic : public IndexScanBase {
public:
virtual ~QueryStageIXScanBasic() { }
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 0f834d1d50c..0e73c01e3b5 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -157,14 +157,13 @@ namespace QueryTests {
// an empty object (one might be allowed inside a reserved namespace at some point).
Lock::GlobalWrite lk;
Client::Context ctx( "unittests.querytests" );
- OperationContextImpl txn;
Database* db = ctx.db();
if ( db->getCollection( ns() ) ) {
_collection = NULL;
- db->dropCollection( &txn, ns() );
+ db->dropCollection( &_txn, ns() );
}
- _collection = db->createCollection( &txn, ns(), CollectionOptions(), true, false );
+ _collection = db->createCollection( &_txn, ns(), CollectionOptions(), true, false );
ASSERT( _collection );
DBDirectClient cl;
@@ -189,21 +188,25 @@ namespace QueryTests {
~ClientBase() {
//mongo::lastError.release();
}
+
protected:
- static void insert( const char *ns, BSONObj o ) {
+ void insert( const char *ns, BSONObj o ) {
client_.insert( ns, o );
}
- static void update( const char *ns, BSONObj q, BSONObj o, bool upsert = 0 ) {
+ void update( const char *ns, BSONObj q, BSONObj o, bool upsert = 0 ) {
client_.update( ns, Query( q ), o, upsert );
}
- static bool error() {
+ bool error() {
return !client_.getPrevError().getField( "err" ).isNull();
}
- DBDirectClient &client() const { return client_; }
- static DBDirectClient client_;
+ const DBDirectClient& client() const { return client_; }
+ DBDirectClient& client() { return client_; }
+
+ DBDirectClient client_;
+
+ OperationContextImpl _txn;
};
- DBDirectClient ClientBase::client_;
class BoundedKey : public ClientBase {
public:
@@ -239,7 +242,7 @@ namespace QueryTests {
{
// Check internal server handoff to getmore.
- Lock::DBWrite lk(ns);
+ Lock::DBWrite lk(_txn.lockState(), ns);
Client::Context ctx( ns );
ClientCursorPin clientCursor( ctx.db()->getCollection(ns), cursorId );
// pq doesn't exist if it's a runner inside of the clientcursor.
@@ -252,6 +255,9 @@ namespace QueryTests {
ASSERT( cursor->more() );
ASSERT_EQUALS( 3, cursor->next().getIntField( "a" ) );
}
+
+ protected:
+ OperationContextImpl _txn;
};
/**
@@ -294,10 +300,11 @@ namespace QueryTests {
// Check that the cursor has been removed.
{
- Client::ReadContext ctx( ns );
+ Client::ReadContext ctx(&_txn, ns);
ASSERT( 0 == ctx.ctx().db()->getCollection( ns )->cursorCache()->numCursors() );
}
- ASSERT_FALSE( CollectionCursorCache::eraseCursorGlobal( cursorId ) );
+
+ ASSERT_FALSE(CollectionCursorCache::eraseCursorGlobal(&_txn, cursorId));
// Check that a subsequent get more fails with the cursor removed.
ASSERT_THROWS( client().getMore( ns, cursorId ), UserException );
@@ -343,7 +350,7 @@ namespace QueryTests {
// Check that the cursor still exists
{
- Client::ReadContext ctx( ns );
+ Client::ReadContext ctx(&_txn, ns);
ASSERT( 1 == ctx.ctx().db()->getCollection( ns )->cursorCache()->numCursors() );
ASSERT( ctx.ctx().db()->getCollection( ns )->cursorCache()->find( cursorId, false ) );
}
@@ -583,7 +590,7 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.OplogReplaySlaveReadTill";
- Lock::DBWrite lk(ns);
+ Lock::DBWrite lk(_txn.lockState(), ns);
Client::Context ctx( ns );
BSONObj info;
@@ -654,7 +661,7 @@ namespace QueryTests {
count( 2 );
}
private:
- void count( unsigned long long c ) const {
+ void count( unsigned long long c ) {
ASSERT_EQUALS( c, client().count( "unittests.querytests.BasicCount", BSON( "a" << 4 ) ) );
}
};
@@ -749,8 +756,8 @@ namespace QueryTests {
}
static const char *ns() { return "unittests.querytests.AutoResetIndexCache"; }
static const char *idxNs() { return "unittests.system.indexes"; }
- void index() const { ASSERT( !client().findOne( idxNs(), BSON( "name" << NE << "_id_" ) ).isEmpty() ); }
- void noIndex() const {
+ void index() { ASSERT( !client().findOne( idxNs(), BSON( "name" << NE << "_id_" ) ).isEmpty() ); }
+ void noIndex() {
BSONObj o = client().findOne( idxNs(), BSON( "name" << NE << "_id_" ) );
if( !o.isEmpty() ) {
cout << o.toString() << endl;
@@ -1130,7 +1137,7 @@ namespace QueryTests {
}
size_t numCursorsOpen() {
- Client::ReadContext ctx( _ns );
+ Client::ReadContext ctx(&_txn, _ns);
Collection* collection = ctx.ctx().db()->getCollection( _ns );
if ( !collection )
return 0;
@@ -1172,16 +1179,13 @@ namespace QueryTests {
}
void run() {
string err;
-
- Client::WriteContext ctx( "unittests" );
- OperationContextImpl txn;
+ Client::WriteContext ctx(&_txn, "unittests" );
// note that extents are always at least 4KB now - so this will get rounded up a bit.
- ASSERT( userCreateNS( &txn, ctx.ctx().db(), ns(),
+ ASSERT( userCreateNS( &_txn, ctx.ctx().db(), ns(),
fromjson( "{ capped : true, size : 2000 }" ), false ).isOK() );
for ( int i=0; i<200; i++ ) {
insertNext();
-// cout << count() << endl;
ASSERT( count() < 90 );
}
@@ -1224,7 +1228,7 @@ namespace QueryTests {
}
void run() {
- Client::WriteContext ctx( "unittests" );
+ Client::WriteContext ctx(&_txn, "unittests" );
for ( int i=0; i<50; i++ ) {
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
@@ -1275,7 +1279,7 @@ namespace QueryTests {
}
void run() {
- Client::WriteContext ctx( "unittests" );
+ Client::WriteContext ctx(&_txn, "unittests" );
for ( int i=0; i<1000; i++ ) {
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
@@ -1298,7 +1302,7 @@ namespace QueryTests {
}
void run() {
- Client::WriteContext ctx( "unittests" );
+ Client::WriteContext ctx(&_txn, "unittests" );
for ( int i=0; i<1000; i++ ) {
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
@@ -1414,7 +1418,7 @@ namespace QueryTests {
public:
CollectionInternalBase( const char *nsLeaf ) :
CollectionBase( nsLeaf ),
- _lk( ns() ),
+ _lk(_txn.lockState(), ns() ),
_ctx( ns() ) {
}
private:
@@ -1439,8 +1443,7 @@ namespace QueryTests {
DbMessage dbMessage( message );
QueryMessage queryMessage( dbMessage );
Message result;
- OperationContextImpl txn;
- string exhaust = newRunQuery( &txn, message, queryMessage, *cc().curop(), result );
+ string exhaust = newRunQuery( &_txn, message, queryMessage, *cc().curop(), result );
ASSERT( exhaust.size() );
ASSERT_EQUALS( string( ns() ), exhaust );
}
@@ -1459,7 +1462,7 @@ namespace QueryTests {
ClientCursor *clientCursor = 0;
{
- Client::ReadContext ctx( ns() );
+ Client::ReadContext ctx(&_txn, ns());
ClientCursorPin clientCursorPointer( ctx.ctx().db()->getCollection( ns() ),
cursorId );
clientCursor = clientCursorPointer.c();
@@ -1497,10 +1500,11 @@ namespace QueryTests {
long long cursorId = cursor->getCursorId();
{
- Client::WriteContext ctx( ns() );
+ Client::WriteContext ctx(&_txn, ns() );
ClientCursorPin pinCursor( ctx.ctx().db()->getCollection( ns() ), cursorId );
-
- ASSERT_THROWS( client().killCursor( cursorId ), MsgAssertionException );
+
+ ASSERT_THROWS(CollectionCursorCache::eraseCursorGlobal(&_txn, cursorId),
+ MsgAssertionException);
string expectedAssertion =
str::stream() << "Cannot kill active cursor " << cursorId;
ASSERT_EQUALS( expectedAssertion, client().getLastError() );
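The querytests changes follow the same shape as the stage tests: ClientBase drops its static DBDirectClient, its helpers stop being static, and a protected OperationContextImpl _txn is added so locks and cursor-cache calls can name their context explicitly. A small hedged sketch of a test written against the revised base class (the test name and documents are illustrative):

    class ExampleQueryTest : public ClientBase {
    public:
        void run() {
            insert(ns(), BSON("a" << 4));                     // helpers are instance methods now
            update(ns(), BSON("a" << 4), BSON("a" << 5));
            ASSERT(!error());

            // Locks taken directly by a test are built from the fixture's context.
            Lock::DBWrite lk(_txn.lockState(), ns());
            Client::Context ctx(ns());
        }
    private:
        static const char* ns() { return "unittests.querytests.Example"; }
    };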
diff --git a/src/mongo/dbtests/replsettests.cpp b/src/mongo/dbtests/replsettests.cpp
index fb2a2afdd0a..deb82f41e17 100644
--- a/src/mongo/dbtests/replsettests.cpp
+++ b/src/mongo/dbtests/replsettests.cpp
@@ -147,9 +147,10 @@ namespace ReplSetTests {
DBDirectClient *client() const { return &client_; }
static void insert( const BSONObj &o, bool god = false ) {
- Lock::DBWrite lk(ns());
- Client::Context ctx(ns());
OperationContextImpl txn;
+ Lock::DBWrite lk(txn.lockState(), ns());
+ Client::Context ctx(ns());
+
Database* db = ctx.db();
Collection* coll = db->getCollection(ns());
if (!coll) {
@@ -174,8 +175,8 @@ namespace ReplSetTests {
}
void drop() {
- Client::WriteContext c(ns());
OperationContextImpl txn;
+ Client::WriteContext c(&txn, ns());
Database* db = c.ctx().db();
@@ -306,6 +307,8 @@ namespace ReplSetTests {
class CappedInitialSync : public Base {
string _cappedNs;
+
+ OperationContextImpl _txn;
Lock::DBWrite _lk;
string spec() const {
@@ -342,7 +345,8 @@ namespace ReplSetTests {
return o;
}
public:
- CappedInitialSync() : _cappedNs("unittests.foo.bar"), _lk(_cappedNs) {
+ CappedInitialSync() :
+ _cappedNs("unittests.foo.bar"), _lk(_txn.lockState(), _cappedNs) {
dropCapped();
create();
}
@@ -363,7 +367,8 @@ namespace ReplSetTests {
}
void run() {
- Lock::DBWrite lk(_cappedNs);
+ OperationContextImpl txn;
+ Lock::DBWrite lk(txn.lockState(), _cappedNs);
BSONObj op = updateFail();
diff --git a/src/mongo/dbtests/runner_registry.cpp b/src/mongo/dbtests/runner_registry.cpp
index 8881ff2d4f3..b29088e36d8 100644
--- a/src/mongo/dbtests/runner_registry.cpp
+++ b/src/mongo/dbtests/runner_registry.cpp
@@ -51,7 +51,7 @@ namespace RunnerRegistry {
class RunnerRegistryBase {
public:
RunnerRegistryBase() {
- _ctx.reset(new Client::WriteContext(ns()));
+ _ctx.reset(new Client::WriteContext(&_opCtx, ns()));
_client.dropCollection(ns());
for (int i = 0; i < N(); ++i) {
@@ -269,7 +269,7 @@ namespace RunnerRegistry {
// requires a "global write lock."
_ctx.reset();
_client.dropDatabase("somesillydb");
- _ctx.reset(new Client::WriteContext(ns()));
+ _ctx.reset(new Client::WriteContext(&_opCtx, ns()));
// Unregister and restore state.
deregisterRunner(run.get());
@@ -285,7 +285,7 @@ namespace RunnerRegistry {
// Drop our DB. Once again, must give up the lock.
_ctx.reset();
_client.dropDatabase("unittests");
- _ctx.reset(new Client::WriteContext(ns()));
+ _ctx.reset(new Client::WriteContext(&_opCtx, ns()));
// Unregister and restore state.
deregisterRunner(run.get());
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index b5537364525..96e6e0df7b4 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -35,6 +35,7 @@
#include "mongo/bson/util/atomic_int.h"
#include "mongo/db/d_concurrency.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/stdx/functional.h"
@@ -115,6 +116,8 @@ namespace ThreadedTests {
}
virtual void subthread(int tnumber) {
Client::initThread("mongomutextest");
+ LockState lockState;
+
sleepmillis(0);
for( int i = 0; i < N; i++ ) {
int x = std::rand();
@@ -169,13 +172,13 @@ namespace ThreadedTests {
}
else if( i % 7 == 5 ) {
{
- Lock::DBRead r("foo");
+ Lock::DBRead r(&lockState, "foo");
if( sometimes ) {
Lock::TempRelease t;
}
}
{
- Lock::DBRead r("bar");
+ Lock::DBRead r(&lockState, "bar");
}
}
else if( i % 7 == 6 ) {
@@ -183,13 +186,13 @@ namespace ThreadedTests {
int q = i % 11;
if( q == 0 ) {
char what = 'r';
- Lock::DBRead r("foo");
+ Lock::DBRead r(&lockState, "foo");
ASSERT( Lock::isLocked() == what && Lock::atLeastReadLocked("foo") );
ASSERT( !Lock::nested() );
- Lock::DBRead r2("foo");
+ Lock::DBRead r2(&lockState, "foo");
ASSERT( Lock::nested() );
ASSERT( Lock::isLocked() == what && Lock::atLeastReadLocked("foo") );
- Lock::DBRead r3("local");
+ Lock::DBRead r3(&lockState, "local");
if( sometimes ) {
Lock::TempRelease t;
}
@@ -199,41 +202,48 @@ namespace ThreadedTests {
else if( q == 1 ) {
// test locking local only -- with no preceeding lock
{
- Lock::DBRead x("local");
+ Lock::DBRead x(&lockState, "local");
//Lock::DBRead y("q");
if( sometimes ) {
Lock::TempRelease t; // we don't temprelease (cant=true) here thus this is just a check that nothing weird happens...
}
}
- {
- Lock::DBWrite x("local");
+ {
+ OperationContextImpl txn;
+ Lock::DBWrite x(txn.lockState(), "local");
if( sometimes ) {
Lock::TempRelease t;
}
}
} else if( q == 1 ) {
- { Lock::DBRead x("admin"); }
- { Lock::DBWrite x("admin"); }
+ { Lock::DBRead x(&lockState, "admin"); }
+ {
+ OperationContextImpl txn;
+ Lock::DBWrite x(txn.lockState(), "admin");
+ }
} else if( q == 2 ) {
/*Lock::DBWrite x("foo");
Lock::DBWrite y("admin");
{ Lock::TempRelease t; }*/
}
else if( q == 3 ) {
- Lock::DBWrite x("foo");
- Lock::DBRead y("admin");
+ OperationContextImpl txn;
+ Lock::DBWrite x(txn.lockState(), "foo");
+ Lock::DBRead y(&lockState, "admin");
{ Lock::TempRelease t; }
}
else if( q == 4 ) {
- Lock::DBRead x("foo2");
- Lock::DBRead y("admin");
+ Lock::DBRead x(&lockState, "foo2");
+ Lock::DBRead y(&lockState, "admin");
{ Lock::TempRelease t; }
}
else if ( q > 4 && q < 8 ) {
static const char * const dbnames[] = {
"bar0", "bar1", "bar2", "bar3", "bar4", "bar5",
"bar6", "bar7", "bar8", "bar9", "bar10" };
- Lock::DBWrite w(dbnames[q]);
+
+ OperationContextImpl txn;
+ Lock::DBWrite w(txn.lockState(), dbnames[q]);
{
Lock::UpgradeGlobalLockToExclusive wToX;
if (wToX.gotUpgrade()) {
@@ -245,21 +255,24 @@ namespace ThreadedTests {
}
}
else {
- Lock::DBWrite w("foo");
+ OperationContextImpl txn;
+ Lock::DBWrite w(txn.lockState(), "foo");
+
{
Lock::TempRelease t;
}
- Lock::DBRead r2("foo");
- Lock::DBRead r3("local");
+
+ Lock::DBRead r2(&lockState, "foo");
+ Lock::DBRead r3(&lockState, "local");
if( sometimes ) {
Lock::TempRelease t;
}
}
}
else {
- Lock::DBRead r("foo");
- Lock::DBRead r2("foo");
- Lock::DBRead r3("local");
+ Lock::DBRead r(&lockState, "foo");
+ Lock::DBRead r2(&lockState, "foo");
+ Lock::DBRead r3(&lockState, "local");
}
}
pm.hit();
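The threadedtests churn above boils down to the two new lock constructors: Lock::DBRead now takes an explicit LockState*, while Lock::DBWrite takes the LockState owned by an OperationContext. A minimal sketch of the two forms side by side (the function and variable names are local to this example):

    void lockExamples() {
        LockState lockState;                          // explicit lock state, previously implicit via TLS
        {
            Lock::DBRead r(&lockState, "foo");        // was: Lock::DBRead r("foo")
        }
        {
            OperationContextImpl txn;                 // write locks draw their LockState from a context
            Lock::DBWrite w(txn.lockState(), "foo");  // was: Lock::DBWrite w("foo")
        }
    }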
diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp
index 32e20bd9d01..e89997330cf 100644
--- a/src/mongo/dbtests/updatetests.cpp
+++ b/src/mongo/dbtests/updatetests.cpp
@@ -1060,14 +1060,11 @@ namespace UpdateTests {
BSONObj result;
BSONObj expected;
- switch ( i ) {
- default:
- client().update( ns(), Query(), getUpdate(i) );
- result = client().findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[]}" );
- ASSERT_EQUALS( result, expected );
- break;
- }
+
+ client().update( ns(), Query(), getUpdate(i) );
+ result = client().findOne( ns(), Query() );
+ expected = fromjson( "{'_id':0,x:[]}" );
+ ASSERT_EQUALS( result, expected );
}
}
};
@@ -1092,14 +1089,11 @@ namespace UpdateTests {
BSONObj result;
BSONObj expected;
- switch ( i ) {
- default:
- client().update( ns(), Query(), getUpdate(i) );
- result = client().findOne( ns(), Query() );
- expected = fromjson( "{'_id':0,x:[]}" );
- ASSERT_EQUALS( result, expected );
- break;
- }
+
+ client().update( ns(), Query(), getUpdate(i) );
+ result = client().findOne( ns(), Query() );
+ expected = fromjson( "{'_id':0,x:[]}" );
+ ASSERT_EQUALS( result, expected );
}
}
};
diff --git a/src/mongo/s/commands/auth_schema_upgrade_s.cpp b/src/mongo/s/commands/auth_schema_upgrade_s.cpp
index 144d5c80380..41f569815b1 100644
--- a/src/mongo/s/commands/auth_schema_upgrade_s.cpp
+++ b/src/mongo/s/commands/auth_schema_upgrade_s.cpp
@@ -157,7 +157,7 @@ namespace {
return appendCommandStatus(result, status);
}
- status = authzManager->upgradeSchema(maxSteps, writeConcern);
+ status = authzManager->upgradeSchema(txn, maxSteps, writeConcern);
if (!status.isOK())
return appendCommandStatus(result, status);
diff --git a/src/mongo/s/d_merge.cpp b/src/mongo/s/d_merge.cpp
index 748327e7d57..0d0f11c6469 100644
--- a/src/mongo/s/d_merge.cpp
+++ b/src/mongo/s/d_merge.cpp
@@ -28,6 +28,7 @@
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/d_concurrency.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/namespace_string.h"
#include "mongo/s/d_logic.h"
#include "mongo/s/distlock.h"
@@ -290,7 +291,8 @@ namespace mongo {
//
{
- Lock::DBWrite writeLk( nss.ns() );
+ OperationContextImpl txn; // XXX?
+ Lock::DBWrite writeLk(txn.lockState(), nss.ns());
shardingState.mergeChunks( nss.ns(), minKey, maxKey, mergeVersion );
}
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 81c1941c8d3..b041b4d4e5a 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -369,7 +369,7 @@ namespace mongo {
* called from the dest of a migrate
* transfers mods from src to dest
*/
- bool transferMods( string& errmsg , BSONObjBuilder& b ) {
+ bool transferMods(OperationContext* txn, string& errmsg, BSONObjBuilder& b) {
if ( ! _getActive() ) {
errmsg = "no active migration!";
return false;
@@ -378,7 +378,7 @@ namespace mongo {
long long size = 0;
{
- Client::ReadContext cx( _ns );
+ Client::ReadContext cx(txn, _ns);
xfer( cx.ctx().db(), &_deleted, b, "deleted", size, false );
xfer( cx.ctx().db(), &_reload, b, "reload", size, true );
@@ -396,8 +396,11 @@ namespace mongo {
* @param errmsg filled with textual description of error if this call return false
* @return false if approximate chunk size is too big to move or true otherwise
*/
- bool storeCurrentLocs( long long maxChunkSize , string& errmsg , BSONObjBuilder& result ) {
- Client::ReadContext ctx( _ns );
+ bool storeCurrentLocs(OperationContext* txn,
+ long long maxChunkSize,
+ string& errmsg,
+ BSONObjBuilder& result ) {
+ Client::ReadContext ctx(txn, _ns);
Collection* collection = ctx.ctx().db()->getCollection( _ns );
if ( !collection ) {
errmsg = "ns not found, should be impossible";
@@ -405,7 +408,7 @@ namespace mongo {
}
invariant( _dummyRunner.get() == NULL );
- _dummyRunner.reset( new DummyRunner( _ns, collection ) );
+ _dummyRunner.reset(new DummyRunner(txn, _ns, collection));
// Allow multiKey based on the invariant that shard keys must be single-valued.
// Therefore, any multi-key index prefixed by shard key cannot be multikey over
@@ -477,7 +480,7 @@ namespace mongo {
return true;
}
- bool clone( string& errmsg , BSONObjBuilder& result ) {
+ bool clone(OperationContext* txn, string& errmsg , BSONObjBuilder& result ) {
if ( ! _getActive() ) {
errmsg = "not active";
return false;
@@ -487,7 +490,7 @@ namespace mongo {
int allocSize;
{
- Client::ReadContext ctx( _ns );
+ Client::ReadContext ctx(txn, _ns);
Collection* collection = ctx.ctx().db()->getCollection( _ns );
verify( collection );
scoped_spinlock lk( _trackerLocks );
@@ -500,7 +503,7 @@ namespace mongo {
while ( 1 ) {
bool filledBuffer = false;
- Client::ReadContext ctx( _ns );
+ Client::ReadContext ctx(txn, _ns);
Collection* collection = ctx.ctx().db()->getCollection( _ns );
scoped_spinlock lk( _trackerLocks );
@@ -614,16 +617,18 @@ namespace mongo {
class DummyRunner : public Runner {
public:
- DummyRunner( const StringData& ns,
- Collection* collection ) {
+ DummyRunner(OperationContext* txn,
+ const StringData& ns,
+ Collection* collection ) {
_ns = ns.toString();
+ _txn = txn;
_collection = collection;
_collection->cursorCache()->registerRunner( this );
}
~DummyRunner() {
if ( !_collection )
return;
- Client::ReadContext ctx( _ns );
+ Client::ReadContext ctx(_txn, _ns);
Collection* collection = ctx.ctx().db()->getCollection( _ns );
invariant( _collection == collection );
_collection->cursorCache()->deregisterRunner( this );
@@ -658,6 +663,7 @@ namespace mongo {
private:
string _ns;
+ OperationContext* _txn;
Collection* _collection;
};
@@ -713,7 +719,7 @@ namespace mongo {
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
bool run(OperationContext* txn, const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
- return migrateFromStatus.transferMods( errmsg, result );
+ return migrateFromStatus.transferMods(txn, errmsg, result);
}
} transferModsCommand;
@@ -730,7 +736,7 @@ namespace mongo {
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
bool run(OperationContext* txn, const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
- return migrateFromStatus.clone( errmsg, result );
+ return migrateFromStatus.clone(txn, errmsg, result);
}
} initialCloneCommand;
@@ -1042,8 +1048,9 @@ namespace mongo {
{
// this gets a read lock, so we know we have a checkpoint for mods
- if ( ! migrateFromStatus.storeCurrentLocs( maxChunkSize , errmsg , result ) )
+ if (!migrateFromStatus.storeCurrentLocs(txn, maxChunkSize, errmsg, result)) {
return false;
+ }
ScopedDbConnection connTo(toShard.getConnString());
BSONObj res;
@@ -1204,7 +1211,7 @@ namespace mongo {
myVersion.incMajor();
{
- Lock::DBWrite lk( ns );
+ Lock::DBWrite lk(txn->lockState(), ns );
verify( myVersion > shardingState.getVersion( ns ) );
// bump the metadata's version up and "forget" about the chunk being moved
@@ -1587,7 +1594,7 @@ namespace mongo {
if ( state != DONE ) {
// Unprotect the range if needed/possible on unsuccessful TO migration
- Lock::DBWrite lk( ns );
+ Lock::DBWrite lk(txn->lockState(), ns);
string errMsg;
if ( !shardingState.forgetPending( ns, min, max, epoch, &errMsg ) ) {
warning() << errMsg << endl;
@@ -1618,7 +1625,7 @@ namespace mongo {
{
// 0. copy system.namespaces entry if collection doesn't already exist
- Client::WriteContext ctx( ns );
+ Client::WriteContext ctx(txn, ns );
// Only copy if ns doesn't already exist
Database* db = ctx.ctx().db();
Collection* collection = db->getCollection( ns );
@@ -1653,7 +1660,7 @@ namespace mongo {
for ( unsigned i=0; i<all.size(); i++ ) {
BSONObj idx = all[i];
- Client::WriteContext ctx( ns );
+ Client::WriteContext ctx(txn, ns );
Database* db = ctx.ctx().db();
Collection* collection = db->getCollection( txn, ns );
if ( !collection ) {
@@ -1703,7 +1710,7 @@ namespace mongo {
{
// Protect the range by noting that we're now starting a migration to it
- Lock::DBWrite lk( ns );
+ Lock::DBWrite lk(txn->lockState(), ns);
if ( !shardingState.notePending( ns, min, max, epoch, &errmsg ) ) {
warning() << errmsg << endl;
state = FAIL;
@@ -1749,7 +1756,7 @@ namespace mongo {
while( i.more() ) {
BSONObj o = i.next().Obj();
{
- Client::WriteContext cx( ns );
+ Client::WriteContext cx(txn, ns );
BSONObj localDoc;
if ( willOverrideLocalId( cx.ctx().db(), o, &localDoc ) ) {
@@ -1944,7 +1951,7 @@ namespace mongo {
BSONObjIterator i( xfer["deleted"].Obj() );
while ( i.more() ) {
- Client::WriteContext cx(ns);
+ Client::WriteContext cx(txn, ns);
BSONObj id = i.next().Obj();
@@ -1979,7 +1986,7 @@ namespace mongo {
if ( xfer["reload"].isABSONObj() ) {
BSONObjIterator i( xfer["reload"].Obj() );
while ( i.more() ) {
- Client::WriteContext cx(ns);
+ Client::WriteContext cx(txn, ns);
BSONObj it = i.next().Obj();
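In d_migrate.cpp the interesting part is the plumbing: helpers such as transferMods(), storeCurrentLocs() and clone() now accept an OperationContext*, and the commands that already receive txn in run() simply forward it, so the ReadContext/WriteContext inside each helper uses the caller's context instead of TLS. A hedged sketch of that pattern (MigrateExampleStatus and transferSomething are illustrative, not names from this patch):

    class MigrateExampleStatus {
    public:
        bool transferSomething(OperationContext* txn, const string& ns, BSONObjBuilder& b) {
            Client::ReadContext cx(txn, ns);          // the caller's context, no TLS lookup
            Collection* collection = cx.ctx().db()->getCollection(ns);
            b.append("found", collection != NULL);
            return true;
        }
    };

    // From a command's run(OperationContext* txn, ...):
    //     return exampleStatus.transferSomething(txn, ns, result);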
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 04e17d553e3..7bb1d4d82d7 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -119,7 +119,7 @@ namespace mongo {
return false;
}
- Client::ReadContext ctx( ns );
+ Client::ReadContext ctx(txn, ns);
Collection* collection = ctx.ctx().db()->getCollection( ns );
if ( !collection ) {
errmsg = "ns not found";
@@ -275,7 +275,7 @@ namespace mongo {
{
// Get the size estimate for this namespace
- Client::ReadContext ctx( ns );
+ Client::ReadContext ctx(txn, ns);
Collection* collection = ctx.ctx().db()->getCollection( ns );
if ( !collection ) {
errmsg = "ns not found";
@@ -824,7 +824,7 @@ namespace mongo {
maxVersion.incMinor();
{
- Lock::DBWrite writeLk( ns );
+ Lock::DBWrite writeLk(txn->lockState(), ns);
shardingState.splitChunk( ns , min , max , splitKeys , maxVersion );
}
@@ -858,7 +858,7 @@ namespace mongo {
// If one of the chunks has only one object in it we should move it
for (int i=1; i >= 0 ; i--){ // high chunk more likely to have only one obj
- Client::ReadContext ctx( ns );
+ Client::ReadContext ctx(txn, ns);
Collection* collection = ctx.ctx().db()->getCollection( ns );
verify( collection );
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index c70ca52ea55..08560b9149a 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -48,6 +48,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/db.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/wire_version.h"
#include "mongo/db/repl/is_master.h"
#include "mongo/client/connpool.h"
@@ -60,7 +61,6 @@
#include "mongo/util/concurrency/mutex.h"
#include "mongo/util/concurrency/ticketholder.h"
-using namespace std;
namespace mongo {
@@ -558,7 +558,8 @@ namespace mongo {
{
// DBLock needed since we're now potentially changing the metadata, and don't want
// reads/writes to be ongoing.
- Lock::DBWrite writeLk( ns );
+ OperationContextImpl txn;
+ Lock::DBWrite writeLk(txn.lockState(), ns );
//
// Get the metadata now that the load has completed
@@ -1241,7 +1242,7 @@ namespace mongo {
}
bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
- Lock::DBWrite dbXLock(dbname);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(dbname);
shardingState.appendInfo( result );
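The two d_state.cpp hunks show the two situations this patch keeps running into: when no OperationContext exists yet (the metadata refresh path) a temporary OperationContextImpl is constructed purely to supply a LockState, while command entry points reuse the txn they already receive. Sketched side by side with illustrative function names:

    void refreshMetadataPath(const string& ns) {
        // No OperationContext in scope, so one is created just for its LockState.
        OperationContextImpl txn;
        Lock::DBWrite writeLk(txn.lockState(), ns);
        // ... swap in the freshly loaded metadata under the write lock ...
    }

    bool runShardingStateCommand(OperationContext* txn, const string& dbname) {
        // Commands already receive a context, so its LockState is reused directly.
        Lock::DBWrite dbXLock(txn->lockState(), dbname);
        Client::Context ctx(dbname);
        return true;
    }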
diff --git a/src/mongo/s/request.cpp b/src/mongo/s/request.cpp
index 73f891368d4..c4916fc2438 100644
--- a/src/mongo/s/request.cpp
+++ b/src/mongo/s/request.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/commands.h"
#include "mongo/db/dbmessage.h"
+#include "mongo/db/operation_context_noop.h"
#include "mongo/db/stats/counters.h"
#include "mongo/s/chunk.h"
#include "mongo/s/client_info.h"
@@ -52,6 +53,8 @@ namespace mongo {
verify( _d.getns() );
_id = _m.header()->id;
+ _txn.reset(new OperationContextNoop());
+
_clientInfo = ClientInfo::get();
if ( p ) {
_clientInfo->newPeerRequest( p->remote() );
@@ -66,7 +69,7 @@ namespace mongo {
return;
_didInit = true;
reset();
- _clientInfo->getAuthorizationSession()->startRequest();
+ _clientInfo->getAuthorizationSession()->startRequest(_txn.get());
}
// Deprecated, will move to the strategy itself
diff --git a/src/mongo/s/request.h b/src/mongo/s/request.h
index 3e494952005..7ca59139e66 100644
--- a/src/mongo/s/request.h
+++ b/src/mongo/s/request.h
@@ -41,6 +41,8 @@ namespace mongo {
class OpCounters;
class ClientInfo;
+ class OperationContext;
+
class Request : boost::noncopyable {
public:
@@ -93,6 +95,8 @@ namespace mongo {
OpCounters* _counter;
+ boost::scoped_ptr<OperationContext> _txn;
+
bool _didInit;
};
diff --git a/src/mongo/tools/dump.cpp b/src/mongo/tools/dump.cpp
index a7672e0b722..f699e76585e 100644
--- a/src/mongo/tools/dump.cpp
+++ b/src/mongo/tools/dump.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/catalog/database_catalog_entry.h"
#include "mongo/db/db.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/tools/mongodump_options.h"
#include "mongo/tools/tool.h"
@@ -252,7 +253,7 @@ public:
int repair() {
toolInfoLog() << "going to try and recover data from: " << toolGlobalParams.db << std::endl;
- return _repair(toolGlobalParams.db);
+ return _repairByName(toolGlobalParams.db);
}
void _repairExtents(Collection* coll, Writer& writer) {
@@ -330,9 +331,11 @@ public:
<< std::endl;
}
- int _repair( string dbname ) {
- Client::WriteContext cx( dbname );
- Database * db = cx.ctx().db();
+ int _repairByName(string dbname) {
+ OperationContextImpl txn;
+ Client::WriteContext cx(&txn, dbname);
+
+ Database* db = dbHolderUnchecked().get(dbname, storageGlobalParams.dbpath);
list<string> namespaces;
db->getDatabaseCatalogEntry()->getCollectionNamespaces( &namespaces );
diff --git a/src/mongo/util/admin_access.h b/src/mongo/util/admin_access.h
index 115004427b0..6f9834147df 100644
--- a/src/mongo/util/admin_access.h
+++ b/src/mongo/util/admin_access.h
@@ -36,6 +36,8 @@
namespace mongo {
+ class OperationContext;
+
/*
* An AdminAccess is an interface class used to determine if certain users have
* privileges to a given resource.
@@ -48,14 +50,14 @@ namespace mongo {
/** @return if there are any priviledge users. This should not
* block for long and throw if can't get a lock if needed.
*/
- virtual bool haveAdminUsers() const = 0;
+ virtual bool haveAdminUsers(OperationContext* txn) const = 0;
};
class NoAdminAccess : public AdminAccess {
public:
virtual ~NoAdminAccess() { }
- virtual bool haveAdminUsers() const { return false; }
+ virtual bool haveAdminUsers(OperationContext* txn) const { return false; }
};
} // namespace mongo
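Since AdminAccess::haveAdminUsers() now takes an OperationContext*, implementers have to update their overrides just as NoAdminAccess does above. A hedged example of an implementation against the new signature (AlwaysAdminAccess is hypothetical and only illustrates the override):

    class AlwaysAdminAccess : public AdminAccess {
    public:
        virtual ~AlwaysAdminAccess() { }

        virtual bool haveAdminUsers(OperationContext* txn) const {
            // A real implementation would consult the user catalog through txn;
            // this stub simply reports that privileged users exist.
            return true;
        }
    };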
diff --git a/src/mongo/util/net/ssl_options.cpp b/src/mongo/util/net/ssl_options.cpp
index 382513c7079..b651c95d07c 100644
--- a/src/mongo/util/net/ssl_options.cpp
+++ b/src/mongo/util/net/ssl_options.cpp
@@ -25,6 +25,8 @@
* then also delete it in the license file.
*/
+#include "mongo/platform/basic.h"
+
#include "mongo/util/net/ssl_options.h"
#include <boost/filesystem/operations.hpp>